index (int64: 0 to 731k) | package (string: 2 to 98 chars, ⌀ = nullable) | name (string: 1 to 76 chars) | docstring (string: 0 to 281k chars, ⌀) | code (string: 4 to 1.07M chars, ⌀) | signature (string: 2 to 42.8k chars, ⌀) |
|---|---|---|---|---|---|
722,213
|
openap.kinematic
|
descent_const_mach
|
Get speed during the constant Mach descent.
|
def descent_const_mach(self):
"""Get speed during the constant Mach descent."""
return self._get_var("de_v_mach_const")
|
(self)
|
722,214
|
openap.kinematic
|
descent_const_vcas
|
Get speed during the constant CAS descent.
|
def descent_const_vcas(self):
"""Get speed during the constant CAS descent."""
return self._get_var("de_v_cas_const")
|
(self)
|
722,215
|
openap.kinematic
|
descent_cross_alt_concas
|
Get crossover altitude from constant Mach to CAS descent.
|
def descent_cross_alt_concas(self):
"""Get crossover altitude from constant Mach to CAS descent."""
return self._get_var("de_h_cas_const")
|
(self)
|
722,216
|
openap.kinematic
|
descent_cross_alt_conmach
|
Get crossover altitude from constant Mach to CAS descent.
|
def descent_cross_alt_conmach(self):
"""Get crossover altitude from constant Mach to CAS descent."""
return self._get_var("de_h_mach_const")
|
(self)
|
722,217
|
openap.kinematic
|
descent_range
|
Get descent range.
|
def descent_range(self):
"""Get descent range."""
return self._get_var("de_d_range")
|
(self)
|
722,218
|
openap.kinematic
|
descent_vs_concas
|
Get vertical rate during constant CAS descent.
|
def descent_vs_concas(self):
"""Get vertical rate during constant CAS descent."""
return self._get_var("de_vs_avg_cas_const")
|
(self)
|
722,219
|
openap.kinematic
|
descent_vs_conmach
|
Get vertical rate during constant Mach descent.
|
def descent_vs_conmach(self):
"""Get vertical rate during constant Mach descent."""
return self._get_var("de_vs_avg_mach_const")
|
(self)
|
722,220
|
openap.kinematic
|
descent_vs_post_concas
|
Get vertical rate after constant CAS descent.
|
def descent_vs_post_concas(self):
"""Get vertical rate after constant CAS descent."""
return self._get_var("de_vs_avg_after_cas")
|
(self)
|
722,221
|
openap.kinematic
|
finalapp_vcas
|
Get CAS for final approach.
|
def finalapp_vcas(self):
"""Get CAS for final approach."""
return self._get_var("fa_va_avg")
|
(self)
|
722,222
|
openap.kinematic
|
finalapp_vs
|
Get vertical speed for final approach.
|
def finalapp_vs(self):
"""Get vertical speed for final approach."""
return self._get_var("fa_vs_avg")
|
(self)
|
722,223
|
openap.kinematic
|
initclimb_vcas
|
Get initial climb CAS.
|
def initclimb_vcas(self):
"""Get initial climb CAS."""
return self._get_var("ic_va_avg")
|
(self)
|
722,224
|
openap.kinematic
|
initclimb_vs
|
Get initial climb vertical rate.
|
def initclimb_vs(self):
"""Get initial climb vertical rate."""
return self._get_var("ic_vs_avg")
|
(self)
|
722,225
|
openap.kinematic
|
landing_acceleration
|
Get landing deceleration.
|
def landing_acceleration(self):
"""Get landing deceleration."""
return self._get_var("ld_acc_brk")
|
(self)
|
722,226
|
openap.kinematic
|
landing_distance
|
Get braking distance for landing.
|
def landing_distance(self):
"""Get breaking distance for landing."""
return self._get_var("ld_d_brk")
|
(self)
|
722,227
|
openap.kinematic
|
landing_speed
|
Get landing speed.
|
def landing_speed(self):
"""Get landing speed."""
return self._get_var("ld_v_app")
|
(self)
|
722,228
|
openap.kinematic
|
takeoff_acceleration
|
Get takeoff acceleration.
|
def takeoff_acceleration(self):
"""Get takeoff takeoff acceleration."""
return self._get_var("to_acc_tof")
|
(self)
|
722,229
|
openap.kinematic
|
takeoff_distance
|
Get takeoff distance.
|
def takeoff_distance(self):
"""Get takeoff takeoff distance."""
return self._get_var("to_d_tof")
|
(self)
|
722,230
|
openap.kinematic
|
takeoff_speed
|
Get takeoff speed.
|
def takeoff_speed(self):
"""Get takeoff speed."""
return self._get_var("to_v_lof")
|
(self)
|
722,246
|
jarowinkler
|
jaro_similarity
|
Calculates the Jaro similarity
Parameters
----------
s1 : Sequence[Hashable]
First string to compare.
s2 : Sequence[Hashable]
Second string to compare.
processor: callable, optional
Optional callable that is used to preprocess the strings before
comparing them. Default is None, which deactivates this behaviour.
score_cutoff : float, optional
Optional argument for a score threshold as a float between 0 and 1.0.
For ratio < score_cutoff 0 is returned instead. Default is 0,
which deactivates this behaviour.
Returns
-------
similarity : float
similarity between s1 and s2 as a float between 0 and 1.0
|
def jaro_similarity(s1, s2, *, processor=None, score_cutoff=None) -> float:
"""
Calculates the Jaro similarity
Parameters
----------
s1 : Sequence[Hashable]
First string to compare.
s2 : Sequence[Hashable]
Second string to compare.
processor: callable, optional
Optional callable that is used to preprocess the strings before
comparing them. Default is None, which deactivates this behaviour.
score_cutoff : float, optional
Optional argument for a score threshold as a float between 0 and 1.0.
For ratio < score_cutoff 0 is returned instead. Default is 0,
which deactivates this behaviour.
Returns
-------
similarity : float
similarity between s1 and s2 as a float between 0 and 1.0
"""
return _Jaro.similarity(s1, s2, processor=processor, score_cutoff=score_cutoff)
|
(s1, s2, *, processor=None, score_cutoff=None) -> float
|
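A quick usage sketch for the function above (assuming the `jarowinkler` package shown here is importable; the scores are the standard Jaro values for these strings):

```python
from jarowinkler import jaro_similarity

# Transposed characters still score highly under plain Jaro: 17/18 ~ 0.944.
assert round(jaro_similarity("martha", "marhta"), 3) == 0.944
# Ratios below score_cutoff collapse to 0.
assert jaro_similarity("abc", "xyz", score_cutoff=0.5) == 0
```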
722,247
|
jarowinkler
|
jarowinkler_similarity
|
Calculates the Jaro-Winkler similarity
Parameters
----------
s1 : Sequence[Hashable]
First string to compare.
s2 : Sequence[Hashable]
Second string to compare.
prefix_weight : float, optional
Weight used for the common prefix of the two strings.
Has to be between 0 and 0.25. Default is 0.1.
processor: callable, optional
Optional callable that is used to preprocess the strings before
comparing them. Default is None, which deactivates this behaviour.
score_cutoff : float, optional
Optional argument for a score threshold as a float between 0 and 1.0.
For ratio < score_cutoff 0 is returned instead. Default is 0,
which deactivates this behaviour.
Returns
-------
similarity : float
similarity between s1 and s2 as a float between 0 and 1.0
Raises
------
ValueError
If prefix_weight is invalid
|
def jarowinkler_similarity(s1, s2, *, prefix_weight=0.1, processor=None, score_cutoff=None) -> float:
"""
Calculates the Jaro-Winkler similarity
Parameters
----------
s1 : Sequence[Hashable]
First string to compare.
s2 : Sequence[Hashable]
Second string to compare.
prefix_weight : float, optional
Weight used for the common prefix of the two strings.
Has to be between 0 and 0.25. Default is 0.1.
processor: callable, optional
Optional callable that is used to preprocess the strings before
comparing them. Default is None, which deactivates this behaviour.
score_cutoff : float, optional
Optional argument for a score threshold as a float between 0 and 1.0.
For ratio < score_cutoff 0 is returned instead. Default is 0,
which deactivates this behaviour.
Returns
-------
similarity : float
similarity between s1 and s2 as a float between 0 and 1.0
Raises
------
ValueError
If prefix_weight is invalid
"""
return _JaroWinkler.similarity(
s1,
s2,
prefix_weight=prefix_weight,
processor=processor,
score_cutoff=score_cutoff,
)
|
(s1, s2, *, prefix_weight=0.1, processor=None, score_cutoff=None) -> float
|
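For comparison, a sketch of the Winkler variant above: the shared prefix "mar" lifts the plain Jaro score of ~0.944 via the standard formula jw = jaro + prefix_len * prefix_weight * (1 - jaro), assumed here to match this implementation:

```python
from jarowinkler import jarowinkler_similarity

score = jarowinkler_similarity("martha", "marhta")  # default prefix_weight=0.1
# 0.944 + 3 * 0.1 * (1 - 0.944) ~ 0.961
assert round(score, 3) == 0.961
```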
722,249
|
notifiers.core
|
all_providers
|
Returns a list of all :class:`~notifiers.core.Provider` names
|
def all_providers() -> list:
"""Returns a list of all :class:`~notifiers.core.Provider` names"""
return list(_all_providers.keys())
|
() -> list
|
722,252
|
notifiers.core
|
get_notifier
|
Convenience method to return an instantiated :class:`~notifiers.core.Provider` object according to its ``name``
:param provider_name: The ``name`` of the requested :class:`~notifiers.core.Provider`
:param strict: Raises a :class:`ValueError` if the given provider string was not found
:return: :class:`Provider` or None
:raises ValueError: In case ``strict`` is True and provider not found
|
def get_notifier(provider_name: str, strict: bool = False) -> Provider:
"""
Convenience method to return an instantiated :class:`~notifiers.core.Provider` object according to its ``name``
:param provider_name: The ``name`` of the requested :class:`~notifiers.core.Provider`
:param strict: Raises a :class:`ValueError` if the given provider string was not found
:return: :class:`Provider` or None
:raises ValueError: In case ``strict`` is True and provider not found
"""
if provider_name in _all_providers:
log.debug("found a match for '%s', returning", provider_name)
return _all_providers[provider_name]()
elif strict:
raise NoSuchNotifierError(name=provider_name)
|
(provider_name: str, strict: bool = False) -> notifiers.core.Provider
|
722,254
|
notifiers.core
|
notify
|
Quickly sends a notification without needing to get a notifier via the :func:`get_notifier` method.
:param provider_name: Name of the notifier to use. Note that if this notifier name does not exist, a :class:`~notifiers.exceptions.NoSuchNotifierError` will be raised
:param kwargs: Notification data, dependent on provider
:return: :class:`Response`
:raises: :class:`~notifiers.exceptions.NoSuchNotifierError` if ``provider_name`` is unknown
|
def notify(provider_name: str, **kwargs) -> Response:
"""
Quickly sends a notification without needing to get a notifier via the :func:`get_notifier` method.
:param provider_name: Name of the notifier to use. Note that if this notifier name does not exist, a :class:`~notifiers.exceptions.NoSuchNotifierError` will be raised
:param kwargs: Notification data, dependent on provider
:return: :class:`Response`
:raises: :class:`~notifiers.exceptions.NoSuchNotifierError` if ``provider_name`` is unknown
"""
return get_notifier(provider_name=provider_name, strict=True).notify(**kwargs)
|
(provider_name: str, **kwargs) -> notifiers.core.Response
|
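A hedged end-to-end sketch of the three notifiers.core entries above; the telegram provider and its kwargs are illustrative (each provider validates its own schema) and the token is a placeholder:

```python
from notifiers.core import all_providers, get_notifier  # module shown above

print(all_providers())            # e.g. ['telegram', 'slack', ...]
telegram = get_notifier("telegram")
if telegram is not None:
    # Equivalent one-shot call: notify('telegram', ...) with strict lookup.
    response = telegram.notify(message="build finished",
                               chat_id=12345, token="<bot-token>")
    response.raise_on_errors()
```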
722,257
|
2dwavesim.room
|
Room
| null |
class Room:
def __init__(self, ds, width, height,*, walls=[], physics_params={}):
'''Create a 'room' system, with parameters for simulation.
Params:
ds: (float) size of unit step in space
width: (float) width of room, rounded down to nearest multiple of ds
height: (float) height of room, rounded down to nearest multiple of ds
Keyword params:
walls: (Wall) List of wall objects containing position and transmission
data.
physics_params: (dict{str:float}) dictionary of physics params for the
system. Contains 'wavespeed': speed of waves in medium (float) and
'attenuation': attenuation of waves in medium (float).
'''
self.room_points = np.meshgrid(np.arange(0, width, ds), np.arange(0, height, ds))
self.mask_points = np.ones(self.room_points[0].shape)
self.point_spacing = ds
self.wavespeed = physics_params.get('wavespeed', 343)
self.attenuation = physics_params.get('attenuation', 0)
self.walls = walls
self.func_sources = []
self.data_sources = []
self.runs = []
def add_source_func(self, loc, func):
'''Add a source which is based on a function in time. `loc` is the coordinate in the room.'''
Coordinate = namedtuple('Coordinate', 'x y')
true_loc = Coordinate(int(loc[0] // self.point_spacing), int(loc[1] // self.point_spacing))
self.func_sources.append((loc, true_loc, func))
def add_source_data(self, loc, data):
'''Add a source which is based on a list of values. `loc` is the coordinate in the room.'''
Coordinate = namedtuple('Coordinate', 'x y')
true_loc = Coordinate(int(loc[0] // self.point_spacing), int(loc[1] // self.point_spacing))
self.data_sources.append((loc, true_loc, data))
def add_walls(self, walls):
self.walls = self.walls + walls
def create_mask(self):
'''Create the wall mask based on all current walls. This uses a modified version of the
Bresenham algorithm for rasterizing the walls to pixels on the grid.
'''
def bressenham_ABC(p0, p1):
A = p1.y - p0.y
B = -(p1.x - p0.x)
C = p1.x * p0.y - p0.x * p1.y
return A,B,C
for wall in self.walls:
A, B, C = bressenham_ABC(wall.endpoint1, wall.endpoint2)
on_line_mask = 0 == np.floor(self.room_points[0] * A + self.room_points[1] * B + C)
on_line_mask *= max(wall.endpoint1.x, wall.endpoint2.x) >= self.room_points[0]
on_line_mask *= self.room_points[0] >= min(wall.endpoint1.x, wall.endpoint2.x)
on_line_mask *= max(wall.endpoint1.y, wall.endpoint2.y) >= self.room_points[1]
on_line_mask *= self.room_points[1] >= min(wall.endpoint1.y, wall.endpoint2.y)
self.mask_points[on_line_mask] = wall.transmission
def run(self, dt, t_final):
'''Solve the system using a finite differences solver, and return the solved system.
Make sure the numerical stability is maintained by ensuring (wavespeed)*dt/ds<=1.
`dt` is the time step. `t_final` is the time limit on the simulation.
'''
self.create_mask()
wave_constant = (self.wavespeed * dt / self.point_spacing)**2
damp_constant = self.attenuation * dt / 2
if 2 * wave_constant > 1:
raise ValueError(f'CFL condition not satisfied, results won\'t be numerically stable. C is {wave_constant}.')
time_steps = np.arange(0, t_final, dt)
room_data = np.zeros((*self.room_points[0].shape, len(time_steps)), dtype=float)
room_data[:,:,0] = np.multiply(room_data[:,:,0], self.mask_points)
for t in tqdm(range(1, len(time_steps)-1)):
room_data[:,:,t] = np.multiply(room_data[:,:,t], self.mask_points)
D2x = room_data[:-2,1:-1,t] - 2 * room_data[1:-1,1:-1,t] + room_data[2:,1:-1,t]
D2y = room_data[1:-1,:-2,t] - 2 * room_data[1:-1,1:-1,t] + room_data[1:-1,2:,t]
room_data[1:-1,1:-1,t+1] = wave_constant * (D2x + D2y) + 2 * room_data[1:-1,1:-1,t] + (damp_constant - 1) * room_data[1:-1,1:-1,t-1]
for source in self.func_sources:
loc = source[1]
source_func = source[2]
room_data[loc.x, loc.y, t+1] = dt**2 * source_func(time_steps[t])
for source in self.data_sources:
loc = source[1]
source_data = source[2]
if len(source_data) <= t:
room_data[loc.x, loc.y, t+1] = 0
else:
room_data[loc.x, loc.y, t+1] = dt**2 * source_data[t]
room_data /= (1 + damp_constant)
run_data = {'time params': {'dt': dt,
't_final': t_final},
'walls': self.walls,
'sources': self.func_sources + self.data_sources,
'results': room_data
}
self.runs.append(run_data)
def get_mask(self):
''' Return a 2D numpy array of the wall mask, as currently calculated.
'''
return self.mask_points
|
(ds, width, height, *, walls=[], physics_params={})
|
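A minimal driving sketch for the Room/Wall API above (assuming both classes and numpy are importable; the chosen dt keeps the CFL constant in run() well under 1):

```python
import numpy as np

# 5 m x 5 m room, one half-transmissive wall, one 440 Hz point source.
room = Room(ds=0.1, width=5, height=5,
            walls=[Wall((1, 0), (1, 5), transmission=0.5)])
room.add_source_func((2.5, 2.5), lambda t: np.sin(2 * np.pi * 440 * t))
room.run(dt=1e-4, t_final=0.05)        # C = (343*1e-4/0.1)**2 ~ 0.12
results = room.runs[-1]['results']     # array of shape (n_y, n_x, n_steps)
```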
722,258
|
2dwavesim.room
|
__init__
|
Create a 'room' system, with parameters for simulation.
Params:
ds: (float) size of unit step in space
width: (float) width of room, rounded down to nearest multiple of ds
height: (float) height of room, rounded down to nearest multiple of ds
Keyword params:
walls: (Wall) List of wall objects containing position and transmission
data.
physics_params: (dict{str:float}) dictionary of physics params for the
system. Contains 'wavespeed': speed of waves in medium (float) and
'attenuation': attenuation of waves in medium (float).
|
def __init__(self, ds, width, height, *, walls=[], physics_params={}):
    '''Create a 'room' system, with parameters for simulation.
    Params:
    ds: (float) size of unit step in space
    width: (float) width of room, rounded down to nearest multiple of ds
    height: (float) height of room, rounded down to nearest multiple of ds
    Keyword params:
    walls: (Wall) List of wall objects containing position and transmission
    data.
    physics_params: (dict{str:float}) dictionary of physics params for the
    system. Contains 'wavespeed': speed of waves in medium (float) and
    'attenuation': attenuation of waves in medium (float).
    '''
    self.room_points = np.meshgrid(np.arange(0, width, ds), np.arange(0, height, ds))
    self.mask_points = np.ones(self.room_points[0].shape)
    self.point_spacing = ds
    self.wavespeed = physics_params.get('wavespeed', 343)
    self.attenuation = physics_params.get('attenuation', 0)
    self.walls = walls
    self.func_sources = []
    self.data_sources = []
    self.runs = []
|
(self, ds, width, height, *, walls=[], physics_params={})
|
722,259
|
2dwavesim.room
|
add_source_data
|
Add a source which is based on a list of values. `loc` is the coordinate in the room.
|
def add_source_data(self, loc, data):
    '''Add a source which is based on a list of values. `loc` is the coordinate in the room.'''
    Coordinate = namedtuple('Coordinate', 'x y')
    true_loc = Coordinate(int(loc[0] // self.point_spacing), int(loc[1] // self.point_spacing))
    self.data_sources.append((loc, true_loc, data))
|
(self, loc, data)
|
722,260
|
2dwavesim.room
|
add_source_func
|
Add a source which is based on a function in time. `loc` is the coordinate in the room.
|
def add_source_func(self, loc, func):
    '''Add a source which is based on a function in time. `loc` is the coordinate in the room.'''
    Coordinate = namedtuple('Coordinate', 'x y')
    true_loc = Coordinate(int(loc[0] // self.point_spacing), int(loc[1] // self.point_spacing))
    self.func_sources.append((loc, true_loc, func))
|
(self, loc, func)
|
722,261
|
2dwavesim.room
|
add_walls
| null |
def add_walls(self, walls):
    self.walls = self.walls + walls
|
(self, walls)
|
722,262
|
2dwavesim.room
|
create_mask
|
Create the wall mask based on all current walls. This uses a modified version of the
Bresenham algorithm for rasterizing the walls to pixels on the grid.
|
def create_mask(self):
    '''Create the wall mask based on all current walls. This uses a modified version of the
    Bresenham algorithm for rasterizing the walls to pixels on the grid.
    '''
    def bressenham_ABC(p0, p1):
        A = p1.y - p0.y
        B = -(p1.x - p0.x)
        C = p1.x * p0.y - p0.x * p1.y
        return A,B,C
    for wall in self.walls:
        A, B, C = bressenham_ABC(wall.endpoint1, wall.endpoint2)
        on_line_mask = 0 == np.floor(self.room_points[0] * A + self.room_points[1] * B + C)
        on_line_mask *= max(wall.endpoint1.x, wall.endpoint2.x) >= self.room_points[0]
        on_line_mask *= self.room_points[0] >= min(wall.endpoint1.x, wall.endpoint2.x)
        on_line_mask *= max(wall.endpoint1.y, wall.endpoint2.y) >= self.room_points[1]
        on_line_mask *= self.room_points[1] >= min(wall.endpoint1.y, wall.endpoint2.y)
        self.mask_points[on_line_mask] = wall.transmission
|
(self)
|
722,263
|
2dwavesim.room
|
get_mask
|
Return a 2D numpy array of the wall mask, as currently calculated.
|
def get_mask(self):
    ''' Return a 2D numpy array of the wall mask, as currently calculated.
    '''
    return self.mask_points
|
(self)
|
722,264
|
2dwavesim.room
|
run
|
Solve the system using a finite differences solver, and return the solved system.
Make sure the numerical stability is maintained by ensuring (wavespeed)*dt/ds<=1.
`dt` is the time step. `t_final` is the time limit on the simulation.
|
def run(self, dt, t_final):
    '''Solve the system using a finite differences solver, and return the solved system.
    Make sure the numerical stability is maintained by ensuring (wavespeed)*dt/ds<=1.
    `dt` is the time step. `t_final` is the time limit on the simulation.
    '''
    self.create_mask()
    wave_constant = (self.wavespeed * dt / self.point_spacing)**2
    damp_constant = self.attenuation * dt / 2
    if 2 * wave_constant > 1:
        raise ValueError(f'CFL condition not satisfied, results won\'t be numerically stable. C is {wave_constant}.')
    time_steps = np.arange(0, t_final, dt)
    room_data = np.zeros((*self.room_points[0].shape, len(time_steps)), dtype=float)
    room_data[:,:,0] = np.multiply(room_data[:,:,0], self.mask_points)
    for t in tqdm(range(1, len(time_steps)-1)):
        room_data[:,:,t] = np.multiply(room_data[:,:,t], self.mask_points)
        D2x = room_data[:-2,1:-1,t] - 2 * room_data[1:-1,1:-1,t] + room_data[2:,1:-1,t]
        D2y = room_data[1:-1,:-2,t] - 2 * room_data[1:-1,1:-1,t] + room_data[1:-1,2:,t]
        room_data[1:-1,1:-1,t+1] = wave_constant * (D2x + D2y) + 2 * room_data[1:-1,1:-1,t] + (damp_constant - 1) * room_data[1:-1,1:-1,t-1]
        for source in self.func_sources:
            loc = source[1]
            source_func = source[2]
            room_data[loc.x, loc.y, t+1] = dt**2 * source_func(time_steps[t])
        for source in self.data_sources:
            loc = source[1]
            source_data = source[2]
            if len(source_data) <= t:
                room_data[loc.x, loc.y, t+1] = 0
            else:
                room_data[loc.x, loc.y, t+1] = dt**2 * source_data[t]
    room_data /= (1 + damp_constant)
    run_data = {'time params': {'dt': dt,
                                't_final': t_final},
                'walls': self.walls,
                'sources': self.func_sources + self.data_sources,
                'results': room_data
                }
    self.runs.append(run_data)
|
(self, dt, t_final)
|
722,265
|
2dwavesim.room
|
Wall
| null |
class Wall:
def __init__(self, endpoint1, endpoint2, transmission):
Coordinate = namedtuple('Coordinate', 'x y')
self.endpoint1 = Coordinate(endpoint1[0], endpoint1[1])
self.endpoint2 = Coordinate(endpoint2[0], endpoint2[1])
self.transmission = transmission
|
(endpoint1, endpoint2, transmission)
|
722,266
|
2dwavesim.room
|
__init__
| null |
def __init__(self, endpoint1, endpoint2, transmission):
    Coordinate = namedtuple('Coordinate', 'x y')
    self.endpoint1 = Coordinate(endpoint1[0], endpoint1[1])
    self.endpoint2 = Coordinate(endpoint2[0], endpoint2[1])
    self.transmission = transmission
|
(self, endpoint1, endpoint2, transmission)
|
722,270
|
statsmodels.compat.patsy
|
monkey_patch_cat_dtype
| null |
def monkey_patch_cat_dtype():
patsy.util.safe_is_pandas_categorical_dtype = (
_safe_is_pandas_categorical_dtype
)
|
()
|
722,271
|
statsmodels
|
test
|
Run the test suite
Parameters
----------
extra_args : list[str]
List of arguments to pass to pytest when running the test suite. The
default is ['--tb=short', '--disable-pytest-warnings'].
exit : bool
Flag indicating whether the test runner should exit when finished.
Returns
-------
int
The status code from the test run if exit is False.
|
def test(extra_args=None, exit=False):
"""
Run the test suite
Parameters
----------
extra_args : list[str]
List of arguments to pass to pytest when running the test suite. The
default is ['--tb=short', '--disable-pytest-warnings'].
exit : bool
Flag indicating whether the test runner should exit when finished.
Returns
-------
int
The status code from the test run if exit is False.
"""
from .tools._testing import PytestTester
tst = PytestTester(package_path=__file__)
return tst(extra_args=extra_args, exit=exit)
|
(extra_args=None, exit=False)
|
722,273
|
aiopygismeteo._gismeteo
|
Gismeteo
|
Asynchronous wrapper for the Gismeteo API.
|
class Gismeteo:
"""Асинхронная обёртка для Gismeteo API."""
__slots__ = (
"_session",
"_settings",
"current",
"search",
"step3",
"step6",
"step24",
)
def __init__(
self,
*,
lang: Optional[Lang] = None,
session: Optional[ClientSession] = None,
token: str,
) -> None:
"""Асинхронная обёртка для Gismeteo API.
Args:
lang:
Язык. По умолчанию "ru".
session:
Экземпляр aiohttp.ClientSession.
По умолчанию для каждого запроса создаётся новый экземпляр.
token:
X-Gismeteo-Token.
Запросить можно по электронной почте b2b@gismeteo.ru.
"""
self._settings = Settings(lang=lang, token=token)
self._session = AiohttpClient(session, self._settings)
self.current: Final = Current(self._session)
"""Текущая погода."""
self.search: Final = Search(self._session)
"""Поиск."""
self.step3: Final = Step3(self._session)
"""Погода с шагом 3 часа."""
self.step6: Final = Step6(self._session)
"""Погода с шагом 6 часов."""
self.step24: Final = Step24(self._session)
"""Погода с шагом 24 часа."""
@property
def lang(self) -> Optional[Lang]:
"""Язык."""
return self._settings.lang
@lang.setter
def lang(self, lang: Optional[Lang]) -> None:
self._settings.lang = lang
@property
def session(self) -> Optional[ClientSession]:
return self._session.session
@session.setter
def session(self, session: Optional[ClientSession]) -> None:
self._session.session = session
@property
def token(self) -> str:
"""X-Gismeteo-Token."""
return self._settings.token
@token.setter
def token(self, token: str) -> None:
self._settings.token = token
|
(*, lang: 'Optional[Lang]' = None, session: 'Optional[ClientSession]' = None, token: 'str') -> 'None'
|
722,274
|
aiopygismeteo._gismeteo
|
__init__
|
Asynchronous wrapper for the Gismeteo API.
Args:
lang:
Language. Defaults to "ru".
session:
An aiohttp.ClientSession instance.
By default a new instance is created for each request.
token:
X-Gismeteo-Token.
Can be requested by email at b2b@gismeteo.ru.
|
def __init__(
self,
*,
lang: Optional[Lang] = None,
session: Optional[ClientSession] = None,
token: str,
) -> None:
"""Асинхронная обёртка для Gismeteo API.
Args:
lang:
Язык. По умолчанию "ru".
session:
Экземпляр aiohttp.ClientSession.
По умолчанию для каждого запроса создаётся новый экземпляр.
token:
X-Gismeteo-Token.
Запросить можно по электронной почте b2b@gismeteo.ru.
"""
self._settings = Settings(lang=lang, token=token)
self._session = AiohttpClient(session, self._settings)
self.current: Final = Current(self._session)
"""Текущая погода."""
self.search: Final = Search(self._session)
"""Поиск."""
self.step3: Final = Step3(self._session)
"""Погода с шагом 3 часа."""
self.step6: Final = Step6(self._session)
"""Погода с шагом 6 часов."""
self.step24: Final = Step24(self._session)
"""Погода с шагом 24 часа."""
|
(self, *, lang: Optional[Literal['ru', 'en', 'ua', 'lt', 'lv', 'pl', 'ro']] = None, session: Optional[aiohttp.client.ClientSession] = None, token: str) -> NoneType
|
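A hedged async sketch of the wrapper above; only the `search`/`current` attributes appear in the source, so the `by_query`/`by_id` accessor methods are assumptions, and the token is a placeholder:

```python
import asyncio
from aiopygismeteo import Gismeteo  # assumed public import path

async def main() -> None:
    gismeteo = Gismeteo(token="<X-Gismeteo-Token>")
    # Hypothetical accessors on the search/current attributes shown above.
    localities = await gismeteo.search.by_query("Moscow")
    current = await gismeteo.current.by_id(localities[0].id)
    print(current)

asyncio.run(main())
```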
722,283
|
nv.core
|
create
| null |
def create(project_dir, environment_name='', project_name=None, use_pew=False, aws_profile=None,
environment_vars=None, password=None, use_keyring=False, python_virtualenv=None, python_bin=None):
project_dir = realpath(project_dir)
_valid_environment_name(environment_name)
nv_dir = join(project_dir, _folder_name(environment_name))
if exists(nv_dir):
raise RuntimeError("Environment already exists at '{0}'".format(nv_dir))
if environment_vars:
if not isinstance(environment_vars, dict):
raise RuntimeError('Environment: Expected dict got {0}'.format(type(environment_vars)))
for k, v in environment_vars.items():
if not isinstance(v, str):
raise RuntimeError('Environment "{0}" expected str got {1}'.format(k, type(v)))
if not project_name:
project_name = basename(project_dir)
if password:
crypto = Crypto.from_password(password)
if use_keyring:
keyring_store(nv_dir, password)
else:
crypto = DisabledCrypto()
nv_conf = {
'project_name': project_name,
'environment_name': environment_name,
'aws_profile': aws_profile,
'encryption': crypto.get_memo(),
}
# Fallback for `use_pew`
if python_virtualenv is None:
python_virtualenv = use_pew
if python_bin or python_virtualenv:
if environment_name:
venv = "{0}-{1}".format(project_name, environment_name)
else:
venv = project_name
venv = "{}-{}".format(venv, os.urandom(4).hex()) # prevent name collisions
logger.info('Setting up a virtual environment... ({0})'.format(venv))
if not exists(workon_home):
makedirs(workon_home)
if python_bin:
sh.virtualenv('--python', python_bin, join(workon_home, venv), _cwd=workon_home, _fg=True)
else:
sh.virtualenv(join(workon_home, venv), _cwd=workon_home, _fg=True)
nv_conf.update({
'venv': venv
})
mkdir(nv_dir)
with open(join(nv_dir, 'nv.json'), 'w') as fp:
json.dump(nv_conf, fp, indent=2)
if environment_vars:
with open(join(nv_dir, 'environment.json'), 'w') as fp:
crypto.json_dump(fp, environment_vars)
return nv_dir
|
(project_dir, environment_name='', project_name=None, use_pew=False, aws_profile=None, environment_vars=None, password=None, use_keyring=False, python_virtualenv=None, python_bin=None)
|
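A sketch tying together create() above and launch_shell() below, both taken as shown in this section; the paths and variable names are placeholders:

```python
# Create an environment folder with one variable, then open a shell in it.
nv_dir = create('/path/to/project', environment_name='dev',
                environment_vars={'API_URL': 'https://example.test'})
launch_shell('/path/to/project', environment_name='dev')
```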
722,285
|
nv.core
|
launch_shell
| null |
def launch_shell(project_dir, environment_name='', password=None, update_keyring=False):
return invoke(
command=os.environ['SHELL'], arguments=[],
project_dir=project_dir, environment_name=environment_name,
password=password, update_keyring=update_keyring
)
|
(project_dir, environment_name='', password=None, update_keyring=False)
|
722,286
|
nv.core
|
remove
| null |
def remove(project_dir, environment_name=''):
nv_dir, nv_conf = _load_nv(project_dir, environment_name)
venv = nv_conf.get('venv')
if venv:
shutil.rmtree(join(workon_home, venv))
shutil.rmtree(nv_dir)
|
(project_dir, environment_name='')
|
722,287
|
flake8_unused_arguments
|
FunctionFinder
| null |
class FunctionFinder(NodeVisitor):
functions: List[FunctionTypes]
def __init__(self, only_top_level: bool = False) -> None:
super().__init__()
self.functions = []
self.only_top_level = only_top_level
def visit_function_types(self, function: FunctionTypes) -> None:
self.functions.append(function)
if self.only_top_level:
return
if isinstance(function, ast.Lambda):
self.visit(function.body)
else:
for obj in function.body:
self.visit(obj)
visit_AsyncFunctionDef = visit_FunctionDef = visit_Lambda = visit_function_types # type: ignore[assignment]
|
(only_top_level: bool = False) -> None
|
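A small sketch exercising FunctionFinder above on a parsed module: with only_top_level=False, both the top-level function and the lambda inside the assignment are collected.

```python
import ast

tree = ast.parse("def f(a):\n    return a\n\ng = lambda x: 0\n")
finder = FunctionFinder(only_top_level=False)
finder.visit(tree)
print([type(fn).__name__ for fn in finder.functions])
# ['FunctionDef', 'Lambda']
```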
722,288
|
flake8_unused_arguments
|
__init__
| null |
def __init__(self, only_top_level: bool = False) -> None:
super().__init__()
self.functions = []
self.only_top_level = only_top_level
|
(self, only_top_level: bool = False) -> NoneType
|
722,291
|
flake8_unused_arguments
|
visit_function_types
| null |
def visit_function_types(self, function: FunctionTypes) -> None:
self.functions.append(function)
if self.only_top_level:
return
if isinstance(function, ast.Lambda):
self.visit(function.body)
else:
for obj in function.body:
self.visit(obj)
|
(self, function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> NoneType
|
722,300
|
flake8_unused_arguments
|
Plugin
| null |
class Plugin:
name = "flake8-unused-arguments"
version = "0.0.13"
ignore_abstract = False
ignore_overload = False
ignore_override = False
ignore_stubs = False
ignore_variadic_names = False
ignore_lambdas = False
ignore_nested_functions = False
ignore_dunder_methods = False
def __init__(self, tree: ast.Module):
self.tree = tree
@classmethod
def add_options(cls, option_manager: flake8.options.manager.OptionManager) -> None:
option_manager.add_option(
"--unused-arguments-ignore-abstract-functions",
action="store_true",
parse_from_config=True,
default=cls.ignore_abstract,
dest="unused_arguments_ignore_abstract_functions",
help="If provided, then unused arguments for functions decorated with abstractmethod will be ignored.",
)
option_manager.add_option(
"--unused-arguments-ignore-overload-functions",
action="store_true",
parse_from_config=True,
default=cls.ignore_overload,
dest="unused_arguments_ignore_overload_functions",
help="If provided, then unused arguments for functions decorated with overload will be ignored.",
)
option_manager.add_option(
"--unused-arguments-ignore-override-functions",
action="store_true",
parse_from_config=True,
default=cls.ignore_override,
dest="unused_arguments_ignore_override_functions",
help="If provided, then unused arguments for functions decorated with override will be ignored.",
)
option_manager.add_option(
"--unused-arguments-ignore-stub-functions",
action="store_true",
parse_from_config=True,
default=cls.ignore_stubs,
dest="unused_arguments_ignore_stub_functions",
help="If provided, then unused arguments for functions that are only a pass statement will be ignored.",
)
option_manager.add_option(
"--unused-arguments-ignore-variadic-names",
action="store_true",
parse_from_config=True,
default=cls.ignore_variadic_names,
dest="unused_arguments_ignore_variadic_names",
help="If provided, then unused *args and **kwargs won't produce warnings.",
)
option_manager.add_option(
"--unused-arguments-ignore-lambdas",
action="store_true",
parse_from_config=True,
default=cls.ignore_lambdas,
dest="unused_arguments_ignore_lambdas",
help="If provided, all lambdas are ignored.",
)
option_manager.add_option(
"--unused-arguments-ignore-nested-functions",
action="store_true",
parse_from_config=True,
default=cls.ignore_nested_functions,
dest="unused_arguments_ignore_nested_functions",
help=(
"If provided, only functions at the top level of a module or "
"methods of a class in the top level of a module are checked."
),
)
option_manager.add_option(
"--unused-arguments-ignore-dunder",
action="store_true",
parse_from_config=True,
default=cls.ignore_dunder_methods,
dest="unused_arguments_ignore_dunder_methods",
help=(
"If provided, all double-underscore methods are ignored, e.g., __new__, _init__, "
"__enter__, __exit__, __reduce_ex__, etc."
),
)
@classmethod
def parse_options(cls, options: optparse.Values) -> None:
cls.ignore_abstract = options.unused_arguments_ignore_abstract_functions
cls.ignore_overload = options.unused_arguments_ignore_overload_functions
cls.ignore_override = options.unused_arguments_ignore_override_functions
cls.ignore_stubs = options.unused_arguments_ignore_stub_functions
cls.ignore_variadic_names = options.unused_arguments_ignore_variadic_names
cls.ignore_lambdas = options.unused_arguments_ignore_lambdas
cls.ignore_nested_functions = options.unused_arguments_ignore_nested_functions
cls.ignore_dunder_methods = options.unused_arguments_ignore_dunder_methods
def run(self) -> Iterable[LintResult]:
finder = FunctionFinder(self.ignore_nested_functions)
finder.visit(self.tree)
for function in finder.functions:
decorator_names = set(get_decorator_names(function))
# ignore overload functions, it's not a surprise when they're empty
if self.ignore_overload and "overload" in decorator_names:
continue
# ignore overridden functions
if self.ignore_override and "override" in decorator_names:
continue
# ignore abstractmethods, it's not a surprise when they're empty
if self.ignore_abstract and "abstractmethod" in decorator_names:
continue
# ignore stub functions
if self.ignore_stubs and is_stub_function(function):
continue
# ignore lambdas
if self.ignore_lambdas and isinstance(function, ast.Lambda):
continue
# ignore __double_underscore_methods__()
if self.ignore_dunder_methods and is_dunder_method(function):
continue
for i, argument in get_unused_arguments(function):
name = argument.arg
if self.ignore_variadic_names:
if function.args.vararg and function.args.vararg.arg == name:
continue
if function.args.kwarg and function.args.kwarg.arg == name:
continue
# ignore self or whatever the first argument is for a classmethod
if i == 0 and (name == "self" or "classmethod" in decorator_names):
continue
line_number = argument.lineno
offset = argument.col_offset
if name.startswith("_"):
error_code = "U101"
else:
error_code = "U100"
text = "{error_code} Unused argument '{name}'".format(
error_code=error_code, name=name
)
check = "unused argument"
yield (line_number, offset, text, check)
|
(tree: ast.Module)
|
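Driving the plugin directly, as a sketch (flake8 normally instantiates it; the handler function is illustrative):

```python
import ast

source = "def handler(event, context):\n    return 'ok'\n"
plugin = Plugin(ast.parse(source))
for line, col, message, check in plugin.run():
    print(f"{line}:{col} {message}")
# 1:12 U100 Unused argument 'event'
# 1:19 U100 Unused argument 'context'
```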
722,301
|
flake8_unused_arguments
|
__init__
| null |
def __init__(self, tree: ast.Module):
self.tree = tree
|
(self, tree: ast.Module)
|
722,302
|
flake8_unused_arguments
|
run
| null |
def run(self) -> Iterable[LintResult]:
finder = FunctionFinder(self.ignore_nested_functions)
finder.visit(self.tree)
for function in finder.functions:
decorator_names = set(get_decorator_names(function))
# ignore overload functions, it's not a surprise when they're empty
if self.ignore_overload and "overload" in decorator_names:
continue
# ignore overridden functions
if self.ignore_override and "override" in decorator_names:
continue
# ignore abstractmethods, it's not a surprise when they're empty
if self.ignore_abstract and "abstractmethod" in decorator_names:
continue
# ignore stub functions
if self.ignore_stubs and is_stub_function(function):
continue
# ignore lambdas
if self.ignore_lambdas and isinstance(function, ast.Lambda):
continue
# ignore __double_underscore_methods__()
if self.ignore_dunder_methods and is_dunder_method(function):
continue
for i, argument in get_unused_arguments(function):
name = argument.arg
if self.ignore_variadic_names:
if function.args.vararg and function.args.vararg.arg == name:
continue
if function.args.kwarg and function.args.kwarg.arg == name:
continue
# ignore self or whatever the first argument is for a classmethod
if i == 0 and (name == "self" or "classmethod" in decorator_names):
continue
line_number = argument.lineno
offset = argument.col_offset
if name.startswith("_"):
error_code = "U101"
else:
error_code = "U100"
text = "{error_code} Unused argument '{name}'".format(
error_code=error_code, name=name
)
check = "unused argument"
yield (line_number, offset, text, check)
|
(self) -> Iterable[Tuple[int, int, str, str]]
|
722,303
|
ast
|
Store
|
Store
|
from ast import Store
| null |
722,306
|
flake8_unused_arguments
|
get_arguments
|
Get all of the argument names of the given function.
|
def get_arguments(function: FunctionTypes) -> List[ast.arg]:
"""Get all of the argument names of the given function."""
args = function.args
ordered_arguments: List[ast.arg] = []
# plain old args
ordered_arguments.extend(args.args)
# *arg name
if args.vararg is not None:
ordered_arguments.append(args.vararg)
# *, key, word, only, args
ordered_arguments.extend(args.kwonlyargs)
# **kwarg name
if args.kwarg is not None:
ordered_arguments.append(args.kwarg)
return ordered_arguments
|
(function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> List[ast.arg]
|
722,307
|
flake8_unused_arguments
|
get_decorator_names
| null |
def get_decorator_names(function: FunctionTypes) -> Iterable[str]:
if isinstance(function, ast.Lambda):
return
for decorator in function.decorator_list:
if isinstance(decorator, ast.Name):
yield decorator.id
elif isinstance(decorator, ast.Attribute):
yield decorator.attr
elif isinstance(decorator, ast.Call):
if isinstance(decorator.func, ast.Name):
yield decorator.func.id
else:
yield decorator.func.attr # type: ignore
else:
assert False, decorator
|
(function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> Iterable[str]
|
722,308
|
flake8_unused_arguments
|
get_unused_arguments
|
Generator that yields all of the unused arguments in the given function.
|
def get_unused_arguments(function: FunctionTypes) -> List[Tuple[int, ast.arg]]:
"""Generator that yields all of the unused arguments in the given function."""
arguments = list(enumerate(get_arguments(function)))
class NameFinder(NodeVisitor):
def visit_Name(self, name: ast.Name) -> None:
nonlocal arguments
if isinstance(name.ctx, Store):
return
arguments = [
(arg_index, arg) for arg_index, arg in arguments if arg.arg != name.id
]
NameFinder().visit(function)
return arguments
|
(function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> List[Tuple[int, ast.arg]]
|
722,309
|
flake8_unused_arguments
|
is_dunder_method
| null |
def is_dunder_method(function: FunctionTypes) -> bool:
if isinstance(function, ast.Lambda):
return False
if not hasattr(function, "name"):
return False
name = function.name
return len(name) > 4 and name.startswith("__") and name.endswith("__")
|
(function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> bool
|
722,310
|
flake8_unused_arguments
|
is_stub_function
| null |
def is_stub_function(function: FunctionTypes) -> bool:
if isinstance(function, ast.Lambda):
return isinstance(function.body, ast.Ellipsis)
statement = function.body[0]
if isinstance(statement, ast.Expr) and isinstance(statement.value, ast.Str):
if len(function.body) > 1:
# first statement is a docstring, let's skip it
statement = function.body[1]
else:
# it's a function with only a docstring, that's a stub
return True
if isinstance(statement, ast.Pass):
return True
if isinstance(statement, ast.Expr) and isinstance(statement.value, ast.Ellipsis):
return True
if isinstance(statement, ast.Raise):
# raise NotImplementedError()
if (
isinstance(statement.exc, ast.Call)
and hasattr(statement.exc.func, "id")
and statement.exc.func.id == "NotImplementedError"
):
return True
# raise NotImplementedError
elif (
isinstance(statement.exc, ast.Name)
and hasattr(statement.exc, "id")
and statement.exc.id == "NotImplementedError"
):
return True
return False
|
(function: Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.Lambda]) -> bool
|
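The helpers above compose as in this sketch: get_arguments preserves positional/vararg/keyword-only order, and get_unused_arguments drops any argument whose name is read in the body.

```python
import ast

fn = ast.parse("def f(a, b):\n    return a\n").body[0]
print([arg.arg for arg in get_arguments(fn)])            # ['a', 'b']
print([arg.arg for _, arg in get_unused_arguments(fn)])  # ['b']
print(is_stub_function(fn))                              # False
```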
722,312
|
first
|
first
|
Return first element of `iterable` that evaluates true, else return None
(or an optional default value).
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
|
def first(iterable, default=None, key=None):
"""
Return first element of `iterable` that evaluates true, else return None
(or an optional default value).
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
if key is None:
for el in iterable:
if el:
return el
else:
for el in iterable:
if key(el):
return el
return default
|
(iterable, default=None, key=None)
|
722,313
|
paramz.model
|
Model
| null |
class Model(Parameterized):
_fail_count = 0 # Count of failed optimization steps (see objective)
_allowed_failures = 10 # number of allowed failures
def __init__(self, name):
super(Model, self).__init__(name) # Parameterized.__init__(self)
self.optimization_runs = []
self.sampling_runs = []
self.preferred_optimizer = 'lbfgsb'
#from paramz import Tie
#self.tie = Tie()
#self.link_parameter(self.tie, -1)
self.obj_grads = None
#self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
"""
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
kwargs are passed to the optimizer. They can be:
:param max_iters: maximum number of function evaluations
:type max_iters: int
:param messages: True: display messages during optimisation, "ipython_notebook": show progress in an IPython notebook
:type messages: bool | string
:param optimizer: which optimizer to use (defaults to self.preferred optimizer)
:type optimizer: string
Valid optimizers are:
- 'scg': scaled conjugate gradient method, recommended for stability.
See also GPy.inference.optimization.scg
- 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
- 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),
- 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),
- 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs),
- 'sgd': stochastic gradient descent (see scipy.optimize.sgd). For experts only!
"""
if self.is_fixed or self.size == 0:
print('nothing to optimize')
return
if not self.update_model():
print("updates were off, setting updates on again")
self.update_model(True)
if start is None:
start = self.optimizer_array
if optimizer is None:
optimizer = self.preferred_optimizer
if isinstance(optimizer, optimization.Optimizer):
opt = optimizer
opt.model = self
else:
optimizer = optimization.get_optimizer(optimizer)
opt = optimizer(max_iters=max_iters, **kwargs)
with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook, clear_after_finish=clear_after_finish) as vo:
opt.run(start, f_fp=self._objective_grads, f=self._objective, fp=self._grads)
self.optimizer_array = opt.x_opt
self.optimization_runs.append(opt)
return opt
def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
"""
Perform random restarts of the model, and set the model to the best
seen solution.
If the robust flag is set, exceptions raised during optimizations will
be handled silently. If _all_ runs fail, the model is reset to the
existing parameter values.
\*\*kwargs are passed to the optimizer.
:param num_restarts: number of restarts to use (default 10)
:type num_restarts: int
:param robust: whether to handle exceptions silently or not (default False)
:type robust: bool
:param parallel: whether to run each restart as a separate process. It relies on the multiprocessing module.
:type parallel: bool
:param num_processes: number of workers in the multiprocessing pool
:type num_processes: int
:param max_f_eval: maximum number of function evaluations
:type max_f_eval: int
:param max_iters: maximum number of iterations
:type max_iters: int
:param messages: whether to display during optimisation
:type messages: bool
.. note::
If num_processes is None, the number of workers in the
multiprocessing pool is automatically set to the number of processors
on the current machine.
"""
initial_length = len(self.optimization_runs)
initial_parameters = self.optimizer_array.copy()
if parallel: #pragma: no cover
try:
pool = mp.Pool(processes=num_processes)
obs = [self.copy() for i in range(num_restarts)]
[obs[i].randomize() for i in range(num_restarts-1)]
jobs = pool.map(opt_wrapper, [(o,kwargs) for o in obs])
pool.close()
pool.join()
except KeyboardInterrupt:
print("Ctrl+c received, terminating and joining pool.")
pool.terminate()
pool.join()
for i in range(num_restarts):
try:
if not parallel:
if i > 0:
self.randomize()
self.optimize(**kwargs)
else:#pragma: no cover
self.optimization_runs.append(jobs[i])
if verbose:
print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
except Exception as e:
if robust:
print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
else:
raise e
if len(self.optimization_runs) > initial_length:
# This works, since failed jobs don't get added to the optimization_runs.
i = np.argmin([o.f_opt for o in self.optimization_runs[initial_length:]])
self.optimizer_array = self.optimization_runs[initial_length + i].x_opt
else:
self.optimizer_array = initial_parameters
return self.optimization_runs
def objective_function(self):
"""
The objective function for the given algorithm.
This function is the true objective, which wants to be minimized.
Note that all parameters are already set and in place, so you just need
to return the objective function here.
For probabilistic models this is the negative log_likelihood
(including the MAP prior), so we return it here. If your model is not
probabilistic, just return your objective to minimize here!
"""
raise NotImplementedError("Implement the result of the objective function here")
def objective_function_gradients(self):
"""
The gradients for the objective function for the given algorithm.
The gradients are w.r.t. the *negative* objective function, as
this framework works with *negative* log-likelihoods as a default.
You can find the gradient for the parameters in self.gradient at all times.
This is the place, where gradients get stored for parameters.
This function is the true objective, which wants to be minimized.
Note that all parameters are already set and in place, so you just need
to return the gradient here.
For probabilistic models this is the gradient of the negative log_likelihood
(including the MAP prior), so we return it here. If your model is not
probabilistic, just return your *negative* gradient here!
"""
return self.gradient
def _grads(self, x):
"""
Gets the gradients from the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the gradients, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
"""
try:
# self._set_params_transformed(x)
self.optimizer_array = x
self.obj_grads = self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError): #pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e100, 1e100)
return self.obj_grads
def _objective(self, x):
"""
The objective function passed to the optimizer. It combines
the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the objective, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:parameter type: np.array
"""
try:
self.optimizer_array = x
obj = self.objective_function()
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
return np.inf
return obj
def _objective_grads(self, x):
try:
self.optimizer_array = x
obj_f, self.obj_grads = self.objective_function(), self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
obj_f = np.inf
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e10, 1e10)
return obj_f, self.obj_grads
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
"""
Check the gradient of the model by comparing to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue).
"""
if not self._model_initialized_:
import warnings
warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
return False
x = self.optimizer_array.copy()
if not verbose:
# make sure only to test the selected parameters
if target_param is None:
transformed_index = np.arange(len(x))
else:
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
# just check the global ratio
dx = np.zeros(x.shape)
dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)
# evaluate around the point x
f1 = self._objective(x + dx)
f2 = self._objective(x - dx)
gradient = self._grads(x)
dx = dx[transformed_index]
gradient = gradient[transformed_index]
denominator = (2 * np.dot(dx, gradient))
global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
if global_ratio is np.nan: # pragma: no cover
global_ratio = 0
return np.abs(1. - global_ratio) < tolerance or global_diff
else:
# check the gradient of each parameter individually, and do some pretty printing
try:
names = self.parameter_names_flat()
except NotImplementedError:
names = ['Variable %i' % i for i in range(len(x))]
# Prepare for pretty-printing
header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
float_len = 10
cols = [max_names]
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
header_string = list(map(lambda x: '|'.join(x), [header_string]))
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
if target_param is None:
target_param = self
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
gradient = self._grads(x).copy()
np.where(gradient == 0, 1e-312, gradient)
ret = True
for xind in zip(transformed_index):
xx = x.copy()
xx[xind] += step
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = float(self._objective(xx))
#Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentially
#the same
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
df_ratio = np.abs((f1 - f2) / min(f1, f2))
else: # pragma: no cover
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0): # pragma: no cover
ratio = (f1 - f2) == gradient[xind]
else:
ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
formatted_name = "\033[92m {0} \033[0m".format(names[xind])
ret &= True
else: # pragma: no cover
formatted_name = "\033[91m {0} \033[0m".format(names[xind])
ret &= False
if df_unstable: # pragma: no cover
formatted_name = "\033[94m {0} \033[0m".format(names[xind])
r = '%.6f' % float(ratio)
d = '%.6f' % float(difference)
g = '%.6f' % gradient[xind]
ng = '%.6f' % float(numerical_gradient)
df = '%1.e' % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
print(grad_string)
self.optimizer_array = x
return ret
def _repr_html_(self):
"""Representation of the model in html for notebook display."""
model_details = [['<b>Model</b>', self.name + '<br>'],
['<b>Objective</b>', '{}<br>'.format(float(self.objective_function()))],
["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
["<b>Number of Optimization Parameters</b>", '{}<br>'.format(self._size_transformed())],
["<b>Updates</b>", '{}<br>'.format(self._update_on)],
]
from operator import itemgetter
to_print = ["""<style type="text/css">
.pd{
font-family: "Courier New", Courier, monospace !important;
width: 100%;
padding: 3px;
}
</style>\n"""] + ["<p class=pd>"] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["</p>"]
to_print.append(super(Model, self)._repr_html_())
return "\n".join(to_print)
def __str__(self, VT100=True):
model_details = [['Name', self.name],
['Objective', '{}'.format(float(self.objective_function()))],
["Number of Parameters", '{}'.format(self.size)],
["Number of Optimization Parameters", '{}'.format(self._size_transformed())],
["Updates", '{}'.format(self._update_on)],
]
max_len = max(map(len, model_details))
to_print = [""] + ["{0:{l}} : {1}".format(name, detail, l=max_len) for name, detail in model_details] + ["Parameters:"]
to_print.append(super(Model, self).__str__(VT100=VT100))
return "\n".join(to_print)
|
(*args, **kw)
|
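A minimal subclassing sketch for the Model above, following the usual paramz pattern (objective_function returns the value to minimize; parameters_changed fills in gradients); the quadratic toy problem is illustrative:

```python
import numpy as np
from paramz import Model, Param

class Quadratic(Model):
    """Toy model: minimize (x - 3)^2."""
    def __init__(self, name='quadratic'):
        super(Quadratic, self).__init__(name=name)
        self.x = Param('x', np.zeros(1))
        self.link_parameter(self.x)
    def objective_function(self):
        return float((self.x[0] - 3.0) ** 2)
    def parameters_changed(self):
        # Gradient of the objective w.r.t. x: d/dx (x - 3)^2 = 2(x - 3).
        self.x.gradient = 2.0 * (self.x - 3.0)

m = Quadratic()
m.optimize()      # uses the 'lbfgsb' preferred_optimizer set in __init__
print(m.x)        # x converges to ~3.0
```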
722,314
|
paramz.core.pickleable
|
__deepcopy__
| null |
def __deepcopy__(self, memo):
s = self.__new__(self.__class__) # fresh instance
memo[id(self)] = s # be sure to break all cycles --> self is already done
import copy
s.__setstate__(copy.deepcopy(self.__getstate__(), memo)) # standard copy
return s
|
(self, memo)
|
722,315
|
paramz.parameterized
|
__getitem__
| null |
def __getitem__(self, name, paramlist=None):
if isinstance(name, (int, slice, tuple, np.ndarray)):
return self.param_array[name]
else:
paramlist = self.grep_param_names(name)
if len(paramlist) < 1: raise AttributeError(name)
if len(paramlist) == 1:
#if isinstance(paramlist[-1], Parameterized) and paramlist[-1].size > 0:
# paramlist = paramlist[-1].flattened_parameters
# if len(paramlist) != 1:
# return ParamConcatenation(paramlist)
return paramlist[-1]
from .param import ParamConcatenation
return ParamConcatenation(paramlist)
|
(self, name, paramlist=None)
|
722,316
|
paramz.core.pickleable
|
__getstate__
| null |
def __getstate__(self):
ignore_list = ['_param_array_', # parameters get set from bottom to top
'_gradient_array_', # as well as gradients
'_optimizer_copy_',
'logger',
'observers',
'_fixes_', # and fixes
'cache', # never pickle the cache
]
dc = dict()
#py3 fix
#for k,v in self.__dict__.iteritems():
for k,v in self.__dict__.items():
if k not in ignore_list:
dc[k] = v
return dc
|
(self)
|
722,317
|
paramz.model
|
__init__
| null |
def __init__(self, name):
super(Model, self).__init__(name) # Parameterized.__init__(self)
self.optimization_runs = []
self.sampling_runs = []
self.preferred_optimizer = 'lbfgsb'
#from paramz import Tie
#self.tie = Tie()
#self.link_parameter(self.tie, -1)
self.obj_grads = None
#self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
|
(self, name)
|
722,318
|
paramz.parameterized
|
__setattr__
| null |
def __setattr__(self, name, val):
# override the default behaviour, if setting a param, so broadcasting can by used
if hasattr(self, "parameters"):
pnames = self.parameter_names(False, adjust_for_printing=True, recursive=False)
if name in pnames:
param = self.parameters[pnames.index(name)]
param[:] = val; return
return object.__setattr__(self, name, val)
|
(self, name, val)
|
722,319
|
paramz.parameterized
|
__setitem__
| null |
def __setitem__(self, name, value, paramlist=None):
if not self._model_initialized_:
raise AttributeError("""Model is not initialized, this change will only be reflected after initialization if in leaf.
If you are loading a model, set updates off, then initialize, then set the values, then update the model to be fully initialized:
>>> m.update_model(False)
>>> m.initialize_parameter()
>>> m[:] = loaded_parameters
>>> m.update_model(True)
""")
if value is None:
return # nothing to do here
if isinstance(name, (slice, tuple, np.ndarray)):
try:
self.param_array[name] = value
except:
raise ValueError("Setting by slice or index only allowed with array-like")
self.trigger_update()
else:
param = self.__getitem__(name, paramlist)
param[:] = value
|
(self, name, value, paramlist=None)
|
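Both set paths on an initialized model, as a hedged sketch; the four-step recipe for uninitialized models is quoted verbatim in the error message above::

    m[:] = np.zeros(m.size)   # slice path: writes into param_array, then trigger_update()
    m['weights'] = 1.0        # name path: via __getitem__, broadcasts into the matched Param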
722,320
|
paramz.parameterized
|
__setstate__
| null |
def __setstate__(self, state):
super(Parameterized, self).__setstate__(state)
self._connect_parameters()
self._connect_fixes()
self._notify_parent_change()
self.parameters_changed()
return self
|
(self, state)
|
722,321
|
paramz.model
|
__str__
| null |
def __str__(self, VT100=True):
model_details = [['Name', self.name],
['Objective', '{}'.format(float(self.objective_function()))],
["Number of Parameters", '{}'.format(self.size)],
["Number of Optimization Parameters", '{}'.format(self._size_transformed())],
["Updates", '{}'.format(self._update_on)],
]
max_len = max(map(len, model_details))
to_print = [""] + ["{0:{l}} : {1}".format(name, detail, l=max_len) for name, detail in model_details] + ["Parameters:"]
to_print.append(super(Model, self).__str__(VT100=VT100))
return "\n".join(to_print)
|
(self, VT100=True)
|
722,322
|
paramz.core.indexable
|
_add_io
| null |
def _add_io(self, name, operations):
self._index_operations[name] = operations
def do_raise(self, x):
# note: despite its name, the raise below is disabled; setting the
# property re-installs the operations and reconnects the fixes instead
self._index_operations.__setitem__(name, x)
self._connect_fixes()
self._notify_parent_change()
#raise AttributeError("Cannot set {name} directly, use the appropriate methods to set new {name}".format(name=name))
setattr(Indexable, name, property(fget=lambda self: self._index_operations[name],
fset=do_raise))
|
(self, name, operations)
|
722,323
|
paramz.core.parameter_core
|
_add_parameter_name
| null |
def _add_parameter_name(self, param):
try:
pname = adjust_name_for_printing(param.name)
def warn_and_retry(param, match=None):
#===================================================================
# print """
# WARNING: added a parameter with formatted name {},
# which is already assigned to {}.
# Trying to change the parameter name to
#
# {}.{}
# """.format(pname, self.hierarchy_name(), self.hierarchy_name(), param.name + "_")
#===================================================================
if match is None:
param.name = param.name+"_1"
else:
param.name = match.group('name') + "_" + str(int(match.group('digit'))+1)
self._add_parameter_name(param)
# and makes sure to not delete programmatically added parameters
for other in self.parameters:
if (not (other is param)) and (other.name == param.name):
return warn_and_retry(other, _name_digit.match(other.name))
if pname not in dir(self):
self.__dict__[pname] = param
self._added_names_.add(pname)
else: # pname in self.__dict__
if pname in self._added_names_:
other = self.__dict__[pname]
#if not (param is other):
# del self.__dict__[pname]
# self._added_names_.remove(pname)
# warn_and_retry(other)
# warn_and_retry(param, _name_digit.match(other.name))
except RE:
raise RE("Maximum recursion depth reached, try naming the parts of your kernel uniquely to avoid naming conflicts.")
|
(self, param)
|
722,324
|
paramz.core.indexable
|
_add_to_index_operations
|
Helper to avoid duplicated code.
This adds the given `what` (transformation, prior, etc.) to the parameter index operations `which`.
`reconstrained` are the indices being reconstrained.
Warn when reconstraining parameters if `warning` is True.
TODO: find out which parameters have changed specifically
|
def _add_to_index_operations(self, which, reconstrained, what, warning):
"""
Helper to avoid duplicated code.
This adds the given `what` (transformation, prior, etc.) to the parameter index operations `which`.
`reconstrained` are the indices being reconstrained.
Warn when reconstraining parameters if `warning` is True.
TODO: find out which parameters have changed specifically
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
logging.getLogger(self.name).warning("reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self._raveled_index()
which.add(what, index)
return index
|
(self, which, reconstrained, what, warning)
|
722,325
|
paramz.model
|
_checkgrad
|
Check the gradient of the model by comparing to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue).
|
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
"""
Check the gradient of the model by comparing to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:-
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue).
"""
if not self._model_initialized_:
import warnings
warnings.warn("This model has not been initialized, try model.inititialize_model()", RuntimeWarning)
return False
x = self.optimizer_array.copy()
if not verbose:
# make sure only to test the selected parameters
if target_param is None:
transformed_index = np.arange(len(x))
else:
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
# just check the global ratio
dx = np.zeros(x.shape)
dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)
# evaluate around the point x
f1 = self._objective(x + dx)
f2 = self._objective(x - dx)
gradient = self._grads(x)
dx = dx[transformed_index]
gradient = gradient[transformed_index]
denominator = (2 * np.dot(dx, gradient))
global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
if np.isnan(global_ratio): # pragma: no cover ('is np.nan' never matches a computed NaN)
global_ratio = 0
return np.abs(1. - global_ratio) < tolerance or global_diff
else:
# check the gradient of each parameter individually, and do some pretty printing
try:
names = self.parameter_names_flat()
except NotImplementedError:
names = ['Variable %i' % i for i in range(len(x))]
# Prepare for pretty-printing
header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
float_len = 10
cols = [max_names]
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
header_string = list(map(lambda x: '|'.join(x), [header_string]))
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
if target_param is None:
target_param = self
transformed_index = self._raveled_index_for_transformed(target_param)
if transformed_index.size == 0:
print("No free parameters to check")
return True
gradient = self._grads(x).copy()
gradient = np.where(gradient == 0, 1e-312, gradient) # guard the ratio below against division by zero
ret = True
for xind in zip(transformed_index):
xx = x.copy()
xx[xind] += step
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = float(self._objective(xx))
# Avoid divide by zero: if any of the values is above 1e-15 the ratio is meaningful,
# otherwise both values are essentially the same
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
df_ratio = np.abs((f1 - f2) / min(f1, f2))
else: # pragma: no cover
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0): # pragma: no cover
ratio = (f1 - f2) == gradient[xind]
else:
ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
formatted_name = "\033[92m {0} \033[0m".format(names[xind])
ret &= True
else: # pragma: no cover
formatted_name = "\033[91m {0} \033[0m".format(names[xind])
ret &= False
if df_unstable: # pragma: no cover
formatted_name = "\033[94m {0} \033[0m".format(names[xind])
r = '%.6f' % float(ratio)
d = '%.6f' % float(difference)
g = '%.6f' % gradient[xind]
ng = '%.6f' % float(numerical_gradient)
df = '%1.e' % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
print(grad_string)
self.optimizer_array = x
return ret
|
(self, target_param=None, verbose=False, step=1e-06, tolerance=0.001, df_tolerance=1e-12)
|
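The numerical estimate used here is the symmetric difference quotient: for step h and unit vector e_i, numerical_gradient = (f(x + h*e_i) - f(x - h*e_i)) / (2h), accepted when |1 - analytical/numerical| < tolerance. A hedged usage sketch, assuming the public checkgrad wrapper is available on the model::

    m.checkgrad()              # single global ratio over all free parameters
    m.checkgrad(verbose=True)  # per-parameter table with colour coding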
722,326
|
paramz.core.constrainable
|
_connect_fixes
| null |
def _connect_fixes(self):
fixed_indices = self.constraints[__fixed__]
if fixed_indices.size > 0:
self._ensure_fixes()
self._fixes_[:] = UNFIXED
self._fixes_[fixed_indices] = FIXED
else:
self._fixes_ = None
del self.constraints[__fixed__]
|
(self)
|
722,327
|
paramz.parameterized
|
_connect_parameters
| null |
def _connect_parameters(self, ignore_added_names=False):
# connect parameterlist to this parameterized object
# This just sets up the right connection for the params objects
# to be used as parameters
# it also sets the constraints for each parameter to the constraints
# of their respective parents
self._model_initialized_ = True
if not hasattr(self, "parameters") or len(self.parameters) < 1:
# no parameters for this class
return
old_size = 0
self._param_slices_ = []
for i, p in enumerate(self.parameters):
if not p.param_array.flags['C_CONTIGUOUS']:# getattr(p, 'shape', None) != getattr(p, '_realshape_', None):
raise ValueError("""
Have you added an additional dimension to a Param object?
p[:,None], where p is of type Param does not work
and is expected to fail! Try increasing the
dimensionality of the param array before making
a Param out of it:
p = Param("<name>", array[:,None])
Otherwise this should not happen!
Please write an email to the developers with the code,
which reproduces this error.
All parameter arrays must be C_CONTIGUOUS
""")
p._parent_ = self
p._parent_index_ = i
pslice = slice(old_size, old_size + p.size)
# first connect all children
p._propagate_param_grad(self.param_array[pslice], self.gradient_full[pslice])
# then connect children to self
self.param_array[pslice] = p.param_array.flat # , requirements=['C', 'W']).ravel(order='C')
self.gradient_full[pslice] = p.gradient_full.flat # , requirements=['C', 'W']).ravel(order='C')
p.param_array.data = self.param_array[pslice].data
p.gradient_full.data = self.gradient_full[pslice].data
self._param_slices_.append(pslice)
self._add_parameter_name(p)
old_size += p.size
|
(self, ignore_added_names=False)
|
722,328
|
paramz.core.indexable
|
_disconnect_parent
|
From Parentable:
disconnect the parent and set the new constraints to constr
|
def _disconnect_parent(self, *args, **kw):
"""
From Parentable:
disconnect the parent and set the new constraints to constr
"""
for name, iop in list(self._index_operations.items()):
iopc = iop.copy()
iop.clear()
self.remove_index_operation(name)
self.add_index_operation(name, iopc)
#self.constraints.clear()
#self.constraints = constr
self._parent_ = None
self._parent_index_ = None
self._connect_fixes()
self._notify_parent_change()
|
(self, *args, **kw)
|
722,329
|
paramz.core.constrainable
|
_ensure_fixes
| null |
def _ensure_fixes(self):
# Ensure that the fixes array is set:
# Parameterized: ones(self.size)
# Param: ones(self._realsize_)
if (not hasattr(self, "_fixes_")) or (self._fixes_ is None) or (self._fixes_.size != self.size):
self._fixes_ = np.ones(self.size, dtype=bool)
self._fixes_[self.constraints[__fixed__]] = FIXED
|
(self)
|
722,330
|
paramz.parameterized
|
_format_spec
| null |
def _format_spec(self, name, names, desc, iops, VT100=True):
nl = max([len(str(x)) for x in names + [name]])
sl = max([len(str(x)) for x in desc + ["value"]])
lls = [reduce(lambda a,b: max(a, len(b)), iops[opname], len(opname)) for opname in iops]
if VT100:
format_spec = [" \033[1m{{name!s:<{0}}}\033[0;0m".format(nl),"{{desc!s:>{0}}}".format(sl)]
else:
format_spec = [" {{name!s:<{0}}}".format(nl),"{{desc!s:>{0}}}".format(sl)]
for opname, l in zip(iops, lls):
f = '{{{1}!s:^{0}}}'.format(l, opname)
format_spec.append(f)
return format_spec
|
(self, name, names, desc, iops, VT100=True)
|
722,331
|
paramz.core.constrainable
|
_get_original
| null |
def _get_original(self, param):
# if advanced indexing is activated it happens that the array is a copy
# you can retrieve the original param through this method, by passing
# the copy here
return self.parameters[param._parent_index_]
#===========================================================================
|
(self, param)
|
722,332
|
paramz.model
|
_grads
|
Gets the gradients from the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the gradients, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
|
def _grads(self, x):
"""
Gets the gradients from the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the gradients, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
"""
try:
# self._set_params_transformed(x)
self.optimizer_array = x
self.obj_grads = self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError): #pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e100, 1e100)
return self.obj_grads
|
(self, x)
|
722,333
|
paramz.core.constrainable
|
_has_fixes
| null |
def _has_fixes(self):
return self.constraints[__fixed__].size != 0
|
(self)
|
722,334
|
paramz.core.parameter_core
|
_name_changed
| null |
def _name_changed(self, param, old_name):
self._remove_parameter_name(None, old_name)
self._add_parameter_name(param)
|
(self, param, old_name)
|
722,335
|
paramz.core.parameter_core
|
_notify_parent_change
|
Notify all parameters that the parent has changed
|
def _notify_parent_change(self):
"""
Notify all parameters that the parent has changed
"""
for p in self.parameters:
p._parent_changed(self)
|
(self)
|
722,336
|
paramz.model
|
_objective
|
The objective function passed to the optimizer. It combines
the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the objective, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
|
def _objective(self, x):
"""
The objective function passed to the optimizer. It combines
the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the objective, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
"""
try:
self.optimizer_array = x
obj = self.objective_function()
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
return np.inf
return obj
|
(self, x)
|
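A hedged view from the optimizer's side: transient numerical failures are absorbed up to _allowed_failures times, so a line search stepping into a bad region backs off instead of aborting the run::

    x = m.optimizer_array.copy()
    f = m._objective(x)   # np.inf while recovering from LinAlgError etc.
    g = m._grads(x)       # gradients clipped to +/- 1e100 while recovering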
722,337
|
paramz.model
|
_objective_grads
| null |
def _objective_grads(self, x):
try:
self.optimizer_array = x
obj_f, self.obj_grads = self.objective_function(), self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):#pragma: no cover
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
obj_f = np.inf
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e10, 1e10)
return obj_f, self.obj_grads
|
(self, x)
|
722,338
|
paramz.core.indexable
|
_offset_for
|
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
|
def _offset_for(self, param):
"""
Return the offset of the param inside this parameterized object.
This does not need to account for shaped parameters, as it
basically just sums up the parameter sizes which come before param.
"""
if param.has_parent():
p = param._parent_._get_original(param)
if p in self.parameters:
return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0)
return self._offset_for(param._parent_) + param._parent_._offset_for(param)
return 0
|
(self, param)
|
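The running-sum arithmetic in isolation, as a hedged standalone sketch (sizes hypothetical)::

    # parameters linked in order: a (size 3), b (size 2), c (size 4)
    # offsets: a -> 0, b -> 3, c -> 5
    offsets, total = {}, 0
    for p in m.parameters:
        offsets[p.name] = total
        total += p.size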
722,339
|
paramz.core.parameter_core
|
_parameters_changed_notification
|
In Parameterizable we just need to make sure that the next call to optimizer_array
updates the optimizer_array to the latest parameters
|
def _parameters_changed_notification(self, me, which=None):
"""
In Parameterizable we just need to make sure that the next call to optimizer_array
updates the optimizer_array to the latest parameters
"""
self._optimizer_copy_transformed = False # tells the optimizer array to update on next request
self.parameters_changed()
|
(self, me, which=None)
|
722,340
|
paramz.core.indexable
|
_parent_changed
|
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automated for the parent.
|
def _parent_changed(self, parent):
"""
From Parentable:
Called when the parent changed
update the constraints and priors view, so that
constraining is automated for the parent.
"""
from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
#self.priors.update(param.priors, start)
offset = parent._offset_for(self)
for name, iop in list(self._index_operations.items()):
self.remove_index_operation(name)
self.add_index_operation(name, ParameterIndexOperationsView(parent._index_operations[name], offset, self.size))
self._fixes_ = None
for p in self.parameters:
p._parent_changed(parent)
|
(self, parent)
|
722,341
|
paramz.core.parameter_core
|
_pass_through_notify_observers
| null |
def _pass_through_notify_observers(self, me, which=None):
self.notify_observers(which=which)
|
(self, me, which=None)
|
722,342
|
paramz.core.parameter_core
|
_propagate_param_grad
|
For propagating the param_array and gradient_array.
This ensures each child array stays an in-memory view of its parent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
|
def _propagate_param_grad(self, parray, garray):
"""
For propagating the param_array and gradient_array.
This ensures each child array stays an in-memory view of its parent array.
1.) connect param_array of children to self.param_array
2.) tell all children to propagate further
"""
#if self.param_array.size != self.size:
# self._param_array_ = np.empty(self.size, dtype=np.float64)
#if self.gradient.size != self.size:
# self._gradient_array_ = np.empty(self.size, dtype=np.float64)
pi_old_size = 0
for pi in self.parameters:
pislice = slice(pi_old_size, pi_old_size + pi.size)
self.param_array[pislice] = pi.param_array.flat # , requirements=['C', 'W']).flat
self.gradient_full[pislice] = pi.gradient_full.flat # , requirements=['C', 'W']).flat
pi.param_array.data = parray[pislice].data
pi.gradient_full.data = garray[pislice].data
pi._propagate_param_grad(parray[pislice], garray[pislice])
pi_old_size += pi.size
self._model_initialized_ = True
|
(self, parray, garray)
|
722,343
|
paramz.core.indexable
|
_raveled_index
|
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
|
def _raveled_index(self):
"""
Flattened array of ints, specifying the index of this object.
This has to account for shaped parameters!
"""
return np.r_[:self.size]
|
(self)
|
722,344
|
paramz.core.indexable
|
_raveled_index_for
|
Get the raveled index for a param:
an int array containing the indices of the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
|
def _raveled_index_for(self, param):
"""
Get the raveled index for a param:
an int array containing the indices of the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work
"""
from ..param import ParamConcatenation
if isinstance(param, ParamConcatenation):
return np.hstack([self._raveled_index_for(p) for p in param.params]) # list, not generator: newer numpy rejects generator input
return param._raveled_index() + self._offset_for(param)
|
(self, param)
|
722,345
|
paramz.core.indexable
|
_raveled_index_for_transformed
|
Get the raveled index for a param in the transformed parameter array
(optimizer array):
an int array containing the indices of the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
|
def _raveled_index_for_transformed(self, param):
"""
Get the raveled index for a param in the transformed parameter array
(optimizer array):
an int array containing the indices of the flattened
param inside this parameterized logic.
!Warning! be sure to call this method on the highest parent of a hierarchy,
as it uses the fixes to do its work. If you do not know
what you are doing, do not use this method, it will have
unexpected returns!
"""
ravi = self._raveled_index_for(param)
if self._has_fixes():
fixes = self._fixes_
### Transformed indices, handling the offsets of previous fixes
transformed = (np.r_[:self.size] - (~fixes).cumsum())
return transformed[ravi[fixes[ravi]]]
else:
return ravi
|
(self, param)
|
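The cumsum trick in isolation, as a hedged plain-numpy sketch: with five parameters and the second and fourth fixed, the optimizer sees only three, so raveled indices [0, 2, 4] map to transformed indices [0, 1, 2]::

    import numpy as np
    fixes = np.array([True, False, True, False, True])  # True == UNFIXED/free
    transformed = np.r_[:5] - (~fixes).cumsum()         # -> [0, 0, 1, 1, 2]
    ravi = np.array([0, 2, 4])
    print(transformed[ravi[fixes[ravi]]])               # -> [0 1 2]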
722,346
|
paramz.core.indexable
|
_remove_from_index_operations
|
Helper preventing copy code.
Remove given what (transform prior etc) from which param index ops.
|
def _remove_from_index_operations(self, which, transforms):
"""
Helper preventing copy code.
Remove given what (transform prior etc) from which param index ops.
"""
if len(transforms) == 0:
transforms = which.properties()
removed = np.empty((0,), dtype=int)
for t in list(transforms):
unconstrained = which.remove(t, self._raveled_index())
removed = np.union1d(removed, unconstrained)
if t is __fixed__:
self._highest_parent_._set_unfixed(self, unconstrained)
return removed
|
(self, which, transforms)
|
722,347
|
paramz.core.parameter_core
|
_remove_parameter_name
| null |
def _remove_parameter_name(self, param=None, pname=None):
assert param is None or pname is None, "can only delete either param by name, or the name of a param"
pname = adjust_name_for_printing(pname) or adjust_name_for_printing(param.name)
if pname in self._added_names_:
del self.__dict__[pname]
self._added_names_.remove(pname)
self._connect_parameters()
|
(self, param=None, pname=None)
|
722,348
|
paramz.model
|
_repr_html_
|
Representation of the model in html for notebook display.
|
def _repr_html_(self):
"""Representation of the model in html for notebook display."""
model_details = [['<b>Model</b>', self.name + '<br>'],
['<b>Objective</b>', '{}<br>'.format(float(self.objective_function()))],
["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
["<b>Number of Optimization Parameters</b>", '{}<br>'.format(self._size_transformed())],
["<b>Updates</b>", '{}<br>'.format(self._update_on)],
]
from operator import itemgetter
to_print = ["""<style type="text/css">
.pd{
font-family: "Courier New", Courier, monospace !important;
width: 100%;
padding: 3px;
}
</style>\n"""] + ["<p class=pd>"] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["</p>"]
to_print.append(super(Model, self)._repr_html_())
return "\n".join(to_print)
|
(self)
|
722,349
|
paramz.core.constrainable
|
_set_fixed
| null |
def _set_fixed(self, param, index):
self._ensure_fixes()
offset = self._offset_for(param)
self._fixes_[index+offset] = FIXED
if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED
|
(self, param, index)
|
722,350
|
paramz.core.constrainable
|
_set_unfixed
| null |
def _set_unfixed(self, param, index):
self._ensure_fixes()
offset = self._offset_for(param)
self._fixes_[index+offset] = UNFIXED
if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED
|
(self, param, index)
|
722,351
|
paramz.core.parameter_core
|
_setup_observers
|
Setup the default observers
1: parameters_changed_notify
2: pass through to parent, if present
|
def _setup_observers(self):
"""
Setup the default observers
1: parameters_changed_notify
2: pass through to parent, if present
"""
self.add_observer(self, self._parameters_changed_notification, -100)
if self.has_parent():
self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)
|
(self)
|
722,352
|
paramz.parameterized
|
_short
| null |
def _short(self):
return self.hierarchy_name()
|
(self)
|
722,353
|
paramz.core.parameter_core
|
_size_transformed
|
As fixes are not passed to the optimiser, the size of the model for the optimiser
is the size of all parameters minus the size of the fixes.
|
def _size_transformed(self):
"""
As fixes are not passed to the optimiser, the size of the model for the optimiser
is the size of all parameters minus the size of the fixes.
"""
return self.size - self.constraints[__fixed__].size
|
(self)
|
722,354
|
paramz.core.parameter_core
|
_transform_gradients
|
Transform the gradients by multiplying the gradient factor for each
constraint to it.
|
def _transform_gradients(self, g):
"""
Transform the gradients by multiplying the gradient factor for each
constraint to it.
"""
#py3 fix
#[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
|
(self, g)
|
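A hedged sketch of the chain rule being applied: for a positivity transform theta = log(1 + exp(phi)) (paramz's Logexp), gradfactor multiplies dL/dtheta by dtheta/dphi = 1 - exp(-theta), so the optimizer receives gradients with respect to the unconstrained phi::

    import numpy as np

    def logexp_gradfactor(theta, dL_dtheta):
        # dtheta/dphi = exp(phi) / (1 + exp(phi)) = 1 - exp(-theta)
        return dL_dtheta * (1. - np.exp(-theta))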
722,355
|
paramz.core.parameter_core
|
_traverse
| null |
def _traverse(self, visit, *args, **kwargs):
for c in self.parameters:
c.traverse(visit, *args, **kwargs)
|
(self, visit, *args, **kwargs)
|
722,356
|
paramz.core.parameter_core
|
_trigger_params_changed
|
First tell all children to update,
then update yourself.
If trigger_parent is True, we will tell the parent, otherwise not.
|
def _trigger_params_changed(self, trigger_parent=True):
"""
First tell all children to update,
then update yourself.
If trigger_parent is True, we will tell the parent, otherwise not.
"""
[p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
self.notify_observers(None, None if trigger_parent else -np.inf)
|
(self, trigger_parent=True)
|
722,357
|
paramz.core.indexable
|
add_index_operation
|
Add an index operation with the given name to the operations.
Raises: AttributeError if an operation with that name already exists.
|
def add_index_operation(self, name, operations):
"""
Add an index operation with the given name to the operations.
Raises: AttributeError if an operation with that name already exists.
"""
if name not in self._index_operations:
self._add_io(name, operations)
else:
raise AttributeError("An index operation with the name {} was already taken".format(name))
|
(self, name, operations)
|
722,358
|
paramz.core.observable
|
add_observer
|
Add an observer `observer` with the callback `callble`
and priority `priority` to this observers list.
|
def add_observer(self, observer, callble, priority=0):
"""
Add an observer `observer` with the callback `callble`
and priority `priority` to this observers list.
"""
self.observers.add(priority, observer, callble)
|
(self, observer, callble, priority=0)
|
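A hedged observer sketch (callback name hypothetical); callables run in descending priority with the changed object passed as first argument::

    class Logger:
        def on_change(self, me, which=None):
            print('parameters of', me.name, 'changed')

    log = Logger()
    m.add_observer(log, log.on_change, priority=0)
    m['weights'] = 2.0   # triggers notify_observers -> log.on_change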
722,359
|
paramz.parameterized
|
build_pydot
|
Build a pydot representation of this model. This needs pydot installed.
Example Usage::
np.random.seed(1000)
X = np.random.normal(0,1,(20,2))
beta = np.random.uniform(0,1,(2,1))
Y = X.dot(beta)
m = RidgeRegression(X, Y)
G = m.build_pydot()
G.write_png('example_hierarchy_layout.png')
The output looks like:
.. image:: ./example_hierarchy_layout.png
Rectangles are parameterized objects (nodes or leafs of hierarchy).
Trapezoids are param objects, which represent the arrays for parameters.
Black arrows show parameter hierarchical dependence. The arrow points
from parents towards children.
Orange arrows show the observer pattern. Self references (here) are
the references to the call to parameters changed and references upwards
are the references to tell the parents they need to update.
|
def build_pydot(self, G=None): # pragma: no cover
"""
Build a pydot representation of this model. This needs pydot installed.
Example Usage::
np.random.seed(1000)
X = np.random.normal(0,1,(20,2))
beta = np.random.uniform(0,1,(2,1))
Y = X.dot(beta)
m = RidgeRegression(X, Y)
G = m.build_pydot()
G.write_png('example_hierarchy_layout.png')
The output looks like:
.. image:: ./example_hierarchy_layout.png
Rectangles are parameterized objects (nodes or leafs of hierarchy).
Trapezoids are param objects, which represent the arrays for parameters.
Black arrows show parameter hierarchical dependence. The arrow points
from parents towards children.
Orange arrows show the observer pattern. Self references (here) are
the references to the call to parameters changed and references upwards
are the references to tell the parents they need to update.
"""
import pydot # @UnresolvedImport
iamroot = False
if G is None:
G = pydot.Dot(graph_type='digraph', bgcolor=None)
iamroot=True
node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
G.add_node(node)
for child in self.parameters:
child_node = child.build_pydot(G)
G.add_edge(pydot.Edge(node, child_node))#, color='white'))
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
if str(id(o)) not in G.obj_dict['nodes']:
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
if iamroot:
return G
return node
|
(self, G=None)
|