hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79540e69ef8430899848bf5177c90799698c3fe2 | 2,260 | py | Python | src/wechaty/user/contact_self.py | kis87988/Chatie-python-wechaty | 92b0ad6fff693be67f37faf3dbcc16bcb0d13bde | [
"Apache-2.0"
] | 1 | 2021-12-10T06:48:33.000Z | 2021-12-10T06:48:33.000Z | src/wechaty/user/contact_self.py | kis87988/Chatie-python-wechaty | 92b0ad6fff693be67f37faf3dbcc16bcb0d13bde | [
"Apache-2.0"
] | 1 | 2022-01-17T04:02:36.000Z | 2022-01-17T04:02:36.000Z | src/wechaty/user/contact_self.py | kis87988/Chatie-python-wechaty | 92b0ad6fff693be67f37faf3dbcc16bcb0d13bde | [
"Apache-2.0"
] | null | null | null | """ContactSelf"""
from __future__ import annotations
from typing import Any, Optional, Type
from wechaty import FileBox, get_logger
from wechaty.exceptions import WechatyOperationError
from wechaty.user.contact import Contact
log = get_logger('ContactSelf')
class ContactSelf(Contact):
    """ContactSelf: the contact object that represents the logged-in user."""

    async def avatar(self, file_box: Optional[FileBox] = None) -> FileBox:
        """Get or set the avatar of the login user.

        Args:
            file_box (Optional[FileBox], optional): the new avatar to set.
                When None, the current avatar is fetched instead.
                Defaults to None.

        Raises:
            WechatyOperationError: when trying to set the avatar of a contact
                that is not the logged-in user.

        Returns:
            FileBox: the current avatar when getting; None when setting.
        """
        # BUGFIX: the original expression
        #   'avatar(%s)' % file_box.name if file_box else ''
        # applied the conditional to the whole formatted string (conditional
        # expressions bind looser than %), so an empty string was logged when
        # file_box was None. Parenthesize the conditional instead.
        log.info('avatar(%s)' % (file_box.name if file_box else ''))
        if not file_box:
            # getter path: delegate to the generic Contact.avatar
            file_box = await super().avatar(None)
            return file_box

        # setter path: only the login user can change its own avatar
        if self.contact_id != self.puppet.self_id():
            raise WechatyOperationError('set avatar only available for user self')
        await self.puppet.contact_avatar(self.contact_id, file_box)

    async def qr_code(self) -> str:
        """Get the login QR code of the current user.

        Raises:
            WechatyOperationError: when the user is not logged in, or when
                asking the QR code of a contact other than the login user.

        Returns:
            str: the QR code value.
        """
        try:
            puppet_id: str = self.puppet.self_id()
        except Exception:
            raise WechatyOperationError(
                'Can not get qr_code, user might be either not logged in or already logged out')
        if self.contact_id != puppet_id:
            raise WechatyOperationError('only can get qr_code for the login user self')
        qr_code_value = await self.puppet.contact_self_qr_code()
        return qr_code_value

    @property
    def name(self) -> str:
        """Name of the login user (delegates to Contact.name).

        Returns:
            str: the contact name.
        """
        return super().name

    async def set_name(self, name: str) -> None:
        """Set the name of the login contact.

        Args:
            name: new name
        """
        await self.puppet.contact_self_name(name)
        # force a payload refresh so the new name is visible immediately
        await self.ready(force_sync=True)

    async def signature(self, signature: str) -> Any:
        """Set the signature of the login user.

        Args:
            signature: the new signature text.

        Raises:
            WechatyOperationError: when called on a contact that is not the
                login user.

        Returns:
            Any: the awaitable returned by the puppet (caller may await it).
        """
        puppet_id = self.puppet.self_id()
        if self.contact_id != puppet_id:
            # BUGFIX: the original message mentioned qr_code (copy-paste from
            # the qr_code() method) although this guards the signature setter.
            raise WechatyOperationError('only can set signature for the login user self')
        return self.puppet.contact_signature(signature)
| 27.560976 | 96 | 0.611504 |
79540ffbdf30be7cee783af841efbc3792d0d5b8 | 4,243 | py | Python | mpython_ble/application/peripheral.py | labplus-cn/mpython_ble | c07a2eddec73c7f19ba5ef56ecbb5d47dd1b1fdf | [
"MIT"
] | 2 | 2020-11-01T05:51:00.000Z | 2021-09-20T03:32:50.000Z | mpython_ble/application/peripheral.py | labplus-cn/mpython_ble | c07a2eddec73c7f19ba5ef56ecbb5d47dd1b1fdf | [
"MIT"
] | 1 | 2021-08-09T09:29:17.000Z | 2021-08-09T09:29:17.000Z | mpython_ble/application/peripheral.py | labplus-cn/mpython_ble | c07a2eddec73c7f19ba5ef56ecbb5d47dd1b1fdf | [
"MIT"
] | 1 | 2020-08-11T02:28:38.000Z | 2020-08-11T02:28:38.000Z | # The MIT License (MIT)
# Copyright (c) 2020, Tangliufeng for labplus Industries
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import bluetooth
from ..advertising import advertising_payload
from ..const import IRQ
class Peripheral(object):
    """BLE peripheral (GATT server) role.

    Registers a GATT ``profile``, starts advertising and tracks connected
    centrals. Original docstring was in Chinese: 外围设备 ("peripheral device").
    """

    def __init__(self, profile, name=b'mpy_ble', appearance=None, adv_services=None, resp_services=None, interval_us=500000, connectable=True):
        # advertising interval (microseconds) and connectability flag,
        # reused on every (re-)advertise
        self.interval_us = interval_us
        self.connectable = connectable
        self.ble = bluetooth.BLE()
        self.ble.active(True)
        self.ble.config(gap_name=name)
        print("BLE: activated!")
        # all BLE events are funneled through the single _irq handler
        self.ble.irq(handler=self._irq)
        # handles of currently connected centrals
        self.connections = set()
        # register the GATT services; the resulting value handles are stored
        # back onto the profile object for later reads/writes
        profile.handles = self.ble.gatts_register_services(profile.definition)
        # main advertising payload and scan-response payload
        self._adv_payload = advertising_payload(name=name, appearance=appearance, services=adv_services)
        self._resp_payload = advertising_payload(services=resp_services)
        # user callbacks: on characteristic write / on central connect
        self._write_cb = None
        self._connection_cb = None
        # self.advertise(True)
        self._debug = False

    @property
    def mac(self):
        # current MAC address as reported by the BLE stack
        return self.ble.config('mac')

    def write_callback(self, callback):
        # register callback(conn_handle, attr_handle, value) fired on GATT write
        self._write_cb = callback

    def connection_callback(self, callback):
        # register callback(conn_handle, addr_type, addr) fired on connect
        self._connection_cb = callback

    def _irq(self, event, data):
        # Track connections so we can send notifications.
        if self._debug:
            print("Event: {}, Data: {}".format(event, data))
        if event == IRQ.IRQ_CENTRAL_CONNECT:
            conn_handle, addr_type, addr = data
            self.connections.add(conn_handle)
            print("BLE: connect successful!")
            if self._connection_cb:
                self._connection_cb(conn_handle, addr_type, addr)
        elif event == IRQ.IRQ_CENTRAL_DISCONNECT:
            conn_handle, addr_type, addr = data
            if conn_handle in self.connections:
                print("BLE: disconnected!")
                self.connections.remove(conn_handle)
            # Start advertising again to allow a new connection.
            # NOTE(review): placed at the event level (outside the membership
            # check), matching the standard MicroPython peripheral example —
            # confirm against upstream if behavior differs.
            self.advertise(True)
        elif event == IRQ.IRQ_GATTS_WRITE:
            conn_handle, attr_handle = data
            if conn_handle in self.connections:
                if self._write_cb:
                    # hand the freshly written value to the user callback
                    self._write_cb(conn_handle, attr_handle, self.attrubute_read(attr_handle))

    def advertise(self, toggle=True):
        """Start (toggle=True) or stop (toggle=False) advertising."""
        if toggle:
            self.ble.gap_advertise(self.interval_us, adv_data=self._adv_payload,
                                   resp_data=self._resp_payload, connectable=self.connectable)
        else:
            # interval_us=None stops advertising
            self.ble.gap_advertise(interval_us=None)

    def attrubute_write(self, value_handle, data, notify=False):
        # NOTE: "attrubute" is a historical typo kept for API compatibility.
        """Write `data` to a local GATT value handle, optionally notifying."""
        self.ble.gatts_write(value_handle, data)
        if notify:
            for conn_handle in self.connections:
                # Notify connected centrals to issue a read.
                self.ble.gatts_notify(conn_handle, value_handle)

    def attrubute_read(self, value_handle):
        """Read the current value of a local GATT value handle."""
        return self.ble.gatts_read(value_handle)

    def disconnect(self):
        """Disconnect every connected central and forget their handles."""
        for conn_handle in self.connections:
            self.ble.gap_disconnect(conn_handle)
        self.connections.clear()
| 41.598039 | 143 | 0.679472 |
7954103936245cb45d23cdc8205a11bdd8a13b2a | 1,485 | py | Python | setup.py | joequant/webdavfs | 369a6e6027aa94f103cb12626f58ea2fb9a7f709 | [
"MIT"
] | null | null | null | setup.py | joequant/webdavfs | 369a6e6027aa94f103cb12626f58ea2fb9a7f709 | [
"MIT"
] | null | null | null | setup.py | joequant/webdavfs | 369a6e6027aa94f103cb12626f58ea2fb9a7f709 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages

# Trove classifiers advertised on PyPI for the fs.webdavfs package
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: System :: Filesystems',
]

import io

# long_description is assembled from the README plus the changelog
with io.open('README.rst', 'r', encoding='utf8') as f:
    DESCRIPTION = f.read()

with io.open('HISTORY.rst', 'r', encoding='utf8') as f:
    HISTORY = f.read()

# runtime dependencies
REQUIREMENTS = [
    "fs>2.0",
    "webdavclient3",
    "python-dateutil",
    "cachetools"
]

setup(
    author="Andreas Jung and others",
    author_email="info@zopyx.com",
    classifiers=CLASSIFIERS,
    description="WebDAV support for pyfilesystem2",
    # register the webdav:// and webdavs:// URL openers with pyfilesystem2
    entry_points={
        'fs.opener': [
            'webdav = webdavfs.opener:WebDAVOpener',
            'webdavs = webdavfs.opener:WebDAVOpener'
        ]
    },
    install_requires=REQUIREMENTS,
    license="MIT",
    long_description=DESCRIPTION + "\n" + HISTORY,
    name='fs.webdavfs',
    packages=find_packages(exclude=("tests",)),
    platforms=['any'],
    setup_requires=['nose'],
    tests_require=['docker'],
    test_suite='webdavfs.tests',
    url="http://pypi.python.org/pypi/fs.webdavfs/",
    version="0.3.8"
)
| 27 | 55 | 0.628283 |
795410c29bcc3ba58f34780f4ac57eb58e19dab7 | 1,550 | py | Python | tests/symmetries_t.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | tests/symmetries_t.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | tests/symmetries_t.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | import sys
# Find jVMC package
sys.path.append(sys.path[0] + "/..")
import unittest
import jax
from jax.config import config
config.update("jax_enable_x64", True)
import jax.random as random
import jax.numpy as jnp
import numpy as np
import jVMC
import jVMC.util.symmetries as symmetries
import jVMC.global_defs as global_defs
import time
class TestSymmetries(unittest.TestCase):
    """Tests for the jVMC lattice symmetry-orbit helpers.

    Each test checks that the number of symmetry operations in the returned
    orbit equals the product of the enabled symmetry factors, and that the
    orbit is an integer (permutation-index) array.
    """

    def test_symmetries2D(self):
        # L x L square lattice: 4 rotations, 2 reflections, L**2 translations
        L = 3
        rotation_f = 4
        reflection_f = 2
        translation_f = L**2
        for rotation in [True, False]:
            for reflection in [True, False]:
                for translation in [True, False]:
                    orbit = symmetries.get_orbit_2d_square(L, rotation=rotation, reflection=reflection, translation=translation)
                    # orbit size is the product of the enabled factors
                    self.assertTrue(orbit.shape[0] == (rotation_f if rotation else 1) * (reflection_f if reflection else 1) * (translation_f if translation else 1))
                    self.assertTrue(np.issubdtype(orbit.dtype, np.integer))

    def test_symmetries1D(self):
        # length-L chain: 2 reflections, L translations
        L = 3
        reflection_f = 2
        translation_f = L
        for translation in [True, False]:
            for reflection in [True, False]:
                orbit = symmetries.get_orbit_1d(L, reflection=reflection, translation=translation)
                self.assertTrue(orbit.shape[0] == (reflection_f if reflection else 1) * (translation_f if translation else 1))
                self.assertTrue(np.issubdtype(orbit.dtype, np.integer))


if __name__ == "__main__":
    unittest.main()
| 31 | 164 | 0.65871 |
795410ee88382dc57a6fef43db2f8cf6554450dd | 958 | py | Python | examples/test_cec2013.py | mcrimi/opfunu | 9c8a074c5dca604acc3ddc7fbf15ef30f57d1756 | [
"Apache-2.0"
] | 1 | 2021-05-25T14:46:21.000Z | 2021-05-25T14:46:21.000Z | examples/test_cec2013.py | HaaLeo/opfunu | 1ff3ecf4b7818a0edd5d92ce5475839fa9477da1 | [
"Apache-2.0"
] | null | null | null | examples/test_cec2013.py | HaaLeo/opfunu | 1ff3ecf4b7818a0edd5d92ce5475839fa9477da1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 22:21, 25/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
import numpy as np

from opfunu.cec.cec2013.unconstraint import Model

# dimensionality of the benchmark problem
problem_size = 100
# random candidate solution, drawn uniformly from [-1, 1)
solution = np.random.uniform(-1, 1, problem_size)
func = Model(problem_size)
# evaluate CEC2013 benchmark function F28 at the candidate and print the score
print(func.F28(solution))
| 53.222222 | 105 | 0.327766 |
795410fe296799b5835798c33993be540e204037 | 583 | py | Python | python3.4+/startbot.py | mickeymouse/Vagabot | 1bd3ab154647676a31de26a572af3af88f456571 | [
"MIT"
] | null | null | null | python3.4+/startbot.py | mickeymouse/Vagabot | 1bd3ab154647676a31de26a572af3af88f456571 | [
"MIT"
] | null | null | null | python3.4+/startbot.py | mickeymouse/Vagabot | 1bd3ab154647676a31de26a572af3af88f456571 | [
"MIT"
] | null | null | null | import discord
import asyncio

from configs import Configs
from Bot import commands

client = discord.Client()


# BUGFIX/modernization: the original used `@asyncio.coroutine` with
# `yield from`, which was deprecated since Python 3.8 and removed in 3.11.
# `async def` / `await` is the equivalent, supported form.
@client.event
async def on_ready():
    """Print identifying information once the bot has logged in."""
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')


@client.event
async def on_message(message):
    """Dispatch an incoming message to every command whose prefix matches.

    `commands.commandList` entries are (names, handler, _) tuples where
    `names` is a whitespace-separated string of command aliases.
    """
    for (commandNames, handler, _) in commands.commandList:
        for commandName in commandNames.split():
            if message.content.startswith(commandName):
                await handler(message, client)


client.run(Configs['token'])
| 22.423077 | 60 | 0.67753 |
795411ccbea2e2301b20401398527b1a01e02b72 | 1,607 | py | Python | doom/utils/tradeno.py | iiCodeThings/admin | 851ebcb8b90d4bea4bd7468ed403ec13fca801ee | [
"MIT"
] | 1 | 2022-02-16T08:20:12.000Z | 2022-02-16T08:20:12.000Z | doom/utils/tradeno.py | iiCodeThings/admin | 851ebcb8b90d4bea4bd7468ed403ec13fca801ee | [
"MIT"
] | null | null | null | doom/utils/tradeno.py | iiCodeThings/admin | 851ebcb8b90d4bea4bd7468ed403ec13fca801ee | [
"MIT"
] | null | null | null | import time
# Twitter-snowflake style id generation.
# Custom epoch: Tue, 21 Mar 2006 20:50:14.000 GMT, in milliseconds.
twepoch = 1142974214000

# Bit-field layout (from the least significant bit upward):
#   12 bits sequence | 5 bits worker | 5 bits data center | timestamp
worker_id_bits = 5
data_center_id_bits = 5
max_worker_id = -1 ^ (-1 << worker_id_bits)
max_data_center_id = -1 ^ (-1 << data_center_id_bits)
sequence_bits = 12

worker_id_shift = sequence_bits
data_center_id_shift = sequence_bits + worker_id_bits
timestamp_left_shift = sequence_bits + worker_id_bits + data_center_id_bits
sequence_mask = -1 ^ (-1 << sequence_bits)


def snowflake_to_timestamp(_id):
    """Recover the UNIX timestamp (seconds, float) encoded in a snowflake id."""
    millis = (_id >> 22) + twepoch   # drop the 22 low id bits, undo the epoch
    return millis / 1000             # milliseconds -> seconds


def generator(worker_id, data_center_id):
    """Yield unique, time-ordered snowflake ids for one worker/data-center pair.

    Ids combine the millisecond timestamp, the data-center id, the worker id
    and a per-millisecond sequence counter. The generator spins while the
    clock runs backwards or the sequence overflows within one millisecond.
    """
    assert 0 <= worker_id <= max_worker_id
    assert 0 <= data_center_id <= max_data_center_id

    last_timestamp = -1
    sequence = 0

    while True:
        timestamp = int(time.time() * 1000)

        # clock skew: wait until we are past the last issued timestamp
        if timestamp < last_timestamp:
            print("clock is moving backwards. waiting until %i" % last_timestamp)
            continue

        if timestamp == last_timestamp:
            sequence = (sequence + 1) & sequence_mask
            if sequence == 0:
                # 4096 ids already issued this millisecond: spin to the next one
                print("sequence overrun")
                sequence = -1 & sequence_mask
                continue
        else:
            sequence = 0

        last_timestamp = timestamp

        yield (((timestamp - twepoch) << timestamp_left_shift) |
               (data_center_id << data_center_id_shift) |
               (worker_id << worker_id_shift) |
               sequence)


trade_no_generator = generator(1, 0)  # order/trade number generator (订单编号)
| 28.192982 | 81 | 0.632856 |
795411de44010507822794b7529cf20a5a7363c7 | 29,110 | py | Python | tensorpac/utils.py | tylerbrunette/tensorpac | afae9557db417a54b001c0837dbc8638f6fe20f0 | [
"BSD-3-Clause"
] | null | null | null | tensorpac/utils.py | tylerbrunette/tensorpac | afae9557db417a54b001c0837dbc8638f6fe20f0 | [
"BSD-3-Clause"
] | null | null | null | tensorpac/utils.py | tylerbrunette/tensorpac | afae9557db417a54b001c0837dbc8638f6fe20f0 | [
"BSD-3-Clause"
] | null | null | null | """Utility functions."""
import logging
import numpy as np
from scipy.signal import periodogram
from tensorpac.methods.meth_pac import _kl_hr
from tensorpac.pac import _PacObj, _PacVisual
from tensorpac.io import set_log_level
logger = logging.getLogger('tensorpac')
def pac_vec(f_pha='mres', f_amp='mres'):
    """Generate cross-frequency coupling vectors.

    Parameters
    ----------
    f_pha, f_amp : array_like, str
        Frequency vector for the phase and amplitude. Here you can use
        several forms to define those vectors :

            * Basic list/tuple (ex: [2, 4] or [8, 12]...)
            * List of frequency bands (ex: [[2, 4], [5, 7]]...)
            * Dynamic definition : (start, stop, width, step)
            * Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
            * Using a string. `f_pha` and `f_amp` can be 'lres', 'mres',
              'hres' respectively for low, middle and high resolution
              vectors. In that case, it uses the definition proposed by
              Bahramisharif et al. 2013 :cite:`bahramisharif2013propagating`
              i.e f_pha = [f - f / 4, f + f / 4] and
              f_amp = [f - f / 8, f + f / 8]

    Returns
    -------
    f_pha, f_amp : array_like
        Arrays containing the pairs of phase and amplitude frequencies. Each
        vector have a shape of (N, 2).
    """
    # number of frequency bands per named resolution
    nb_fcy = dict(lres=10, mres=30, hres=50, demon=70, hulk=100)
    if isinstance(f_pha, str):
        # get where phase frequencies start / finish / number
        f_pha_start, f_pha_end = 2, 20
        f_pha_nb = nb_fcy[f_pha]
        # f_pha = [f - f / 4, f + f / 4]
        f_pha_mid = np.linspace(f_pha_start, f_pha_end, f_pha_nb)
        f_pha = np.c_[f_pha_mid - f_pha_mid / 4., f_pha_mid + f_pha_mid / 4.]
    if isinstance(f_amp, str):
        # get where amplitude frequencies start / finish / number
        f_amp_start, f_amp_end = 60, 160
        f_amp_nb = nb_fcy[f_amp]
        # f_amp = [f - f / 8, f + f / 8]
        f_amp_mid = np.linspace(f_amp_start, f_amp_end, f_amp_nb)
        f_amp = np.c_[f_amp_mid - f_amp_mid / 8., f_amp_mid + f_amp_mid / 8.]
    # normalize both definitions to (N, 2) arrays
    return _check_freq(f_pha), _check_freq(f_amp)
def _check_freq(f):
    """Check and normalize a frequency definition to a (N, 2) array.

    Accepts a single band ([2, 4]), a list of bands, a (2, N) array (which is
    transposed), a 4-element dynamic definition (start, stop, width, step) or
    a sequential vector (consecutive values are paired into bands).
    """
    f = np.atleast_2d(np.asarray(f))
    #
    if len(f.reshape(-1)) == 1:
        raise ValueError("The length of f should at least be 2.")
    elif 2 in f.shape:  # f of shape (N, 2) or (2, N)
        # BUGFIX: was `f.shape[1] is not 2` -- identity comparison with an
        # int literal (SyntaxWarning on Python 3.8+, CPython-cache dependent).
        if f.shape[1] != 2:
            f = f.T
    elif np.squeeze(f).shape == (4,):  # (f_start, f_end, f_width, f_step)
        f = _pair_vectors(*tuple(np.squeeze(f)))
    else:  # Sequential
        f = f.reshape(-1)
        f.sort()
        # pair each value with its successor: [a, b, c] -> [[a, b], [b, c]]
        f = np.c_[f[0:-1], f[1::]]
    return f
def _pair_vectors(f_start, f_end, f_width, f_step):
    """Build (N, 2) frequency bands from a dynamic (start, stop, width, step)
    definition.

    Bands begin every `f_step` Hz starting at `f_start`, are `f_width` Hz
    wide, and the last band ends strictly below `f_end`.
    """
    band_starts = np.arange(f_start, f_end - f_width, f_step)
    band_stops = np.arange(f_start + f_width, f_end, f_step)
    return np.c_[band_starts, band_stops]
def pac_trivec(f_start=60., f_end=160., f_width=10.):
    """Generate triangular vector.

    By contrast with the pac_vec function, this function generate frequency
    vector with an increasing frequency bandwidth.

    Parameters
    ----------
    f_start : float | 60.
        Starting frequency.
    f_end : float | 160.
        Ending frequency.
    f_width : float | 10.
        Frequency bandwidth increase between each band.

    Returns
    -------
    f : array_like
        The triangular vector.
    tridx : array_like
        The triangular index for the reconstruction.
    """
    edges = np.arange(f_start, f_end + f_width, f_width)
    band_rows, index_rows = [], []
    for row, low in enumerate(edges[:-1]):
        # every band sharing the same lower bound `low`
        uppers = edges[row + 1:]
        n_bands = len(uppers)
        band_rows.append(np.c_[np.full(n_bands, low), uppers])
        # (column, row) position of each band in the triangular layout
        index_rows.append(np.c_[np.arange(n_bands)[::-1],
                                np.full(n_bands, row)])
    if not band_rows:  # degenerate definition -> empty outputs
        return np.array([]), np.array([])
    return (np.concatenate(band_rows, axis=0),
            np.concatenate(index_rows, axis=0))
class PSD(object):
    """Power Spectrum Density for electrophysiological brain data.

    Parameters
    ----------
    x : array_like
        Array of data of shape (n_epochs, n_times)
    sf : float
        The sampling frequency.
    """

    def __init__(self, x, sf):
        """Init."""
        assert isinstance(x, np.ndarray) and (x.ndim == 2), (
            "x should be a 2d array of shape (n_epochs, n_times)")
        self._n_trials, self._n_times = x.shape
        logger.info(f"Compute PSD over {self._n_trials} trials and "
                    f"{self._n_times} time points")
        # one-sided periodogram computed per trial (frequency axis = 1)
        self._freqs, self._psd = periodogram(x, fs=sf, window=None,
                                             nfft=self._n_times,
                                             detrend='constant',
                                             return_onesided=True,
                                             scaling='density', axis=1)

    def plot(self, f_min=None, f_max=None, confidence=95, interp=None,
             log=False, grid=True, fz_title=18, fz_labels=15):
        """Plot the mean PSD across trials.

        Parameters
        ----------
        f_min, f_max : (int, float) | None
            Frequency bounds to use for plotting
        confidence : (int, float) | None
            Light gray confidence interval. If None, no interval will be
            displayed
        interp : int | None
            Line interpolation integer. For example, if interp is 10 the
            number of points is going to be multiply by 10
        log : bool | False
            Use a log scale representation
        grid : bool | True
            Add a grid to the plot
        fz_title : int | 18
            Font size for the title
        fz_labels : int | 15
            Font size the x/y labels

        Returns
        -------
        ax : Matplotlib axis
            The matplotlib axis that contains the figure
        """
        import matplotlib.pyplot as plt
        f_types = (int, float)
        # interpolation (quadratic, along the frequency axis)
        xvec, yvec = self._freqs, self._psd
        if isinstance(interp, int) and (interp > 1):
            from scipy.interpolate import interp1d
            xnew = np.linspace(xvec[0], xvec[-1], len(xvec) * interp)
            f = interp1d(xvec, yvec, kind='quadratic', axis=1)
            yvec = f(xnew)
            xvec = xnew
        # fall back to the full frequency range when bounds are not given
        f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
        f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
        # plot main psd
        plt.plot(xvec, yvec.mean(0), color='black',
                 label='mean PSD over trials')
        # plot (optional) confidence interval across trials
        if isinstance(confidence, (int, float)) and (0 < confidence < 100):
            logger.info(f" Add {confidence}th confidence interval")
            interval = (100. - confidence) / 2
            kw = dict(axis=0, interpolation='nearest')
            psd_min = np.percentile(yvec, interval, **kw)
            psd_max = np.percentile(yvec, 100. - interval, **kw)
            plt.fill_between(xvec, psd_max, psd_min, color='lightgray',
                             alpha=0.5,
                             label=f"{confidence}th confidence interval")
            plt.legend(fontsize=fz_labels)
        plt.xlabel("Frequencies (Hz)", fontsize=fz_labels)
        plt.ylabel("Power (V**2/Hz)", fontsize=fz_labels)
        plt.title(f"PSD mean over {self._n_trials} trials", fontsize=fz_title)
        plt.xlim(f_min, f_max)
        if log:
            from matplotlib.ticker import ScalarFormatter
            # NOTE(review): `basex` was renamed `base` in Matplotlib 3.3 and
            # removed in 3.5 -- update if a recent Matplotlib is targeted
            plt.xscale('log', basex=10)
            plt.gca().xaxis.set_major_formatter(ScalarFormatter())
        if grid:
            plt.grid(color='grey', which='major', linestyle='-',
                     linewidth=1., alpha=0.5)
            plt.grid(color='lightgrey', which='minor', linestyle='--',
                     linewidth=0.5, alpha=0.5)
        return plt.gca()

    def plot_st_psd(self, f_min=None, f_max=None, log=False, grid=True,
                    fz_title=18, fz_labels=15, fz_cblabel=15, **kw):
        """Single-trial PSD plot.

        Parameters
        ----------
        f_min, f_max : (int, float) | None
            Frequency bounds to use for plotting
        log : bool | False
            Use a log scale representation
        grid : bool | True
            Add a grid to the plot
        fz_title : int | 18
            Font size for the title
        fz_labels : int | 15
            Font size the x/y labels
        fz_cblabel : int | 15
            Font size the colorbar label labels

        Returns
        -------
        ax : Matplotlib axis
            The matplotlib axis that contains the figure
        """
        import matplotlib.pyplot as plt
        # forward defaults to the plotting backend without overriding
        # anything the caller supplied in **kw
        kw['fz_labels'] = kw.get('fz_labels', fz_labels)
        kw['fz_title'] = kw.get('fz_title', fz_title)
        # BUGFIX: the default was `fz_title`, silently ignoring the
        # `fz_cblabel` parameter
        kw['fz_cblabel'] = kw.get('fz_cblabel', fz_cblabel)
        kw['xlabel'] = kw.get('xlabel', "Frequencies (Hz)")
        kw['ylabel'] = kw.get('ylabel', "Trials")
        kw['title'] = kw.get('title', "Single-trial PSD")
        kw['cblabel'] = kw.get('cblabel', "Power (V**2/Hz)")
        # fall back to the full frequency range when bounds are not given
        xvec, psd = self._freqs, self._psd
        f_types = (int, float)
        f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
        f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
        # locate (f_min, f_max) indices and restrict the plotted range
        f_min_idx = np.abs(xvec - f_min).argmin()
        f_max_idx = np.abs(xvec - f_max).argmin()
        sl_freq = slice(f_min_idx, f_max_idx)
        xvec = xvec[sl_freq]
        psd = psd[:, sl_freq]
        # make the 2D (trials x frequencies) plot
        _viz = _PacVisual()
        trials = np.arange(self._n_trials)
        _viz.pacplot(psd, xvec, trials, **kw)
        if log:
            from matplotlib.ticker import ScalarFormatter
            # NOTE(review): see `plot` -- `basex` is deprecated/removed in
            # recent Matplotlib versions
            plt.xscale('log', basex=10)
            plt.gca().xaxis.set_major_formatter(ScalarFormatter())
        if grid:
            plt.grid(color='grey', which='major', linestyle='-',
                     linewidth=1., alpha=0.5)
            plt.grid(color='lightgrey', which='minor', linestyle='--',
                     linewidth=0.5, alpha=0.5)
        return plt.gca()

    def show(self):
        """Display the PSD figure."""
        import matplotlib.pyplot as plt
        plt.show()

    @property
    def freqs(self):
        """Get the frequency vector."""
        return self._freqs

    @property
    def psd(self):
        """Get the psd value."""
        return self._psd
class BinAmplitude(_PacObj):
    """Bin the amplitude according to the phase.

    Parameters
    ----------
    x : array_like
        Array of data of shape (n_epochs, n_times)
    sf : float
        The sampling frequency
    f_pha : tuple, list | [2, 4]
        List of two floats describing the frequency bounds for extracting the
        phase
    f_amp : tuple, list | [60, 80]
        List of two floats describing the frequency bounds for extracting the
        amplitude
    n_bins : int | 18
        Number of bins to use to binarize the phase and the amplitude
    dcomplex : {'wavelet', 'hilbert'}
        Method for the complex definition. Use either 'hilbert' or
        'wavelet'.
    cycle : tuple | (3, 6)
        Control the number of cycles for filtering (only if dcomplex is
        'hilbert'). Should be a tuple of integers where the first one
        refers to the number of cycles for the phase and the second for the
        amplitude :cite:`bahramisharif2013propagating`.
    width : int | 7
        Width of the Morlet's wavelet.
    edges : int | None
        Number of samples to discard to avoid edge effects due to filtering
    """

    def __init__(self, x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18,
                 dcomplex='hilbert', cycle=(3, 6), width=7, edges=None,
                 n_jobs=-1):
        """Init."""
        _PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
                         cycle=cycle, width=width)
        # input validation
        x = np.atleast_2d(x)
        assert x.ndim <= 2, ("`x` input should be an array of shape "
                             "(n_epochs, n_times)")
        assert isinstance(sf, (int, float)), ("`sf` input should be a integer "
                                              "or a float")
        assert all([isinstance(k, (int, float)) for k in f_pha]), (
            "`f_pha` input should be a list of two integers / floats")
        assert all([isinstance(k, (int, float)) for k in f_amp]), (
            "`f_amp` input should be a list of two integers / floats")
        assert isinstance(n_bins, int), "`n_bins` should be an integer"
        logger.info(f"Binning {f_amp}Hz amplitude according to {f_pha}Hz "
                    "phase")
        # extract phase and amplitude
        kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
        pha = self.filter(sf, x, 'phase', **kw)
        amp = self.filter(sf, x, 'amplitude', **kw)
        # binarize amplitude according to phase
        self._amplitude = _kl_hr(pha, amp, n_bins, mean_bins=False).squeeze()
        self.n_bins = n_bins

    def plot(self, unit='rad', normalize=False, **kw):
        """Plot the amplitude.

        Parameters
        ----------
        unit : {'rad', 'deg'}
            The unit to use for the phase. Use either 'deg' for degree or
            'rad' for radians
        normalize : bool | None
            Normalize the histogram by the maximum
        kw : dict | {}
            Additional inputs are passed to the matplotlib.pyplot.bar function

        Returns
        -------
        ax : Matplotlib axis
            The matplotlib axis that contains the figure
        """
        import matplotlib.pyplot as plt
        assert unit in ['rad', 'deg']
        if unit == 'rad':
            self._phase = np.linspace(-np.pi, np.pi, self.n_bins)
            width = 2 * np.pi / self.n_bins
        elif unit == 'deg':
            self._phase = np.linspace(-180, 180, self.n_bins)
            width = 360 / self.n_bins
        # mean amplitude per phase bin, across trials
        amp_mean = self._amplitude.mean(1)
        if normalize:
            amp_mean /= amp_mean.max()
        plt.bar(self._phase, amp_mean, width=width, **kw)
        plt.xlabel(f"Frequency phase ({self.n_bins} bins)", fontsize=18)
        plt.ylabel("Amplitude", fontsize=18)
        plt.title("Binned amplitude")
        plt.autoscale(enable=True, axis='x', tight=True)
        # BUGFIX: the docstring promises the Matplotlib axis but the original
        # implementation returned None
        return plt.gca()

    def show(self):
        """Show the figure."""
        import matplotlib.pyplot as plt
        plt.show()

    @property
    def amplitude(self):
        """Get the amplitude value."""
        return self._amplitude

    @property
    def phase(self):
        """Get the phase value.

        NOTE(review): `_phase` is only assigned inside `plot()`; accessing
        this property before calling `plot` raises AttributeError.
        """
        return self._phase
class ITC(_PacObj, _PacVisual):
    """Compute the Inter-Trials Coherence (ITC).

    The Inter-Trials Coherence (ITC) is a measure of phase consistency over
    trials for a single recording site (electrode / sensor etc.).

    Parameters
    ----------
    x : array_like
        Array of data of shape (n_epochs, n_times)
    sf : float
        The sampling frequency
    f_pha : tuple, list | [2, 4]
        List of two floats describing the frequency bounds for extracting the
        phase
    dcomplex : {'wavelet', 'hilbert'}
        Method for the complex definition. Use either 'hilbert' or
        'wavelet'.
    cycle : tuple | 3
        Control the number of cycles for filtering the phase (only if
        dcomplex is 'hilbert').
    width : int | 7
        Width of the Morlet's wavelet.
    edges : int | None
        Number of samples to discard to avoid edge effects due to filtering
    """

    def __init__(self, x, sf, f_pha=[2, 4], dcomplex='hilbert', cycle=3,
                 width=7, edges=None, n_jobs=-1, verbose=None):
        """Init."""
        set_log_level(verbose)
        # the amplitude band is irrelevant for ITC but required by _PacObj
        _PacObj.__init__(self, f_pha=f_pha, f_amp=[60, 80], dcomplex=dcomplex,
                         cycle=(cycle, 6), width=width)
        _PacVisual.__init__(self)
        # input validation
        x = np.atleast_2d(x)
        assert x.ndim <= 2, ("`x` input should be an array of shape "
                             "(n_epochs, n_times)")
        self._n_trials = x.shape[0]
        logger.info("Inter-Trials Coherence (ITC)")
        logger.info(f" extracting {len(self.xvec)} phases")
        # extract phase and amplitude
        kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
        pha = self.filter(sf, x, 'phase', **kw)
        # ITC = modulus of the trial-averaged unit phase vectors
        self._itc = np.abs(np.exp(1j * pha).mean(1)).squeeze()
        self._sf = sf

    def plot(self, times=None, **kw):
        """Plot the Inter-Trials Coherence.

        Parameters
        ----------
        times : array_like | None
            Custom time vector to use
        kw : dict | {}
            Additional inputs are either pass to the matplotlib.pyplot.plot
            function if a single phase band is used, otherwise to the
            matplotlib.pyplot.pcolormesh function

        Returns
        -------
        ax : Matplotlib axis
            The matplotlib axis that contains the figure
        """
        import matplotlib.pyplot as plt
        n_pts = self._itc.shape[-1]
        if not isinstance(times, np.ndarray):
            times = np.arange(n_pts) / self._sf
        # drop edge samples to stay aligned with the filtered ITC
        times = times[self._edges]
        # BUGFIX: the assertion message was missing the f-string prefix and
        # printed the literal "{n_pts}"
        assert len(times) == n_pts, ("The length of the time vector should be "
                                     f"{n_pts}")
        xlab = 'Time'
        title = f"Inter-Trials Coherence ({self._n_trials} trials)"
        if self._itc.ndim == 1:
            plt.plot(times, self._itc, **kw)
        elif self._itc.ndim == 2:
            # BUGFIX: vmin / vmax were read with kw.get() and then ALSO passed
            # explicitly to pacplot together with **kw, raising a TypeError
            # (duplicate keyword) whenever the caller supplied them. Pop them
            # out of kw instead.
            vmin = kw.pop('vmin', np.percentile(self._itc, 1))
            vmax = kw.pop('vmax', np.percentile(self._itc, 99))
            self.pacplot(self._itc, times, self.xvec, vmin=vmin, vmax=vmax,
                         ylabel="Frequency for phase (Hz)", xlabel=xlab,
                         title=title, **kw)
        return plt.gca()

    def show(self):
        """Show the figure."""
        import matplotlib.pyplot as plt
        plt.show()

    @property
    def itc(self):
        """Get the itc value."""
        return self._itc
class PeakLockedTF(_PacObj, _PacVisual):
"""Peak-Locked Time-frequency representation.
This class can be used in order to re-align time-frequency representations
around a time-point (cue) according to the closest phase peak. This type
of visualization can bring out a cyclic behavior of the amplitude at a
given phase, potentially indicating the presence of a phase-amplitude
coupling. Here's the detailed pipeline :
* Filter around a single phase frequency bands and across multiple
amplitude frequencies
* Use a `cue` which define the time-point to use for the realignment
* Detect in the filtered phase the closest peak to the cue. This step
is repeated to each trial in order to get a list of length (n_epochs)
that contains the number of sample (shift) so that if the phase is
moved, the peak fall onto the cue. A positive shift indicates that
the phase is moved forward while a negative shift is for a backward
move
* Apply, to each trial, this shift to the amplitude
* Plot the mean re-aligned amplitudes
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
cue : int, float
Time-point to use in order to detect the closest phase peak. This
parameter works in conjunction with the `times` input below. Use
either :
* An integer and `times` is None to indicate that you want to
realign according to a time-point in sample
* A integer or a float with `times` the time vector if you want
that Tensorpac automatically infer the sample number around which
to align
times : array_like | None
Time vector
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
f_amp : tuple, list | [60, 80]
Frequency vector for the amplitude. Here you can use several forms to
define those vectors :
* Dynamic definition : (start, stop, width, step)
* Using a string : `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors
cycle : tuple | (3, 6)
Control the number of cycles for filtering. Should be a tuple of
integers where the first one refers to the number of cycles for the
phase and the second for the amplitude
:cite:`bahramisharif2013propagating`.
"""
def __init__(self, x, sf, cue, times=None, f_pha=[5, 7], f_amp='hres',
cycle=(3, 6), n_jobs=-1, verbose=None):
"""Init."""
set_log_level(verbose)
# initialize to retrieve filtering methods
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex='hilbert',
cycle=cycle)
_PacVisual.__init__(self)
logger.info("PeakLockedTF object defined")
# inputs checking
x = np.atleast_2d(x)
assert isinstance(x, np.ndarray) and (x.ndim == 2)
assert isinstance(sf, (int, float))
assert isinstance(cue, (int, float))
assert isinstance(f_pha, (list, tuple)) and (len(f_pha) == 2)
n_epochs, n_times = x.shape
# manage cur conversion
if times is None:
cue = int(cue)
times = np.arange(n_times)
logger.info(f" align on sample cue={cue}")
else:
assert isinstance(times, np.ndarray) and (len(times) == n_times)
cue_time = cue
cue = np.abs(times - cue).argmin() - 1
logger.info(f" align on time-point={cue_time} (sample={cue})")
self.cue, self._times = cue, times
# extract phase and amplitudes
logger.info(f" extract phase and amplitudes "
f"(n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', n_jobs=n_jobs, keepfilt=True)
amp = self.filter(sf, x, 'amplitude', n_jobs=n_jobs)
self._pha, self._amp = pha, amp ** 2
# peak detection
logger.info(f" running peak detection around sample={cue}")
self.shifts = self._peak_detection(self._pha.squeeze(), cue)
# realign phases and amplitudes
logger.info(f" realign the {n_epochs} phases and amplitudes")
self.amp_a = self._shift_signals(self._amp, self.shifts, fill_with=0.)
self.pha_a = self._shift_signals(self._pha, self.shifts, fill_with=0.)
@staticmethod
def _peak_detection(pha, cue):
"""Single trial closest to a cue peak detection.
Parameters
----------
pha : array_like
Array of single trial phases of shape (n_trials, n_times)
cue : int
Cue to use as a reference (in sample unit)
Returns
-------
peaks : array_like
Array of length (n_trials,) describing each delay to apply
to each trial in order to realign the phases. In detail :
* Positive delays means that zeros should be prepend
* Negative delays means that zeros should be append
"""
n_trials, n_times = pha.shape
peaks = []
for tr in range(n_trials):
# select the single trial phase
st_pha = pha[tr, :]
# detect all peaks across time points
st_peaks = []
for t in range(n_times - 1):
if (st_pha[t - 1] < st_pha[t]) and (st_pha[t] > st_pha[t + 1]):
st_peaks += [t]
# detect the minimum peak
min_peak = st_peaks[np.abs(np.array(st_peaks) - cue).argmin()]
peaks += [cue - min_peak]
return np.array(peaks)
@staticmethod
def _shift_signals(sig, n_shifts, fill_with=0):
"""Shift an array of signals according to an array of delays.
Parameters
----------
sig : array_like
Array of signals of shape (n_freq, n_trials, n_times)
n_shifts : array_like
Array of delays to apply to each trial of shape (n_trials,)
fill_with : int
Value to prepend / append to each shifted time-series
Returns
-------
sig_shifted : array_like
Array of shifted signals with the same shape as the input
"""
# prepare the needed variables
n_freqs, n_trials, n_pts = sig.shape
sig_shifted = np.zeros_like(sig)
# shift each trial
for tr in range(n_trials):
# select the data of a specific trial
st_shift = n_shifts[tr]
st_sig = sig[:, tr, :]
fill = np.full((n_freqs, abs(st_shift)), fill_with,
dtype=st_sig.dtype)
# shift this specific trial
if st_shift > 0: # move forward = prepend zeros
sig_shifted[:, tr, :] = np.c_[fill, st_sig][:, 0:-st_shift]
elif st_shift < 0: # move backward = append zeros
sig_shifted[:, tr, :] = np.c_[st_sig, fill][:, abs(st_shift):]
return sig_shifted
    def plot(self, zscore=False, baseline=None, edges=0, **kwargs):
        """Integrated Peak-Locked TF plotting function.
        Parameters
        ----------
        zscore : bool | False
            Normalize the power by using a z-score normalization. This can be
            useful in order to compensate the 1 / f effect in the power
            spectrum. If True, the mean and deviation are computed at the
            single trial level and across all time points
        baseline : tuple | None
            Baseline period to use in order to apply the z-score correction.
            Should be in samples.
        edges : int | 0
            Number of pixels to discard to compensate filtering edge effect
            (`power[edges:-edges]`).
        kwargs : dict | {}
            Additional arguments are sent to the
            :class:`tensorpac.utils.PeakLockedTF.pacplot` method

        Returns
        -------
        list
            The two matplotlib axes [image plot, phase plot].
        """
        import matplotlib.pyplot as plt
        from matplotlib.gridspec import GridSpec
        # manage additional arguments (forced values override caller kwargs)
        kwargs['colorbar'] = False
        kwargs['ylabel'] = 'Frequency for amplitude (hz)'
        kwargs['xlabel'] = ''
        kwargs['fz_labels'] = kwargs.get('fz_labels', 14)
        kwargs['fz_cblabel'] = kwargs.get('fz_cblabel', 14)
        kwargs['fz_title'] = kwargs.get('fz_title', 16)
        # crop `edges` samples on both sides to hide filtering edge effects
        sl_times = slice(edges, len(self._times) - edges)
        times = self._times[sl_times]
        pha_n = self.pha_a[..., sl_times].squeeze()
        # z-score normalization
        if zscore:
            if baseline is None:
                bsl_idx = sl_times
            else:
                assert len(baseline) == 2
                bsl_idx = slice(baseline[0], baseline[1])
            _mean = self.amp_a[..., bsl_idx].mean(2, keepdims=True)
            _std = self.amp_a[..., bsl_idx].std(2, keepdims=True)
            _std[_std == 0.] = 1. # correction from NaN
            amp_n = (self.amp_a[..., sl_times] - _mean) / _std
        else:
            amp_n = self.amp_a[..., sl_times]
        # grid definition (8x8 cells shared by the image / colorbar / phases)
        gs = GridSpec(8, 8)
        # image plot (trial-averaged re-aligned amplitudes)
        plt.subplot(gs[slice(0, 6), 0:-1])
        self.pacplot(amp_n.mean(1), times, self.yvec, **kwargs)
        plt.axvline(times[self.cue], color='w', lw=2)
        plt.tick_params(bottom=False, labelbottom=False)
        ax_1 = plt.gca()
        # external colorbar
        plt.subplot(gs[slice(1, 5), -1])
        cb = plt.colorbar(self._plt_im, pad=0.01, cax=plt.gca())
        cb.set_label('Power (V**2/Hz)', fontsize=kwargs['fz_cblabel'])
        cb.outline.set_visible(False)
        # phase plot (single trials in gray, mean on top)
        plt.subplot(gs[slice(6, 8), 0:-1])
        plt.plot(times, pha_n.T, color='lightgray', alpha=.2, lw=1.)
        plt.plot(times, pha_n.mean(0), label='single trial phases', alpha=.2,
                 lw=1.) # legend tweaking
        plt.plot(times, pha_n.mean(0), label='mean phases',
                 color='#1f77b4')
        plt.axvline(times[self.cue], color='k', lw=2)
        plt.autoscale(axis='both', tight=True, enable=True)
        plt.xlabel("Times", fontsize=kwargs['fz_labels'])
        plt.ylabel("V / Hz", fontsize=kwargs['fz_labels'])
        # bottom legend
        plt.legend(loc='center', bbox_to_anchor=(.5, -.5),
                   fontsize='x-large', ncol=2)
        ax_2 = plt.gca()
        return [ax_1, ax_2]
| 39.073826 | 79 | 0.57482 |
795411e4d11dcaf91a3309f8ad4b18d5698068cf | 60 | py | Python | python/rrsg_cgreco/__init__.py | ismrm-rrsg-2019/rrsg_challenge_01 | 62feee3a3fbab3db376834429998b21ba474f593 | [
"MIT"
] | 4 | 2020-04-07T15:56:27.000Z | 2020-09-11T10:42:36.000Z | python/rrsg_cgreco/__init__.py | ismrm-rrsg-2019/rrsg_challenge_01 | 62feee3a3fbab3db376834429998b21ba474f593 | [
"MIT"
] | 1 | 2020-06-10T08:17:44.000Z | 2020-06-10T08:17:44.000Z | python/rrsg_cgreco/__init__.py | ismrm-rrsg-2019/rrsg_challenge_01 | 62feee3a3fbab3db376834429998b21ba474f593 | [
"MIT"
] | 2 | 2020-04-29T03:26:36.000Z | 2020-04-29T18:14:30.000Z | from . import linop
from . import recon
from . import solver | 20 | 20 | 0.766667 |
795412d69c3b66cccbf53c0fd685b6e4a914b142 | 2,192 | py | Python | libwise/app/WaveletBrowser.py | flomertens/wise-utils | ebc8e88a0a752f6119d049e6f7a044c9e6818f24 | [
"MIT"
] | 2 | 2017-11-07T19:32:51.000Z | 2019-11-06T17:31:29.000Z | libwise/app/WaveletBrowser.py | flomertens/libwise | ebc8e88a0a752f6119d049e6f7a044c9e6818f24 | [
"MIT"
] | null | null | null | libwise/app/WaveletBrowser.py | flomertens/libwise | ebc8e88a0a752f6119d049e6f7a044c9e6818f24 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from libwise import uiutils, wavelets, plotutils
import waveletsui
import numpy as np
class WaveletBrowser(uiutils.Experience):
    """Interactive browser plotting a wavelet family's scaling/wavelet
    functions (left axis) and their power spectral density (right axis)."""
    def __init__(self, wavelet_families=wavelets.get_all_wavelet_families()):
        # NOTE(review): the default argument is evaluated once at import
        # time, so every instance shares the same families list -- confirm OK
        uiutils.Experience.__init__(self)
        self.gui = uiutils.UI(900, 500, "Wavelet Browser")
        box = self.gui.add_box(uiutils.VBox())
        self.view = box.add(plotutils.BaseCustomCanvas(), True)
        ctl = box.add(uiutils.HBox())
        self.wavelet = waveletsui.WaveletSelector(ctl, self,
                                                  wavelet_families)
        self.level = uiutils.ScaleRangeParameter(ctl, self, "Level:", 1, 10, 1, 8)
        self.ax1, self.ax2 = self.view.figure.subplots(1, 2)
        self.gui.show()
        self.do_update()
    def update(self, changed, thread):
        """Compute the (time, tf) wavelet data for the selected level."""
        result = self.wavelet.get().get_wavelet_fct(self.level.get())
        result_tf = self.wavelet.get().get_tf_wavelet_fct(self.level.get(), - 4, 4)
        return (result, result_tf)
    def after_update(self, result):
        """Redraw both axes from the data returned by update()."""
        t, phi, psi = result[0]
        f, tf_phi, tf_psi = result[1]
        self.ax1.clear()
        self.ax1.set_title("Scaling and wavelet function")
        self.ax1.set_xlabel("time")
        # some wavelets have no scaling function (phi is None)
        if phi is not None:
            self.ax1.plot(t, phi, label="Scaling")
        self.ax1.plot(t, psi, label="Wavelet")
        self.ax1.legend()
        self.ax2.clear()
        self.ax2.set_title("PSD")
        self.ax2.set_xlabel("frequency")
        # frequency of the maximum of the wavelet PSD
        m = (tf_psi * tf_psi.conj()).argmax()
        fmax = f[m]
        # self.ax1.plot(t, np.sin(fmax * t * 2 * np.pi))
        tf_f = np.fft.fft(np.sin(fmax * t * 2 * np.pi), 10 * len(t))
        # ds = len(t) / (t.max() - t.min())
        # f2 = np.fft.fftfreq(10 * len(t), 1 / ds)
        tf_f = tf_f / tf_f.max()
        if tf_phi is not None:
            self.ax2.plot(f, tf_phi * tf_phi.conj())
        self.ax2.plot(f, tf_psi * tf_psi.conj())
        # self.ax2.plot(f2, tf_f * tf_f.conj())
        self.ax2.set_xlim([-4, 4])
        self.view.draw()
def main():
    """Launch the wavelet browser window and enter its event loop."""
    browser = WaveletBrowser()
    browser.gui.start()


if __name__ == '__main__':
    main()
| 27.061728 | 83 | 0.574818 |
79541507e17a44b93424bcbe59d037b50ee7e22d | 1,488 | py | Python | aiida/backends/tests/utils/fixtures.py | DanielMarchand/aiida_core | 1f6385d6245fc4941a7501ba6f5cb6e6fe3632fc | [
"BSD-2-Clause"
] | 1 | 2019-03-15T10:37:53.000Z | 2019-03-15T10:37:53.000Z | aiida/backends/tests/utils/fixtures.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/tests/utils/fixtures.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test utility to import fixtures, such as export archives."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
def import_archive_fixture(filepath):
    """Import a test fixture that is an AiiDA export archive.

    :param filepath: the relative path of the archive file within the
        fixture directory
    :raises ValueError: if the archive does not exist in the fixture folder
    """
    from aiida.orm.importexport import import_data

    here = os.path.dirname(os.path.realpath(__file__))
    fixture_dir = os.path.join(here, os.pardir, 'fixtures')
    archive = os.path.join(fixture_dir, filepath)

    if not os.path.isfile(archive):
        raise ValueError('archive {} does not exist in the fixture directory {}'.format(filepath, fixture_dir))

    import_data(archive, silent=True)
| 45.090909 | 117 | 0.59879 |
795415176a846c0a5b8810a7e60494138bd91669 | 839 | py | Python | src/os_urlpattern/compat.py | ARPSyndicate/os-urlpattern | 9311aff896ad591b2a9123d256f629f5d142dfc6 | [
"MIT"
] | 37 | 2018-06-15T11:38:26.000Z | 2022-03-17T22:35:38.000Z | src/os_urlpattern/compat.py | cfhamlet/os-urlpattern | 9311aff896ad591b2a9123d256f629f5d142dfc6 | [
"MIT"
] | null | null | null | src/os_urlpattern/compat.py | cfhamlet/os-urlpattern | 9311aff896ad591b2a9123d256f629f5d142dfc6 | [
"MIT"
] | 8 | 2018-06-20T10:02:39.000Z | 2022-02-17T14:47:08.000Z | """Compatible import.
"""
from __future__ import unicode_literals
import operator
import string
import sys
_PY3 = sys.version_info[0] >= 3
if _PY3:
from io import StringIO
iteritems = operator.methodcaller("items")
itervalues = operator.methodcaller("values")
from urllib.parse import urlparse, ParseResult
from configparser import ConfigParser
binary_stdin = sys.stdin.buffer
binary_stdout = sys.stdout.buffer
else:
try:
from cStringIO import StringIO # safe, only process ascii
except ImportError:
from StringIO import StringIO
iteritems = operator.methodcaller("iteritems")
itervalues = operator.methodcaller("itervalues")
from urlparse import urlparse, ParseResult
from ConfigParser import ConfigParser
binary_stdin = sys.stdin
binary_stdout = sys.stdout
| 27.966667 | 66 | 0.738975 |
795415d965ad571b6a95398812e3a90591bb8a1c | 4,368 | py | Python | scratch.py | LOLZ1235/Spaceinvader | ecd3a5988899583ed5bc81a893a3f2a3c4d99dba | [
"Apache-2.0"
] | null | null | null | scratch.py | LOLZ1235/Spaceinvader | ecd3a5988899583ed5bc81a893a3f2a3c4d99dba | [
"Apache-2.0"
] | 1 | 2020-04-25T06:26:46.000Z | 2020-04-25T06:26:46.000Z | scratch.py | LOLZ1235/Spaceinvader | ecd3a5988899583ed5bc81a893a3f2a3c4d99dba | [
"Apache-2.0"
] | null | null | null | import pygame
import random
import math
from pygame import mixer
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((900, 800))
mixer.music.load('l.ogg')
mixer.music.play(-1)
pygame.display.set_caption("Space Invader")
bg = pygame.image.load('background.png')
icon = pygame.image.load('Untitled.png')
pygame.display.set_icon(icon)
playerImg = pygame.image.load('d.png')
playerX = 340
playerY = 480
bx = 0
by = 12
bt_state = "ready"
over_font = pygame.font.Font('freesansbold.ttf', 64)
X = 0
Y = 0
btImg = pygame.image.load('bullet.png')
goImg = pygame.image.load('gop.png')
btY = playerY
btX = 0
score = 0
font = pygame.font.Font('freesansbold.ttf', 32)
textX = 10
textY = 10
enemyImg = []
enemyY = []
enemyX = []
ex = []
ey = []
ne = 6
for i in range(ne):
enemyImg.append(pygame.image.load('e.png'))
enemyY.append(random.randint(50, 150))
enemyX.append(random.randint(0, 799))
ex.append(10)
ey.append(80)
def ss(x, y):
    """Render the current score at screen position (x, y)."""
    rendered = font.render("Score :" + str(score), True, (255, 255, 255))
    screen.blit(rendered, (x, y))
def fire_bt(x, y):
    """Draw the bullet just above the ship at (x, y) and mark it in flight."""
    global bt_state
    bt_state = "fire"
    # offset centres the bullet on the ship sprite
    screen.blit(btImg, (x + 16, y - 20))
    clock.tick(100)
    pygame.display.update()
def game_over_text():
    """Show the game-over image and play the death jingle."""
    screen.blit(goImg, (30, 50))
    mixer.music.load('death.ogg')
    mixer.music.play()
def exp(enemyX, enemyY, playerX, playerY):
    """Return True when the enemy overlaps the player (distance < 27 px).

    math.hypot replaces the manual sqrt(pow(..) + pow(..)) expression, and
    the comparison is returned directly instead of an if/else on booleans.
    """
    return math.hypot(enemyX - playerX, enemyY - playerY) < 27
def col(enemyX, enemyY, btX, btY):
    """Return True when the bullet hits the enemy (distance < 27 px).

    Same simplification as exp(): stdlib Euclidean distance and a direct
    boolean return.
    """
    return math.hypot(enemyX - btX, enemyY - btY) < 27
def enemy(x, y, i):
    """Draw enemy sprite number *i* at position (x, y)."""
    screen.blit(enemyImg[i], (x, y))
def player(x, y):
    """Draw the player ship at position (x, y)."""
    screen.blit(playerImg, (x, y))
# Main game loop. Fix: bullet-state checks used `is "ready"` / `is "fire"`,
# i.e. object identity against string literals -- an implementation detail of
# string interning that emits a SyntaxWarning and can silently fail. They now
# use value equality (==).
running = True
while running:
    screen.fill((255, 255, 255))
    screen.blit(bg, (0, 0))
    # apply the current velocity and draw the ship
    playerX += X
    playerY += Y
    player(playerX, playerY)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                X = -10
            if event.key == pygame.K_RIGHT:
                X = +10
            if event.key == pygame.K_UP:
                Y = -10
            if event.key == pygame.K_DOWN:
                Y = +10
            if event.key == pygame.K_SPACE:
                # only one bullet in flight at a time
                if bt_state == "ready":
                    btX = playerX
                    fire_bt(btX, btY)
                    bts = mixer.Sound('fire_s.ogg')
                    bts.play()
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                X = 0
            if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
                Y = 0
    # keep the ship inside the window
    if playerX <= 0:
        playerX = 0
    elif playerX >= 825:
        playerX = 825
    if playerY <= 0:
        playerY = 0
    elif playerY >= 700:
        playerY = 700
    for i in range(ne):
        # game over when any enemy reaches the bottom of the screen
        if enemyY[i] > 755:
            for j in range(ne):
                enemyY[j] = -20000
            game_over_text()
        # enemies bounce horizontally and step down at each wall
        enemyX[i] += ex[i]
        if enemyX[i] <= 0:
            ex[i] = 4
            enemyY[i] += ey[i]
        elif enemyX[i] >= 800:
            ex[i] = -4
            enemyY[i] += ey[i]
        # bullet / enemy collision: respawn the enemy and rearm the bullet
        cols = col(enemyX[i], enemyY[i], btX, btY)
        if cols:
            btY = playerY
            bt_state = "ready"
            pts = mixer.Sound('exe.ogg')
            pts.play()
            enemyY[i] = random.randint(50, 150)
            enemyX[i] = random.randint(0, 800)
            score += 1
        enemy(enemyX[i], enemyY[i], i)
        cols = col(enemyX[i], enemyY[i], btX, btY)
        # enemy / player collision ends the game
        expc = exp(enemyX[i], enemyY[i], playerX, playerY)
        if expc:
            for j in range(ne):
                enemyY[j] = -20000
            game_over_text()
    # rearm the bullet once it leaves the top of the screen
    if btY <= -100:
        btY = playerY
        bt_state = "ready"
    if bt_state == "fire":
        fire_bt(btX, btY)
        btY -= by
    ss(textX, textY)
    pygame.display.update()
| 24.402235 | 88 | 0.510531 |
7954164f72688cf0d64235a3026d583f741d136e | 1,864 | py | Python | setup.py | fserena/wd-entities | f3a315fd7b4e99032b262a9c88fb12304c640777 | [
"Apache-2.0"
] | null | null | null | setup.py | fserena/wd-entities | f3a315fd7b4e99032b262a9c88fb12304c640777 | [
"Apache-2.0"
] | null | null | null | setup.py | fserena/wd-entities | f3a315fd7b4e99032b262a9c88fb12304c640777 | [
"Apache-2.0"
] | null | null | null | """
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from setuptools import setup, find_packages
__author__ = 'Fernando Serena'
# package metadata (version, author, description...) is kept in a single
# JSON file shared with the runtime package
with open("wd_entities/metadata.json", 'r') as stream:
    metadata = json.load(stream)
setup(
    name="wd-entities",
    version=metadata['version'],
    author=metadata['author'],
    author_email=metadata['email'],
    description=metadata['description'],
    license="Apache 2",
    # NOTE(review): keywords mention overpass/osm although the package deals
    # with Wikidata entities -- possibly copied from a sibling project
    keywords=["overpass", "osm"],
    url=metadata['github'],
    download_url="https://github.com/fserena/wd-entities/tarball/{}".format(metadata['version']),
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    install_requires=['flask', 'Flask-Caching', 'redis', 'hiredis', 'requests', 'urllib3', 'gunicorn', 'futures', 'SPARQLWrapper'],
    classifiers=[],
    package_dir={'wd_entities': 'wd_entities'},
    package_data={'wd_entities': ['metadata.json']},
    scripts=['wd-entities']
)
| 38.833333 | 131 | 0.589056 |
7954175f29f5b05b0054d795055db1ea1265c86e | 2,029 | py | Python | modulos/menu5.py | DevJavaStudios/pack-comandos-termux- | dc68b4ce1581b4aed1057beae3ad1c39b4f68e67 | [
"MIT"
] | null | null | null | modulos/menu5.py | DevJavaStudios/pack-comandos-termux- | dc68b4ce1581b4aed1057beae3ad1c39b4f68e67 | [
"MIT"
] | null | null | null | modulos/menu5.py | DevJavaStudios/pack-comandos-termux- | dc68b4ce1581b4aed1057beae3ad1c39b4f68e67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Modulo do UniTools-Termux
import os
import sys
import time
from Zawiencom import *
def FacebookBruteForce():
    """Clone the Facebook-BruteForce tool into the home directory."""
    update()
    os.system("git clone https://github.com/IAmBlackHacker/Facebook-BruteForce.git")
    os.system("mv Facebook-BruteForce ~")
    cc()
    restart_program()
def Hydra():
    """Install the hydra package from the Termux repositories."""
    update()
    os.system("pkg install hydra")
    cc()
    restart_program()
def facebookcracker():
    """Clone the facebook-cracker tool into the home directory."""
    update()
    os.system("git clone https://github.com/Ha3MrX/facebook-cracker.git")
    os.system("mv facebook-cracker ~")
    cc()
    restart_program()
def Instahack():
    """Clone the instahack tool into the home directory."""
    update()
    os.system("git clone https://github.com/fuck3erboy/instahack.git")
    os.system("mv instahack ~")
    cc()
    restart_program()
def crunch():
    """Install crunch (wordlist generator) from the unstable Termux repo."""
    update()
    os.system("pkg install unstable-repo")
    os.system("pkg install crunch")
    cc()
    restart_program()
def hashcat():
    """Clone the hashcat sources into the home directory."""
    update()
    os.system("git clone https://github.com/hashcat/hashcat.git")
    os.system("mv hashcat ~")
    cc()
    restart_program()
def BlackHydra():
    """Clone the Black-Hydra tool into the home directory."""
    update()
    os.system("git clone https://github.com/Gameye98/Black-Hydra.git")
    os.system("mv Black-Hydra ~")
    cc()
    restart_program()
def HashBuster():
    """Clone the Hash-Buster tool into the home directory."""
    update()
    os.system("git clone https://github.com/s0md3v/Hash-Buster.git")
    os.system("mv Hash-Buster ~")
    cc()
    restart_program()
def Facebom():
    """Clone Facebom into the home directory and install its dependencies."""
    update()
    os.system("git clone https://github.com/Oseid/Facebom.git")
    os.system("mv Facebom ~")
    os.system("pip install requests")
    os.system("pip install mechanize")
    cc()
    restart_program()
def brutespray():
    """Clone brutespray into the home directory and install its requirements.

    Fix: ``os.system`` launches a fresh subshell per call, so a standalone
    ``os.system("cd ~/brutespray")`` had no effect on the following pip
    command, which then ran in the original working directory and could not
    find requirements.txt. The cd and the pip install are now chained in a
    single shell invocation.
    """
    update()
    os.system("git clone https://github.com/hanshaze/brutespray.git")
    os.system("mv brutespray ~")
    os.system("cd ~/brutespray && pip install -r requirements.txt")
    cc()
    restart_program()
def hyprPulse():
    """Clone hyprPulse into the home directory and run its install script.

    Fixes two defects:
    * ``chmod + x install.sh`` (stray space) did not set the execute bit,
      so the subsequent ``./install.sh`` failed with "Permission denied";
    * ``os.system("cd ...")`` runs in its own subshell, so the chmod and
      install ran in the wrong directory. All three steps are now chained
      in one shell invocation.
    """
    update()
    os.system("git clone https://github.com/Pure-L0G1C/hyprPulse.git")
    os.system("mv hyprPulse ~")
    os.system("cd ~/hyprPulse && chmod +x install.sh && ./install.sh")
    cc()
    restart_program()
7954180b75476c257ba428352815a7d97771d864 | 930 | py | Python | emulator/cr_tb.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | 4 | 2020-07-13T03:12:19.000Z | 2021-08-03T02:09:28.000Z | emulator/cr_tb.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | null | null | null | emulator/cr_tb.py | Adancurusul/UR408_Core | 077712cb3d2a2dd3d9d1a0eeaae2bc71b632e159 | [
"MIT"
] | null | null | null | from myhdl import *
from cr import cr
def cr_convert():
    """Build and return a MyHDL instance of the `cr` control-register block.

    All control signals are created here; `pc_next`, `branch_offset`,
    `r6_r7_data` and `cr_data` are 16-bit buses, `selector` is 3 bits wide.
    """
    # active-high synchronous reset, asserted initially
    rst = ResetSignal(1, active=1, isasync=False)
    clk,int0,int1,int2,int3,mem_read,mem_write,mem_ok,branch,cr_write,ret,apc,jmp,bra,main_state = [Signal(bool(0)) for _ in range(15)]
    selector = Signal(intbv(0)[3:])
    #Signal().driven='wire'
    pc_next,branch_offset,r6_r7_data,cr_data = [Signal(intbv(0)[16:]) for _ in range (4)]
    cr_ins =cr( pc_next, branch_offset, r6_r7_data, cr_data,clk, rst, int0, int1, int2, int3, mem_read, mem_write, mem_ok, branch, selector, cr_write, ret, apc, jmp, bra , main_state) #toVerilog(cr,pc_next, branch_offset, r6_r7_data, cr_data,clk, rst, int0, int1, int2, int3, mem_read, mem_write, mem_ok, branch, selector, cr_write, ret, apc, jmp, bra , main_state)
    return cr_ins
def cr_sim():
    """Instantiate the cr block (placeholder for a future simulation)."""
    # NOTE(review): the instance is never simulated nor returned; this
    # function currently has no observable effect -- confirm intent
    crins = cr_convert()
if __name__ == '__main__':
    # convert the instantiated design to Verilog, emitting initial values
    cr_ins = cr_convert()
    cr_ins.convert(hdl='Verilog', initial_values=True)
795418454cb52bd911e661f5f1024bb2b35337be | 340 | py | Python | apps/promotions/migrations/0003_remove_promotion_current_nums.py | lsdlab/djshop_toturial | 6d450225cc05e6a1ecd161de2b522e1af0b68cc0 | [
"MIT"
] | null | null | null | apps/promotions/migrations/0003_remove_promotion_current_nums.py | lsdlab/djshop_toturial | 6d450225cc05e6a1ecd161de2b522e1af0b68cc0 | [
"MIT"
] | 6 | 2020-06-07T15:18:58.000Z | 2021-09-22T19:07:33.000Z | apps/promotions/migrations/0003_remove_promotion_current_nums.py | lsdlab/djshop_toturial | 6d450225cc05e6a1ecd161de2b522e1af0b68cc0 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.1 on 2020-01-07 22:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration removing Promotion.current_nums."""
    dependencies = [
        ('promotions', '0002_auto_20191226_2252'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='promotion',
            name='current_nums',
        ),
    ]
| 18.888889 | 50 | 0.605882 |
795418eace034909473c587489cbb5f366fd44be | 3,953 | py | Python | runtime/python/Tools/scripts/ndiff.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | runtime/python/Tools/scripts/ndiff.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | 8 | 2019-06-29T14:18:51.000Z | 2022-02-19T07:30:27.000Z | runtime/python/Tools/scripts/ndiff.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | 76 | 2020-03-16T01:47:46.000Z | 2022-03-21T16:37:07.000Z | #! /usr/bin/env python3
# Module ndiff version 1.7.0
# Released to the public domain 08-Dec-2000,
# by Tim Peters (tim.one@home.com).
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
# ndiff.py is now simply a front-end to the difflib.ndiff() function.
# Originally, it contained the difflib.SequenceMatcher class as well.
# This completes the raiding of reusable code from this formerly
# self-contained script.
"""ndiff [-q] file1 file2
or
ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
Print a human-friendly file difference report to stdout. Both inter-
and intra-line differences are noted. In the second form, recreate file1
(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
In the first form, if -q ("quiet") is not specified, the first two lines
of output are
-: file1
+: file2
Each remaining line begins with a two-letter code:
"- " line unique to file1
"+ " line unique to file2
" " line common to both files
"? " line not present in either input file
Lines beginning with "? " attempt to guide the eye to intraline
differences, and were not present in either input file. These lines can be
confusing if the source files contain tab characters.
The first file can be recovered by retaining only lines that begin with
" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
The second file can be recovered similarly, but by retaining only " " and
"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
recovered by piping the output through
sed -n '/^[+ ] /s/^..//p'
"""
__version__ = 1, 7, 0
import difflib, sys
def fail(msg):
    """Write *msg* plus the usage text to stderr and return 0 (failure)."""
    sys.stderr.write(msg + "\n\n")
    sys.stderr.write(__doc__)
    return 0
# open a file & return the file object; gripe and return 0 if it
# couldn't be opened
def fopen(fname):
    """Open *fname* for reading; on failure, report via fail() and return 0."""
    try:
        return open(fname)
    except IOError as detail:
        # IOError is an alias of OSError on Python 3
        return fail("couldn't open " + fname + ": " + str(detail))
# open two files & spray the diff to stdout; return false iff a problem
def fcompare(f1name, f2name):
    """Print an ndiff of the two files to stdout.

    Returns 1 on success, 0 if either file could not be opened.
    """
    f1 = fopen(f1name)
    f2 = fopen(f2name)
    if not (f1 and f2):
        return 0
    with f1, f2:
        a = f1.readlines()
        b = f2.readlines()
    for line in difflib.ndiff(a, b):
        print(line, end=' ')
    return 1
# crack args (sys.argv[1:] is normal) & compare;
# return false iff a problem
def main(args):
    """Parse command-line *args* (like sys.argv[1:]) and run diff or restore.

    Returns a false value iff a problem occurred.
    """
    import getopt
    try:
        opts, args = getopt.getopt(args, "qr:")
    except getopt.error as detail:
        return fail(str(detail))
    noisy = 1
    qseen = rseen = 0
    whichfile = None
    for opt, val in opts:
        if opt == "-q":
            qseen, noisy = 1, 0
        elif opt == "-r":
            rseen, whichfile = 1, val
    if qseen and rseen:
        return fail("can't specify both -q and -r")
    if rseen:
        if args:
            return fail("no args allowed with -r option")
        if whichfile not in ("1", "2"):
            return fail("-r value must be 1 or 2")
        restore(whichfile)
        return 1
    if len(args) != 2:
        return fail("need 2 filename args")
    f1name, f2name = args
    if noisy:
        print('-:', f1name)
        print('+:', f2name)
    return fcompare(f1name, f2name)
# read ndiff output from stdin, and print file1 (which=='1') or
# file2 (which=='2') to stdout
def restore(which):
    """Recreate file 1 or 2 (per *which*) on stdout from an ndiff report on stdin."""
    sys.stdout.writelines(difflib.restore(sys.stdin.readlines(), which))
if __name__ == '__main__':
    args = sys.argv[1:]
    # undocumented developer switch: profile a run and print the hot spots
    if "-profile" in args:
        import profile, pstats
        args.remove("-profile")
        statf = "ndiff.pro"
        profile.run("main(args)", statf)
        stats = pstats.Stats(statf)
        stats.strip_dirs().sort_stats('time').print_stats()
    else:
        main(args)
79541924d221816c77a06c1982ab9d02f397ee20 | 424 | py | Python | introduction_to_classic_ciphers/lesson5/task1/tests.py | behzod/pycharm-courses | 0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e | [
"Apache-2.0"
] | 213 | 2015-01-03T19:25:02.000Z | 2020-02-06T03:08:43.000Z | introduction_to_classic_ciphers/lesson5/task1/tests.py | behzod/pycharm-courses | 0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e | [
"Apache-2.0"
] | 24 | 2015-01-01T17:03:09.000Z | 2019-12-22T10:28:22.000Z | introduction_to_classic_ciphers/lesson5/task1/tests.py | behzod/pycharm-courses | 0ba74ff0ff87e7747173c60cd139c25b8d7f3b0e | [
"Apache-2.0"
] | 139 | 2015-01-03T19:24:22.000Z | 2020-01-24T18:05:51.000Z | from test_helper import run_common_tests, failed, passed, get_answer_placeholders
def test_answer_placeholders():
    """Pass when the first placeholder is the expected quoted string."""
    placeholder = get_answer_placeholders()[0]
    if placeholder in ("'COME AT NOON'", '"COME AT NOON"'):
        passed()
    else:
        failed(message='Please try again.')


if __name__ == '__main__':
    run_common_tests()
    test_answer_placeholders()
| 26.5 | 81 | 0.705189 |
79541af9407fcb77ce9e83eb27b5a10d6bebcdd3 | 1,118 | py | Python | state_manager/filters/aiogram.py | Bloodielie/state_manager | 415e293c5fe85dd99648cacf727fd572c9d5df3f | [
"Apache-2.0"
] | 4 | 2020-07-13T15:07:11.000Z | 2021-10-30T17:11:44.000Z | state_manager/filters/aiogram.py | Bloodielie/state_manager | 415e293c5fe85dd99648cacf727fd572c9d5df3f | [
"Apache-2.0"
] | null | null | null | state_manager/filters/aiogram.py | Bloodielie/state_manager | 415e293c5fe85dd99648cacf727fd572c9d5df3f | [
"Apache-2.0"
] | null | null | null | from aiogram import types
from state_manager.filters.base import BaseTextFilter, BaseTextContainsFilter, BaseRegexFilter
from state_manager.filters.logic import text_in_list, text_contains, text_matches_regex
class TextFilter(BaseTextFilter):
    """Pass when the incoming message text equals one of the expected texts."""

    def check(self, msg: types.Message) -> bool:  # type: ignore
        incoming = msg.text
        if incoming is None:
            return False
        normalized = incoming.lower() if self.ignore_case else incoming
        return text_in_list(normalized, self.text)
class TextContainsFilter(BaseTextContainsFilter):
    """Pass when the incoming message text contains the expected substring."""

    def check(self, msg: types.Message) -> bool:  # type: ignore
        incoming = msg.text
        if incoming is None:
            return False
        normalized = incoming.lower() if self.ignore_case else incoming
        return text_contains(normalized, self.text)
class RegexFilter(BaseRegexFilter):
    """Pass when the incoming message text matches the configured pattern."""

    def check(self, msg: types.Message) -> bool:  # type: ignore
        if msg.text is None:
            return False
        return text_matches_regex(msg.text, self.pattern)
# Lowercase aliases kept so callers can use a function-style naming scheme.
text_filter = TextFilter
text_contains_filter = TextContainsFilter
regex_filter = RegexFilter
| 27.95 | 94 | 0.677102 |
79541cdf30d87601224d14ff284af8ce76af2389 | 4,507 | py | Python | pcmc/features/serveur.py | loic-simon/pcmc-bot | d1d9b3c0249edbdffa5c1f9f1919a7a5b52678c4 | [
"MIT"
] | null | null | null | pcmc/features/serveur.py | loic-simon/pcmc-bot | d1d9b3c0249edbdffa5c1f9f1919a7a5b52678c4 | [
"MIT"
] | null | null | null | pcmc/features/serveur.py | loic-simon/pcmc-bot | d1d9b3c0249edbdffa5c1f9f1919a7a5b52678c4 | [
"MIT"
] | null | null | null | """pcmc-bot / features / Gestion du serveur
Communication directe avec le serveur Minecraft
"""
import asyncio
import datetime
import re
from dataclasses import dataclass

import discord
from discord.ext import commands

from pcmc import config
from pcmc.blocs import tools, server
class _ServerInfo():
def __init__(self, players, n_players, max_players):
self.players = players
self.n_players = n_players
self.max_players = max_players
async def get_online_players():
    """Query the Minecraft server for the connected players.

    Runs and parses the Minecraft
    [``/list uuids``](https://minecraft.fandom.com/wiki/Commands/list)
    command, returning a proxy with:

    - ``players``: list of ``(name, UUID)`` tuples of connected players;
    - ``n_players``: number of connected players, normally ``len(players)``;
    - ``max_players``: maximum simultaneous players accepted by the server.
    """
    raw = await server.command("list uuids")
    summary = re.fullmatch(
        "There are (\d+) of a max of (\d+) players online: (.*)", raw
    )
    players = []
    for entry in summary.group(3).split(", "):
        detail = re.fullmatch(r"(.*) \(([0-9a-f-]{36})\)", entry)
        if detail:
            players.append((detail.group(1), detail.group(2)))
    return _ServerInfo(players, int(summary.group(1)), int(summary.group(2)))
class GestionServeur(commands.Cog):
    """Commandes de communication directe avec le serveur"""
    # NOTE: the French docstrings below are user-facing help text rendered by
    # discord.ext.commands at runtime, so they are kept verbatim.
    @commands.command(aliases=["!"])
    @tools.admins_only
    async def send(self, ctx, *, command):
        """Exécute une commande Minecraft via RCon (commande admin)
        Args:
            command: commande Minecraft à exécuter.
        """
        # forward the raw command over RCon and echo the server's reply
        res = await server.command(command)
        await tools.send_code_blocs(ctx, res)
    @commands.command(aliases=["statut", "statuts"])
    async def status(self, ctx):
        """Récupère l'état du serveur
        Informe sur les joueurs connectés et le nombre de TPS du serveur.
        """
        async with ctx.typing():
            online = await server.connect()
            if online:
                info = await get_online_players()
                # French plural "s" suffix for the player count
                s = "" if info.n_players == 1 else "s"
                on_off = f"🟢 ONLINE - {info.n_players} joueur{s} en ligne"
            else:
                on_off = "🔴 OFFLINE"
            embed = discord.Embed(
                title=f"État du serveur : {on_off}",
                # description=config.bot.description,
                color = discord.Color.green() if online else discord.Color.red()
            ).set_author(
                name="PC Minecraft",
                icon_url=config.bot.user.avatar_url,
            ).set_footer(
                text=("pcmc.bloomenetwork.fr – "
                      + datetime.datetime.now().strftime("%d/%m/%Y %H:%M")),
            )
            if online:
                s = "" if info.n_players == 1 else "s"
                # field 0: player list; field 1: TPS placeholder updated below
                embed.add_field(
                    name=f"Joueur{s} connectés : ",
                    value="\n".join(pl[0] for pl in info.players),
                    inline=True,
                ).add_field(
                    name="TPS (idéal = 20) :",
                    value="Calcul en cours... (10s)",
                    inline=True,
                )
            embed.add_field(
                name="Vue de la map (s'actualise toutes les 2 heures) :",
                value=("[pcmc.bloomenetwork.fr:8000]"
                       "(http://pcmc.bloomenetwork.fr:8000)"),
                inline=False,
                # ).add_field(
                #     name="Whitelist :",
                #     value="Ping Loïc sur #général",
                #     inline=True,
            )
        mess = await ctx.send(embed=embed)
        if not online:
            return
        # measure the server tick rate with a 10-second /debug profile, then
        # patch the TPS field of the already-sent embed
        async with ctx.typing():
            await server.command("debug start")
            await asyncio.sleep(10)
            res = await server.command("debug stop")
            mtch = re.fullmatch("Stopped tick profiling after \d+\.\d+ seconds "
                                "and \d+ ticks \((\d+\.\d+) ticks per second\)",
                                res)
            tps = mtch.group(1)
        embed.set_field_at(1, name=embed.fields[1].name, value=tps)
        await mess.edit(embed=embed)
    @commands.command()
    @tools.admins_only
    async def reconnect(self, ctx, *, command):
        # NOTE(review): the `command` parameter is never used yet forces the
        # caller to supply text -- confirm whether it should be removed
        """Coupe et relance la connexion au serveur (commande admin)
        Peut être utile en cas de non-réponse du serveur.
        """
        await server.reconnect()
| 32.192857 | 77 | 0.56379 |
79541d354727375eb65dcac1bd9d7dd846e61930 | 912 | py | Python | FuzzyMiner/test/FuzzyTests.py | fnc11/FuzzyMiner | 5e159d9f10330334b72bd839d43854777c0c9449 | [
"Apache-2.0"
] | 5 | 2020-05-24T13:13:11.000Z | 2021-01-01T13:08:24.000Z | FuzzyMiner/test/FuzzyTests.py | fnc11/FuzzyMiner | 5e159d9f10330334b72bd839d43854777c0c9449 | [
"Apache-2.0"
] | 11 | 2020-05-27T22:38:06.000Z | 2022-02-27T06:07:31.000Z | FuzzyMiner/test/FuzzyTests.py | fnc11/FuzzyMiner | 5e159d9f10330334b72bd839d43854777c0c9449 | [
"Apache-2.0"
] | 2 | 2020-11-24T13:09:43.000Z | 2022-01-12T06:32:22.000Z | import unittest
from FuzzyMiner.fuzzyminerpk import FMUtility
class TestFuzzyMethods(unittest.TestCase):
    """Smoke tests for the FMUtility validation helpers.

    NOTE(review): none of these tests asserts a return value — they only
    verify the calls do not raise. The "negative" cases in particular should
    presumably assert a falsy result; confirm against FMUtility's contract.
    """

    def test_is_valid_matrix1D(self):
        # Well-formed 1-D data.
        self.d = [1, 2, 3]
        FMUtility.is_valid_matrix1D(self.d)

    def test_is_valid_matrix2D(self):
        # Well-formed 2-D data (the extra ``a =`` alias is unused).
        self.d = a = [[1, 2, 3], [4, 5, 6]]
        FMUtility.is_valid_matrix2D(self.d)

    def test_is_standard_key(self):
        # Key containing the expected standard tokens.
        self.key = 'concept lifecycle geeks'
        FMUtility.is_standard_key(self.key)

    def test_negative_is_valid_matrix1D(self):
        # Negative entry — presumably invalid input; no assertion made.
        self.d = [-1, 2, 3]
        FMUtility.is_valid_matrix1D(self.d)

    def test_negative_is_valid_matrix2D(self):
        # Negative entry in 2-D data; no assertion made.
        self.d = a = [[-1, 2, 3], [4, 5, 6]]
        FMUtility.is_valid_matrix2D(self.d)

    def test_negative_is_standard_key(self):
        # Non-standard key text; no assertion made.
        self.key = 'it is a negative test case'
        FMUtility.is_standard_key(self.key)
if __name__ == '__main__':
    # Allow running this test module directly (discovers and runs all tests).
    unittest.main()
79541d4c5164beffb47b17be1afd0c8e89b6e774 | 54 | py | Python | apps/operation/__init__.py | codelieche/moocweb | 0e25efa597a79a38066ec41559334be604388f30 | [
"MIT"
] | 1 | 2017-08-07T07:28:23.000Z | 2017-08-07T07:28:23.000Z | apps/operation/__init__.py | codelieche/moocweb | 0e25efa597a79a38066ec41559334be604388f30 | [
"MIT"
] | null | null | null | apps/operation/__init__.py | codelieche/moocweb | 0e25efa597a79a38066ec41559334be604388f30 | [
"MIT"
] | null | null | null | default_app_config = "operation.apps.OperationConfig"
| 27 | 53 | 0.851852 |
79541d7839cc3556bddfc69b3ff5b3b39336b1e7 | 5,060 | py | Python | spawn_models.py | Stayermax/5dof-bartender-robot | dd04303afd2c252e6f7105e33ba35b01f3915194 | [
"MIT"
] | null | null | null | spawn_models.py | Stayermax/5dof-bartender-robot | dd04303afd2c252e6f7105e33ba35b01f3915194 | [
"MIT"
] | null | null | null | spawn_models.py | Stayermax/5dof-bartender-robot | dd04303afd2c252e6f7105e33ba35b01f3915194 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Baxter RSDK Inverse Kinematics Pick and Place Demo
*** modified ***
modelState service
placing picture
three blocks
Biggest part of the file was created by Rethink Robotics.
For my project I creates the following functions:
load_gazebo_models
number_to_configuration
delete_gazebo_models
get_model_pose
get_actual_pose
"""
import signal
import rospy
import os
from constants import *
import moveit_commander
from gazebo_msgs.srv import (
SpawnModel,
SetModelState,
DeleteModel,
GetModelState
)
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from gazebo_msgs.msg import ModelState
def signal_handler(signum=None, frame=None):
    """SIGINT callback registered via ``signal.signal`` below.

    Signal handlers are invoked with ``(signum, frame)``; the original
    zero-argument version raised TypeError when the signal actually fired.
    Both parameters default to None so existing direct calls keep working.
    """
    print('Signal handler')
def spawn_model(model_name, spawn_service=None):
    # Spawn one SDF model (bottle_N or cup_N) into Gazebo at its pose from
    # ``Poses``. A spawn service proxy is created on demand when none is
    # supplied (as done by spawn_all_models below).
    if(spawn_service == None):
        spawn_service = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
        rospy.wait_for_service('/gazebo/spawn_sdf_model')
    # NOTE(review): ``scene`` and ``m_color`` are assigned but never used.
    scene = moveit_commander.PlanningSceneInterface()
    reference_frame = "world"
    model_path = "/home/user/Project models/"
    if('bottle' in model_name):
        # Bottles live in a subdirectory; cups are loaded from the root path.
        # model_path += "Round bottles/"
        model_path += "Jack_bottles/"
    m_name = Names[model_name]
    m_pose = Poses[model_name]
    m_color = Colors[model_name]
    # Gazebo's spawn service expects the SDF XML as a single string.
    with open(model_path + m_name + "/model.sdf", "r") as mm:
        xml = mm.read().replace('\n', '')
    try:
        spawn_service(model_name, xml, "/", m_pose, reference_frame)
    except rospy.ServiceException, e:
        rospy.logerr("Spawn SDF service call failed: {0}".format(e))
    # NOTE(review): re-registers the SIGINT handler on every call — likely
    # intended to run once at module/setup time; confirm.
    signal.signal(signal.SIGINT, signal_handler)
def spawn_all_models():
    """Spawn every bottle (1-6) and cup (1-3) model into the simulation."""
    spawner = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
    rospy.wait_for_service('/gazebo/spawn_sdf_model')
    # Reuse one service proxy for all spawns.
    for idx in range(1, 7):
        spawn_model("bottle_" + str(idx), spawner)
    for idx in range(1, 4):
        spawn_model("cup_" + str(idx), spawner)
def reset_model_position(model, wait = True):
    # Move ``model`` back to its initial pose recorded in ``Poses``.
    # ``wait=False`` lets batch callers (reset_all) wait for the service once.
    print("Reseting position of model " + '\"' + model + '\"')
    state_msg = ModelState()
    state_msg.model_name = model
    # Copy the reference pose field by field into the state message.
    state_msg.pose.position.x = Poses[model].position.x
    state_msg.pose.position.y = Poses[model].position.y
    state_msg.pose.position.z = Poses[model].position.z
    state_msg.pose.orientation.x = Poses[model].orientation.x
    state_msg.pose.orientation.y = Poses[model].orientation.y
    state_msg.pose.orientation.z = Poses[model].orientation.z
    state_msg.pose.orientation.w = Poses[model].orientation.w
    if(wait):
        rospy.wait_for_service('/gazebo/set_model_state')
    try:
        set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        resp = set_state( state_msg )
    except rospy.ServiceException, e:
        print "Service call failed: %s" % e
    # NOTE(review): this second block calls the service again with NO state
    # message and its error text mentions "Spawn SDF" — it looks like a
    # copy/paste leftover; confirm whether it can be removed.
    try:
        set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        set_model_state()
    except rospy.ServiceException, e:
        rospy.logerr("Spawn SDF service call failed: {0}".format(e))
def reset_all():
    """Reset every bottle and cup back to its initial recorded pose."""
    # Wait for the service once up front; individual resets skip the wait.
    rospy.wait_for_service('/gazebo/set_model_state')
    for idx in range(1, 7):
        reset_model_position("bottle_" + str(idx), wait=False)
    for idx in range(1, 4):
        reset_model_position("cup_" + str(idx), wait=False)
if __name__ == '__main__':
    # delete_gazebo_models('bottles')
    # load_models()
    # Entry point: spawn all bottle and cup models into the running Gazebo.
    spawn_all_models()
79541fdc0932ddcaadfe519193589b0ca77fafae | 2,046 | py | Python | tests/integration/modules/test_virtualenv.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | 1 | 2022-02-09T06:40:14.000Z | 2022-02-09T06:40:14.000Z | tests/integration/modules/test_virtualenv.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/integration/modules/test_virtualenv.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | 4 | 2020-11-04T06:28:05.000Z | 2022-02-09T10:54:49.000Z | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import TMP
# Import salt libs
import salt.utils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
@skipIf(salt.utils.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
class VirtualenvModuleTest(ModuleCase):
    '''
    Validate the virtualenv module
    '''
    def setUp(self):
        # Each test gets a fresh temp dir with a 'venv' target path inside.
        super(VirtualenvModuleTest, self).setUp()
        self.venv_test_dir = tempfile.mkdtemp(dir=TMP)
        self.venv_dir = os.path.join(self.venv_test_dir, 'venv')
    def test_create_defaults(self):
        '''
        virtualenv.managed
        '''
        # Creating a venv with defaults must produce a pip executable.
        self.run_function('virtualenv.create', [self.venv_dir])
        pip_file = os.path.join(self.venv_dir, 'bin', 'pip')
        self.assertTrue(os.path.exists(pip_file))
    def test_site_packages(self):
        # A venv built with system site-packages should expose a different
        # package set than an isolated one.
        pip_bin = os.path.join(self.venv_dir, 'bin', 'pip')
        self.run_function(
            'virtualenv.create', [self.venv_dir], system_site_packages=True
        )
        with_site = self.run_function('pip.freeze', bin_env=pip_bin)
        self.run_function('file.remove', [self.venv_dir])
        self.run_function('virtualenv.create', [self.venv_dir])
        without_site = self.run_function('pip.freeze', bin_env=pip_bin)
        self.assertFalse(with_site == without_site)
    def test_clear(self):
        # Recreating a venv with clear=True must drop previously installed
        # packages (pep8 here).
        pip_bin = os.path.join(self.venv_dir, 'bin', 'pip')
        self.run_function('virtualenv.create', [self.venv_dir])
        self.run_function('pip.install', [], pkgs='pep8', bin_env=pip_bin)
        self.run_function('virtualenv.create', [self.venv_dir], clear=True)
        packages = self.run_function(
            'pip.list', prefix='pep8', bin_env=pip_bin
        )
        self.assertFalse('pep8' in packages)
    def tearDown(self):
        # Remove the whole per-test temp tree.
        self.run_function('file.remove', [self.venv_test_dir])
| 34.677966 | 85 | 0.676931 |
79542163d1f022541aa24118da84b9417851c015 | 717 | py | Python | tests/test_text_mobject.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | [
"MIT"
] | 2 | 2022-03-31T08:31:00.000Z | 2022-03-31T08:31:43.000Z | tests/test_text_mobject.py | Pow3r5/manim | 2972a64342aa5ae72977b444f653b05250ab1f8f | [
"MIT"
] | 21 | 2022-03-02T15:25:49.000Z | 2022-03-07T11:15:45.000Z | tests/test_text_mobject.py | DD2480-Group-10/manim | e147a9fc6c117332221e42437481f3efba76499a | [
"MIT"
] | null | null | null | from __future__ import annotations
from colour import Color
from manim.mobject.text.text_mobject import MarkupText, Text
def test_font_size():
    """Scaling a Text or MarkupText mobject must update ``font_size``.

    After ``.scale(0.3)`` both classes should report a font size of 14.4
    (rounded to 5 decimals to absorb float noise).
    """
    for text_cls in (Text, MarkupText):
        scaled = text_cls("0").scale(0.3)
        assert round(scaled.font_size, 5) == 14.4
def test_non_str_color():
    """Test that the Text and MarkupText can accept non_str color values
    i.e. colour.Color(red)."""
    # NOTE(review): this test only checks that construction does not raise;
    # both results are unused and nothing is asserted about the resulting
    # color — consider asserting the stored color equals Color("blue").
    text = Text("test_color_inheritance", color=Color("blue"))
    markup_text = MarkupText("test_color_inheritance", color=Color("blue"))
79542223a6cdef0401a0da95deb872c1eca384bf | 1,436 | py | Python | recipe/migrations/0001_initial.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | [
"MIT"
] | null | null | null | recipe/migrations/0001_initial.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | [
"MIT"
] | 3 | 2021-06-04T23:22:04.000Z | 2021-09-22T19:10:42.000Z | recipe/migrations/0001_initial.py | UtkarshAgrawalDTU/My-Fridge-API | 8b73b40ef5c4920b47db66574305c26095f9b1e7 | [
"MIT"
] | 1 | 2021-08-20T10:50:24.000Z | 2021-08-20T10:50:24.000Z | # Generated by Django 3.0.6 on 2020-05-25 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Ingredient and Recipe
    # tables (Recipe has a many-to-many link to Ingredient). Do not edit the
    # operations by hand once applied.

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, default='Not provided', max_length=200)),
                ('img_url', models.URLField(blank=True)),
                ('author', models.CharField(blank=True, default='Unknown', max_length=200)),
                ('prepare_time', models.CharField(blank=True, default='Unknown', max_length=100)),
                ('cook_time', models.CharField(blank=True, default='Unknown', max_length=100)),
                ('total_time', models.CharField(blank=True, default='Unknown', max_length=100)),
                ('directions', models.TextField(blank=True, default='Unknown')),
                ('ingredients', models.ManyToManyField(related_name='recipes', to='recipe.Ingredient')),
            ],
        ),
    ]
| 39.888889 | 114 | 0.587047 |
79542269894205f26f7b297649a6c0cdf4568c23 | 1,166 | py | Python | maltools/models/files.py | infosec-garage/maltools | 007c7856f343b267b28b2bca8065ec2832678fc2 | [
"MIT"
] | null | null | null | maltools/models/files.py | infosec-garage/maltools | 007c7856f343b267b28b2bca8065ec2832678fc2 | [
"MIT"
] | null | null | null | maltools/models/files.py | infosec-garage/maltools | 007c7856f343b267b28b2bca8065ec2832678fc2 | [
"MIT"
] | null | null | null | """MaltoolFile model subclasses"""
from __future__ import annotations
import re
from pathlib import Path
from typing import List
from ..utils import read_file, sha256sum
from .base import Indicator, MaltoolFile
from .indicators import FunctionIndicator
class PowerShellFile(MaltoolFile):
    """Malicious PowerShell file."""

    # Matches ``function <Name> {`` definitions and captures the name.
    # Changes from the original pattern:
    #   - ``\s+`` after ``function`` (the old ``\s*`` + ``*`` name let e.g.
    #     the word "functions" match, capturing the bogus name "s");
    #   - ``+`` so an empty name can never be captured;
    #   - ``[\w-]`` so names containing digits are recognised.
    _FUNCTION_RE = re.compile(r'\s*function\s+([\w-]+)\s*{')

    @classmethod
    def parse(cls, filename: Path, url: str) -> PowerShellFile:
        """Build a PowerShellFile from the script at ``filename``.

        Args:
            filename: path of the PowerShell script to analyse.
            url: URL the sample was retrieved from.

        Returns:
            A populated instance; ``indicators`` is only passed when at
            least one function definition was found (preserving the
            original behaviour of omitting the field otherwise).
        """
        sha256_hash = sha256sum(filename)
        contents = read_file(filename)
        # Deduplicate while preserving first-seen order (a plain ``set``
        # produced a nondeterministic indicator order).
        unique_names = dict.fromkeys(cls._FUNCTION_RE.findall(contents))
        indicators: List[Indicator] = [
            FunctionIndicator(value=name) for name in unique_names
        ]
        if indicators:
            return cls(
                name=filename.name,
                url=url,
                sha256=sha256_hash,
                indicators=indicators,
            )
        return cls(
            name=filename.name,
            url=url,
            sha256=sha256_hash,
        )
795423b71b38aba0a54c93c6145a622a7334b9f3 | 9,053 | py | Python | Habana/benchmarks/bert/implementations/bert-tf-sys-420gh-tngr/TensorFlow/common/tb_utils.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 27 | 2021-07-01T00:34:52.000Z | 2022-03-29T08:49:53.000Z | Habana/benchmarks/bert/implementations/bert-tf-sys-420gh-tngr/TensorFlow/common/tb_utils.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 21 | 2021-08-31T08:34:50.000Z | 2022-03-17T11:42:10.000Z | Habana/benchmarks/bert/implementations/bert-tf-sys-420gh-tngr/TensorFlow/common/tb_utils.py | jqueguiner/training_results_v1.0 | 8200377f425ae24b6ed6c2816b9273aab0996d43 | [
"Apache-2.0"
] | 39 | 2021-07-02T00:46:14.000Z | 2022-03-13T16:59:55.000Z | import os
import time
import tensorflow as tf
from copy import deepcopy
from tensorboard.plugins.hparams import api as hp
from tensorflow.python.eager import context
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.compat.v1.keras.callbacks import TensorBoard, Callback
def _remove_prefix(s, prefix):
if s.startswith(prefix):
s = s[len(prefix):]
return s
def _parse_precision():
flag = os.environ.get('TF_ENABLE_BF16_CONVERSION', '0').lower()
try:
value = int(flag)
except:
value = -1
if flag == 'false' or value == 0:
return 'fp32'
elif flag == 'true' or value == 1:
return 'bf16'
return flag
def _set_precision_if_missing(hparams: dict):
    """Ensure ``hparams`` carries a 'precision' entry; returns the same dict.

    The value is derived from the environment via ``_parse_precision`` and
    only applied when the caller did not set one explicitly.
    """
    hparams.setdefault('precision', _parse_precision())
    return hparams
def _copy_and_clean_hparams(hparams: dict):
    """Return a copy of ``hparams`` containing only loggable values.

    bool/int/float/str values are copied verbatim; any other value is
    coerced with ``str`` (with an info log), or dropped entirely when even
    the ``str`` conversion fails.
    """
    hparams_ = dict()
    for name, value in hparams.items():
        if isinstance(value, (str, bool, int, float)):
            hparams_[name] = value
            continue
        try:
            hparams_[name] = str(value)
            tf.compat.v1.logging.info(
                f'Type of parameter "{name}" is not one of (bool, int, float, str). '
                'It will be saved as a string.')
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            tf.compat.v1.logging.info(
                f'Conversion of parameter "{name}" to string failed. '
                'Parameter will not be saved.')
    return hparams_
def write_hparams_v1(writer, hparams: dict):
    """Record ``hparams`` as a TensorBoard HParams summary (TF1/graph mode).

    Args:
        writer: a summary ``FileWriter``, or a log-dir string resolved via
            ``SummaryWriterCache``.
        hparams: hyperparameters to record; values are sanitized first and
            a 'precision' entry is added when missing.
    """
    hparams = _copy_and_clean_hparams(hparams)
    hparams = _set_precision_if_missing(hparams)
    # We create Session here, because in case of older topologies
    # that run in graph mode the FileWriter needs it.
    with tf.compat.v1.Session():
        if isinstance(writer, str):
            writer = SummaryWriterCache.get(writer)
        summary = hp.hparams_pb(hparams).SerializeToString()
        writer.add_summary(summary)
def write_hparams_v2(writer, hparams: dict):
    """Record ``hparams`` through a TF2 summary writer (``writer``).

    Values are sanitized and a 'precision' entry is added when missing,
    mirroring write_hparams_v1.
    """
    hparams = _copy_and_clean_hparams(hparams)
    hparams = _set_precision_if_missing(hparams)
    with writer.as_default():
        hp.hparams(hparams)
class ExamplesPerSecondEstimatorHook(tf.compat.v1.train.StepCounterHook):
    """Calculate and report global_step/sec and examples/sec during runtime."""
    # Copy-pasted from tensorflow_estimator/python/estimator/tpu/tpu_estimator.py
    def __init__(self,
                 batch_size=None,
                 every_n_steps=1,
                 every_n_secs=None,
                 output_dir=None,
                 summary_writer=None,
                 extra_metrics=None,
                 verbose=False):
        # batch_size: per-step example count; when given, 'examples/sec' is
        #   derived as batch_size * global_step/sec.
        # extra_metrics: optional {tag: factor} pairs; each is reported as
        #   factor * global_step/sec.
        # verbose: additionally log each reported value at info level.
        super().__init__(
            every_n_steps=every_n_steps,
            every_n_secs=every_n_secs,
            output_dir=output_dir,
            summary_writer=summary_writer)
        self._extra_metrics = extra_metrics or {}
        self._verbose = verbose
        if batch_size is not None:
            self._extra_metrics['examples/sec'] = batch_size
    def _add_summary(self, tag, value, step):
        # Emit a single scalar summary value for ``tag`` at ``step``.
        Summary = tf.compat.v1.Summary
        global_step_summary = Summary(value=[
            Summary.Value(tag=tag, simple_value=value)
        ])
        self._summary_writer.add_summary(global_step_summary, step)
        if self._verbose:
            tf.compat.v1.logging.info(f'{tag}: {value}')
    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        # Called by the base StepCounterHook at each trigger; converts the
        # elapsed step/time window into rates and writes them out.
        global_step_per_sec = elapsed_steps / elapsed_time
        if self._summary_writer is not None:
            self._add_summary('global_step/sec',
                              global_step_per_sec, global_step)
            for name, factor in self._extra_metrics.items():
                value = factor * global_step_per_sec
                self._add_summary(name, value, global_step)
class ExamplesPerSecondKerasHook(Callback):
    # Keras callback counterpart of ExamplesPerSecondEstimatorHook: reports
    # global_step/sec and examples/sec summaries during training, using the
    # per-batch 'size' entry from ``logs`` to accumulate example counts.
    def __init__(self,
                 every_n_steps=1,
                 every_n_secs=None,
                 output_dir=None,
                 summary_writer=None):
        # Either pass a ready summary_writer or an output_dir to resolve one
        # from the FileWriter cache.
        self.writer = summary_writer or SummaryWriterCache.get(output_dir)
        self._timer = tf.compat.v1.train.SecondOrStepTimer(
            every_n_secs, every_n_steps)
        self._total_examples = 0
        self._should_trigger = True
    def on_train_begin(self, logs=None):
        self._timer.reset()
    def on_train_batch_begin(self, batch, logs=None):
        # Decide up-front whether this batch closes a reporting window.
        self._should_trigger = self._timer.should_trigger_for_step(
            logs.get('batch', 0))
    def on_train_batch_end(self, batch, logs=None):
        step = logs.get('batch', 0)
        # 'size' is the number of samples in this batch (Keras-provided).
        self._total_examples += logs.get('size', 0)
        if self._should_trigger:
            elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
                step)
            if elapsed_time is not None:
                self._log_and_record(
                    elapsed_steps, elapsed_time, step, self._total_examples)
                self._total_examples = 0
    def _log_and_record(self, elapsed_steps, elapsed_time,
                        global_step, total_examples=None):
        # Convert the elapsed window into rates and write scalar summaries.
        Summary = tf.compat.v1.Summary
        global_step_per_sec = elapsed_steps / elapsed_time
        if self.writer is not None:
            global_step_summary = Summary(value=[
                Summary.Value(
                    tag='global_step/sec', simple_value=global_step_per_sec)
            ])
            self.writer.add_summary(global_step_summary, global_step)
            if total_examples is not None:
                examples_per_sec = total_examples / elapsed_time
                example_summary = Summary(value=[
                    Summary.Value(tag='examples/sec',
                                  simple_value=examples_per_sec)
                ])
                self.writer.add_summary(example_summary, global_step)
class TBSummary(object):
    """
    Creates a proxy for FileWriter for TensorBoard.
    :param log_dir: - path where experiment is running (usually the same as
        model_dir in Estimator)
    """
    def __init__(self, log_dir: str):
        super().__init__()
        self._log_dir = log_dir
        self._session = None
    def __enter__(self):
        # Open a TF1 session for the lifetime of the ``with`` block.
        self._session = tf.compat.v1.Session()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._session:
            self._session.close()
            self._session = None
    def add_scalar(self, tag, value, global_step=None):
        # Write a single scalar event (with wall time) to the cached writer.
        # NOTE(review): ``with self._session:`` closes the session on exit,
        # so a second add_scalar call within the same ``with TBSummary(...)``
        # block would operate on a closed session — confirm intended usage
        # is one add_scalar per context.
        with self._session:
            writer = SummaryWriterCache.get(self._log_dir)
            summary = tf.compat.v1.Summary(
                value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)])
            event = tf.compat.v1.Event(summary=summary)
            event.wall_time = time.time()
            event.step = global_step
            writer.add_event(event)
class TensorBoardWithHParamsV1(TensorBoard):
    """
    Adds TensorBoard visualization to training process.
    Writes training tfevent file into default log directory, but
    stores evaluation in log_dir/eval subdirectory.
    """
    def __init__(self, hparams, *args, **kwargs):
        # hparams: dict written once via write_hparams_v1 when the writers
        # are initialized.
        super().__init__(*args, **kwargs)
        self.hparams = hparams
        self._train_writer = None
        self._eval_writer = None
    def _switch_writer(self, mode):
        # Point ``self.writer`` (used by the base class) at the writer for
        # the given phase: 'train' or anything else for evaluation.
        self.writer = self._train_writer if mode == 'train' else self._eval_writer
    def _init_writer(self, model):
        """Sets file writer."""
        if context.executing_eagerly():
            raise NotImplementedError('hook does not support eager execution')
        self._train_writer = SummaryWriterCache.get(self.log_dir)
        self._eval_writer = SummaryWriterCache.get(
            os.path.join(self.log_dir, 'eval'))
        self._switch_writer('train')
        # Hyperparameters are recorded once, into the training log.
        write_hparams_v1(self.writer, self.hparams)
    def _write_custom_summaries(self, step, logs=None):
        """
        This methods works on the assumption that metrics containing `val`
        in name are related to validation (that's the default in Keras).
        """
        logs = logs or {}
        train_logs = {}
        eval_logs = {}
        # Split metrics into train vs eval and strip the Keras name
        # prefixes so both phases share the same tag names.
        for name, value in logs.items():
            if 'val' in name:
                if name.startswith('batch_val_'):
                    name = 'batch_' + _remove_prefix(name, 'batch_val_')
                elif name.startswith('epoch_val_'):
                    name = _remove_prefix(name, 'epoch_val_')
                eval_logs[name] = value
            else:
                if name.startswith('batch_'):
                    name = _remove_prefix(name, 'batch_')
                train_logs[name] = value
        # Write each subset through the corresponding writer.
        self._switch_writer('eval')
        super()._write_custom_summaries(step, eval_logs)
        self._switch_writer('train')
        super()._write_custom_summaries(step, train_logs)
| 34.953668 | 85 | 0.625097 |
7954247882ce3bb79e0a8cb4db7509bc5f6619ee | 857 | py | Python | plugin/taskmage2/utils/functional.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | 1 | 2017-11-28T14:12:03.000Z | 2017-11-28T14:12:03.000Z | plugin/taskmage2/utils/functional.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | 16 | 2017-08-13T18:01:26.000Z | 2020-11-17T04:55:43.000Z | plugin/taskmage2/utils/functional.py | willjp/vim-taskmage | adcf809ccf1768753eca4dadaf6279b34e8d5699 | [
"BSD-2-Clause"
] | null | null | null | import functools
def pipeline(data, fn_list):
    """Lazily apply each function in ``fn_list`` element-wise over ``data``.

    Every function is layered on with ``map``, so the result is an
    iterator that evaluates on demand; with an empty ``fn_list`` the
    original ``data`` is returned untouched.
    """
    out = data
    for fn in fn_list:
        out = map(fn, out)
    return out
def multifilter(filters, result):
    """Lazily AND-combine every predicate in ``filters`` over ``result``.

    Returns:
        ``result`` unchanged when ``filters`` is empty or None, otherwise
        a ``filter`` iterator chaining all predicates.
    """
    if not filters:
        return result
    return functools.reduce(
        lambda acc, predicate: filter(predicate, acc), filters, result)
if __name__ == '__main__':  # pragma: no cover
    # Quick manual demo of multifilter.
    def example():
        def above_ten(num):
            return num > 10
        def below_fifteen(num):
            return num < 15
        filters = [above_ten, below_fifteen]
        result = multifilter(filters, range(20))
        # NOTE(review): multifilter returns a lazy filter object here, so
        # this prints "<filter object ...>"; wrap in list(...) to see values.
        print(result)
    example()
| 20.404762 | 53 | 0.565928 |
795424ecb13cfe6a77b9f4fcca0d64bcd9943c3f | 836 | py | Python | py/python3/medium/environment.py | nmorse/pounce | 3b4ddb0e5b5be3a0699b4a98b6b4daefd3653539 | [
"MIT"
] | 9 | 2018-09-15T01:59:54.000Z | 2020-07-08T11:15:31.000Z | py/python3/medium/environment.py | nmorse/pounce | 3b4ddb0e5b5be3a0699b4a98b6b4daefd3653539 | [
"MIT"
] | 29 | 2018-07-06T19:14:30.000Z | 2019-05-17T10:15:10.000Z | py/python3/medium/environment.py | nmorse/pounce | 3b4ddb0e5b5be3a0699b4a98b6b4daefd3653539 | [
"MIT"
] | null | null | null | # connect environment specific inputs and outputs to pounce words
import time
#from pounce import runtime as pounce
io = {"state": True}
words = {}
def _readIO(s, pl):
    """Push a shallow copy of the global ``io`` table onto the stack ``s``.

    Returns the ``[stack, program-list]`` pair expected by the runtime.
    """
    global io
    s.append(dict(io))  # shallow copy so later mutations don't alias
    return [s, pl]
def _writeIO(s, pl):
    """Pop the next io table off the stack and copy it into global ``io``.

    Only keys already present in ``io`` are updated; each updated key is
    printed with its new value.
    """
    global io
    incoming = s.pop()
    for key in io:
        io[key] = incoming[key]
        print(key, io[key])
    return [s, pl]
def _seconds(s, pl):
s.append(time.monotonic())
#print('time.monotonic', time.monotonic())
return [s, pl]
def _sleep(s, pl):
seconds = s.pop()
#print('sleep', seconds)
#print('stack', s)
time.sleep(seconds)
return [s, pl]
# Register the IO/time primitives under their pounce word names.
words['>io'] = _readIO
words['io>'] = _writeIO
words['>sec'] = _seconds
words['sleep>'] = _sleep
| 19.44186 | 65 | 0.593301 |
79542527c0dde92db62934f6613468d71ad739ab | 1,449 | py | Python | fiepipedesktoplib/sites/shells/networkedsite.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | fiepipedesktoplib/sites/shells/networkedsite.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | fiepipedesktoplib/sites/shells/networkedsite.py | leith-bartrich/fiepipe_desktop | 5136141d67a59e9a2afb79f368a6a02f2d61d2da | [
"MIT"
] | null | null | null | import fiepipedesktoplib.sites.shells.abstractsite
import fiepipelib.stateserver
class Shell(fiepipedesktoplib.sites.shells.abstractsite.Shell):
    """Shell for working in a networked site."""

    # Name of the site this shell is bound to (set in __init__). The
    # original had a bare ``_siteName`` expression here, which raised
    # NameError at class-definition time.
    _siteName = None
    # Cached networkedsite instance; populated by do_reload.
    _networkedSite = None

    def __init__(self, localUser, entity, siteName):
        super().__init__(localUser, entity)
        self._siteName = siteName
        self.do_reload(None)

    def GetSite(self):
        """Return the loaded networked site, or raise if none is loaded."""
        if self._networkedSite is None:
            print("No site loaded. You could try the 'reload' command.")
            raise RuntimeError("Site not loaded.")
        return self._networkedSite

    def getPluginNamesV1(self):
        ret = super().getPluginNamesV1()
        ret.append('networked_site')
        return ret

    def do_reload(self, arg):
        """Reloads the named site from disk.
        Usage: reload
        """
        # NOTE(review): this relies on ``fiepipelib.networkedsite``, which is
        # not imported at the top of this module — confirm the import exists.
        registry = fiepipelib.networkedsite.localregistry(self._localUser)
        fqdn = self._entity.get_fqdn()
        sites = registry.GetByFQDN(fqdn)
        # Consistently use the camelCase attribute: the original wrote to
        # ``_networkedsite`` (lower-case 's') but GetSite read
        # ``_networkedSite``, so a loaded site was never visible.
        self._networkedSite = None
        for site in sites:
            assert isinstance(site, fiepipelib.networkedsite.networkedsite)
            if site.GetName() == self._siteName:
                self._networkedSite = site
        if self._networkedSite is None:
            print("Site not found. You probably need to exit this shell.")
            return

    def shutdownSite(self):
        # Nothing to release for a networked site.
        pass
795425d0f7cd6ad8482fc2ef371a5a9d3ac73135 | 3,033 | py | Python | lnt/util/wsgi_restart.py | llvm-mirror/lnt | 8c57bba3687ada10de5653ae46c537e957525bdb | [
"Apache-2.0"
] | 12 | 2015-10-29T19:28:02.000Z | 2020-02-04T21:25:32.000Z | lnt/util/wsgi_restart.py | llvm-mirror/lnt | 8c57bba3687ada10de5653ae46c537e957525bdb | [
"Apache-2.0"
] | 3 | 2017-03-04T14:23:14.000Z | 2019-11-02T21:56:51.000Z | lnt/util/wsgi_restart.py | llvm-mirror/lnt | 8c57bba3687ada10de5653ae46c537e957525bdb | [
"Apache-2.0"
] | 14 | 2015-04-03T03:36:06.000Z | 2019-10-23T14:09:08.000Z | # This code lifted from the mod_wsgi docs.
from __future__ import print_function
import os
import sys
import signal
import threading
import atexit
import Queue
_interval = 1.0
_times = {}
_files = []
_running = False
_queue = Queue.Queue()
_lock = threading.Lock()
def _restart(path):
_queue.put(True)
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
print('%s Triggering process restart.' % prefix, file=sys.stderr)
os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
try:
# If path doesn't denote a file and were previously
# tracking it, then it has been removed or the file type
# has changed so force a restart. If not previously
# tracking the file then we can ignore it as probably
# pseudo reference such as when file extracted from a
# collection of modules contained in a zip file.
if not os.path.isfile(path):
return path in _times
# Check for when file last modified.
mtime = os.stat(path).st_mtime
if path not in _times:
_times[path] = mtime
# Force restart when modification time has changed, even
# if time now older, as that could indicate older file
# has been restored.
if mtime != _times[path]:
return True
except Exception:
# If any exception occured, likely that file has been
# been removed just before stat(), so force a restart.
return True
return False
def _monitor():
while True:
# Check modification times on all files in sys.modules.
for module in sys.modules.values():
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
if _modified(path):
return _restart(path)
# Check modification times on files which have
# specifically been registered for monitoring.
for path in _files:
if _modified(path):
return _restart(path)
# Go to sleep for specified interval.
try:
return _queue.get(timeout=_interval)
except Exception:
pass
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)
def _exiting():
try:
_queue.put(True)
except Exception:
pass
_thread.join()
atexit.register(_exiting)
def track(path):
if path not in _files:
_files.append(path)
def start(interval=1.0):
global _interval
if interval < _interval:
_interval = interval
global _running
_lock.acquire()
if not _running:
prefix = 'monitor (pid=%d):' % os.getpid()
print('%s Starting change monitor.' % prefix, file=sys.stderr)
_running = True
_thread.start()
| 24.658537 | 76 | 0.607979 |
7954261d87560c9d93e9cca0e6e40051e7aa13d2 | 752 | py | Python | publications/OptimisingForEquity/main.py | alan-turing-institute/spatial-inequality | 13e47817db92c14aaf3b0f3d020305adb821e0b5 | [
"MIT"
] | 5 | 2021-07-14T12:42:32.000Z | 2022-03-02T18:14:48.000Z | publications/OptimisingForEquity/main.py | alan-turing-institute/spatial-inequality | 13e47817db92c14aaf3b0f3d020305adb821e0b5 | [
"MIT"
] | 40 | 2020-01-29T14:44:13.000Z | 2022-02-11T16:14:53.000Z | publications/OptimisingForEquity/main.py | alan-turing-institute/spatial-inequality | 13e47817db92c14aaf3b0f3d020305adb821e0b5 | [
"MIT"
] | 1 | 2021-03-27T20:26:39.000Z | 2021-03-27T20:26:39.000Z | import sensors_urb_obs
import networks_single_obj
import networks_multi_objs
import networks_two_objs
import figs_demographics
import figs_single_obj
import figs_urb_obs
import figs_multi_objs
import figs_two_objs
import report
def main():
    """
    Run all scripts to process the data, generate optimised networks and save figures
    and a formatted report for a local authority, as defined by the parameters in
    `config.yml`.
    """
    # Pipeline stages in dependency order: later stages consume the outputs
    # of earlier ones, so the sequence must not be reordered.
    pipeline = (
        sensors_urb_obs,
        networks_single_obj,
        networks_multi_objs,
        networks_two_objs,
        figs_demographics,
        figs_single_obj,
        figs_urb_obs,
        figs_multi_objs,
        figs_two_objs,
        report,
    )
    for stage in pipeline:
        stage.main()
# Allow the whole pipeline to be executed as a script.
if __name__ == "__main__":
    main()
| 22.787879 | 85 | 0.75133 |
795428bf2a3bb2eb928c703f2e6434194c9913d4 | 6,078 | py | Python | tests/broken/test_construct.py | james-morrison-mowi/wavespectra | d721b8bb491113173eabad0773ce4494b81c5e74 | [
"MIT"
] | null | null | null | tests/broken/test_construct.py | james-morrison-mowi/wavespectra | d721b8bb491113173eabad0773ce4494b81c5e74 | [
"MIT"
] | null | null | null | tests/broken/test_construct.py | james-morrison-mowi/wavespectra | d721b8bb491113173eabad0773ce4494b81c5e74 | [
"MIT"
] | null | null | null | import sys
import os
import logging
import unittest
import time
import numpy as np
from numpy.testing import assert_array_almost_equal
# Debugging aid: set to True to display each constructed spectrum with matplotlib.
plot = False
if plot:
    import matplotlib.pyplot as plt
sys.path.insert(0,os.path.join(os.path.dirname(__file__),'..'))
from wavespectra.construct import jonswap, ochihubble
def check_equal(one, other):
    """Assert that two spectra datasets agree on efth, freq and dir (4 d.p.)."""
    for key in ('efth', 'freq', 'dir'):
        assert_array_almost_equal(one[key], other[key], decimal=4)
# Numerical integration of a 2D spectrum: Hs = 4 * sqrt(m0)
def integrate_2d_hs(freqs, dirs, S):
    """Integrate S(f, dir) over frequency then direction; return 4*sqrt(m0)."""
    m0 = np.trapz(np.trapz(S, freqs, axis=0), dirs)
    return 4 * np.sqrt(m0)
class TestJonswap(unittest.TestCase):
    """Check that constructed JONSWAP spectra integrate to the expected Hs."""
    def setUp(self):
        print("\n === Testing JONSWAP construct ===")
    def hs(self, tp, alpha, gamma=3.3, df=0.02):
        """Reference Hs from direct summation of a 1D JONSWAP spectrum."""
        #Calculate 1D JONSWAP
        f = np.arange(df, 1.0, df)
        fp = 1.0 / tp
        # Spectral width parameter: 0.07 below the peak frequency, 0.09 above.
        sig = np.where(f<=fp, 0.07, 0.09)
        r = np.exp(-(f-fp)**2. / (2 * sig**2 * fp**2))
        S = 0.0617 * np.array(alpha) * f**(-5) * np.exp(-1.25*(f/fp)**(-4)) * gamma**r
        return 4 * (S.sum() * df)**0.5
    def test_jonswap_scalar(self):
        """Single set of scalar parameters."""
        dset = jonswap(tp=10,dp=90,alpha=0.01)
        if plot:
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][:,:].T)
            plt.show()
        assert_array_almost_equal(integrate_2d_hs(dset['freq'][:],
                                                  dset['dir'][:],
                                                  dset['efth'][:,:]),
                                  self.hs(10, 0.01), decimal=3)
    def test_jonswap_series(self):
        """1D arrays of parameters along a time coordinate."""
        tp = [10,5]
        dp = [90,180]
        dspr = [25,40]
        dset = jonswap(tp, dp, alpha=0.01, dspr=dspr, coordinates=[('time', [0,0])])
        if plot:
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][0,:,:].T)
            plt.show()
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][1,:,:].T)
            plt.show()
        # Each time step must integrate back to the Hs of its own parameters.
        for i,spec in enumerate(dset['efth']):
            assert_array_almost_equal(integrate_2d_hs(dset['freq'][:],
                                                      dset['dir'][:],
                                                      spec[:,:]),
                                      self.hs(tp[i],0.01), decimal=3)
    def test_jonswap_matrix(self):
        """3D parameter arrays over (time, lat, lon); spot-check one random cell."""
        tp = 10 * np.random.random((5, 4, 3))
        dp = 360 * np.random.random((5, 4, 3))
        dset = jonswap(tp, dp, alpha=0.01, dspr=25, coordinates=[('time', np.arange(0, 5)),
                                                                 ('lat', np.arange(0, 4)),
                                                                 ('lon', np.arange(0, 3))
                                                                 ])
        if plot:
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][0,:,:].T)
            plt.show()
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][1,:,:].T)
            plt.show()
        # Randomly chosen grid cell (non-deterministic test input).
        i = np.random.randint(5)
        j = np.random.randint(4)
        k = np.random.randint(3)
        assert_array_almost_equal(integrate_2d_hs(dset['freq'][:],
                                                  dset['dir'][:],
                                                  dset['efth'][i,j,k,:,:]),
                                  self.hs(tp[i,j,k], 0.01), decimal=3)
class TestOchiHubble(unittest.TestCase):
    """Check that constructed Ochi-Hubble spectra integrate to the expected Hs."""
    def setUp(self):
        print("\n === Testing OchiHubble construct ===")
    def hs(self,hs,tp,l,df=0.02):
        """Reference Hs from direct summation over both Ochi-Hubble wave systems."""
        #Calculate 1D OH
        gamma = lambda x: np.sqrt(2.*np.pi/x) * ((x/np.exp(1.)) * np.sqrt(x*np.sinh(1./x)))**x
        w = 2 * np.pi * np.arange(df, 1.0, df)
        S = np.zeros((len(w)))
        # Accumulate the spectral contribution of each wave system.
        for i,H in enumerate(hs):
            #Create 1D spectrum
            w0 = 2 * np.pi / tp[i]
            B = np.maximum(l[i], 0.01) + 0.25
            A = 0.5 * np.pi * H**2 * ((B*w0**4)**l[i] / gamma(l[i]))
            a = np.minimum((w0 / w)**4, 100.)
            S += A * np.exp(-B*a) / (np.power(w, 4.*B))
        return 4 * (S.sum() * df)**0.5
    def test_oh_scalar(self):
        """Test single set of parameters."""
        dset = ochihubble(hs=[1.0,1.0], tp=[10,5], L=[1,3], dp=[90,180], dspr=[25,40])
        if plot:
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][:,:].T)
            plt.show()
        assert_array_almost_equal(integrate_2d_hs(dset['freq'][:],
                                                  dset['dir'][:],
                                                  dset['efth'][:,:]),
                                  self.hs([1.,1.], [10,5], [1,3]), decimal=3)
    def test_oh_series(self):
        """Test 1D arrays of parameters."""
        hs = [1.0 * np.random.random(10), 1.0*np.random.random(10)]
        tp = [10.0 * np.random.random(10) + 1, 10.0*np.random.random(10) + 1]
        L = [1.0 * np.random.random(10) + 1, 3.0*np.random.random(10) + 1]
        dp = [360.0 * np.random.random(10), 360.0*np.random.random(10)]
        dspr = [20.0 * np.random.random(10) + 10., 50.0*np.random.random(10) + 10.]
        dset = ochihubble(hs, tp, L, dp, dspr, coordinates=[('time', np.arange(0,10))])
        if plot:
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][0,:,:].T)
            plt.show()
            plt.pcolormesh(dset['freq'][:], dset['dir'][:], dset['efth'][1,:,:].T)
            plt.show()
        for i,spec in enumerate(dset['efth']):
            # Gather the parameters of both wave systems for this time step.
            params = [
                [h[i] for h in hs],
                [t[i] for t in tp],
                [l[i] for l in L]
            ]
            print(params)
            assert_array_almost_equal(integrate_2d_hs(dset['freq'][:],
                                                      dset['dir'][:],
                                                      spec[:,:]),
                                      self.hs(*params), decimal=3)
# Run the full test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 41.346939 | 94 | 0.453274 |
795428cfa49b695c78e79f814195ffc449b8a454 | 2,283 | py | Python | test/pybind_test/wdl_mos_mpi_high_level.py | Chunshuizhao/HugeCTR | 085b2e8ad2abaee5578e7bf43b8394d0b8473b58 | [
"Apache-2.0"
] | null | null | null | test/pybind_test/wdl_mos_mpi_high_level.py | Chunshuizhao/HugeCTR | 085b2e8ad2abaee5578e7bf43b8394d0b8473b58 | [
"Apache-2.0"
] | null | null | null | test/pybind_test/wdl_mos_mpi_high_level.py | Chunshuizhao/HugeCTR | 085b2e8ad2abaee5578e7bf43b8394d0b8473b58 | [
"Apache-2.0"
] | null | null | null | import hugectr
from mpi4py import MPI
import threading
import sys
def model_oversubscriber_test(json_file, output_dir, use_host_ps):
    """Train a WDL model in epoch mode with the model oversubscriber (MOS).

    Trains on file lists 0-4, saves the model, then resumes on file lists
    6-8.  When ``use_host_ps`` is True the host-memory parameter server is
    used and the incremental model is fetched after each fit.
    """
    # NOTE(review): `dataset` appears unused - the reader below is configured
    # with its own source/keyset lists; confirm before removing.
    dataset = [("file_list."+str(i)+".txt", "file_list."+str(i)+".keyset") for i in range(5)]
    # Two nodes with 4 GPUs each; repeat_dataset=False enables epoch mode.
    solver = hugectr.CreateSolver(batchsize = 16384,
                                  batchsize_eval = 16384,
                                  vvgpu = [[0, 1, 2, 3], [4, 5, 6, 7]],
                                  use_mixed_precision = False,
                                  i64_input_key = False,
                                  use_algorithm_search = True,
                                  use_cuda_graph = True,
                                  repeat_dataset = False)
    reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                    source = ["file_list."+str(i)+".txt" for i in range(5)],
                                    keyset = ["file_list."+str(i)+".keyset" for i in range(5)],
                                    eval_source = "./file_list.5.txt",
                                    check_type = hugectr.Check_t.Sum)
    optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam)
    # Resume from previously trained sparse embedding tables in output_dir.
    mos = hugectr.CreateMOS(train_from_scratch = False, use_host_memory_ps=use_host_ps, trained_sparse_models = [output_dir + "/wdl_0_sparse_model", output_dir + "/wdl_1_sparse_model"])
    model = hugectr.Model(solver, reader, optimizer, mos)
    model.construct_from_json(graph_config_file = json_file, include_dense_network = True)
    model.compile()
    model.summary()
    # First training pass over file lists 0-4.
    model.fit(num_epochs=1, eval_interval=1000000, display =200)
    if use_host_ps:
        updated_model = model.get_incremental_model()
    model.save_params_to_files("wdl")
    # Second pass: switch the reader to file lists 6-8 and continue training.
    model.set_source(source = ["file_list."+str(i)+".txt" for i in range(6,9)],
                    keyset = ["file_list."+str(i)+".keyset" for i in range(6,9)],
                    eval_source = "./file_list.5.txt")
    model.fit(num_epochs=1, eval_interval=1000000, display =200)
    if use_host_ps:
        updated_model = model.get_incremental_model()
    model.save_params_to_files("wdl")
# CLI: <graph config json> <true|false: use host-memory parameter server> <output dir>
if __name__ == "__main__":
    json_file = sys.argv[1]
    use_host_ps = sys.argv[2].lower() == 'true'
    output_dir = sys.argv[3]
    model_oversubscriber_test(json_file, output_dir, use_host_ps)
79542994fef39ed1046712dc557e4c632658cc2c | 7,207 | py | Python | tempest/reporting.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | null | null | null | tempest/reporting.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | null | null | null | tempest/reporting.py | KiranPawar72/tempest | 1fef3dd92b083055793065dd0693454735ec2c01 | [
"Apache-2.0"
] | null | null | null | from tempest import tvaultconf
import subprocess
import datetime
import os
import pickle
test_results_file="Report/results.html"
sanity_results_file="test_results"
test_script_status = tvaultconf.PASS
test_script_name = ""
test_step_to_write =""
passed_count = 0
failed_count = 0
total_tests_count = passed_count + failed_count
steps_count = 0
def setup_report(testname):
    """Open a new HTML results table titled *testname* in the report file."""
    head = """<table border="1">
          <tr bgcolor="#b3e6ff">
            <th style="font-size:20px">{0}</th>
            <th style="font-size:20px">Result</th>
          </tr>
          """.format(testname.capitalize())
    # The report file is only ever appended to; consolidate_report() rewrites it.
    with open(test_results_file, "a") as f:
        f.write(head)
def add_test_script(script):
    """Register *script* as the current test case and reset its step counter."""
    global test_script_name
    global steps_count
    steps_count = 0
    test_script_name = script
def set_test_script_status(status):
    """Override the running PASS/FAIL status of the current test script."""
    global test_script_status
    test_script_status = status
def test_case_to_write():
    """Flush the current test case (name, status, steps) to the HTML report.

    Also updates the pass/fail counters and persists them by rewriting lines
    9-10 of this very source file with sed, so counts survive across runs.
    """
    global test_step_to_write
    global test_script_status
    global passed_count
    global failed_count
    global total_tests_count
    if test_script_status == "PASS":
        color = "green"
        passed_count += 1
    else:
        color = "red"
        failed_count += 1
    total_tests_count = passed_count + failed_count
    # Local name intentionally shadows the function name (kept as-is).
    test_case_to_write = """
        <tr>
	    <td colspan="1" style="font-size:15px"><b>{0}</b></td>
            <td> <font color={1} style="font-size:15px"><b>{2}</b></font> </td>
        </tr>
        """.format(test_script_name, color, test_script_status)
    with open(test_results_file, "a") as f:
        f.write(test_case_to_write)
        f.write(test_step_to_write)
    test_step_to_write = ""
    test_script_status = tvaultconf.PASS
    # HACK: self-modifying source - sed targets absolute lines 9 and 10 of
    # tempest/reporting.py (the passed_count/failed_count assignments).
    # Do not insert or remove lines above those assignments.
    cmd1 = "sed -i -e '9s/passed_count = [0-9]*/passed_count = {0}/' tempest/reporting.py".format(passed_count)
    cmd2 = "sed -i -e '10s/failed_count = [0-9]*/failed_count = {0}/' tempest/reporting.py".format(failed_count)
    cmd = cmd1+"; " +cmd2
    p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
    p.wait()
def add_test_step(teststep, status):
    """Buffer one numbered test step row; a FAIL step fails the whole script."""
    if status == "PASS":
        color = "green"
    else:
        color = "red"
        # Any failing step marks the enclosing test script as FAIL.
        global test_script_status
        test_script_status = "FAIL"
    global test_step_to_write
    global steps_count
    steps_count+=1
    # Rows accumulate here and are written out later by test_case_to_write().
    test_step_to_write += """<tr>
            <td> <font color={1}><pre style="font-family: 'Times New Roman', Times, serif; font-size: 13px; height: 17px"><i> {3}. {0}</pre></font> </td>
            <td> <font color={1} style="font-size:15px">{2}</font> </td>
        </tr>
        """.format(teststep.capitalize(), color, status, steps_count)
def end_report_table():
    """Close the current HTML table and patch the summary counts in the report."""
    with open(test_results_file, "a") as f:
        f.write("</table>\n<br>")
    # HACK: sed targets absolute lines 14-16 of Report/results.html
    # (the total/passed/failed cells written by consolidate_report()).
    cmd1 = "sed -i -e '14s/<td>[0-9]*/<td>{0}/' Report/results.html".format(total_tests_count)
    cmd2 = "sed -i -e '15s/<b>[0-9]*/<b>{0}/' Report/results.html".format(passed_count)
    cmd3 = "sed -i -e '16s/<b>[0-9]*/<b>{0}/' Report/results.html".format(failed_count)
    cmd = cmd1+"; " +cmd2+"; "+cmd3
    p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
    p.wait()
def consolidate_report():
    """Prepend a consolidated pass/fail summary and CSS to the HTML report,
    then number every test-name cell in the per-suite tables via BeautifulSoup."""
    pass_count = 0
    fail_count = 0
    # Count result rows: only bolded PASS/FAIL entries are test-case rows.
    with open(test_results_file, 'r') as html_file:
        for line in html_file:
            if 'PASS' in line:
                if '<b>' in line:
                    pass_count+=1
            if 'FAIL' in line:
                if '<b>' in line:
                    fail_count+=1
    total_count = pass_count + fail_count
    consolidate_table = """
        <table border="2">
        <col width="150">
        <col width="150">
        <col width="150">
        <tr bgcolor="#b3ffff">
            <th colspan="4" style="font-size:19px">Consolidated Report</th>
        </tr>
        <tr>
            <th style="font-size:17px">Total</th>
            <th style="font-size:17px">Passed</th>
            <th style="font-size:17px">Failed</th>
        </tr>
        <tr align="center"> <td style="font-size:17px">{0}</td>
            <td><font color=green style="font-size:17px"><b>{1}</b></td>
            <td><font color=red style="font-size:17px"><b>{2}</b></td>
        </tr>
        </table>
        <br>
        """.format(total_count, pass_count, fail_count)
    # Rewrite the file as: summary table + CSS + original content.
    with open(test_results_file,'r') as f2:
        ogcontent = f2.read()
    with open(test_results_file,'w') as f3:
        f3.write(consolidate_table)
    styl = '''
    <style>
    pre {
        overflow-x: auto;
        white-space: pre-wrap;
        white-space: -moz-pre-wrap;
        white-space: -pre-wrap;
        white-space: -o-pre-wrap;
        word-wrap: break-word;
    }
    </style>
    '''
    with open(test_results_file,'a') as f4:
        f4.write(styl)
        f4.write(ogcontent)
    from bs4 import BeautifulSoup
    with open(test_results_file, 'r') as f:
        soup = BeautifulSoup(f, 'html.parser')
    # Per-suite tables use border="1"; the summary table uses border="2".
    l1 = soup.findAll('table', {'border': '1'})
    for each in l1:
        i = 1
        children = each.findChildren('b')
        for child in children:
            # Number only the test-name cells, not the PASS/FAIL status cells.
            if child.string != 'FAIL' and child.string != 'PASS':
                child.string = "{}. ".format(i) + child.string
                i+=1
    with open(test_results_file, "wb") as f_output:
        f_output.write(soup.encode('utf8'))
def add_sanity_results(test_step, status):
    """Append one "step status" line to the plain-text sanity results file."""
    line = "{} {}\n".format(test_step, status)
    with open(sanity_results_file, "a") as f:
        f.write(line)
def get_tests(test_list_file, suite_path):
    """Write the dotted module names of all .py files in *suite_path* to *test_list_file*.

    ``__init__`` modules are skipped.  Each name is derived by stripping the
    ``.py`` suffix and dropping the first 7 path components (the repo prefix).
    """
    import glob
    with open(test_list_file, "w") as f:
        for path in glob.glob(str(suite_path) + "/*.py"):
            if "__init__" in path:
                continue
            dotted = ".".join(str(path[:-3]).split("/")[7:])
            # Fixed: original used a Python 2 print statement, which is a
            # syntax error under Python 3.  Output text is unchanged.
            print("test: " + dotted + "\n")
            f.write(dotted + "\n")
def add_sanity_results_to_tempest_report():
    """Render the plain-text sanity results file as an HTML table and append it
    (with a timestamp header) to the main report file."""
    result_table = """ <table border="1"><tr><th>TestName</th><th>Result</th></tr>"""
    with open(sanity_results_file, "r") as f:
        for line in f:
            if(line == "\n"):
                pass
            else:
                # Each non-empty line is "name result" as written by add_sanity_results().
                row = line.split()
                test_name = str(row[0])
                test_result = str(row[1])
                if(line.startswith("ERROR")):
                    text_color = "red"
                    # For ERROR lines, show everything after the "ERROR " prefix.
                    test_result = line[6:]
                elif(test_result.startswith("FAIL")):
                    text_color = "red"
                else:
                    text_color = "green"
                result_table+="""<tr>
                        <td><font color="%s" style="font-size:15px">%s</font></td>
                        <td><font color="%s" style="font-size:15px">%s</font></td>
                        </tr> """ % (text_color, test_name, text_color, test_result)
    html_file=open(test_results_file, "a")
    result_table+="""</table>"""
    html_file.write("Date : " + str(datetime.datetime.now()))
    html_file.write("<br/>")
    html_file.write("<b>Sanity Test Results</b>")
    html_file.write("<br/><br/>")
    html_file.write(result_table)
    html_file.close()
| 35.678218 | 164 | 0.557652 |
79542a6d2ea1634f16d943364039d7793a749978 | 42,178 | py | Python | Code/Data Processing/pre_processing.py | MauroSilvaPinto/Interpretable-EEG-seizure-prediction-using-a-multiobjective-evolutionary-algorithm | 210302843f2881ea1b19b25c9e5599e3896e09a8 | [
"CC0-1.0"
] | null | null | null | Code/Data Processing/pre_processing.py | MauroSilvaPinto/Interpretable-EEG-seizure-prediction-using-a-multiobjective-evolutionary-algorithm | 210302843f2881ea1b19b25c9e5599e3896e09a8 | [
"CC0-1.0"
] | null | null | null | Code/Data Processing/pre_processing.py | MauroSilvaPinto/Interpretable-EEG-seizure-prediction-using-a-multiobjective-evolutionary-algorithm | 210302843f2881ea1b19b25c9e5599e3896e09a8 | [
"CC0-1.0"
] | 2 | 2022-03-22T14:20:44.000Z | 2022-03-27T09:22:55.000Z | """
a script to pre-process and extract first-level features from raw data for the selected patients.
the output will be the extracted features, chronologically, in 5 second non-overlapping windows
order by patient and seizure.
format output name:pat[patient_number]_seizure[seizure_number]_featureMatrix.npy
example output name: pat102_seizure_1_featureMatrix.npy
this code can not be executed
as the original data from Epilepsiae can not be available online for public use
due to ethical concers
"""
import numpy as np
#import matplotlib.pyplot as plt
import datetime as dt
import os
from scipy import signal, integrate
import pywt
#%% Path setup and patient selection
#path = "/Users/Tiago/Desktop/Research/Data"
path = "D:\\O nosso paper\\Data"
sep = os.path.sep
if path[-1] != sep:
path+=sep
patient_selection = input('Enter patient ID: ')
patient_IDs = patient_selection.split(sep = ',') # allow user to enter multiple IDs separated by commas
patient_fs = int(input('Enter original sampling frequency (Hz): ')) # used for downsampling (if higher than 256Hz)
#%% Hyperparameters (e.g. seizure time, sliding window, filtering, ...)
# BUILDING SEIZURE DATA:
h_before_onset = dt.timedelta(hours = 4) # how many hours before onset?
h_between_onsets = dt.timedelta(hours = 4.5) # how many hours between seizures (cluster assumption)?
m_postictal = dt.timedelta(minutes = 30) # how many minutes of post-itcal (avoid influence in inter-ictal)?
# SLIDING WINDOW:
fsampling = 256 # sampling frequency (Hz)
window_size = fsampling * 5 # in number of samples
overlap = 0 # in number of samples
# FILTERING:
f_notch = 50 # power-line interference
Q = 30
b_notch, a_notch = signal.iirnotch(f_notch, Q, fsampling) # notch filter
f_HPF = 0.5 # remove DC component and breathing artifacts (slow drifts)
order = 4
b_HPF, a_HPF = signal.butter(order, f_HPF, 'highpass', fs = fsampling)
# FEATURE EXTRACTION:
# Features: statistical moments, spectral band power, SEF, wavelets, hjorth parameters (more?)
feature_labels = np.sort(['mean', 'var', 'skew', 'kurt', 'theta', 'delta', 'beta', 'alpha', 'lowgamma', 'highgamma',
'h_act', 'h_com', 'h_mob', 'sef50', 'sef75', 'sef90',
'a7', 'd7', 'd6', 'd5', 'd4', 'd3', 'd2', 'd1'])
number_of_features = len(feature_labels) # used later to detect number of seizures for each patient
theta_range = [0, 4]
delta_range = [4, 8]
beta_range = [8, 12]
alpha_range = [13, 30]
gamma_range = [30, 128]
low_gamma_range = [30, 79]
high_gamma_range = [79, 128]
mother_wavelet = pywt.Wavelet('db4')
#%% List all EVTS and patients
evts_list = sorted(os.listdir(path + 'EVTS' + sep))
evts_list = [s for s in evts_list if 'dataEvts' in s] # only files with "dataEvts"
evts_list = [path + 'EVTS' + sep + s for s in evts_list]
patient_list = sorted(os.listdir(path))
patient_list = [s for s in patient_list if 'pat' in s] # only folders with "pat"
patient_list = [path + s + sep for s in patient_list]
#%% Retrieve electrode labels / rows from data header
for ID in patient_IDs:
for pat in patient_list:
if "pat_" + ID in pat:
print(f'Gathering time vectors and gaps for patient {ID}...')
signal_list = sorted(os.listdir(pat))
signal_list = [s for s in signal_list if 'signalData' in s] # only files with "signalData"
signal_list = [pat + s for s in signal_list]
header_list = sorted(os.listdir(pat))
header_list = [s for s in header_list if 'dataHead' in s] # only files with "dataHead"
header_list = [pat + s for s in header_list]
header = np.load(header_list[0], allow_pickle = True)
# Retrieve electrode labels and find which rows correspond to them
electrodes_label = np.array(['FP1', 'FP2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2',
'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz'])
# !!! Some patients seem to have the header in a different index
if patient_fs == 400:
header_label = np.array([x.lower() for x in header.item(6)])
else:
header_label = np.array([x.lower() for x in header.item(6)]) #!!! mudar para 5 depois
electrodes_rows = []
electrodes_rows.append(np.where(header_label == 'fp1')[0][0])
electrodes_rows.append(np.where(header_label == 'fp2')[0][0])
electrodes_rows.append(np.where(header_label == 'f3')[0][0])
electrodes_rows.append(np.where(header_label == 'f4')[0][0])
electrodes_rows.append(np.where(header_label == 'c3')[0][0])
electrodes_rows.append(np.where(header_label == 'c4')[0][0])
electrodes_rows.append(np.where(header_label == 'p3')[0][0])
electrodes_rows.append(np.where(header_label == 'p4')[0][0])
electrodes_rows.append(np.where(header_label == 'o1')[0][0])
electrodes_rows.append(np.where(header_label == 'o2')[0][0])
electrodes_rows.append(np.where(header_label == 'f7')[0][0])
electrodes_rows.append(np.where(header_label == 'f8')[0][0])
try:
electrodes_rows.append(np.where(header_label == 't3')[0][0])
except:
electrodes_rows.append(np.where(header_label == 't7')[0][0])
try:
electrodes_rows.append(np.where(header_label == 't4')[0][0])
except:
electrodes_rows.append(np.where(header_label == 't8')[0][0])
try:
electrodes_rows.append(np.where(header_label == 't5')[0][0])
except:
electrodes_rows.append(np.where(header_label == 'p7')[0][0])
try:
electrodes_rows.append(np.where(header_label == 't6')[0][0])
except:
electrodes_rows.append(np.where(header_label == 'p8')[0][0])
electrodes_rows.append(np.where(header_label == 'fz')[0][0])
electrodes_rows.append(np.where(header_label == 'cz')[0][0])
electrodes_rows.append(np.where(header_label == 'pz')[0][0])
#%% Concatenate seizure data (before seizure + ictal)
# First 3 seizures, for training: 4h before each seizure + ictal period;
# Remaining seizures, for testing: 30 mins after previous offset until onset + ictal period
# Signals, time vectors are concatenated; labels (ictal/non-ictal) added; exogenous variables for each seizure added
for ID in patient_IDs:
for EVTS in evts_list:
if sep + ID in EVTS:
print(f'Building seizure data for patient {ID}...')
all_onsets = np.load(EVTS, allow_pickle = True)[:,1]
all_offsets = np.load(EVTS, allow_pickle = True)[:,7]
exogenous = np.load(EVTS, allow_pickle = True)[:, 11:] # pattern, classification, vigilance, medicament, dosage
# find any onsets / offsets that are invalid (offset before onset, rare...)
annotation_errors = []
for i in range(len(all_onsets)):
if all_onsets[i]>all_offsets[i]:
annotation_errors.append(i)
# discard seizures that are too close together
clusters = []
for i in range(1,len(all_onsets)):
if all_onsets[i] - all_offsets[i-1] < h_between_onsets:
clusters.append(i)
# check if the first seizure has enough data before the onset; otherwise, discard it
not_enough_data = []
for pat in patient_list:
if "pat_" + ID in pat:
time_list = sorted(os.listdir(pat))
time_list = [s for s in time_list if 'timeVector' in s] # only files with "timeVector"
time_list = [pat + s for s in time_list]
rec_start = np.load(time_list[0], allow_pickle=True)[0]
if (all_onsets[0] - rec_start) < h_before_onset:
not_enough_data.append(0)
discard = np.unique(annotation_errors + clusters + not_enough_data)
print(f'Discarding seizures: {discard}')
if discard.size > 0:
onsets = np.delete(all_onsets, discard)
offsets = np.delete(all_offsets, discard)
exogenous = np.delete(exogenous, discard, 0)
else:
onsets = all_onsets
offsets = all_offsets
exogenous = exogenous
for pat in patient_list:
found_seizures = 0
if "pat_" + ID in pat:
time_list = sorted(os.listdir(pat))
time_list = [s for s in time_list if 'timeVector' in s] # only files with "timeVector"
time_list = [pat + s for s in time_list]
signal_list = sorted(os.listdir(pat))
signal_list = [s for s in signal_list if 'signalData' in s] # only files with "signalData"
signal_list = [pat + s for s in signal_list]
gap_list = sorted(os.listdir(pat))
gap_list = [s for s in gap_list if 'gapSeconds' in s] # only files with "gapSeconds"
gap_list = [pat + s for s in gap_list]
# reset these for each recording (optimize search)
t_start = 0
t_end = 0
if found_seizures > 0: # avoid looking for seizures already found (optimize search)
onsets = onsets[found_seizures:]
offsets = offsets[found_seizures:]
for o in range(len(onsets)):
print(f"Gathering data for seizure #{o+1}...")
# find beginning of the signal (different for training and testing seizures, read above)
if found_seizures < 3:
# find first signal that is X hours before the onset
searching_start = True
while searching_start and t_start <= len(time_list):
t_vector = np.load(time_list[t_start], allow_pickle=True)
gap = np.load(gap_list[t_start]).item(0) # check in case onset - X is in missing data segment
if t_vector[0] - dt.timedelta(seconds = gap) <= onsets[o] - h_before_onset and t_vector[-1] > onsets[o] - h_before_onset:
if gap > 0 and onsets[o] - h_before_onset < t_vector[0]:
gap_time = np.arange(1/fsampling, gap, 1/fsampling)
previous_t_vector = np.load(time_list[t_start-1], allow_pickle=True)
signal_array = np.load(signal_list[t_start], allow_pickle=True)[:,electrodes_rows].astype("float32")
previous_signal = np.load(signal_list[t_start-1], allow_pickle=True)[:,electrodes_rows].astype("float32")
signal_gap_input = (previous_signal[-1,:] + signal_array[0,:])/2
generated_time = np.array([previous_t_vector[-1] + dt.timedelta(seconds=gap_time[i]) for i in range(len(gap_time))])
generated_signal = np.ones((len(gap_time), 19), dtype="float32") * signal_gap_input
new_t_vector = np.concatenate((generated_time, t_vector))
new_signal_array = np.vstack((generated_signal, signal_array))
signal_start_idx = (np.abs(new_t_vector - (onsets[o] - h_before_onset))).argmin() # closest time sample
signal_start = new_signal_array[signal_start_idx:,:].astype("float32")
time_start = new_t_vector[signal_start_idx:]
else:
signal_start_idx = (np.abs(t_vector - (onsets[o] - h_before_onset))).argmin() # closest time sample
signal_array = np.load(signal_list[t_start], allow_pickle=True)[:,electrodes_rows].astype("float32")
signal_start = signal_array[signal_start_idx:,:].astype("float32")
time_start = t_vector[signal_start_idx:]
print(f"Found it! t_start = {t_start}")
searching_start = False
t_end = t_start # start looking for offset after onset (optimize search)
else:
t_start+=1
else:
# find first signal that is 30 mins after the previous offset (including discarded ones)
original_idx = np.where(all_onsets == onsets[o])[0][0]
if original_idx - 1 in annotation_errors:
after_last_offset = all_onsets[original_idx - 1] + m_postictal # use onset instead (rare, but it happens)
else:
after_last_offset = all_offsets[original_idx - 1] + m_postictal
searching_start = True
while searching_start and t_start <= len(time_list):
t_vector = np.load(time_list[t_start], allow_pickle=True)
gap = np.load(gap_list[t_start]).item(0) # check in case onset - X is in missing data segment
if t_vector[0] - dt.timedelta(seconds = gap) <= after_last_offset and t_vector[-1] > after_last_offset:
if gap > 0 and after_last_offset < t_vector[0]:
gap_time = np.arange(1/fsampling, gap, 1/fsampling) # !!! if a MemoryError occurs later, change this
previous_t_vector = np.load(time_list[t_start-1], allow_pickle=True)
signal_array = np.load(signal_list[t_start], allow_pickle=True)[:,electrodes_rows].astype("float32")
previous_signal = np.load(signal_list[t_start-1], allow_pickle=True)[:,electrodes_rows].astype("float32")
signal_gap_input = (previous_signal[-1,:] + signal_array[0,:])/2
generated_time = np.array([previous_t_vector[-1] + dt.timedelta(seconds=gap_time[i]) for i in range(len(gap_time))])
generated_signal = np.ones((len(gap_time), 19), dtype="float32") * signal_gap_input
new_t_vector = np.concatenate((generated_time, t_vector))
new_signal_array = np.vstack((generated_signal, signal_array)).astype("float32")
signal_start_idx = (np.abs(new_t_vector - (after_last_offset))).argmin() # closest time sample
signal_start = new_signal_array[signal_start_idx:,:]
time_start = new_t_vector[signal_start_idx:]
else:
signal_start_idx = (np.abs(t_vector - (after_last_offset))).argmin() # closest time sample
signal_array = np.load(signal_list[t_start], allow_pickle=True)[:,electrodes_rows].astype("float32")
signal_start = signal_array[signal_start_idx:,:].astype("float32")
time_start = t_vector[signal_start_idx:]
print(f"Found it! t_start = {t_start}")
searching_start = False
t_end = t_start # start looking for offset after onset (optimize search)
else:
t_start+=1
# find first signal that contains the offset
searching_end = True
if t_start == len(time_list):
searching_end = False # start searching in a different recording (optimize search)
while searching_end and t_end <= len(time_list):
t_vector = np.load(time_list[t_end], allow_pickle=True)
if t_vector[0] <= offsets[o] and t_vector[-1] > offsets[o]:
signal_end_idx = (np.abs(t_vector - offsets[o])).argmin() # closest time sample
signal_array = np.load(signal_list[t_end], allow_pickle=True)[:,electrodes_rows].astype("float32")
signal_end = signal_array[:signal_end_idx,:].astype("float32")
time_end = t_vector[:signal_end_idx]
print(f"Found it! t_end = {t_end}")
searching_end = False
else:
t_end+=1
if t_start != len(time_list): # find remaining signals between the previous segments and concatenate all of them; check for gaps!
if t_start == t_end: # may happen in large files that span several hours...
signal_segment = signal_array[signal_start_idx:signal_end_idx]
time_segment = t_vector[signal_start_idx:signal_end_idx]
for t in range(t_start+1,t_end+1):
print(f"Concatenating! t = {t}")
if t==t_start+1:
t_vector = np.load(time_list[t], allow_pickle=True)
signal_array = np.load(signal_list[t], allow_pickle = True)[:,electrodes_rows].astype("float32")
gap = np.load(gap_list[t])
if gap > 0:
# generate vector with missing samples (time and signal)
gap_time = np.arange(1/fsampling, gap, 1/fsampling)
previous_t_vector = np.load(time_list[t-1], allow_pickle=True)
#previous_signal = np.load(signal_list[t-1], allow_pickle=True)[:,0:19]
signal_gap_input = (signal_start[-1,:] + signal_array[0,:])/2
generated_time = np.array([previous_t_vector[-1] + dt.timedelta(seconds=gap_time[i]) for i in range(len(gap_time))])
generated_signal = np.ones((len(gap_time), 19), dtype="float32") * signal_gap_input
time_segment = np.concatenate((time_start, generated_time, t_vector))
signal_segment = np.vstack((signal_start, generated_signal, signal_array)).astype("float32")
else:
time_segment = np.concatenate((time_start, t_vector))
signal_segment = np.vstack((signal_start, signal_array)).astype("float32")
elif t==t_end:
gap = np.load(gap_list[t])
if gap > 0:
# generate vector with missing samples (time and signal)
gap_time = np.arange(1/fsampling, gap, 1/fsampling)
previous_t_vector = np.load(time_list[t-1], allow_pickle=True)
#previous_signal = np.load(signal_list[t-1], allow_pickle=True)[:,0:19]
signal_gap_input = (signal_segment[-1,:] + signal_end[0,:])/2
generated_time = np.array([previous_t_vector[-1] + dt.timedelta(seconds=gap_time[i]) for i in range(len(gap_time))])
generated_signal = np.ones((len(gap_time), 19), dtype="float32") * signal_gap_input
time_segment = np.concatenate((time_segment, generated_time, time_end))
signal_segment = np.vstack((signal_segment, generated_signal, signal_end)).astype("float32")
else:
time_segment = np.concatenate((time_segment, time_end))
signal_segment = np.vstack((signal_segment, signal_end))[:,:].astype("float32")
else:
t_vector = np.load(time_list[t], allow_pickle=True)
signal_array = np.load(signal_list[t], allow_pickle = True)[:,electrodes_rows].astype("float32")
gap = np.load(gap_list[t])
if gap > 0:
# generate vector with missing samples (time and signal)
gap_time = np.arange(1/fsampling, gap, 1/fsampling)
previous_t_vector = np.load(time_list[t-1], allow_pickle=True)
#previous_signal = np.load(signal_list[t-1], allow_pickle=True)[:,0:19]
signal_gap_input = (signal_segment[-1,:] + signal_array[0,:])/2
generated_time = np.array([previous_t_vector[-1] + dt.timedelta(seconds=gap_time[i]) for i in range(len(gap_time))])
generated_signal = np.ones((len(gap_time), 19), dtype="float32") * signal_gap_input
time_segment = np.concatenate((time_segment, generated_time, t_vector))
signal_segment = np.vstack((signal_segment, generated_signal, signal_array)).astype("float32")
else:
time_segment = np.concatenate((time_segment, t_vector))
signal_segment = np.vstack((signal_segment, signal_array)).astype("float32")
# label time_segment and signal_segment: 0 = non-ictal; 2 = ictal
ictal_start_idx = (np.abs(time_segment - onsets[o])).argmin() # closest time sample
label_segment = np.zeros(time_segment.shape)
label_segment[ictal_start_idx:] = 2
# save each seizure data in "Seizures" folder as: patX_seizureY_signal, patX_seizureY_time, patX_seizureY_label
found_seizures+=1
print(f'Saving seizure #{o+1}...')
np.save(path + 'Seizures' + sep + 'pat' + ID + '_seizure'+ str(found_seizures) + '_timeVector', time_segment)
np.save(path + 'Seizures' + sep + 'pat' + ID + '_seizure' + str(found_seizures) + '_signalData', signal_segment)
np.save(path + 'Seizures' + sep + 'pat' + ID + '_seizure' + str(found_seizures) + '_labelVector', label_segment)
np.save(path + 'Seizures' + sep + 'pat' + ID + '_seizure' + str(found_seizures) + '_exogenousVariables', exogenous[o])
#%% Window segmentation (5 secs, no overlap)
# Segment each saved seizure recording into fixed-length, non-overlapping windows,
# build one label per window, filter each window, and save the results into
# 'Seizures_windowed'. Labels: 0 = non-ictal, 2 = ictal (matching the sample labels).
seizure_list = sorted(os.listdir(path + 'Seizures' + sep))
seizure_list = [path + 'Seizures' + sep + s for s in seizure_list]
signal_list = [s for s in seizure_list if 'signalData' in s] # only files with "signalData"
#time_list = [s for s in seizure_list if 'timeVector' in s] # only files with "timeVector"
label_list = [s for s in seizure_list if 'labelVector' in s] # only files with "labelVector"
for ID in patient_IDs:
    print(f'Segmenting data for patient {ID}...')
    for i in range(len(signal_list)):
        if "pat" + ID in signal_list[i]:
            sig = np.load(signal_list[i], allow_pickle = True)
            labels = np.load(label_list[i], allow_pickle = True)
            #times = np.load(time_list[i], allow_pickle = True)
            print(f'Splitting signal {signal_list[i].split("Seizures" + sep)[1]}')
            windows = []
            windows_label = []
            idx = 0
            # NOTE(review): the strict '<' drops a final window that would end exactly
            # at len(sig); confirm whether that trailing window should be kept ('<=').
            while idx + window_size < len(sig):
                win = sig[idx:idx + window_size,:]
                lab = labels[idx:idx + window_size]
                # label window: if any ictal samples are present, classify whole window as ictal
                if np.any(lab == 2):
                    windows_label.append(2)
                else:
                    windows_label.append(0)
                # apply filters and save window (notch first, then high-pass)
                win_notch = signal.lfilter(b_notch, a_notch, win)
                win_filtered = signal.lfilter(b_HPF, a_HPF, win_notch)
                windows.append(np.array(win_filtered, dtype="float32"))
                # FIX: advance by exactly one window length so consecutive windows are
                # contiguous; the previous step of 'window_size + 1' silently discarded
                # one sample between adjacent windows, contradicting the "no overlap"
                # (back-to-back) segmentation described in the cell header.
                idx += window_size
            print('Saving windowed signal and labels...')
            np.save(signal_list[i].split('_signalData.npy')[0].replace('Seizures', 'Seizures_windowed')+ '_windowData', windows)
            np.save(signal_list[i].split('_signalData.npy')[0].replace('Seizures', 'Seizures_windowed')+ '_windowLabel', windows_label)
            #np.save(signal_list[i].split('_signalData.npy')[0].replace('Seizures', 'Seizures_windowed')+ '_windowTime', windows_time)
#%% Feature extraction (linear univariate features)
# For every windowed seizure file, compute per-channel features for each window
# (statistical moments, relative band powers, Hjorth parameters, spectral edge
# frequencies and wavelet sub-band energies) and save each feature as its own
# .npy file in 'Features'. Each appended entry is a length-19 vector (one value
# per channel -- assumes 19 EEG channels; TODO confirm against recording montage).
window_list = sorted(os.listdir(path + 'Seizures_windowed' + sep))
window_list = [path + 'Seizures_windowed' + sep + s for s in window_list]
signal_list = [s for s in window_list if 'windowData' in s] # only files with "windowData"
for ID in patient_IDs:
    print(f'Extracting features for patient {ID}...')
    for i in range(len(signal_list)):
        if "pat" + ID in signal_list[i]:
            sig = np.load(signal_list[i], allow_pickle = True)
            print(f'Computing features from {signal_list[i].split("Seizures_windowed" + sep)[1]}')
            # one accumulator list per feature; each element is the per-channel
            # feature vector of one window
            feature_mean = []
            feature_variance = []
            feature_skewness = []
            feature_kurtosis = []
            feature_thetapower = []
            feature_deltapower = []
            feature_betapower = []
            feature_alphapower = []
            #feature_gammapower = []
            feature_lowgammapower = []
            feature_highgammapower = []
            feature_hjorth_act = []
            feature_hjorth_mob = []
            feature_hjorth_com = []
            feature_sef50 = []
            feature_sef75 = []
            feature_sef90 = []
            feature_wavelet_energy_a7 = []
            feature_wavelet_energy_d7 = []
            feature_wavelet_energy_d6 = []
            feature_wavelet_energy_d5 = []
            feature_wavelet_energy_d4 = []
            feature_wavelet_energy_d3 = []
            feature_wavelet_energy_d2 = []
            feature_wavelet_energy_d1 = []
            #feature_circadian_rhythm = []
            for j in range(sig.shape[0]):
                window = sig[j, :, :]  # (samples, channels)
                # MEAN
                mean = np.mean(window, axis = 0)
                feature_mean.append(mean)
                # VARIANCE (sample variance, ddof=1)
                variance = np.var(window, axis = 0, ddof = 1)
                feature_variance.append(variance)
                # SKEWNESS
                # FIX: renamed the accumulator from 'sum' to 'dev_sum' -- the original
                # shadowed the 'sum' builtin. Numerics are unchanged.
                dev_sum = 0
                for x in window:
                    dev_sum += (x - mean)**3
                # NOTE(review): mixes a 1/(n-1) moment with np.std's default ddof=0;
                # not the textbook bias-corrected skewness -- confirm intended formula.
                skewness = ((1 / (len(window) - 1)) * dev_sum) / (np.std(window, axis = 0)**3)
                feature_skewness.append(skewness)
                # KURTOSIS
                dev_sum = 0
                for x in window:
                    dev_sum += (x - mean)**4
                # NOTE(review): the extra (len(window) - 1) factor in the denominator
                # differs from the usual excess-kurtosis definition -- verify.
                kurtosis = (((1 / (len(window) - 1)) * dev_sum) / ((len(window) - 1) * np.std(window, axis = 0)**4)) - 3
                feature_kurtosis.append(kurtosis)
                # RELATIVE SPECTRAL POWER (Welch PSD per channel; band power / total power)
                psd = []
                for channel in range(window.shape[1]):
                    freqs, power = signal.welch(window[:,channel], fsampling)
                    psd.append(power)
                thetapower = []
                deltapower = []
                betapower = []
                alphapower = []
                gammapower = []
                lowgammapower = []
                highgammapower = []
                # NOTE(review): the *_range values index Welch PSD *bins* directly;
                # this assumes they were precomputed in bin units, not Hz -- confirm.
                for spectrum in psd:
                    theta = integrate.simps(spectrum[theta_range[0]:theta_range[1]+1]) / integrate.simps(spectrum)
                    thetapower.append(theta)
                    delta = integrate.simps(spectrum[delta_range[0] : delta_range[1]+1]) / integrate.simps(spectrum)
                    deltapower.append(delta)
                    beta = integrate.simps(spectrum[beta_range[0] : beta_range[1]+1]) / integrate.simps(spectrum)
                    betapower.append(beta)
                    alpha = integrate.simps(spectrum[alpha_range[0] : alpha_range[1]+1]) / integrate.simps(spectrum)
                    alphapower.append(alpha)
                    #gamma = integrate.simps(spectrum[gamma_range[0] : gamma_range[1]+1]) / integrate.simps(spectrum)
                    #gammapower.append(gamma)
                    low_gamma = integrate.simps(spectrum[low_gamma_range[0] : low_gamma_range[1]+1]) / integrate.simps(spectrum)
                    lowgammapower.append(low_gamma)
                    high_gamma = integrate.simps(spectrum[high_gamma_range[0] : high_gamma_range[1]+1]) / integrate.simps(spectrum)
                    highgammapower.append(high_gamma)
                feature_thetapower.append(np.array(thetapower))
                feature_deltapower.append(np.array(deltapower))
                feature_betapower.append(np.array(betapower))
                feature_alphapower.append(np.array(alphapower))
                #feature_gammapower.append(np.array(gammapower))
                feature_lowgammapower.append(np.array(lowgammapower))
                feature_highgammapower.append(np.array(highgammapower))
                # HJORTH PARAMETERS (activity = variance, mobility, complexity)
                deriv1 = np.gradient(window, axis = 0)
                deriv2 = np.gradient(deriv1, axis = 0)
                hjorth_act = variance
                hjorth_mob = np.sqrt(np.var(deriv1, axis = 0, ddof = 1)/np.var(window, axis = 0, ddof = 1))
                hjorth_com = np.sqrt((np.var(deriv2, axis = 0, ddof = 1)*np.var(window, axis = 0, ddof = 1))/np.var(deriv1, axis = 0, ddof = 1)**2)
                feature_hjorth_act.append(hjorth_act)
                feature_hjorth_mob.append(hjorth_mob)
                feature_hjorth_com.append(hjorth_com)
                # SPECTRAL EDGE FREQUENCY (50%, 75%, 90%)
                # NOTE: values stored are PSD *bin indices*, not frequencies in Hz.
                sef50percent = []
                sef75percent = []
                sef90percent = []
                for spectrum in psd:
                    # NOTE(review): integrate.cumtrapz/trapz/simps are removed in
                    # recent SciPy (renamed cumulative_trapezoid/trapezoid/simpson).
                    power_cum = integrate.cumtrapz(spectrum)
                    sef50 = (np.abs(power_cum - 0.5*integrate.trapz(spectrum))).argmin() # closest freq holding 50% spectral power
                    sef50percent.append(sef50)
                    sef75 = (np.abs(power_cum - 0.75*integrate.trapz(spectrum))).argmin() # closest freq holding 75% spectral power
                    sef75percent.append(sef75)
                    sef90 = (np.abs(power_cum - 0.9*integrate.trapz(spectrum))).argmin() # closest freq holding 90% spectral power
                    sef90percent.append(sef90)
                feature_sef50.append(np.array(sef50percent))
                feature_sef75.append(np.array(sef75percent))
                feature_sef90.append(np.array(sef90percent))
                # WAVELET COEFFICIENTS (ENERGY)
                a7_energy = []; d7_energy = []; d6_energy = []; d5_energy = []
                d4_energy = []; d3_energy = []; d2_energy = []; d1_energy = []
                for channel in range(window.shape[1]):
                    coeffs = pywt.wavedec(window[:, channel], mother_wavelet, level = 8)
                    # coeffs -> [A7, D7, D6, D5, D4, D3, D2, D1]
                    # NOTE(review): level=8 actually yields 9 arrays [A8, D8..D1], so
                    # indices 0..7 are [A8, D8..D2] and D1 is never used, while the
                    # saved feature names claim A7..D1. Either level should be 7 or
                    # the labels/indices need updating -- left as-is pending review.
                    a7_energy.append(np.sum(np.abs(np.power(coeffs[0], 2))))
                    d7_energy.append(np.sum(np.abs(np.power(coeffs[1], 2))))
                    d6_energy.append(np.sum(np.abs(np.power(coeffs[2], 2))))
                    d5_energy.append(np.sum(np.abs(np.power(coeffs[3], 2))))
                    d4_energy.append(np.sum(np.abs(np.power(coeffs[4], 2))))
                    d3_energy.append(np.sum(np.abs(np.power(coeffs[5], 2))))
                    d2_energy.append(np.sum(np.abs(np.power(coeffs[6], 2))))
                    d1_energy.append(np.sum(np.abs(np.power(coeffs[7], 2))))
                feature_wavelet_energy_a7.append(a7_energy)
                feature_wavelet_energy_d7.append(d7_energy)
                feature_wavelet_energy_d6.append(d6_energy)
                feature_wavelet_energy_d5.append(d5_energy)
                feature_wavelet_energy_d4.append(d4_energy)
                feature_wavelet_energy_d3.append(d3_energy)
                feature_wavelet_energy_d2.append(d2_energy)
                feature_wavelet_energy_d1.append(d1_energy)
                # CIRCADIAN RHYTHM (seconds of the day, between 0 and 86400 -> normalize to 0-1)
                #circadian = (window_time[0].hour * 3600 + window_time[0].minute * 60) / (24 * 3600)
                #feature_circadian_rhythm.append(np.ones((19)) * circadian)
            print('Saving features...')
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_mean', feature_mean)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_var', feature_variance)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_skew', feature_skewness)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_kurt', feature_kurtosis)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_theta', feature_thetapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_delta', feature_deltapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_beta', feature_betapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_alpha', feature_alphapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_lowgamma', feature_lowgammapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_highgamma', feature_highgammapower)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_h_act', feature_hjorth_act)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_h_mob', feature_hjorth_mob)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_h_com', feature_hjorth_com)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_sef50', feature_sef50)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_sef75', feature_sef75)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_sef90', feature_sef90)
            #np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_circadian', feature_circadian_rhythm)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_a7', feature_wavelet_energy_a7)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d7', feature_wavelet_energy_d7)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d6', feature_wavelet_energy_d6)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d5', feature_wavelet_energy_d5)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d4', feature_wavelet_energy_d4)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d3', feature_wavelet_energy_d3)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d2', feature_wavelet_energy_d2)
            np.save(signal_list[i].split('_windowData.npy')[0].replace('Seizures_windowed', 'Features')+ '_d1', feature_wavelet_energy_d1)
#%% Organize data for the evolutionary framework
# Stack all per-seizure feature files into one matrix per seizure (rows = features,
# columns = windows), append the window labels as the last row, and save everything
# (plus exogenous seizure info and a feature legend) into 'Evol2'.
feature_list = sorted(os.listdir(path + 'Features' + sep))
feature_list = [path + 'Features' + sep + s for s in feature_list]
window_labels = [s for s in window_list if 'windowLabel' in s] # only files with "windowLabel"
seizure_exogenous = [s for s in seizure_list if 'exogenousVariables' in s] # only files with "exogenousVariables"
# Build array containing: feature values, labels, missingdata/flat percentage (for each window); columns = time, rows = feature/others
# Build label for the array created above; feature values = electrodeX_featureY
for ID in patient_IDs:
    # Get required files for all seizures of each patient: feature values, labels, missing/flat info
    feature_list_patient = []
    label_list_patient = []
    # NOTE(review): flat/saturated/missing lists are initialized but never filled
    # in this cell -- presumably populated elsewhere or left as a TODO; confirm.
    flat_list_patient = []
    saturated_list_patient = []
    missing_list_patient = []
    exogenous_list_patient = []
    print(f'Organizing data for patient {ID}...')
    for i in range(len(feature_list)):
        if "pat" + ID in feature_list[i]:
            feature_list_patient.append(feature_list[i])
    for j in range(len(window_labels)):
        if "pat" + ID in window_labels[j]:
            label_list_patient.append(window_labels[j])
    for j in range(len(seizure_exogenous)):
        if "pat" + ID in seizure_exogenous[j]:
            exogenous_list_patient.append(seizure_exogenous[j])
    # used to detect number of seizures for each patient
    if len(feature_list_patient) % number_of_features == 0:
        # NOTE(review): seizures_number is computed (as a float) but never used below.
        seizures_number = len(feature_list_patient) / number_of_features
        # build, for each seizure, matrix containing feature values, classification labels (in this order)
        for j in range(0, len(feature_list_patient), number_of_features):
            seizure_features = feature_list_patient[j:j + number_of_features]
            # NOTE(review): splitting the full path on "_" assumes no underscores occur
            # in 'path' before the filename -- fragile if the base path ever changes.
            seizure_ID = seizure_features[0].split(sep="_")[1]
            seizure_no = int(seizure_ID.split("seizure")[1])
            feature_matrix = np.load(seizure_features[0], allow_pickle = True).T # transpose so that rows = features, columns = window
            for k in range(1, len(seizure_features)):
                feature_matrix = np.vstack((feature_matrix, np.load(seizure_features[k], allow_pickle = True).T))
            # add classification labels
            feature_matrix = np.vstack((feature_matrix, np.load([x for x in label_list_patient if seizure_ID+"_" in x][0], allow_pickle = True).T))
            np.save(path + 'Evol2' + sep + 'pat' + ID + '_seizure'+ str(seizure_no) + '_featureMatrix', feature_matrix)
    else:
        print(f'Could not detect number of seizures for patient {ID}! Please update feature labels...')
    # build array with exogenous information for all seizures
    exogenous_matrix = []
    for j in range(len(exogenous_list_patient)):
        exogenous_matrix.append(np.load(exogenous_list_patient[j], allow_pickle = True))
    np.save(path + 'Evol2' + sep + 'pat' + ID + '_seizureInfo', exogenous_matrix)
# build legend (same for all patients); row order matches the vstack order above
legend = []
for i in range(len(feature_labels)):
    for j in range(len(electrodes_label)):
        legend.append(electrodes_label[j] + '_' + feature_labels[i])
legend.append('class')
np.save(path + 'Evol2' + sep + 'legend', legend) # !!! change back to 'Evol' afterwards
print("\a") # beep when done :)
79542acf10ad4a392e5ae0f45867dc6a80ea2592 | 94 | py | Python | automl_infrastructure/interpretation/__init__.py | barak1412/automl_infrastructure | e8a291d175237bb7f74ebae5d6f5d2f8bcf5dc32 | [
"MIT"
] | null | null | null | automl_infrastructure/interpretation/__init__.py | barak1412/automl_infrastructure | e8a291d175237bb7f74ebae5d6f5d2f8bcf5dc32 | [
"MIT"
] | null | null | null | automl_infrastructure/interpretation/__init__.py | barak1412/automl_infrastructure | e8a291d175237bb7f74ebae5d6f5d2f8bcf5dc32 | [
"MIT"
] | null | null | null | from automl_infrastructure.interpretation.permutation_importance import PermutationImportance
| 47 | 93 | 0.93617 |
79542b561d3f72a4a470258fbe3d32cb5370b12d | 4,481 | py | Python | dipole/tests/base.py | thisch/pydipole | e496177fe60c3ec1d3b28d2dc843c0fd54b5757c | [
"MIT"
] | null | null | null | dipole/tests/base.py | thisch/pydipole | e496177fe60c3ec1d3b28d2dc843c0fd54b5757c | [
"MIT"
] | null | null | null | dipole/tests/base.py | thisch/pydipole | e496177fe60c3ec1d3b28d2dc843c0fd54b5757c | [
"MIT"
] | null | null | null | import pytest
import os
import numpy as np
import logging
import matplotlib.pyplot as plt
import matplotlib as mpl
class Base:
    """Base class for dipole plotting tests.

    Provides matplotlib helper methods for visualizing dipole radiation data
    (polarization, intensity, Poynting vector, quiver plots) plus utilities
    for saving figures and showing them interactively, driven by custom
    pytest command-line options ('--log', '--logdir', '--interactive').

    NOTE(review): uses the global ``pytest.config`` object, which was removed
    in pytest 5.0 -- this class requires an older pytest; confirm pinned version.
    """
    # shared module logger used by all subclasses
    log = logging.getLogger('dip')
    def setup_method(self, method):
        """Pytest per-test hook: announce and remember the running test's name."""
        print("\n{}:{}".format(self.__class__.__name__, method.__name__))
        # TODO support for fixtures (convert fixtures to strs)
        # _testMethodName is later used by gen_filename() to build output names
        self._testMethodName = method.__name__
    def _plot_linpol(self, T, P, field, fixvminmax=True, ax=None):
        """Delegate to dipole.plots.plot_linpol for a (theta, phi) linear-polarization plot."""
        from dipole.plots import plot_linpol
        plot_linpol(T, P, field, fixvminmax=fixvminmax, ax=ax)
    def _plot_linpol_plane(self, X, Y, field, fixvminmax=True, ax=None):
        """Delegate to dipole.plots.plot_linpol_plane for an (X, Y) plane polarization plot."""
        from dipole.plots import plot_linpol_plane
        plot_linpol_plane(X, Y, field=field, fixvminmax=fixvminmax, ax=ax)
    def _plot_surface(self, T, P, intens, ax=None):
        """Render a 3D surface of normalized intensity over spherical angles T, P."""
        if ax is None:
            # import registers the '3d' projection as a side effect
            from mpl_toolkits.mplot3d import Axes3D
            fig = plt.figure()
            # NOTE(review): fig.gca(projection='3d') is removed in matplotlib >= 3.6;
            # modern code would use fig.add_subplot(projection='3d').
            ax = fig.gca(projection='3d')
        # scale the unit sphere radially by normalized intensity
        intnorm = intens/intens.max()
        X = np.cos(P)*np.sin(T)*intnorm
        Y = np.sin(P)*np.sin(T)*intnorm
        Z = np.cos(T)*intnorm
        ax.plot_surface(X, Y, Z, facecolors=mpl.cm.rainbow(intnorm))
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
    def _plot_intens(self, T=None, P=None, field=None, intens=None, title=None, XY=None, ax=None):
        """Plot intensity as a pcolormesh; derives |E|^2 from `field` if `intens` is None.

        Uses cartesian XY coordinates if given, otherwise a (T*cos P, T*sin P)
        angular projection in degrees. Returns the axes used.
        """
        if intens is None:
            # assumes field's last axis holds the E-field components
            intens = np.sum(np.abs(field)**2, axis=2)
        if ax is None:
            fig, ax = plt.subplots()
        # ax.imshow(intens)
        if XY is not None:
            ax.pcolormesh(XY[0], XY[1], intens)
        else:
            ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
                          intens)
        # TODO improve limits (take code from paper_plotter.py)
        ax.set_aspect('equal')
        if title:
            ax.set_title(title)
        # ax.set_xlim(-25,25)
        # ax.set_ylim(-25,25)
        # cax.set_xticks([])
        # cax.set_yticks([])
        # cax.set_title('R_disk=%g lambda=%g, ndipoles=%d' % (scalefac, 2*np.pi/k,
        #                                                     ndipoles))
        return ax
    def _plot_quiver_exy(self, field=None, title=None, XY=None, ax=None):
        """Quiver plot of the real in-plane E-field components (Ex, Ey); returns the axes."""
        Ex = field[:, :, 0].real
        Ey = field[:, :, 1].real
        if ax is None:
            fig, ax = plt.subplots()
        # ax.imshow(intens)
        # NOTE(review): nothing is drawn when XY is None -- confirm that is intended.
        if XY is not None:
            ax.quiver(XY[0], XY[1], Ex.real, Ey.real)
        ax.set_aspect('equal')
        if title:
            ax.set_title(title)
        ax.set_xlabel('X (plane)')
        ax.set_ylabel('Y (plane)')
        # ax.set_xlim(-25,25)
        # ax.set_ylim(-25,25)
        # cax.set_xticks([])
        # cax.set_yticks([])
        # cax.set_title('R_disk=%g lambda=%g, ndipoles=%d' % (scalefac, 2*np.pi/k,
        #                                                     ndipoles))
        return ax
    def _plot_poynting(self, T=None, P=None, S=None, title=None, XY=None, ax=None):
        """s is either the magnitude of the poynting vector or a component of the
        poynting vector
        """
        if ax is None:
            fig, ax = plt.subplots()
        if XY is not None:
            C = ax.pcolormesh(XY[0], XY[1], S)
        else:
            C = ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)), S)
        if title:
            ax.set_title(title)
        ax.set_aspect('equal')
        plt.colorbar(C)
        return ax
    def gen_filename(self, postfix, extension):
        """Build '<logdir>/<TestClass>_<test_method>[_<postfix>].<extension>'."""
        fname = "%s_%s%s.%s" % (self.__class__.__name__,
                                self._testMethodName,
                                "_%s" % postfix if postfix else "",
                                extension)
        logd = pytest.config.getvalue('logdir')
        fname = os.path.join(logd, fname)
        return fname
    def save_fig(self, postfix, fig, ext='png'):
        """Save *fig* under the generated filename if '--log' is set; no-op otherwise.

        *postfix* must not already carry a '.png' suffix -- the extension is
        appended separately via *ext*.
        """
        assert not postfix.endswith('.png')
        dest = self.gen_filename(postfix, ext)
        if not pytest.config.getvalue('log'):
            self.log.debug("do not save png %s (--log not set)", dest)
            return
        self.log.debug("saving mpl figure to '%s'", dest)
        if not os.path.exists(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        fig.savefig(dest)
        # TODO
        # self.created_files.append(dest)
    def show(self):
        """Display pending figures only when running with '--interactive'."""
        if pytest.config.getvalue('interactive'):
            plt.show()
| 33.192593 | 98 | 0.543182 |
79542c54ee52f95245512c19257debe6a6280820 | 143,481 | py | Python | Rules.py | cheuer/ALttPDoorRandomizer | 44d7e6c15cca8dc613e8fe9cdca07eaa3c5f44a3 | [
"MIT"
] | null | null | null | Rules.py | cheuer/ALttPDoorRandomizer | 44d7e6c15cca8dc613e8fe9cdca07eaa3c5f44a3 | [
"MIT"
] | null | null | null | Rules.py | cheuer/ALttPDoorRandomizer | 44d7e6c15cca8dc613e8fe9cdca07eaa3c5f44a3 | [
"MIT"
] | null | null | null | import logging
from BaseClasses import CollectionState, RegionType, DoorType
from Regions import key_only_locations
from RoomData import DoorKind
from collections import deque
import OWGSets
def set_rules(world, player):
    """Install all access-logic rules for *player* on *world*.

    Dispatches on the player's logic level ('nologic', 'noglitches',
    'owglitches', 'minorglitches'), game mode ('open', 'standard',
    'inverted') and goal ('dungeons', 'ganon'), then applies the shared
    big-bomb, swamp and bunny rule sets. 'nologic' short-circuits with a
    minimal set of always-reachable regions and returns early.
    """
    if world.logic[player] == 'nologic':
        logging.getLogger('').info('WARNING! Seeds generated under this logic often require major glitches and may be impossible!')
        if world.mode[player] != 'inverted':
            # under nologic, only anchor the save-and-quit / starting regions
            world.get_region('Links House', player).can_reach_private = lambda state: True
            world.get_region('Sanctuary', player).can_reach_private = lambda state: True
            old_rule = world.get_region('Old Man House', player).can_reach
            world.get_region('Old Man House', player).can_reach_private = lambda state: state.can_reach('Old Man', 'Location', player) or old_rule(state)
            return
        else:
            world.get_region('Inverted Links House', player).can_reach_private = lambda state: True
            world.get_region('Inverted Dark Sanctuary', player).entrances[0].parent_region.can_reach_private = lambda state: True
            if world.shuffle[player] != 'vanilla':
                old_rule = world.get_region('Old Man House', player).can_reach
                world.get_region('Old Man House', player).can_reach_private = lambda state: state.can_reach('Old Man', 'Location', player) or old_rule(state)
            world.get_region('Hyrule Castle Ledge', player).can_reach_private = lambda state: True
            return
    global_rules(world, player)
    if world.mode[player] != 'inverted':
        default_rules(world, player)
    if world.mode[player] == 'open':
        open_rules(world, player)
    elif world.mode[player] == 'standard':
        standard_rules(world, player)
    elif world.mode[player] == 'inverted':
        # inverted builds on the open rule set
        open_rules(world, player)
        inverted_rules(world, player)
    else:
        raise NotImplementedError('Not implemented yet')
    if world.logic[player] == 'noglitches':
        no_glitches_rules(world, player)
    elif world.logic[player] == 'owglitches':
        logging.getLogger('').info('There is a chance OWG has bugged edge case rulesets, especially in inverted. Definitely file a report on GitHub if you see anything strange.')
        # Initially setting no_glitches_rules to set the baseline rules for some
        # entrances. The overworld_glitches_rules set is primarily additive.
        no_glitches_rules(world, player)
        overworld_glitches_rules(world, player)
    elif world.logic[player] == 'minorglitches':
        logging.getLogger('').info('Minor Glitches may be buggy still. No guarantee for proper logic checks.')
    else:
        raise NotImplementedError('Not implemented yet')
    if world.goal[player] == 'dungeons':
        # require all dungeons to beat ganon
        add_rule(world.get_location('Ganon', player), lambda state: state.can_reach('Master Sword Pedestal', 'Location', player) and state.has('Beat Agahnim 1', player) and state.has('Beat Agahnim 2', player) and state.has_crystals(7, player))
    elif world.goal[player] == 'ganon':
        # require aga2 to beat ganon
        add_rule(world.get_location('Ganon', player), lambda state: state.has('Beat Agahnim 2', player))
    if world.mode[player] != 'inverted':
        set_big_bomb_rules(world, player)
    else:
        set_inverted_big_bomb_rules(world, player)
    # if swamp and dam have not been moved we require mirror for swamp palace
    if not world.swamp_patch_required[player]:
        add_rule(world.get_entrance('Swamp Lobby Moat', player), lambda state: state.has_Mirror(player))
    if world.mode[player] != 'inverted':
        set_bunny_rules(world, player)
    else:
        set_inverted_bunny_rules(world, player)
def set_rule(spot, rule):
    """Overwrite the access rule of *spot* (a location/entrance/region) with *rule*."""
    setattr(spot, 'access_rule', rule)
def set_defeat_dungeon_boss_rule(location):
    """Require defeating the dungeon's boss to access *location*.

    The boss is looked up lazily inside the rule because dungeon.boss may be
    reassigned later when boss shuffle is used.
    """
    boss_rule = lambda state: location.parent_region.dungeon.boss.can_defeat(state)
    set_rule(location, boss_rule)
def set_always_allow(spot, rule):
    """Install *rule* as the unconditional-allow predicate of *spot*."""
    setattr(spot, 'always_allow', rule)
def add_rule(spot, rule, combine='and'):
    """Combine *rule* with the existing access rule of *spot*.

    With combine='and' (the default) both rules must pass; with combine='or'
    either suffices. *rule* is evaluated first and short-circuits the old rule.
    """
    existing = spot.access_rule
    if combine == 'or':
        def merged(state):
            return rule(state) or existing(state)
    else:
        def merged(state):
            return rule(state) and existing(state)
    spot.access_rule = merged
def add_lamp_requirement(spot, player):
    """Additionally require the Lamp (enough copies for dark rooms) to pass *spot*."""
    lamp_rule = lambda state: state.has('Lamp', player, state.world.lamps_needed_for_dark_rooms)
    add_rule(spot, lamp_rule)
def forbid_item(location, item, player):
    """Forbid placing *item* owned by *player* at *location*, keeping prior constraints."""
    previous = location.item_rule
    def guarded(candidate):
        # reject the forbidden (name, player) pair outright; otherwise defer
        if candidate.name == item and candidate.player == player:
            return False
        return previous(candidate)
    location.item_rule = guarded
def add_item_rule(location, rule):
    """Conjoin *rule* with the existing item-placement rule of *location*."""
    previous = location.item_rule
    def combined(item):
        # new rule evaluated first, short-circuiting the previous one
        return rule(item) and previous(item)
    location.item_rule = combined
def item_in_locations(state, item, player, locations):
    """Return True if (*item*, *player*) sits at any of *locations*.

    Each entry of *locations* is indexable as (location_name, owner_player).
    """
    target = (item, player)
    return any(item_name(state, entry[0], entry[1]) == target for entry in locations)
def item_name(state, location, player):
    """Return ``(name, player)`` of the item placed at *location*, or ``None`` if empty."""
    placed = state.world.get_location(location, player).item
    if placed is None:
        return None
    return (placed.name, placed.player)
def global_rules(world, player):
    """Install access/item rules that apply in every world mode for *player*.

    Covers: overworld event locations, per-dungeon door-rando rules,
    crystal-switch (blue/orange barrier) traversal rules, key logic, and
    the Ganon fight requirements.

    Fixes vs. previous revision (behavior-preserving redundancy removal):
    the Blind's Cell big-key forbid was applied twice (once in the loop and
    once standalone), and the 'Sunken Treasure' floodgate rule was ANDed
    onto itself after already being set above — both duplicates dropped.
    """
    # ganon can only carry triforce
    add_item_rule(world.get_location('Ganon', player), lambda item: item.name == 'Triforce' and item.player == player)
    # we can s&q to the old man house after we rescue him. This may be somewhere completely different if caves are shuffled!
    old_rule = world.get_region('Old Man House', player).can_reach_private
    world.get_region('Old Man House', player).can_reach_private = lambda state: state.can_reach('Old Man', 'Location', player) or old_rule(state)
    # Overworld event and item locations
    set_rule(world.get_location('Sunken Treasure', player), lambda state: state.has('Open Floodgate', player))
    set_rule(world.get_location('Dark Blacksmith Ruins', player), lambda state: state.has('Return Smith', player))
    set_rule(world.get_location('Purple Chest', player), lambda state: state.has('Pick Up Purple Chest', player))  # Can S&Q with chest
    set_rule(world.get_location('Ether Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has_beam_sword(player))
    set_rule(world.get_location('Master Sword Pedestal', player), lambda state: state.has('Red Pendant', player) and state.has('Blue Pendant', player) and state.has('Green Pendant', player))
    set_rule(world.get_location('Missing Smith', player), lambda state: state.has('Get Frog', player) and state.can_reach('Blacksmiths Hut', 'Region', player))  # Can't S&Q with smith
    set_rule(world.get_location('Blacksmith', player), lambda state: state.has('Return Smith', player))
    set_rule(world.get_location('Magic Bat', player), lambda state: state.has('Magic Powder', player))
    set_rule(world.get_location('Sick Kid', player), lambda state: state.has_bottle(player))
    set_rule(world.get_location('Library', player), lambda state: state.has_Boots(player))
    set_rule(world.get_location('Mimic Cave', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_location('Sahasrahla', player), lambda state: state.has('Green Pendant', player))
    # Spike Cave: needs invincibility (Cape/Byrna + magic) or enough hearts to tank damage
    set_rule(world.get_location('Spike Cave', player), lambda state:
             state.has('Hammer', player) and state.can_lift_rocks(player) and
             ((state.has('Cape', player) and state.can_extend_magic(player, 16, True)) or
              (state.has('Cane of Byrna', player) and
               (state.can_extend_magic(player, 12, True) or
                (state.world.can_take_damage and (state.has_Boots(player) or state.has_hearts(player, 4))))))
             )
    set_rule(world.get_location('Hookshot Cave - Top Right', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_location('Hookshot Cave - Top Left', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_location('Hookshot Cave - Bottom Right', player), lambda state: state.has('Hookshot', player) or state.has('Pegasus Boots', player))
    set_rule(world.get_location('Hookshot Cave - Bottom Left', player), lambda state: state.has('Hookshot', player))
    # Start of door rando rules
    # TODO: Do these need to flag off when door rando is off? - some of them, yes
    # Eastern Palace
    # Eyegore room needs a bow
    set_rule(world.get_entrance('Eastern Duo Eyegores NE', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('Eastern Single Eyegore NE', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('Eastern Map Balcony Hook Path', player), lambda state: state.has('Hookshot', player))
    # Boss rules. Same as below but no BK or arrow requirement.
    set_defeat_dungeon_boss_rule(world.get_location('Eastern Palace - Prize', player))
    set_defeat_dungeon_boss_rule(world.get_location('Eastern Palace - Boss', player))
    # Desert
    set_rule(world.get_location('Desert Palace - Torch', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('Desert Wall Slide NW', player), lambda state: state.has_fire_source(player))
    set_defeat_dungeon_boss_rule(world.get_location('Desert Palace - Prize', player))
    set_defeat_dungeon_boss_rule(world.get_location('Desert Palace - Boss', player))
    # Tower of Hera
    set_rule(world.get_location('Tower of Hera - Big Key Chest', player), lambda state: state.has_fire_source(player))
    set_defeat_dungeon_boss_rule(world.get_location('Tower of Hera - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Tower of Hera - Prize', player))
    # Castle Tower / Agahnim 1
    set_rule(world.get_entrance('Tower Altar NW', player), lambda state: state.has_sword(player))
    set_defeat_dungeon_boss_rule(world.get_location('Agahnim 1', player))
    # Palace of Darkness
    set_rule(world.get_entrance('PoD Arena Bonk Path', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('PoD Mimics 1 NW', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('PoD Mimics 2 NW', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('PoD Bow Statue Down Ladder', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('PoD Map Balcony Drop Down', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('PoD Dark Pegs WN', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('PoD Dark Pegs Up Ladder', player), lambda state: state.has('Hammer', player))
    set_defeat_dungeon_boss_rule(world.get_location('Palace of Darkness - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Palace of Darkness - Prize', player))
    # Swamp Palace (trench/drain events control which passages are wet or dry)
    set_rule(world.get_entrance('Swamp Lobby Moat', player), lambda state: state.has('Flippers', player) and state.has('Open Floodgate', player))
    set_rule(world.get_entrance('Swamp Trench 1 Approach Dry', player), lambda state: not state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Key Ledge Dry', player), lambda state: not state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Departure Dry', player), lambda state: not state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Approach Key', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Approach Swim Depart', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Key Approach', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Key Ledge Depart', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Departure Approach', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 1 Departure Key', player), lambda state: state.has('Flippers', player) and state.has('Trench 1 Filled', player))
    set_rule(world.get_location('Trench 1 Switch', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Swamp Hub Hook Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_location('Swamp Palace - Hookshot Pot Key', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Swamp Trench 2 Pots Dry', player), lambda state: not state.has('Trench 2 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 2 Pots Wet', player), lambda state: state.has('Flippers', player) and state.has('Trench 2 Filled', player))
    set_rule(world.get_entrance('Swamp Trench 2 Departure Wet', player), lambda state: state.has('Flippers', player) and state.has('Trench 2 Filled', player))
    set_rule(world.get_entrance('Swamp West Ledge Hook Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Swamp Barrier Ledge Hook Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Swamp Drain Right Switch', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_entrance('Swamp Drain WN', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_entrance('Swamp Flooded Room WS', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_entrance('Swamp Flooded Room Ladder', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_location('Swamp Palace - Flooded Room - Left', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_location('Swamp Palace - Flooded Room - Right', player), lambda state: state.has('Drained Swamp', player))
    set_rule(world.get_entrance('Swamp Waterway NW', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_entrance('Swamp Waterway N', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_entrance('Swamp Waterway NE', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_location('Swamp Palace - Waterway Pot Key', player), lambda state: state.has('Flippers', player))
    set_defeat_dungeon_boss_rule(world.get_location('Swamp Palace - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Swamp Palace - Prize', player))
    # Skull Woods
    set_rule(world.get_entrance('Skull Big Chest Hookpath', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Skull Torch Room WN', player), lambda state: state.has('Fire Rod', player))
    set_rule(world.get_entrance('Skull Vines NW', player), lambda state: state.has_sword(player))
    set_defeat_dungeon_boss_rule(world.get_location('Skull Woods - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Skull Woods - Prize', player))
    # Thieves' Town
    # blind can't have the small key? - not necessarily true anymore - but likely still
    set_rule(world.get_location('Thieves\' Town - Big Chest', player), lambda state: state.has('Hammer', player))
    for entrance in ['Thieves Basement Block Path', 'Thieves Blocked Entry Path', 'Thieves Conveyor Block Path', 'Thieves Conveyor Bridge Block Path']:
        set_rule(world.get_entrance(entrance, player), lambda state: state.can_lift_rocks(player))
    for location in ['Thieves\' Town - Blind\'s Cell', 'Thieves\' Town - Boss']:
        forbid_item(world.get_location(location, player), 'Big Key (Thieves Town)', player)
    for location in ['Suspicious Maiden', 'Thieves\' Town - Blind\'s Cell']:
        set_rule(world.get_location(location, player), lambda state: state.has('Big Key (Thieves Town)', player))
    set_rule(world.get_location('Revealing Light', player), lambda state: state.has('Shining Light', player) and state.has('Maiden Rescued', player))
    set_rule(world.get_location('Thieves\' Town - Boss', player), lambda state: state.has('Maiden Unmasked', player) and world.get_location('Thieves\' Town - Boss', player).parent_region.dungeon.boss.can_defeat(state))
    set_rule(world.get_location('Thieves\' Town - Prize', player), lambda state: state.has('Maiden Unmasked', player) and world.get_location('Thieves\' Town - Prize', player).parent_region.dungeon.boss.can_defeat(state))
    # Ice Palace
    set_rule(world.get_entrance('Ice Lobby WS', player), lambda state: state.can_melt_things(player))
    set_rule(world.get_entrance('Ice Hammer Block ES', player), lambda state: state.can_lift_rocks(player) and state.has('Hammer', player))
    set_rule(world.get_location('Ice Palace - Hammer Block Key Drop', player), lambda state: state.can_lift_rocks(player) and state.has('Hammer', player))
    set_rule(world.get_location('Ice Palace - Map Chest', player), lambda state: state.can_lift_rocks(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('Ice Antechamber Hole', player), lambda state: state.can_lift_rocks(player) and state.has('Hammer', player))
    # todo: ohko rules for spike room - could split into two regions instead of these, but can_take_damage is usually true
    set_rule(world.get_entrance('Ice Spike Room WS', player), lambda state: state.world.can_take_damage or state.has('Hookshot', player) or state.has('Cape', player) or state.has('Cane of Byrna', player))
    set_rule(world.get_entrance('Ice Spike Room Up Stairs', player), lambda state: state.world.can_take_damage or state.has('Hookshot', player) or state.has('Cape', player) or state.has('Cane of Byrna', player))
    set_rule(world.get_entrance('Ice Spike Room Down Stairs', player), lambda state: state.world.can_take_damage or state.has('Hookshot', player) or state.has('Cape', player) or state.has('Cane of Byrna', player))
    set_rule(world.get_location('Ice Palace - Spike Room', player), lambda state: state.world.can_take_damage or state.has('Hookshot', player) or state.has('Cape', player) or state.has('Cane of Byrna', player))
    set_rule(world.get_location('Ice Palace - Freezor Chest', player), lambda state: state.can_melt_things(player))
    set_rule(world.get_entrance('Ice Hookshot Ledge Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Ice Hookshot Balcony Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Ice Switch Room SE', player), lambda state: state.has('Cane of Somaria', player) or state.has('Convenient Block', player))
    set_defeat_dungeon_boss_rule(world.get_location('Ice Palace - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Ice Palace - Prize', player))
    # Misery Mire
    set_rule(world.get_entrance('Mire Lobby Gap', player), lambda state: state.has_Boots(player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('Mire Post-Gap Gap', player), lambda state: state.has_Boots(player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('Mire Falling Bridge WN', player), lambda state: state.has_Boots(player) or state.has('Hookshot', player))  # this is due to the fact the the door opposite is blocked
    set_rule(world.get_entrance('Mire 2 NE', player), lambda state: state.has_sword(player) or state.has('Fire Rod', player) or state.has('Ice Rod', player) or state.has('Hammer', player) or state.has('Cane of Somaria', player) or state.can_shoot_arrows(player))  # need to defeat wizzrobes, bombs don't work ...
    set_rule(world.get_location('Misery Mire - Spike Chest', player), lambda state: (state.world.can_take_damage and state.has_hearts(player, 4)) or state.has('Cane of Byrna', player) or state.has('Cape', player))
    set_rule(world.get_entrance('Mire Left Bridge Hook Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Mire Tile Room NW', player), lambda state: state.has_fire_source(player))
    set_rule(world.get_entrance('Mire Attic Hint Hole', player), lambda state: state.has_fire_source(player))
    set_rule(world.get_entrance('Mire Dark Shooters SW', player), lambda state: state.has('Cane of Somaria', player))
    set_defeat_dungeon_boss_rule(world.get_location('Misery Mire - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Misery Mire - Prize', player))
    # Turtle Rock (Somaria platforms gate most traversal)
    set_rule(world.get_entrance('TR Main Lobby Gap', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Lobby Ledge Gap', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub SW', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub SE', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub ES', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub EN', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub NW', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Hub NE', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Torches NW', player), lambda state: state.has('Cane of Somaria', player) and state.has('Fire Rod', player))
    set_rule(world.get_entrance('TR Big Chest Entrance Gap', player), lambda state: state.has('Cane of Somaria', player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('TR Big Chest Gap', player), lambda state: state.has('Cane of Somaria', player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('TR Dark Ride Up Stairs', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Dark Ride SW', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Crystal Maze Cane Path', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Final Abyss South Stairs', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('TR Final Abyss NW', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_location('Turtle Rock - Eye Bridge - Bottom Left', player), lambda state: state.has('Cane of Byrna', player) or state.has('Cape', player) or state.has('Mirror Shield', player))
    set_rule(world.get_location('Turtle Rock - Eye Bridge - Bottom Right', player), lambda state: state.has('Cane of Byrna', player) or state.has('Cape', player) or state.has('Mirror Shield', player))
    set_rule(world.get_location('Turtle Rock - Eye Bridge - Top Left', player), lambda state: state.has('Cane of Byrna', player) or state.has('Cape', player) or state.has('Mirror Shield', player))
    set_rule(world.get_location('Turtle Rock - Eye Bridge - Top Right', player), lambda state: state.has('Cane of Byrna', player) or state.has('Cape', player) or state.has('Mirror Shield', player))
    set_defeat_dungeon_boss_rule(world.get_location('Turtle Rock - Boss', player))
    set_defeat_dungeon_boss_rule(world.get_location('Turtle Rock - Prize', player))
    # Ganon's Tower
    set_rule(world.get_location('Ganons Tower - Bob\'s Torch', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('GT Hope Room EN', player), lambda state: state.has('Cane of Somaria', player))
    set_rule(world.get_entrance('GT Conveyor Cross WN', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('GT Conveyor Cross EN', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('GT Speed Torch SE', player), lambda state: state.has('Fire Rod', player))
    set_rule(world.get_entrance('GT Hookshot East-North Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('GT Hookshot South-East Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('GT Hookshot South-North Path', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('GT Hookshot East-South Path', player), lambda state: state.has('Hookshot', player) or state.has_Boots(player))
    set_rule(world.get_entrance('GT Hookshot North-East Path', player), lambda state: state.has('Hookshot', player) or state.has_Boots(player))
    set_rule(world.get_entrance('GT Hookshot North-South Path', player), lambda state: state.has('Hookshot', player) or state.has_Boots(player))
    set_rule(world.get_entrance('GT Firesnake Room Hook Path', player), lambda state: state.has('Hookshot', player))
    # I am tempted to stick an invincibility rule for getting across falling bridge
    set_rule(world.get_entrance('GT Ice Armos NE', player), lambda state: world.get_region('GT Ice Armos', player).dungeon.bosses['bottom'].can_defeat(state))
    set_rule(world.get_entrance('GT Ice Armos WS', player), lambda state: world.get_region('GT Ice Armos', player).dungeon.bosses['bottom'].can_defeat(state))
    set_rule(world.get_entrance('GT Mimics 1 NW', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('GT Mimics 1 ES', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('GT Mimics 2 WS', player), lambda state: state.can_shoot_arrows(player))
    set_rule(world.get_entrance('GT Mimics 2 NE', player), lambda state: state.can_shoot_arrows(player))
    # consider access to refill room
    set_rule(world.get_entrance('GT Gauntlet 1 WN', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 2 EN', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 2 SW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 3 NW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 3 SW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 4 NW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 4 SW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 5 NW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Gauntlet 5 WS', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Wizzrobes 1 SW', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Wizzrobes 2 SE', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Wizzrobes 2 NE', player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_entrance('GT Lanmolas 2 ES', player), lambda state: world.get_region('GT Lanmolas 2', player).dungeon.bosses['middle'].can_defeat(state))
    set_rule(world.get_entrance('GT Lanmolas 2 NW', player), lambda state: world.get_region('GT Lanmolas 2', player).dungeon.bosses['middle'].can_defeat(state))
    set_rule(world.get_entrance('GT Torch Cross ES', player), lambda state: state.has_fire_source(player))
    set_rule(world.get_entrance('GT Falling Torches NE', player), lambda state: state.has_fire_source(player))
    set_rule(world.get_entrance('GT Moldorm Gap', player), lambda state: state.has('Hookshot', player) and world.get_region('GT Moldorm', player).dungeon.bosses['top'].can_defeat(state))
    set_defeat_dungeon_boss_rule(world.get_location('Agahnim 2', player))
    add_key_logic_rules(world, player)
    # crystal switch rules
    set_rule(world.get_entrance('PoD Arena Crystal Path', player), lambda state: state.can_reach_blue(world.get_region('PoD Arena Crystal', player), player))
    set_rule(world.get_entrance('Swamp Trench 2 Pots Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Swamp Trench 2 Pots', player), player))
    set_rule(world.get_entrance('Swamp Shortcut Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Swamp Shortcut', player), player))
    set_rule(world.get_entrance('Thieves Attic ES', player), lambda state: state.can_reach_blue(world.get_region('Thieves Attic', player), player))
    set_rule(world.get_entrance('Thieves Hellway Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Thieves Hellway', player), player))
    set_rule(world.get_entrance('Thieves Hellway Crystal Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Thieves Hellway N Crystal', player), player))
    set_rule(world.get_entrance('Thieves Triple Bypass SE', player), lambda state: state.can_reach_blue(world.get_region('Thieves Triple Bypass', player), player))
    set_rule(world.get_entrance('Thieves Triple Bypass WN', player), lambda state: state.can_reach_blue(world.get_region('Thieves Triple Bypass', player), player))
    set_rule(world.get_entrance('Thieves Triple Bypass EN', player), lambda state: state.can_reach_blue(world.get_region('Thieves Triple Bypass', player), player))
    set_rule(world.get_entrance('Ice Crystal Right Blue Hole', player), lambda state: state.can_reach_blue(world.get_region('Ice Crystal Right', player), player))
    set_rule(world.get_entrance('Ice Crystal Left Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Ice Crystal Left', player), player))
    set_rule(world.get_entrance('Ice Backwards Room Hole', player), lambda state: state.can_reach_blue(world.get_region('Ice Backwards Room', player), player))
    set_rule(world.get_entrance('Mire Hub Upper Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub', player), player))
    set_rule(world.get_entrance('Mire Hub Lower Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub', player), player))
    set_rule(world.get_entrance('Mire Hub Right Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub Right', player), player))
    set_rule(world.get_entrance('Mire Hub Top Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub Top', player), player))
    set_rule(world.get_entrance('Mire Hub Switch Blue Barrier N', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub Switch', player), player))
    set_rule(world.get_entrance('Mire Hub Switch Blue Barrier S', player), lambda state: state.can_reach_blue(world.get_region('Mire Hub Switch', player), player))
    set_rule(world.get_entrance('Mire Map Spike Side Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Map Spike Side', player), player))
    set_rule(world.get_entrance('Mire Map Spot Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Map Spot', player), player))
    set_rule(world.get_entrance('Mire Crystal Dead End Left Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Crystal Dead End', player), player))
    set_rule(world.get_entrance('Mire Crystal Dead End Right Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Crystal Dead End', player), player))
    set_rule(world.get_entrance('Mire South Fish Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire South Fish', player), player))
    set_rule(world.get_entrance('Mire Compass Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Compass Room', player), player))
    set_rule(world.get_entrance('Mire Crystal Mid Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Crystal Mid', player), player))
    set_rule(world.get_entrance('Mire Crystal Left Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('Mire Crystal Left', player), player))
    set_rule(world.get_entrance('TR Crystal Maze Blue Path', player), lambda state: state.can_reach_blue(world.get_region('TR Crystal Maze End', player), player))
    set_rule(world.get_entrance('GT Hookshot Entry Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('GT Hookshot South Entry', player), player))
    set_rule(world.get_entrance('GT Double Switch Key Blue Path', player), lambda state: state.can_reach_blue(world.get_region('GT Double Switch Key Spot', player), player))
    set_rule(world.get_entrance('GT Double Switch Blue Barrier', player), lambda state: state.can_reach_blue(world.get_region('GT Double Switch Switches', player), player))
    set_rule(world.get_entrance('GT Double Switch Transition Blue', player), lambda state: state.can_reach_blue(world.get_region('GT Double Switch Transition', player), player))
    set_rule(world.get_entrance('Swamp Barrier Ledge - Orange', player), lambda state: state.can_reach_orange(world.get_region('Swamp Barrier Ledge', player), player))
    set_rule(world.get_entrance('Swamp Barrier - Orange', player), lambda state: state.can_reach_orange(world.get_region('Swamp Barrier', player), player))
    set_rule(world.get_entrance('Thieves Hellway Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Thieves Hellway', player), player))
    set_rule(world.get_entrance('Thieves Hellway Crystal Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Thieves Hellway S Crystal', player), player))
    set_rule(world.get_entrance('Ice Bomb Jump Ledge Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Ice Bomb Jump Ledge', player), player))
    set_rule(world.get_entrance('Ice Bomb Jump Catwalk Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Ice Bomb Jump Catwalk', player), player))
    set_rule(world.get_entrance('Ice Crystal Right Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Ice Crystal Right', player), player))
    set_rule(world.get_entrance('Ice Crystal Left Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Ice Crystal Left', player), player))
    set_rule(world.get_entrance('Mire Crystal Right Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Mire Crystal Right', player), player))
    set_rule(world.get_entrance('Mire Crystal Mid Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Mire Crystal Mid', player), player))
    set_rule(world.get_entrance('Mire Firesnake Skip Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Mire Firesnake Skip', player), player))
    set_rule(world.get_entrance('Mire Antechamber Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('Mire Antechamber', player), player))
    set_rule(world.get_entrance('GT Double Switch Orange Barrier', player), lambda state: state.can_reach_orange(world.get_region('GT Double Switch Entry', player), player))
    set_rule(world.get_entrance('GT Double Switch Orange Barrier 2', player), lambda state: state.can_reach_orange(world.get_region('GT Double Switch Entry', player), player))
    set_rule(world.get_entrance('GT Double Switch Orange Path', player), lambda state: state.can_reach_orange(world.get_region('GT Double Switch Switches', player), player))
    set_rule(world.get_entrance('GT Double Switch Key Orange Path', player), lambda state: state.can_reach_orange(world.get_region('GT Double Switch Key Spot', player), player))
    # End of door rando rules.
    # Ganon fight
    set_rule(world.get_location('Ganon', player), lambda state: state.has_beam_sword(player) and state.has_fire_source(player) and state.has_crystals(world.crystals_needed_for_ganon[player], player)
             and (state.has('Tempered Sword', player) or state.has('Golden Sword', player) or (state.has('Silver Arrows', player) and state.can_shoot_arrows(player)) or state.has('Lamp', player) or state.can_extend_magic(player, 12)))  # need to light torch a sufficient amount of times
    set_rule(world.get_entrance('Ganon Drop', player), lambda state: state.has_beam_sword(player))  # need to damage ganon to get tiles to drop
def default_rules(world, player):
    """Install the baseline (non-inverted) overworld access rules for *player*.

    Covers save-and-quit region reachability, overworld entrances/drops,
    teleporters, mirror spots, and the Ganon's Tower / Agahnim gating.
    Mutates ``world`` in place via ``set_rule``; returns nothing.
    Dungeon-interior rules are handled elsewhere.
    """
    if world.mode[player] == 'standard':
        # Links house requires reaching Sanc so skipping that chest isn't a softlock.
        world.get_region('Hyrule Castle Secret Entrance', player).can_reach_private = lambda state: True
        old_rule = world.get_region('Links House', player).can_reach_private
        world.get_region('Links House', player).can_reach_private = lambda state: state.has('Zelda Delivered', player) or old_rule(state)
    else:
        # these are default save&quit points and always accessible
        world.get_region('Links House', player).can_reach_private = lambda state: True
        world.get_region('Sanctuary', player).can_reach_private = lambda state: True
    # overworld requirements
    set_rule(world.get_entrance('Kings Grave', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('Kings Grave Outer Rocks', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Kings Grave Inner Rocks', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Kings Grave Mirror Spot', player), lambda state: state.has_Pearl(player) and state.has_Mirror(player))
    # Caution: If king's grave is releaxed at all to account for reaching it via a two way cave's exit in insanity mode, then the bomb shop logic will need to be updated (that would involve create a small ledge-like Region for it)
    set_rule(world.get_entrance('Bonk Fairy (Light)', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('Lumberjack Tree Tree', player), lambda state: state.has_Boots(player) and state.has('Beat Agahnim 1', player))
    set_rule(world.get_entrance('Bonk Rock Cave', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('Desert Palace Stairs', player), lambda state: state.has('Book of Mudora', player))
    set_rule(world.get_entrance('Sanctuary Grave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('20 Rupee Cave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('50 Rupee Cave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Death Mountain Entrance Rock', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Bumper Cave Entrance Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Flute Spot 1', player), lambda state: state.has('Ocarina', player))
    # Teleporters (light-world side): heavy lifts and/or hammer; hammer/bush work needs Moon Pearl.
    set_rule(world.get_entrance('Lake Hylia Central Island Teleporter', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Dark Desert Teleporter', player), lambda state: state.has('Ocarina', player) and state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('East Hyrule Teleporter', player), lambda state: state.has('Hammer', player) and state.can_lift_rocks(player) and state.has_Pearl(player))  # bunny cannot use hammer
    set_rule(world.get_entrance('South Hyrule Teleporter', player), lambda state: state.has('Hammer', player) and state.can_lift_rocks(player) and state.has_Pearl(player))  # bunny cannot use hammer
    set_rule(world.get_entrance('Kakariko Teleporter', player), lambda state: ((state.has('Hammer', player) and state.can_lift_rocks(player)) or state.can_lift_heavy_rocks(player)) and state.has_Pearl(player))  # bunny cannot lift bushes
    set_rule(world.get_location('Flute Spot', player), lambda state: state.has('Shovel', player))
    set_rule(world.get_location('Zora\'s Ledge', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_entrance('Waterfall of Wishing', player), lambda state: state.has('Flippers', player))  # can be fake flippered into, but is in weird state inside that might prevent you from doing things. Can be improved in future Todo
    set_rule(world.get_location('Frog', player), lambda state: state.can_lift_heavy_rocks(player))  # will get automatic moon pearl requirement
    set_rule(world.get_location('Potion Shop', player), lambda state: state.has('Mushroom', player))
    set_rule(world.get_entrance('Desert Palace Entrance (North) Rocks', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Desert Ledge Return Rocks', player), lambda state: state.can_lift_rocks(player))  # should we decide to place something that is not a dungeon end up there at some point
    set_rule(world.get_entrance('Checkerboard Cave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Agahnims Tower', player), lambda state: state.has('Cape', player) or state.has_beam_sword(player) or state.has('Beat Agahnim 1', player))  # barrier gets removed after killing agahnim, relevant for entrance shuffle
    set_rule(world.get_entrance('Top of Pyramid', player), lambda state: state.has('Beat Agahnim 1', player))
    set_rule(world.get_entrance('Old Man Cave Exit (West)', player), lambda state: False)  # drop cannot be climbed up
    set_rule(world.get_entrance('Broken Bridge (West)', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Broken Bridge (East)', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('East Death Mountain Teleporter', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Fairy Ascension Rocks', player), lambda state: state.can_lift_heavy_rocks(player))
    # NOTE(review): uses state.has('Mirror', ...) while every other mirror check in this
    # function uses state.has_Mirror(...) — confirm the item name 'Mirror' is intended here.
    set_rule(world.get_entrance('Paradox Cave Push Block Reverse', player), lambda state: state.has('Mirror', player))  # can erase block
    set_rule(world.get_entrance('Death Mountain (Top)', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Turtle Rock Teleporter', player), lambda state: state.can_lift_heavy_rocks(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('East Death Mountain (Top)', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Catfish Exit Rock', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Catfish Entrance Rock', player), lambda state: state.can_lift_rocks(player))
    # Dark-world traversal: Moon Pearl required throughout (bunny cannot lift/hammer/interact).
    set_rule(world.get_entrance('Northeast Dark World Broken Bridge Pass', player), lambda state: state.has_Pearl(player) and (state.can_lift_rocks(player) or state.has('Hammer', player) or state.has('Flippers', player)))
    set_rule(world.get_entrance('East Dark World Broken Bridge Pass', player), lambda state: state.has_Pearl(player) and (state.can_lift_rocks(player) or state.has('Hammer', player)))
    set_rule(world.get_entrance('South Dark World Bridge', player), lambda state: state.has('Hammer', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Bonk Fairy (Dark)', player), lambda state: state.has_Pearl(player) and state.has_Boots(player))
    set_rule(world.get_entrance('West Dark World Gap', player), lambda state: state.has_Pearl(player) and state.has('Hookshot', player))
    set_rule(world.get_entrance('Palace of Darkness', player), lambda state: state.has_Pearl(player))  # kiki needs pearl
    set_rule(world.get_entrance('Hyrule Castle Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Hyrule Castle Main Gate', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Dark Lake Hylia Drop (East)', player), lambda state: (state.has_Pearl(player) and state.has('Flippers', player) or state.has_Mirror(player)))  # Overworld Bunny Revival
    set_rule(world.get_location('Bombos Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has_beam_sword(player))
    set_rule(world.get_entrance('Dark Lake Hylia Drop (South)', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))  # ToDo any fake flipper set up?
    set_rule(world.get_entrance('Dark Lake Hylia Ledge Fairy', player), lambda state: state.has_Pearl(player))  # bomb required
    set_rule(world.get_entrance('Dark Lake Hylia Ledge Spike Cave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Dark Lake Hylia Teleporter', player), lambda state: state.has_Pearl(player) and (state.has('Hammer', player) or state.can_lift_rocks(player)))  # Fake Flippers
    set_rule(world.get_entrance('Village of Outcasts Heavy Rock', player), lambda state: state.has_Pearl(player) and state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Hype Cave', player), lambda state: state.has_Pearl(player))  # bomb required
    set_rule(world.get_entrance('Brewery', player), lambda state: state.has_Pearl(player))  # bomb required
    set_rule(world.get_entrance('Thieves Town', player), lambda state: state.has_Pearl(player))  # bunny cannot pull
    set_rule(world.get_entrance('Skull Woods First Section Hole (North)', player), lambda state: state.has_Pearl(player))  # bunny cannot lift bush
    set_rule(world.get_entrance('Skull Woods Second Section Hole', player), lambda state: state.has_Pearl(player))  # bunny cannot lift bush
    set_rule(world.get_entrance('Maze Race Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Cave 45 Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Bombos Tablet Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('East Dark World Bridge', player), lambda state: state.has_Pearl(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('Lake Hylia Island Mirror Spot', player), lambda state: state.has_Pearl(player) and state.has_Mirror(player) and state.has('Flippers', player))
    set_rule(world.get_entrance('Lake Hylia Central Island Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('East Dark World River Pier', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))  # ToDo any fake flipper set up?
    set_rule(world.get_entrance('Graveyard Ledge Mirror Spot', player), lambda state: state.has_Pearl(player) and state.has_Mirror(player))
    set_rule(world.get_entrance('Bumper Cave Entrance Rock', player), lambda state: state.has_Pearl(player) and state.can_lift_rocks(player))
    set_rule(world.get_entrance('Bumper Cave Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Bat Cave Drop Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Dark World Hammer Peg Cave', player), lambda state: state.has_Pearl(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('Village of Outcasts Eastern Rocks', player), lambda state: state.has_Pearl(player) and state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Peg Area Rocks', player), lambda state: state.has_Pearl(player) and state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Village of Outcasts Pegs', player), lambda state: state.has_Pearl(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('Grassy Lawn Pegs', player), lambda state: state.has_Pearl(player) and state.has('Hammer', player))
    set_rule(world.get_entrance('Bumper Cave Exit (Top)', player), lambda state: state.has('Cape', player))
    set_rule(world.get_entrance('Bumper Cave Exit (Bottom)', player), lambda state: state.has('Cape', player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('Skull Woods Final Section', player), lambda state: state.has('Fire Rod', player) and state.has_Pearl(player))  # bunny cannot use fire rod
    set_rule(world.get_entrance('Misery Mire', player), lambda state: state.has_Pearl(player) and state.has_sword(player) and state.has_misery_mire_medallion(player))  # sword required to cast magic (!)
    set_rule(world.get_entrance('Desert Ledge (Northeast) Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Desert Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Desert Palace Stairs Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Desert Palace Entrance (North) Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Spectacle Rock Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Hookshot Cave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('East Death Mountain (Top) Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Mimic Cave Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Spiral Cave Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Fairy Ascension Mirror Spot', player), lambda state: state.has_Mirror(player) and state.has_Pearl(player))  # need to lift flowers
    set_rule(world.get_entrance('Isolated Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Superbunny Cave Exit (Bottom)', player), lambda state: False)  # Cannot get to bottom exit from top. Just exists for shuffling
    set_rule(world.get_entrance('Floating Island Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Turtle Rock', player), lambda state: state.has_Pearl(player) and state.has_sword(player) and state.has_turtle_rock_medallion(player) and state.can_reach('Turtle Rock (Top)', 'Region', player))  # sword required to cast magic (!)
    set_rule(world.get_entrance('Pyramid Hole', player), lambda state: state.has('Beat Agahnim 2', player) or world.open_pyramid[player])
    set_rule(world.get_entrance('Ganons Tower', player), lambda state: False)  # This is a safety for the TR function below to not require GT entrance in its key logic.
    if world.swords[player] == 'swordless':
        swordless_rules(world, player)
    # Real GT rule: replaces the lambda-False safety placed above.
    set_rule(world.get_entrance('Ganons Tower', player), lambda state: state.has_crystals(world.crystals_needed_for_gt[player], player))
def forbid_overworld_glitches(world, player):
    """Lock every overworld-glitch entrance for *player* in non-glitched logic.

    Marks each boots-clip exit (light and dark world), each glitched-speed
    drop, and — outside inverted mode — each dark-world mirror-clip spot as
    permanently unreachable (``lambda state: False``).  Mutates ``world``
    in place; returns nothing.
    """
    inverted = world.mode[player] == 'inverted'
    # Renamed loop variable from `exit` to avoid shadowing the builtin.
    for exit_name in OWGSets.get_boots_clip_exits_lw(inverted):
        set_rule(world.get_entrance(exit_name, player), lambda state: False)
    for exit_name in OWGSets.get_boots_clip_exits_dw(inverted):
        set_rule(world.get_entrance(exit_name, player), lambda state: False)
    for exit_name in OWGSets.get_glitched_speed_drops_dw():
        set_rule(world.get_entrance(exit_name, player), lambda state: False)
    if not inverted:
        # Mirror clips only exist when mirroring from the dark world (non-inverted).
        for exit_name in OWGSets.get_mirror_clip_spots_dw():
            set_rule(world.get_entrance(exit_name, player), lambda state: False)
def inverted_rules(world, player):
    """Install the inverted-mode overworld access rules for *player*.

    Mirror image of ``default_rules``: Link starts in the dark world, so
    Moon Pearl requirements move to light-world interactions, teleporter
    directions flip, and inverted-only mirror spots / flute spots are gated.
    Mutates ``world`` in place via ``set_rule``; returns nothing.
    """
    # s&q regions. link's house entrance is set to true so the filler knows the chest inside can always be reached
    world.get_region('Inverted Links House', player).can_reach_private = lambda state: True
    world.get_region('Inverted Links House', player).entrances[0].can_reach = lambda state: True
    world.get_region('Inverted Dark Sanctuary', player).entrances[0].parent_region.can_reach_private = lambda state: True
    old_rule = world.get_region('Hyrule Castle Ledge', player).can_reach_private
    world.get_region('Hyrule Castle Ledge', player).can_reach_private = lambda state: (state.has_Mirror(player) and state.has('Beat Agahnim 1', player) and state.can_reach_light_world(player)) or old_rule(state)
    # overworld requirements
    set_rule(world.get_location('Maze Race', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Mini Moldorm Cave', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Light Hype Fairy', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Potion Shop Pier', player), lambda state: state.has('Flippers', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Light World Pier', player), lambda state: state.has('Flippers', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Kings Grave', player), lambda state: state.has_Boots(player) and state.can_lift_heavy_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Kings Grave Outer Rocks', player), lambda state: state.can_lift_heavy_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Kings Grave Inner Rocks', player), lambda state: state.can_lift_heavy_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Potion Shop Inner Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Potion Shop Outer Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Potion Shop Outer Rock', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Potion Shop Inner Rock', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Graveyard Cave Inner Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Graveyard Cave Outer Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Secret Passage Inner Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Secret Passage Outer Bushes', player), lambda state: state.has_Pearl(player))
    # Caution: If king's grave is releaxed at all to account for reaching it via a two way cave's exit in insanity mode, then the bomb shop logic will need to be updated (that would involve create a small ledge-like Region for it)
    set_rule(world.get_entrance('Bonk Fairy (Light)', player), lambda state: state.has_Boots(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Bat Cave Drop Ledge', player), lambda state: state.has('Hammer', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Lumberjack Tree Tree', player), lambda state: state.has_Boots(player) and state.has_Pearl(player) and state.has('Beat Agahnim 1', player))
    set_rule(world.get_entrance('Bonk Rock Cave', player), lambda state: state.has_Boots(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Desert Palace Stairs', player), lambda state: state.has('Book of Mudora', player))  # bunny can use book
    set_rule(world.get_entrance('Sanctuary Grave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('20 Rupee Cave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('50 Rupee Cave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Death Mountain Entrance Rock', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Bumper Cave Entrance Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Lake Hylia Central Island Mirror Spot', player), lambda state: state.has_Mirror(player))
    # Teleporters run dark->light in inverted mode; hammer/bush work still needs Moon Pearl.
    set_rule(world.get_entrance('Dark Lake Hylia Central Island Teleporter', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Dark Desert Teleporter', player), lambda state: state.can_flute(player) and state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('East Dark World Teleporter', player), lambda state: state.has('Hammer', player) and state.can_lift_rocks(player) and state.has_Pearl(player))  # bunny cannot use hammer
    set_rule(world.get_entrance('South Dark World Teleporter', player), lambda state: state.has('Hammer', player) and state.can_lift_rocks(player) and state.has_Pearl(player))  # bunny cannot use hammer
    set_rule(world.get_entrance('West Dark World Teleporter', player), lambda state: ((state.has('Hammer', player) and state.can_lift_rocks(player)) or state.can_lift_heavy_rocks(player)) and state.has_Pearl(player))
    set_rule(world.get_location('Flute Spot', player), lambda state: state.has('Shovel', player) and state.has_Pearl(player))
    set_rule(world.get_location('Zora\'s Ledge', player), lambda state: state.has('Flippers', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Waterfall of Wishing', player), lambda state: state.has('Flippers', player) and state.has_Pearl(player))  # can be fake flippered into, but is in weird state inside that might prevent you from doing things. Can be improved in future Todo
    set_rule(world.get_location('Frog', player), lambda state: state.can_lift_heavy_rocks(player) or (state.can_reach('Light World', 'Region', player) and state.has_Mirror(player)))
    set_rule(world.get_location('Mushroom', player), lambda state: state.has_Pearl(player))  # need pearl to pick up bushes
    set_rule(world.get_entrance('Bush Covered Lawn Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Bush Covered Lawn Inner Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Bush Covered Lawn Outer Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Bomb Hut Inner Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Bomb Hut Outer Bushes', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('North Fairy Cave Drop', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Lost Woods Hideout Drop', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_location('Potion Shop', player), lambda state: state.has('Mushroom', player) and (state.can_reach('Potion Shop Area', 'Region', player)))  # new inverted region, need pearl for bushes or access to potion shop door/waterfall fairy
    set_rule(world.get_entrance('Desert Palace Entrance (North) Rocks', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Desert Ledge Return Rocks', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))  # should we decide to place something that is not a dungeon end up there at some point
    set_rule(world.get_entrance('Checkerboard Cave', player), lambda state: state.can_lift_rocks(player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Hyrule Castle Secret Entrance Drop', player), lambda state: state.has_Pearl(player))
    set_rule(world.get_entrance('Old Man Cave Exit (West)', player), lambda state: False)  # drop cannot be climbed up
    set_rule(world.get_entrance('Broken Bridge (West)', player), lambda state: state.has('Hookshot', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Broken Bridge (East)', player), lambda state: state.has('Hookshot', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Dark Death Mountain Teleporter (East Bottom)', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Fairy Ascension Rocks', player), lambda state: state.can_lift_heavy_rocks(player) and state.has_Pearl(player))
    # NOTE(review): uses state.has('Mirror', ...) while every other mirror check in this
    # function uses state.has_Mirror(...) — confirm the item name 'Mirror' is intended here.
    set_rule(world.get_entrance('Paradox Cave Push Block Reverse', player), lambda state: state.has('Mirror', player))  # can erase block
    set_rule(world.get_entrance('Death Mountain (Top)', player), lambda state: state.has('Hammer', player) and state.has_Pearl(player))
    set_rule(world.get_entrance('Dark Death Mountain Teleporter (East)', player), lambda state: state.can_lift_heavy_rocks(player) and state.has('Hammer', player) and state.has_Pearl(player))  # bunny cannot use hammer
    set_rule(world.get_entrance('East Death Mountain (Top)', player), lambda state: state.has('Hammer', player) and state.has_Pearl(player))  # bunny can not use hammer
    set_rule(world.get_location('Catfish', player), lambda state: state.can_lift_rocks(player) or (state.has('Flippers', player) and state.has_Mirror(player) and state.has_Pearl(player) and state.can_reach('Light World', 'Region', player)))
    # Dark-world traversal: no Moon Pearl needed in inverted mode (Link is native here).
    set_rule(world.get_entrance('Northeast Dark World Broken Bridge Pass', player), lambda state: ((state.can_lift_rocks(player) or state.has('Hammer', player)) or state.has('Flippers', player)))
    set_rule(world.get_entrance('East Dark World Broken Bridge Pass', player), lambda state: (state.can_lift_rocks(player) or state.has('Hammer', player)))
    set_rule(world.get_entrance('South Dark World Bridge', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Bonk Fairy (Dark)', player), lambda state: state.has_Boots(player))
    set_rule(world.get_entrance('West Dark World Gap', player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Dark Lake Hylia Drop (East)', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_location('Bombos Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has_beam_sword(player))
    set_rule(world.get_entrance('Dark Lake Hylia Drop (South)', player), lambda state: state.has('Flippers', player))  # ToDo any fake flipper set up?
    set_rule(world.get_entrance('Dark Lake Hylia Ledge Pier', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_entrance('Dark Lake Hylia Ledge Spike Cave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Dark Lake Hylia Teleporter', player), lambda state: state.has('Flippers', player) and (state.has('Hammer', player) or state.can_lift_rocks(player)))  # Fake Flippers
    set_rule(world.get_entrance('Village of Outcasts Heavy Rock', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('East Dark World Bridge', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Lake Hylia Central Island Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('East Dark World River Pier', player), lambda state: state.has('Flippers', player))  # ToDo any fake flipper set up? (Qirn Jump)
    set_rule(world.get_entrance('Bumper Cave Entrance Rock', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('Bumper Cave Ledge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Hammer Peg Area Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Dark World Hammer Peg Cave', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Village of Outcasts Eastern Rocks', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Peg Area Rocks', player), lambda state: state.can_lift_heavy_rocks(player))
    set_rule(world.get_entrance('Village of Outcasts Pegs', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Grassy Lawn Pegs', player), lambda state: state.has('Hammer', player))
    set_rule(world.get_entrance('Bumper Cave Exit (Top)', player), lambda state: state.has('Cape', player))
    set_rule(world.get_entrance('Bumper Cave Exit (Bottom)', player), lambda state: state.has('Cape', player) or state.has('Hookshot', player))
    set_rule(world.get_entrance('Skull Woods Final Section', player), lambda state: state.has('Fire Rod', player))
    set_rule(world.get_entrance('Misery Mire', player), lambda state: state.has_sword(player) and state.has_misery_mire_medallion(player))  # sword required to cast magic (!)
    set_rule(world.get_entrance('Hookshot Cave', player), lambda state: state.can_lift_rocks(player))
    set_rule(world.get_entrance('East Death Mountain Mirror Spot (Top)', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Death Mountain (Top) Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('East Death Mountain Mirror Spot (Bottom)', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Dark Death Mountain Ledge Mirror Spot (East)', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Dark Death Mountain Ledge Mirror Spot (West)', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Laser Bridge Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Superbunny Cave Exit (Bottom)', player), lambda state: False)  # Cannot get to bottom exit from top. Just exists for shuffling
    set_rule(world.get_entrance('Floating Island Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Turtle Rock', player), lambda state: state.has_sword(player) and state.has_turtle_rock_medallion(player) and state.can_reach('Turtle Rock (Top)', 'Region', player))  # sword required to cast magic (!)
    # new inverted spots
    set_rule(world.get_entrance('Post Aga Teleporter', player), lambda state: state.has('Beat Agahnim 1', player))
    set_rule(world.get_entrance('Mire Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Desert Palace Stairs Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Death Mountain Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('East Dark World Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('West Dark World Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('South Dark World Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Northeast Dark World Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Potion Shop Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Shopping Mall Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Maze Race Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Desert Palace North Mirror Spot', player), lambda state: state.has_Mirror(player))
    # NOTE(review): 'Death Mountain (Top) Mirror Spot' was already assigned the identical
    # rule earlier in this function; this re-assignment is redundant but harmless.
    set_rule(world.get_entrance('Death Mountain (Top) Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Graveyard Cave Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Bomb Hut Mirror Spot', player), lambda state: state.has_Mirror(player))
    set_rule(world.get_entrance('Skull Woods Mirror Spot', player), lambda state: state.has_Mirror(player))
    # inverted flute spots
    set_rule(world.get_entrance('DDM Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('NEDW Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('WDW Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('SDW Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('EDW Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('DLHL Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('DD Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('EDDM Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('Dark Grassy Lawn Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('Hammer Peg Area Flute', player), lambda state: state.can_flute(player))
    set_rule(world.get_entrance('Inverted Pyramid Hole', player), lambda state: state.has('Beat Agahnim 2', player) or world.open_pyramid[player])
    set_rule(world.get_entrance('Inverted Ganons Tower', player), lambda state: False)  # This is a safety for the TR function below to not require GT entrance in its key logic.
    if world.swords[player] == 'swordless':
        swordless_rules(world, player)
    # Real GT rule: replaces the lambda-False safety placed above.
    set_rule(world.get_entrance('Inverted Ganons Tower', player), lambda state: state.has_crystals(world.crystals_needed_for_gt[player], player))
def no_glitches_rules(world, player):
    """Apply no-glitches (glitchless) logic restrictions for this player.

    Water crossings gain Flippers requirements (plus Moon Pearl when the
    crossing lies in the player's non-native world), the glitch-only Paradox
    Cave block override and bomb jumps are forbidden, overworld glitches are
    ruled out, and conditional Lamp requirements are added for dark rooms.
    """
    if world.mode[player] != 'inverted':
        add_rule(world.get_entrance('Zoras River', player), lambda state: state.has('Flippers', player) or state.can_lift_rocks(player))
        add_rule(world.get_entrance('Lake Hylia Central Island Pier', player), lambda state: state.has('Flippers', player)) # can be fake flippered to
        add_rule(world.get_entrance('Hobo Bridge', player), lambda state: state.has('Flippers', player))
        # Dark-world water spots additionally need Moon Pearl in non-inverted modes.
        add_rule(world.get_entrance('Dark Lake Hylia Drop (East)', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))
        add_rule(world.get_entrance('Dark Lake Hylia Teleporter', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player) and (state.has('Hammer', player) or state.can_lift_rocks(player)))
        add_rule(world.get_entrance('Dark Lake Hylia Ledge Drop', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))
    else:
        # Inverted: the light-world water spots are the ones needing Moon Pearl.
        add_rule(world.get_entrance('Zoras River', player), lambda state: state.has_Pearl(player) and (state.has('Flippers', player) or state.can_lift_rocks(player)))
        add_rule(world.get_entrance('Lake Hylia Central Island Pier', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player)) # can be fake flippered to
        add_rule(world.get_entrance('Lake Hylia Island', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))
        add_rule(world.get_entrance('Hobo Bridge', player), lambda state: state.has_Pearl(player) and state.has('Flippers', player))
        add_rule(world.get_entrance('Dark Lake Hylia Drop (East)', player), lambda state: state.has('Flippers', player))
        add_rule(world.get_entrance('Dark Lake Hylia Teleporter', player), lambda state: state.has('Flippers', player) and (state.has('Hammer', player) or state.can_lift_rocks(player)))
        add_rule(world.get_entrance('Dark Lake Hylia Ledge Drop', player), lambda state: state.has('Flippers', player))
        add_rule(world.get_entrance('East Dark World Pier', player), lambda state: state.has('Flippers', player))
    set_rule(world.get_entrance('Bat Cave Drop Ledge', player), lambda state: state.has('Hammer', player))
    # TODO: move some dungeon rules into no-glitches logic - see these commented examples
    # add_rule(world.get_entrance('Ganons Tower (Hookshot Room)', player), lambda state: state.has('Hookshot', player) or state.has_Boots(player))
    # add_rule(world.get_entrance('Ganons Tower (Double Switch Room)', player), lambda state: state.has('Hookshot', player))
    # DMs_room_chests = ['Ganons Tower - DMs Room - Top Left', 'Ganons Tower - DMs Room - Top Right', 'Ganons Tower - DMs Room - Bottom Left', 'Ganons Tower - DMs Room - Bottom Right']
    # for location in DMs_room_chests:
    #     add_rule(world.get_location(location, player), lambda state: state.has('Hookshot', player))
    set_rule(world.get_entrance('Paradox Cave Push Block Reverse', player), lambda state: False) # no glitches does not require block override
    forbid_bomb_jump_requirements(world, player)
    forbid_overworld_glitches(world, player)
    add_conditional_lamps(world, player)
def forbid_bomb_jump_requirements(world, player):
    """Forbid bomb-jump tricks for this player (no-glitches logic)."""
    # Without bomb jumps the DMs Room chests can only be reached with the Hookshot.
    dm_chest_names = ('Ganons Tower - DMs Room - Top Left', 'Ganons Tower - DMs Room - Top Right', 'Ganons Tower - DMs Room - Bottom Left', 'Ganons Tower - DMs Room - Bottom Right')
    for chest_name in dm_chest_names:
        add_rule(world.get_location(chest_name, player), lambda state: state.has('Hookshot', player))
    # The Paradox Cave bomb jump itself is never in logic here.
    set_rule(world.get_entrance('Paradox Cave Bomb Jump', player), lambda state: False)
# Entrance names that identify a region as lying in the dark world: a region
# counts as dark world when any of its entrances is in this list (consumed by
# check_is_dark_world for conditional Lamp rules and OWG superbunny access).
DW_Entrances = ['Bumper Cave (Bottom)',
                'Superbunny Cave (Top)',
                'Superbunny Cave (Bottom)',
                'Hookshot Cave',
                'Bumper Cave (Top)',
                'Hookshot Cave Back Entrance',
                'Dark Death Mountain Ledge (East)',
                'Turtle Rock Isolated Ledge Entrance',
                'Thieves Town',
                'Skull Woods Final Section',
                'Ice Palace',
                'Misery Mire',
                'Palace of Darkness',
                'Swamp Palace',
                'Turtle Rock',
                'Dark Death Mountain Ledge (West)']
def check_is_dark_world(region):
    """Return True when any entrance into *region* is a known dark world entrance."""
    return any(door.name in DW_Entrances for door in region.entrances)
def add_conditional_lamps(world, player):
    """Add Lamp requirements to dark rooms and caves lacking a light cone.

    Light cones in standard depend on which world we actually are in, not
    which one the location would normally be in, so each spot is gated via
    add_conditional_lamp, which checks the light-cone option for the world
    the containing region actually lies in.
    """
    def add_conditional_lamp(spot, region, spottype='Location'):
        # Resolve the named spot to a Location or Entrance object.
        if spottype == 'Location':
            spot = world.get_location(spot, player)
        else:
            spot = world.get_entrance(spot, player)
        # Require the Lamp when the region's world has no light cone enabled.
        # NOTE(review): dark_world_light_cone / light_world_light_cone are not
        # indexed by player here, unlike sewer_light_cone below - confirm intended.
        if (not world.dark_world_light_cone and check_is_dark_world(world.get_region(region, player))) or (not world.light_world_light_cone and not check_is_dark_world(world.get_region(region, player))):
            add_lamp_requirement(spot, player)
    # Dungeon dark rooms: region name -> whether it is a sewer room, plus the
    # entrances/locations inside it that should be gated on the Lamp.
    dark_rooms = {
        'TR Dark Ride': {'sewer': False, 'entrances': ['TR Dark Ride Up Stairs', 'TR Dark Ride SW'], 'locations': []},
        'Mire Dark Shooters': {'sewer': False, 'entrances': ['Mire Dark Shooters Up Stairs', 'Mire Dark Shooters SW', 'Mire Dark Shooters SE'], 'locations': []},
        'Mire Key Rupees': {'sewer': False, 'entrances': ['Mire Key Rupees NE'], 'locations': []},
        'Mire Block X': {'sewer': False, 'entrances': ['Mire Block X NW', 'Mire Block X WS'], 'locations': []},
        'Mire Tall Dark and Roomy': {'sewer': False, 'entrances': ['Mire Tall Dark and Roomy ES', 'Mire Tall Dark and Roomy WS', 'Mire Tall Dark and Roomy WN'], 'locations': []},
        'Mire Crystal Right': {'sewer': False, 'entrances': ['Mire Crystal Right ES'], 'locations': []},
        'Mire Crystal Mid': {'sewer': False, 'entrances': ['Mire Crystal Mid NW'], 'locations': []},
        'Mire Crystal Left': {'sewer': False, 'entrances': ['Mire Crystal Left WS'], 'locations': []},
        'Mire Crystal Top': {'sewer': False, 'entrances': ['Mire Crystal Top SW'], 'locations': []},
        'Mire Shooter Rupees': {'sewer': False, 'entrances': ['Mire Shooter Rupees EN'], 'locations': []},
        'PoD Dark Alley': {'sewer': False, 'entrances': ['PoD Dark Alley NE'], 'locations': []},
        'PoD Callback': {'sewer': False, 'entrances': ['PoD Callback WS', 'PoD Callback Warp'], 'locations': []},
        'PoD Turtle Party': {'sewer': False, 'entrances': ['PoD Turtle Party ES', 'PoD Turtle Party NW'], 'locations': []},
        'PoD Lonely Turtle': {'sewer': False, 'entrances': ['PoD Lonely Turtle SW', 'PoD Lonely Turtle EN'], 'locations': []},
        'PoD Dark Pegs': {'sewer': False, 'entrances': ['PoD Dark Pegs Up Ladder', 'PoD Dark Pegs WN'], 'locations': []},
        'PoD Dark Basement': {'sewer': False, 'entrances': ['PoD Dark Basement W Up Stairs', 'PoD Dark Basement E Up Stairs'], 'locations': ['Palace of Darkness - Dark Basement - Left', 'Palace of Darkness - Dark Basement - Right']},
        'PoD Dark Maze': {'sewer': False, 'entrances': ['PoD Dark Maze EN', 'PoD Dark Maze E'], 'locations': ['Palace of Darkness - Dark Maze - Top', 'Palace of Darkness - Dark Maze - Bottom']},
        'Eastern Dark Square': {'sewer': False, 'entrances': ['Eastern Dark Square NW', 'Eastern Dark Square Key Door WN', 'Eastern Dark Square EN'], 'locations': []},
        'Eastern Dark Pots': {'sewer': False, 'entrances': ['Eastern Dark Pots WN'], 'locations': ['Eastern Palace - Dark Square Pot Key']},
        'Eastern Darkness': {'sewer': False, 'entrances': ['Eastern Darkness S', 'Eastern Darkness Up Stairs', 'Eastern Darkness NE'], 'locations': ['Eastern Palace - Dark Eyegore Key Drop']},
        'Eastern Rupees': {'sewer': False, 'entrances': ['Eastern Rupees SE'], 'locations': []},
        'Tower Lone Statue': {'sewer': False, 'entrances': ['Tower Lone Statue Down Stairs', 'Tower Lone Statue WN'], 'locations': []},
        'Tower Dark Maze': {'sewer': False, 'entrances': ['Tower Dark Maze EN', 'Tower Dark Maze ES'], 'locations': ['Castle Tower - Dark Maze']},
        'Tower Dark Chargers': {'sewer': False, 'entrances': ['Tower Dark Chargers WS', 'Tower Dark Chargers Up Stairs'], 'locations': []},
        'Tower Dual Statues': {'sewer': False, 'entrances': ['Tower Dual Statues Down Stairs', 'Tower Dual Statues WS'], 'locations': []},
        'Tower Dark Pits': {'sewer': False, 'entrances': ['Tower Dark Pits ES', 'Tower Dark Pits EN'], 'locations': []},
        'Tower Dark Archers': {'sewer': False, 'entrances': ['Tower Dark Archers WN', 'Tower Dark Archers Up Stairs'], 'locations': ['Castle Tower - Dark Archer Key Drop']},
        'Sewers Dark Cross': {'sewer': True, 'entrances': ['Sewers Dark Cross Key Door N', 'Sewers Dark Cross South Stairs'], 'locations': ['Sewers - Dark Cross']},
        'Sewers Behind Tapestry': {'sewer': True, 'entrances': ['Sewers Behind Tapestry S', 'Sewers Behind Tapestry Down Stairs'], 'locations': []},
        'Sewers Rope Room': {'sewer': True, 'entrances': ['Sewers Rope Room Up Stairs', 'Sewers Rope Room North Stairs'], 'locations': []},
        'Sewers Water': {'sewer': True, 'entrances': ['Sewers Water S', 'Sewers Water W'], 'locations': []},
        'Sewers Key Rat': {'sewer': True, 'entrances': ['Sewers Key Rat E', 'Sewers Key Rat Key Door N'], 'locations': ['Hyrule Castle - Key Rat Key Drop']},
    }
    # Regions determined to actually be dark; tracked only for the debug log below.
    dark_debug_set = set()
    for region, info in dark_rooms.items():
        is_dark = False
        if not world.sewer_light_cone[player]:
            is_dark = True
        elif world.doorShuffle[player] != 'crossed' and not info['sewer']:
            is_dark = True
        elif world.doorShuffle[player] == 'crossed':
            # In crossed door shuffle the sewer light cone only covers rooms
            # that ended up inside the Hyrule Castle layout.
            sewer_builder = world.dungeon_layouts[player]['Hyrule Castle']
            is_dark = region not in sewer_builder.master_sector.region_set()
        if is_dark:
            dark_debug_set.add(region)
            for ent in info['entrances']:
                add_conditional_lamp(ent, region, 'Entrance')
            for loc in info['locations']:
                add_conditional_lamp(loc, region, 'Location')
    logging.getLogger('').debug('Non Dark Regions: ' + ', '.join(set(dark_rooms.keys()).difference(dark_debug_set)))
    # Dark overworld caves (Old Man rescue and connectors), gated per-world
    # through the same helper as the dungeon rooms above.
    add_conditional_lamp('Old Man', 'Old Man Cave', 'Location')
    add_conditional_lamp('Old Man Cave Exit (East)', 'Old Man Cave', 'Entrance')
    add_conditional_lamp('Death Mountain Return Cave Exit (East)', 'Death Mountain Return Cave', 'Entrance')
    add_conditional_lamp('Death Mountain Return Cave Exit (West)', 'Death Mountain Return Cave', 'Entrance')
    add_conditional_lamp('Old Man House Front to Back', 'Old Man House', 'Entrance')
    add_conditional_lamp('Old Man House Back to Front', 'Old Man House', 'Entrance')
def overworld_glitches_rules(world, player):
    """Apply overworld-glitches (OWG) logic for this player.

    Opens entrances reachable through boots clips, glitched speed drops, and
    mirror clips (per the OWGSets tables), allows superbunny-mirror access to
    some dark world interiors, and loosens several teleporter and ledge
    requirements accordingly.
    """
    # Spots that are immediately accessible.
    for entrance in OWGSets.get_immediately_accessible_entrances(world, player):
        set_rule(world.get_entrance(entrance, player), lambda state: True)
    # Boots-accessible locations.
    for entrance in OWGSets.get_boots_clip_exits_lw(world.mode[player] == 'inverted'):
        set_rule(world.get_entrance(entrance, player), lambda state: state.can_boots_clip_lw(player))
    for entrance in OWGSets.get_boots_clip_exits_dw(world.mode[player] == 'inverted'):
        set_rule(world.get_entrance(entrance, player), lambda state: state.can_boots_clip_dw(player))
    # Glitched speed drops.
    for drop in OWGSets.get_glitched_speed_drops_dw():
        set_rule(world.get_entrance(drop, player), lambda state: state.can_get_glitched_speed_dw(player))
    # Dark Death Mountain Ledge Clip Spot also accessible with mirror.
    if world.mode[player] != 'inverted':
        add_rule(world.get_entrance('Dark Death Mountain Ledge Clip Spot', player), lambda state: state.has_Mirror(player), 'or')
    # Mirror clip spots.
    if world.mode[player] != 'inverted':
        for clip_spot in OWGSets.get_mirror_clip_spots_dw():
            set_rule(world.get_entrance(clip_spot, player), lambda state: state.has_Mirror(player))
    else:
        for clip_spot in OWGSets.get_mirror_clip_spots_lw():
            set_rule(world.get_entrance(clip_spot, player), lambda state: state.has_Mirror(player))
    # Locations that you can superbunny mirror into, but need a sword to clear.
    for superbunny_mirror_weapon_region in OWGSets.get_sword_required_superbunny_mirror_regions():
        region = world.get_region(superbunny_mirror_weapon_region, player)
        if check_is_dark_world(region):
            for spot in region.locations:
                add_rule(world.get_location(spot, player), lambda state: state.can_superbunny_mirror_with_sword(player), 'or')
    # Regions that require the boots and some other stuff.
    if world.mode[player] != 'inverted':
        set_rule(world.get_entrance('Dark Desert Teleporter', player), lambda state: state.has('Ocarina', player) or (state.can_boots_clip_dw(player) and state.can_lift_heavy_rocks(player)))
        set_rule(world.get_entrance('Turtle Rock Teleporter', player), lambda state: (state.can_boots_clip_dw(player) or state.can_lift_heavy_rocks(player)) and state.has('Hammer', player))
        add_rule(world.get_entrance('Catfish Exit Rock', player), lambda state: state.can_boots_clip_dw(player), 'or')
        add_rule(world.get_entrance('East Dark World Broken Bridge Pass', player), lambda state: state.can_boots_clip_dw(player), 'or')
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_reach('Dark Death Mountain (West Bottom)', 'Region', player) and state.has_Mirror(player))
    else:
        add_rule(world.get_entrance('South Dark World Teleporter', player), lambda state: state.has_Boots(player) and state.can_lift_rocks(player), 'or')
    # Zora's Ledge via waterwalk setup.
    add_rule(world.get_location('Zora\'s Ledge', player), lambda state: state.has_Boots(player), 'or')
def open_rules(world, player):
    """Apply open-mode rules: both sewers chests need the Escape small key.

    Softlock protection, as you can reach the sewers small key door with a
    guard drop key.
    """
    for chest_name in ('Hyrule Castle - Boomerang Chest', 'Hyrule Castle - Zelda\'s Chest'):
        set_rule(world.get_location(chest_name, player), lambda state: state.has_key('Small Key (Escape)', player))
def swordless_rules(world, player):
    """Adjust rules for swordless mode.

    Sword-only doors open freely, Fire Rod or Bombos substitutes for melting
    the Freezor barrier, the tablets need Book + Hammer, Ganon is fought with
    Hammer plus Silver Arrows, and the medallion entrances no longer need a
    sword to activate the medallion.
    """
    set_rule(world.get_entrance('Tower Altar NW', player), lambda state: True)
    set_rule(world.get_entrance('Skull Vines NW', player), lambda state: True)
    set_rule(world.get_entrance('Ice Lobby WS', player), lambda state: state.has('Fire Rod', player) or state.has('Bombos', player))
    set_rule(world.get_location('Ice Palace - Freezor Chest', player), lambda state: state.has('Fire Rod', player) or state.has('Bombos', player))
    set_rule(world.get_location('Ether Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has('Hammer', player))
    set_rule(world.get_location('Ganon', player), lambda state: state.has('Hammer', player) and state.has_fire_source(player) and state.has('Silver Arrows', player) and state.can_shoot_arrows(player) and state.has_crystals(world.crystals_needed_for_ganon[player], player))
    set_rule(world.get_entrance('Ganon Drop', player), lambda state: state.has('Hammer', player)) # need to damage ganon to get tiles to drop
    if world.mode[player] != 'inverted':
        set_rule(world.get_entrance('Agahnims Tower', player), lambda state: state.has('Cape', player) or state.has('Hammer', player) or state.has('Beat Agahnim 1', player)) # barrier gets removed after killing agahnim, relevant for entrance shuffle
        set_rule(world.get_entrance('Turtle Rock', player), lambda state: state.has_Pearl(player) and state.has_turtle_rock_medallion(player) and state.can_reach('Turtle Rock (Top)', 'Region', player)) # sword not required to use medallion for opening in swordless (!)
        set_rule(world.get_entrance('Misery Mire', player), lambda state: state.has_Pearl(player) and state.has_misery_mire_medallion(player)) # sword not required to use medallion for opening in swordless (!)
        set_rule(world.get_location('Bombos Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has('Hammer', player) and state.has_Mirror(player))
    else:
        # only need ddm access for aga tower in inverted
        set_rule(world.get_entrance('Turtle Rock', player), lambda state: state.has_turtle_rock_medallion(player) and state.can_reach('Turtle Rock (Top)', 'Region', player)) # sword not required to use medallion for opening in swordless (!)
        set_rule(world.get_entrance('Misery Mire', player), lambda state: state.has_misery_mire_medallion(player)) # sword not required to use medallion for opening in swordless (!)
        set_rule(world.get_location('Bombos Tablet', player), lambda state: state.has('Book of Mudora', player) and state.has('Hammer', player))
# Escape rooms whose exit doors require clearing the enemies inside during the
# standard-mode escape. Maps region name -> the door names out of that room to
# gate on state.can_kill_most_things (consumed by standard_rules).
std_kill_rooms = {
    'Hyrule Dungeon Armory Main': ['Hyrule Dungeon Armory S'],
    'Hyrule Dungeon Armory Boomerang': ['Hyrule Dungeon Armory Boomerang WS'],
    'Eastern Stalfos Spawn': ['Eastern Stalfos Spawn ES', 'Eastern Stalfos Spawn NW'],
    'Desert Compass Room': ['Desert Compass NW'],
    'Desert Four Statues': ['Desert Four Statues NW', 'Desert Four Statues ES'],
    'Hera Beetles': ['Hera Beetles WS'],
    'Tower Gold Knights': ['Tower Gold Knights SW', 'Tower Gold Knights EN'],
    'Tower Dark Archers': ['Tower Dark Archers WN'],
    'Tower Red Spears': ['Tower Red Spears WN'],
    'Tower Red Guards': ['Tower Red Guards EN', 'Tower Red Guards SW'],
    'Tower Circle of Pots': ['Tower Circle of Pots NW'],
    'PoD Turtle Party': ['PoD Turtle Party ES', 'PoD Turtle Party NW'], # todo: hammer req. in main rules
    'Thieves Basement Block': ['Thieves Basement Block WN'],
    'Ice Stalfos Hint': ['Ice Stalfos Hint SE'],
    'Ice Pengator Trap': ['Ice Pengator Trap NE'],
    'Mire 2': ['Mire 2 NE'],
    'Mire Cross': ['Mire Cross ES'],
    'TR Twin Pokeys': ['TR Twin Pokeys EN', 'TR Twin Pokeys SW'],
    'GT Petting Zoo': ['GT Petting Zoo SE'],
    'GT DMs Room': ['GT DMs Room SW'],
    'GT Gauntlet 1': ['GT Gauntlet 1 WN'],
    'GT Gauntlet 2': ['GT Gauntlet 2 EN', 'GT Gauntlet 2 SW'],
    'GT Gauntlet 3': ['GT Gauntlet 3 NW', 'GT Gauntlet 3 SW'],
    'GT Gauntlet 4': ['GT Gauntlet 4 NW', 'GT Gauntlet 4 SW'],
    'GT Gauntlet 5': ['GT Gauntlet 5 NW', 'GT Gauntlet 5 WS'],
    'GT Wizzrobes 1': ['GT Wizzrobes 1 SW'],
    'GT Wizzrobes 2': ['GT Wizzrobes 2 SE', 'GT Wizzrobes 2 NE']
} # all trap rooms?
def standard_rules(world, player):
    """Apply standard-mode escape rules.

    Gates the castle exits and the early-overworld entrances behind having
    delivered Zelda, ensures the item placed on Link's Uncle still allows
    the escape to be completed, and requires the ability to kill enemies in
    the escape locations and kill rooms.
    """
    # these are because of rails
    if world.shuffle[player] != 'vanilla':
        set_rule(world.get_entrance('Hyrule Castle Exit (East)', player), lambda state: state.has('Zelda Delivered', player))
        set_rule(world.get_entrance('Hyrule Castle Exit (West)', player), lambda state: state.has('Zelda Delivered', player))
    # too restrictive for crossed?
    def uncle_item_rule(item):
        # Simulate collecting the candidate item and sweeping events; the item
        # is only allowed on Uncle if Zelda can then be delivered.
        copy_state = CollectionState(world)
        copy_state.collect(item)
        copy_state.sweep_for_events()
        return copy_state.has('Zelda Delivered', player)
    add_item_rule(world.get_location('Link\'s Uncle', player), uncle_item_rule)
    # ensures the required weapon for escape lands on uncle (unless player has it pre-equipped)
    for location in ['Link\'s House', 'Sanctuary', 'Sewers - Secret Room - Left', 'Sewers - Secret Room - Middle',
                     'Sewers - Secret Room - Right']:
        add_rule(world.get_location(location, player), lambda state: state.can_kill_most_things(player))
    add_rule(world.get_location('Secret Passage', player), lambda state: state.can_kill_most_things(player))
    escape_builder = world.dungeon_layouts[player]['Hyrule Castle']
    for region in escape_builder.master_sector.regions:
        for loc in region.locations:
            add_rule(loc, lambda state: state.can_kill_most_things(player))
        if region.name in std_kill_rooms:
            # Doors out of kill rooms also require the ability to clear them.
            for ent in std_kill_rooms[region.name]:
                add_rule(world.get_entrance(ent, player), lambda state: state.can_kill_most_things(player))
    set_rule(world.get_location('Zelda Pickup', player), lambda state: state.has('Big Key (Escape)', player))
    set_rule(world.get_entrance('Hyrule Castle Throne Room N', player), lambda state: state.has('Zelda Herself', player))
    def check_rule_list(state, r_list):
        # All collected path rules must hold. Replaces a recursive
        # list-slicing implementation (O(n^2) and recursion-depth-bound)
        # with the equivalent O(n) all().
        return all(rule(state) for rule in r_list)
    rule_list, debug_path = find_rules_for_zelda_delivery(world, player)
    set_rule(world.get_location('Zelda Drop Off', player), lambda state: state.has('Zelda Herself', player) and check_rule_list(state, rule_list))
    for location in ['Mushroom', 'Bottle Merchant', 'Flute Spot', 'Sunken Treasure', 'Purple Chest']:
        add_rule(world.get_location(location, player), lambda state: state.has('Zelda Delivered', player))
    # Bonk Fairy (Light) is a notable omission in ER shuffles/Retro
    for entrance in ['Blinds Hideout', 'Zoras River', 'Kings Grave Outer Rocks', 'Dam', 'Tavern North', 'Chicken House',
                     'Aginahs Cave', 'Sahasrahlas Hut', 'Kakariko Well Drop', 'Kakariko Well Cave', 'Blacksmiths Hut',
                     'Bat Cave Drop Ledge', 'Bat Cave Cave', 'Sick Kids House', 'Hobo Bridge',
                     'Lost Woods Hideout Drop', 'Lost Woods Hideout Stump', 'Lumberjack Tree Tree',
                     'Lumberjack Tree Cave', 'Mini Moldorm Cave', 'Ice Rod Cave', 'Lake Hylia Central Island Pier',
                     'Bonk Rock Cave', 'Library', 'Potion Shop', 'Two Brothers House (East)', 'Desert Palace Stairs',
                     'Eastern Palace', 'Master Sword Meadow', 'Sanctuary', 'Sanctuary Grave',
                     'Death Mountain Entrance Rock', 'Flute Spot 1', 'Dark Desert Teleporter', 'East Hyrule Teleporter',
                     'South Hyrule Teleporter', 'Kakariko Teleporter', 'Elder House (East)', 'Elder House (West)',
                     'North Fairy Cave', 'North Fairy Cave Drop', 'Lost Woods Gamble', 'Snitch Lady (East)',
                     'Snitch Lady (West)', 'Tavern (Front)', 'Bush Covered House', 'Light World Bomb Hut',
                     'Kakariko Shop', 'Long Fairy Cave', 'Good Bee Cave', '20 Rupee Cave', 'Cave Shop (Lake Hylia)',
                     'Waterfall of Wishing', 'Hyrule Castle Main Gate', '50 Rupee Cave',
                     'Fortune Teller (Light)', 'Lake Hylia Fairy', 'Light Hype Fairy', 'Desert Fairy',
                     'Lumberjack House', 'Lake Hylia Fortune Teller', 'Kakariko Gamble Game', 'Top of Pyramid']:
        add_rule(world.get_entrance(entrance, player), lambda state: state.has('Zelda Delivered', player))
def find_rules_for_zelda_delivery(world, player):
    """BFS from the cellblock to Sanctuary through dungeon regions.

    Returns a tuple (rules, path): the non-trivial access rules encountered
    along the way (those failing on an empty state) and the corresponding
    exit names, for use in backtracking logic.
    """
    cellblock = world.get_region('Hyrule Dungeon Cellblock', player)
    frontier = deque([(cellblock, [], [])])
    seen = {cellblock}
    empty_state = CollectionState(world)
    while frontier:
        current, rules_so_far, names_so_far = frontier.popleft()
        for exit_door in current.exits:
            target = exit_door.connected_region
            # Only walk dungeon regions, and each at most once.
            if not target or target.type != RegionType.Dungeon or target in seen:
                continue
            rules_here = list(rules_so_far)
            names_here = list(names_so_far)
            # Record only rules that are not trivially satisfied.
            if not exit_door.access_rule(empty_state):
                rules_here.append(exit_door.access_rule)
                names_here.append(exit_door.name)
            if target.name == 'Sanctuary':
                return rules_here, names_here
            seen.add(target)
            frontier.append((target, rules_here, names_here))
    raise Exception('No path to Sanctuary found')
def set_big_bomb_rules(world, player):
    """Set the access rule for 'Pyramid Fairy' based on the Big Bomb route.

    The Big Bomb must be carried on foot from the (possibly shuffled)
    Big Bomb Shop entrance to the pyramid crack; the viable routes depend on
    which group of entrances the shop landed on, handled branch by branch
    below (see the abbreviation key before the first branch).
    """
    # this is a mess
    bombshop_entrance = world.get_region('Big Bomb Shop', player).entrances[0]
    Normal_LW_entrances = ['Blinds Hideout',
                           'Bonk Fairy (Light)',
                           'Lake Hylia Fairy',
                           'Light Hype Fairy',
                           'Desert Fairy',
                           'Chicken House',
                           'Aginahs Cave',
                           'Sahasrahlas Hut',
                           'Cave Shop (Lake Hylia)',
                           'Blacksmiths Hut',
                           'Sick Kids House',
                           'Lost Woods Gamble',
                           'Fortune Teller (Light)',
                           'Snitch Lady (East)',
                           'Snitch Lady (West)',
                           'Bush Covered House',
                           'Tavern (Front)',
                           'Light World Bomb Hut',
                           'Kakariko Shop',
                           'Mini Moldorm Cave',
                           'Long Fairy Cave',
                           'Good Bee Cave',
                           '20 Rupee Cave',
                           '50 Rupee Cave',
                           'Ice Rod Cave',
                           'Bonk Rock Cave',
                           'Library',
                           'Potion Shop',
                           'Dam',
                           'Lumberjack House',
                           'Lake Hylia Fortune Teller',
                           'Eastern Palace',
                           'Kakariko Gamble Game',
                           'Kakariko Well Cave',
                           'Bat Cave Cave',
                           'Elder House (East)',
                           'Elder House (West)',
                           'North Fairy Cave',
                           'Lost Woods Hideout Stump',
                           'Lumberjack Tree Cave',
                           'Two Brothers House (East)',
                           'Sanctuary',
                           'Hyrule Castle Entrance (South)',
                           'Hyrule Castle Secret Entrance Stairs']
    LW_walkable_entrances = ['Dark Lake Hylia Ledge Fairy',
                             'Dark Lake Hylia Ledge Spike Cave',
                             'Dark Lake Hylia Ledge Hint',
                             'Mire Shed',
                             'Dark Desert Hint',
                             'Dark Desert Fairy',
                             'Misery Mire']
    Northern_DW_entrances = ['Brewery',
                             'C-Shaped House',
                             'Chest Game',
                             'Dark World Hammer Peg Cave',
                             'Red Shield Shop',
                             'Dark Sanctuary Hint',
                             'Fortune Teller (Dark)',
                             'Dark World Shop',
                             'Dark World Lumberjack Shop',
                             'Thieves Town',
                             'Skull Woods First Section Door',
                             'Skull Woods Second Section Door (East)']
    Southern_DW_entrances = ['Hype Cave',
                             'Bonk Fairy (Dark)',
                             'Archery Game',
                             'Big Bomb Shop',
                             'Dark Lake Hylia Shop',
                             'Swamp Palace']
    Isolated_DW_entrances = ['Spike Cave',
                             'Cave Shop (Dark Death Mountain)',
                             'Dark Death Mountain Fairy',
                             'Mimic Cave',
                             'Skull Woods Second Section Door (West)',
                             'Skull Woods Final Section',
                             'Ice Palace',
                             'Turtle Rock',
                             'Dark Death Mountain Ledge (West)',
                             'Dark Death Mountain Ledge (East)',
                             'Bumper Cave (Top)',
                             'Superbunny Cave (Top)',
                             'Superbunny Cave (Bottom)',
                             'Hookshot Cave',
                             'Ganons Tower',
                             'Turtle Rock Isolated Ledge Entrance',
                             'Hookshot Cave Back Entrance']
    Isolated_LW_entrances = ['Capacity Upgrade',
                             'Tower of Hera',
                             'Death Mountain Return Cave (West)',
                             'Paradox Cave (Top)',
                             'Fairy Ascension Cave (Top)',
                             'Spiral Cave',
                             'Desert Palace Entrance (East)']
    West_LW_DM_entrances = ['Old Man Cave (East)',
                            'Old Man House (Bottom)',
                            'Old Man House (Top)',
                            'Death Mountain Return Cave (East)',
                            'Spectacle Rock Cave Peak',
                            'Spectacle Rock Cave',
                            'Spectacle Rock Cave (Bottom)']
    East_LW_DM_entrances = ['Paradox Cave (Bottom)',
                            'Paradox Cave (Middle)',
                            'Hookshot Fairy',
                            'Spiral Cave (Bottom)']
    Mirror_from_SDW_entrances = ['Two Brothers House (West)',
                                 'Cave 45']
    Castle_ledge_entrances = ['Hyrule Castle Entrance (West)',
                              'Hyrule Castle Entrance (East)',
                              'Agahnims Tower']
    Desert_mirrorable_ledge_entrances = ['Desert Palace Entrance (West)',
                                         'Desert Palace Entrance (North)',
                                         'Desert Palace Entrance (South)',
                                         'Checkerboard Cave']
    # Base requirement: reach both the shop and East Dark World with crystals 5 and 6.
    set_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_reach('East Dark World', 'Region', player) and state.can_reach('Big Bomb Shop', 'Region', player) and state.has('Crystal 5', player) and state.has('Crystal 6', player))
    # crossing peg bridge starting from the southern dark world
    def cross_peg_bridge(state):
        return state.has('Hammer', player) and state.has_Pearl(player)
    # returning via the eastern and southern teleporters needs the same items, so we use the southern teleporter for our routing.
    # crossing peg bridge already requires hammer so we just add the gloves to the requirement
    def southern_teleporter(state):
        return state.can_lift_rocks(player) and cross_peg_bridge(state)
    # the basic routes assume you can reach eastern light world with the bomb.
    # you can then use the southern teleporter, or (if you have beaten Aga1) the hyrule castle gate warp
    def basic_routes(state):
        return southern_teleporter(state) or state.can_reach('Top of Pyramid', 'Entrance', player)
    # Key for below abbreviations:
    # P = pearl
    # A = Aga1
    # H = hammer
    # M = Mirror
    # G = Glove
    if bombshop_entrance.name in Normal_LW_entrances:
        #1. basic routes
        #2. Can reach Eastern dark world some other way, mirror, get bomb, return to mirror spot, walk to pyramid: Needs mirror
        # -> M or BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: basic_routes(state) or state.has_Mirror(player))
    elif bombshop_entrance.name in LW_walkable_entrances:
        #1. Mirror then basic routes
        # -> M and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player) and basic_routes(state))
    elif bombshop_entrance.name in Northern_DW_entrances:
        #1. Mirror and basic routes
        #2. Go to south DW and then cross peg bridge: Need Mitts and hammer and moon pearl
        # -> (Mitts and CPB) or (M and BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.can_lift_heavy_rocks(player) and cross_peg_bridge(state)) or (state.has_Mirror(player) and basic_routes(state)))
    elif bombshop_entrance.name == 'Bumper Cave (Bottom)':
        #1. Mirror and Lift rock and basic_routes
        #2. Mirror and Flute and basic routes (can make difference if accessed via insanity or w/ mirror from connector, and then via hyrule castle gate, because no gloves are needed in that case)
        #3. Go to south DW and then cross peg bridge: Need Mitts and hammer and moon pearl
        # -> (Mitts and CPB) or (((G or Flute) and M) and BR))
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.can_lift_heavy_rocks(player) and cross_peg_bridge(state)) or (((state.can_lift_rocks(player) or state.has('Ocarina', player)) and state.has_Mirror(player)) and basic_routes(state)))
    elif bombshop_entrance.name in Southern_DW_entrances:
        #1. Mirror and enter via gate: Need mirror and Aga1
        #2. cross peg bridge: Need hammer and moon pearl
        # -> CPB or (M and A)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: cross_peg_bridge(state) or (state.has_Mirror(player) and state.can_reach('Top of Pyramid', 'Entrance', player)))
    elif bombshop_entrance.name in Isolated_DW_entrances:
        # 1. mirror then flute then basic routes
        # -> M and Flute and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player) and state.has('Ocarina', player) and basic_routes(state))
    elif bombshop_entrance.name in Isolated_LW_entrances:
        # 1. flute then basic routes
        # Preexisting mirror spot is not permitted, because mirror might have been needed to reach these isolated locations.
        # -> Flute and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Ocarina', player) and basic_routes(state))
    elif bombshop_entrance.name in West_LW_DM_entrances:
        # 1. flute then basic routes or mirror
        # Preexisting mirror spot is permitted, because flute can be used to reach west DM directly.
        # -> Flute and (M or BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Ocarina', player) and (state.has_Mirror(player) or basic_routes(state)))
    elif bombshop_entrance.name in East_LW_DM_entrances:
        # 1. flute then basic routes or mirror and hookshot
        # Preexisting mirror spot is permitted, because flute can be used to reach west DM directly and then east DM via Hookshot
        # -> Flute and ((M and Hookshot) or BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Ocarina', player) and ((state.has_Mirror(player) and state.has('Hookshot', player)) or basic_routes(state)))
    elif bombshop_entrance.name == 'Fairy Ascension Cave (Bottom)':
        # Same as East_LW_DM_entrances except navigation without BR requires Mitts
        # -> Flute and ((M and Hookshot and Mitts) or BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Ocarina', player) and ((state.has_Mirror(player) and state.has('Hookshot', player) and state.can_lift_heavy_rocks(player)) or basic_routes(state)))
    elif bombshop_entrance.name in Castle_ledge_entrances:
        # 1. mirror on pyramid to castle ledge, grab bomb, return through mirror spot: Needs mirror
        # 2. flute then basic routes
        # -> M or (Flute and BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player) or (state.has('Ocarina', player) and basic_routes(state)))
    elif bombshop_entrance.name in Desert_mirrorable_ledge_entrances:
        # 1. Have mire access, Mirror to reach locations, return via mirror spot, move to center of desert, mirror again and then basic routes
        # 2. flute then basic routes
        # -> ((Mire access and M) or Flute) and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: ((state.can_reach('Dark Desert', 'Region', player) and state.has_Mirror(player)) or state.has('Ocarina', player)) and basic_routes(state))
    elif bombshop_entrance.name == 'Old Man Cave (West)':
        # 1. Lift rock then basic_routes
        # 2. flute then basic_routes
        # -> (Flute or G) and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has('Ocarina', player) or state.can_lift_rocks(player)) and basic_routes(state))
    elif bombshop_entrance.name == 'Graveyard Cave':
        # 1. flute then basic routes
        # 2. (has west dark world access) use existing mirror spot (required Pearl), mirror again off ledge
        # -> (Flute or (M and P and West Dark World access)) and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has('Ocarina', player) or (state.can_reach('West Dark World', 'Region', player) and state.has_Pearl(player) and state.has_Mirror(player))) and basic_routes(state))
    elif bombshop_entrance.name in Mirror_from_SDW_entrances:
        # 1. flute then basic routes
        # 2. (has South dark world access) use existing mirror spot, mirror again off ledge
        # -> (Flute or (M and South Dark World access)) and BR
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has('Ocarina', player) or (state.can_reach('South Dark World', 'Region', player) and state.has_Mirror(player))) and basic_routes(state))
    elif bombshop_entrance.name == 'Dark World Potion Shop':
        # 1. walk down by lifting rock: needs gloves and pearl
        # 2. walk down by hammering peg: needs hammer and pearl
        # 3. mirror and basic routes
        # -> (P and (H or Gloves)) or (M and BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has_Pearl(player) and (state.has('Hammer', player) or state.can_lift_rocks(player))) or (state.has_Mirror(player) and basic_routes(state)))
    elif bombshop_entrance.name == 'Kings Grave':
        # same as the Normal_LW_entrances case except that the pre-existing mirror is only possible if you have mitts
        # (because otherwise mirror was used to reach the grave, so would cancel a pre-existing mirror spot)
        # to account for insanity, must consider a way to escape without a cave for basic_routes
        # -> (M and Mitts) or ((Mitts or Flute or (M and P and West Dark World access)) and BR)
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.can_lift_heavy_rocks(player) and state.has_Mirror(player)) or ((state.can_lift_heavy_rocks(player) or state.has('Ocarina', player) or (state.can_reach('West Dark World', 'Region', player) and state.has_Pearl(player) and state.has_Mirror(player))) and basic_routes(state)))
    elif bombshop_entrance.name == 'Waterfall of Wishing':
        # same as the Normal_LW_entrances case except in insanity it's possible you could be here without Flippers which
        # means you need an escape route of either Flippers or Flute
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has('Flippers', player) or state.has('Ocarina', player)) and (basic_routes(state) or state.has_Mirror(player)))
def set_inverted_big_bomb_rules(world, player):
    """Attach 'Pyramid Fairy' access rules for inverted entrance shuffle.

    The Big Bomb must be carried from the Inverted Big Bomb Shop back to the
    pyramid crack, so the viable return routes depend on which entrance the
    shop was shuffled onto; one rule is added per entrance group below.
    """
    bombshop_entrance = world.get_region('Inverted Big Bomb Shop', player).entrances[0]
    Normal_LW_entrances = ['Blinds Hideout',
                           'Bonk Fairy (Light)',
                           'Lake Hylia Fairy',
                           'Light Hype Fairy',
                           'Desert Fairy',
                           'Chicken House',
                           'Aginahs Cave',
                           'Sahasrahlas Hut',
                           'Cave Shop (Lake Hylia)',
                           'Blacksmiths Hut',
                           'Sick Kids House',
                           'Lost Woods Gamble',
                           'Fortune Teller (Light)',
                           'Snitch Lady (East)',
                           'Snitch Lady (West)',
                           'Tavern (Front)',
                           'Kakariko Shop',
                           'Mini Moldorm Cave',
                           'Long Fairy Cave',
                           'Good Bee Cave',
                           '20 Rupee Cave',
                           '50 Rupee Cave',
                           'Ice Rod Cave',
                           'Bonk Rock Cave',
                           'Library',
                           'Potion Shop',
                           'Dam',
                           'Lumberjack House',
                           'Lake Hylia Fortune Teller',
                           'Eastern Palace',
                           'Kakariko Gamble Game',
                           'Kakariko Well Cave',
                           'Bat Cave Cave',
                           'Elder House (East)',
                           'Elder House (West)',
                           'North Fairy Cave',
                           'Lost Woods Hideout Stump',
                           'Lumberjack Tree Cave',
                           'Two Brothers House (East)',
                           'Sanctuary',
                           'Hyrule Castle Entrance (South)',
                           'Hyrule Castle Secret Entrance Stairs',
                           'Hyrule Castle Entrance (West)',
                           'Hyrule Castle Entrance (East)',
                           'Inverted Ganons Tower',
                           'Cave 45',
                           'Checkerboard Cave',
                           'Inverted Big Bomb Shop']
    LW_DM_entrances = ['Old Man Cave (East)',
                       'Old Man House (Bottom)',
                       'Old Man House (Top)',
                       'Death Mountain Return Cave (East)',
                       'Spectacle Rock Cave Peak',
                       'Tower of Hera',
                       'Death Mountain Return Cave (West)',
                       'Paradox Cave (Top)',
                       'Fairy Ascension Cave (Top)',
                       'Spiral Cave',
                       'Paradox Cave (Bottom)',
                       'Paradox Cave (Middle)',
                       'Hookshot Fairy',
                       'Spiral Cave (Bottom)',
                       'Mimic Cave',
                       'Fairy Ascension Cave (Bottom)',
                       'Desert Palace Entrance (West)',
                       'Desert Palace Entrance (North)',
                       'Desert Palace Entrance (South)']
    Northern_DW_entrances = ['Brewery',
                             'C-Shaped House',
                             'Chest Game',
                             'Dark World Hammer Peg Cave',
                             'Red Shield Shop',
                             'Inverted Dark Sanctuary',
                             'Fortune Teller (Dark)',
                             'Dark World Shop',
                             'Dark World Lumberjack Shop',
                             'Thieves Town',
                             'Skull Woods First Section Door',
                             'Skull Woods Second Section Door (East)']
    Southern_DW_entrances = ['Hype Cave',
                             'Bonk Fairy (Dark)',
                             'Archery Game',
                             'Inverted Links House',
                             'Dark Lake Hylia Shop',
                             'Swamp Palace']
    Isolated_DW_entrances = ['Spike Cave',
                             'Cave Shop (Dark Death Mountain)',
                             'Dark Death Mountain Fairy',
                             'Skull Woods Second Section Door (West)',
                             'Skull Woods Final Section',
                             'Turtle Rock',
                             'Dark Death Mountain Ledge (West)',
                             'Dark Death Mountain Ledge (East)',
                             'Bumper Cave (Top)',
                             'Superbunny Cave (Top)',
                             'Superbunny Cave (Bottom)',
                             'Hookshot Cave',
                             'Turtle Rock Isolated Ledge Entrance',
                             'Hookshot Cave Back Entrance',
                             'Inverted Agahnims Tower',
                             'Dark Lake Hylia Ledge Fairy',
                             'Dark Lake Hylia Ledge Spike Cave',
                             'Dark Lake Hylia Ledge Hint',
                             'Mire Shed',
                             'Dark Desert Hint',
                             'Dark Desert Fairy',
                             'Misery Mire']
    LW_bush_entrances = ['Bush Covered House',
                         'Light World Bomb Hut',
                         'Graveyard Cave']

    set_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_reach('East Dark World', 'Region', player) and state.can_reach('Inverted Big Bomb Shop', 'Region', player) and state.has('Crystal 5', player) and state.has('Crystal 6', player))

    # crossing peg bridge starting from the southern dark world
    def cross_peg_bridge(state):
        return state.has('Hammer', player)

    # Key for below abbreviations:
    # P = pearl
    # A = Aga1
    # H = hammer
    # M = Mirror
    # G = Glove
    if bombshop_entrance.name in Normal_LW_entrances:
        # Just walk to the castle and mirror.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player))
    elif bombshop_entrance.name in LW_DM_entrances:
        # For these entrances, you cannot walk to the castle/pyramid and thus must use Mirror and then Flute.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_flute(player) and state.has_Mirror(player))
    elif bombshop_entrance.name in Northern_DW_entrances:
        # You can just fly with the Flute, you can take a long walk with Mitts and Hammer,
        # or you can leave a Mirror portal nearby and then walk to the castle to Mirror again.
        # BUG FIX: `state.can_flute` was referenced without being called (a bound
        # method is always truthy, which made this clause trivially satisfied).
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_flute(player) or (state.can_lift_heavy_rocks(player) and cross_peg_bridge(state)) or (state.has_Mirror(player) and state.can_reach('Light World', 'Region', player)))
    elif bombshop_entrance.name in Southern_DW_entrances:
        # This is the same as north DW without the Mitts rock present.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: cross_peg_bridge(state) or state.can_flute(player) or (state.has_Mirror(player) and state.can_reach('Light World', 'Region', player)))
    elif bombshop_entrance.name in Isolated_DW_entrances:
        # There's just no way to escape these places with the bomb and no Flute.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_flute(player))
    elif bombshop_entrance.name in LW_bush_entrances:
        # These entrances are behind bushes in LW so you need either Pearl or the tools to solve NDW bomb shop locations.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player) and (state.can_flute(player) or state.has_Pearl(player) or (state.can_lift_heavy_rocks(player) and cross_peg_bridge(state))))
    elif bombshop_entrance.name == 'Bumper Cave (Bottom)':
        # This is mostly the same as NDW but the Mirror path requires being able to lift a rock.
        # BUG FIX: same missing `(player)` call on `state.can_flute` as above.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_flute(player) or (state.can_lift_heavy_rocks(player) and cross_peg_bridge(state)) or (state.has_Mirror(player) and state.can_lift_rocks(player) and state.can_reach('Light World', 'Region', player)))
    elif bombshop_entrance.name == 'Old Man Cave (West)':
        # The three paths back are Mirror and DW walk, Mirror and Flute, or LW walk and then Mirror.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has_Mirror(player) and ((state.can_lift_heavy_rocks(player) and cross_peg_bridge(state)) or (state.can_lift_rocks(player) and state.has_Pearl(player)) or state.can_flute(player)))
    elif bombshop_entrance.name == 'Dark World Potion Shop':
        # You either need to Flute to 5 or cross the rock/hammer choice pass to the south.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.can_flute(player) or state.has('Hammer', player) or state.can_lift_rocks(player))
    elif bombshop_entrance.name == 'Kings Grave':
        # Either lift the rock and walk to the castle to Mirror or Mirror immediately and Flute.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.can_flute(player) or state.can_lift_heavy_rocks(player)) and state.has_Mirror(player))
    elif bombshop_entrance.name == 'Two Brothers House (West)':
        # First you must Mirror. Then you can either Flute, cross the peg bridge, or use the Agah 1 portal to Mirror again.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.can_flute(player) or cross_peg_bridge(state) or state.has('Beat Agahnim 1', player)) and state.has_Mirror(player))
    elif bombshop_entrance.name == 'Waterfall of Wishing':
        # You absolutely must be able to swim to return it from here.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Flippers', player) and state.has_Pearl(player) and state.has_Mirror(player))
    elif bombshop_entrance.name == 'Ice Palace':
        # You can swim to the dock or use the Flute to get off the island.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: state.has('Flippers', player) or state.can_flute(player))
    elif bombshop_entrance.name == 'Capacity Upgrade':
        # You must Mirror but then can use either Ice Palace return path.
        add_rule(world.get_entrance('Pyramid Fairy', player), lambda state: (state.has('Flippers', player) or state.can_flute(player)) and state.has_Mirror(player))
def set_bunny_rules(world, player):
    """Add Moon Pearl (bunny) logic for a non-inverted world.

    Walks dark-world regions, exits, doors and locations and attaches rules
    requiring the Moon Pearl unless a bunny-usable alternative exists (or,
    under OWG logic, a superbunny-mirror / bunny-revival trick applies).
    """
    # regions for the exits of multi-entrace caves/drops that bunny cannot pass
    # Note spiral cave may be technically passible, but it would be too absurd to require since OHKO mode is a thing.
    bunny_impassable_caves = ['Bumper Cave', 'Two Brothers House', 'Hookshot Cave',
                              'Pyramid', 'Spiral Cave (Top)', 'Fairy Ascension Cave (Drop)']
    bunny_accessible_locations = ['Link\'s Uncle', 'Sahasrahla', 'Sick Kid', 'Lost Woods Hideout', 'Lumberjack Tree',
                                  'Checkerboard Cave', 'Potion Shop', 'Spectacle Rock Cave', 'Pyramid',
                                  'Hype Cave - Generous Guy', 'Peg Cave', 'Bumper Cave Ledge', 'Dark Blacksmith Ruins']

    def path_to_access_rule(path, entrance):
        # Entrance must be reachable and every access rule along the path must hold.
        return lambda state: state.can_reach(entrance) and all(rule_func(state) for rule_func in path)

    def options_to_access_rule(options):
        # Any one of the collected option rules is sufficient.
        return lambda state: any(rule_func(state) for rule_func in options)

    def get_rule_to_add(region, location=None, connecting_entrance=None):
        # In OWG, a location can potentially be superbunny-mirror accessible or
        # bunny revival accessible.
        if world.logic[player] == 'owglitches':
            if region.name in OWGSets.get_invalid_bunny_revival_dungeons():
                return lambda state: state.has_Mirror(player) or state.has_Pearl(player)
            if not any([
                None not in [location, connecting_entrance] and location.name in OWGSets.get_superbunny_accessible_locations() and connecting_entrance.name not in OWGSets.get_invalid_mirror_bunny_entrances_dw(),
                not region.is_light_world]):
                return lambda state: state.has_Pearl(player)
        else:
            if not region.is_light_world:
                return lambda state: state.has_Pearl(player)
        # in this case we are mixed region.
        # we collect possible options.

        # The base option is having the moon pearl
        possible_options = [lambda state: state.has_Pearl(player)]

        # We will search entrances recursively until we find
        # one that leads to an exclusively light world region
        # for each such entrance a new option is added that consist of:
        #  a) being able to reach it, and
        #  b) being able to access all entrances from there to `region`
        seen = {region}
        queue = deque([(region, [])])
        while queue:
            (current, path) = queue.popleft()
            for entrance in current.entrances:
                new_region = entrance.parent_region
                if new_region in seen:
                    continue
                new_path = path + [entrance.access_rule]
                seen.add(new_region)
                if not new_region.is_light_world:
                    # For OWG, establish superbunny and revival rules.
                    if world.logic[player] == 'owglitches' and entrance.name not in OWGSets.get_invalid_mirror_bunny_entrances_dw():
                        for sb_location in entrance.connected_region.locations:
                            if sb_location.name in OWGSets.get_superbunny_accessible_locations():
                                # BUG FIX: the old code used the rule factory itself in a
                                # boolean context (`path_to_access_rule(...) and ...`) —
                                # a function object is always truthy, so the path was
                                # never actually checked — and late-bound the loop
                                # variables. Build the rule eagerly and invoke it.
                                sb_rule = path_to_access_rule(new_path, entrance)
                                possible_options.append(
                                    lambda state, rule=sb_rule: rule(state) and state.has_Mirror(player))
                    else:
                        continue
                if new_region.is_dark_world:
                    queue.append((new_region, new_path))
                else:
                    # we have reached pure light world or a dungeon, so we have a new possible option
                    possible_options.append(path_to_access_rule(new_path, entrance))
        return options_to_access_rule(possible_options)

    # Add requirements for bunny-impassible caves if they occur in the dark world
    for region in [world.get_region(name, player) for name in bunny_impassable_caves]:
        if not region.is_dark_world:
            continue
        rule = get_rule_to_add(region)
        for ext in region.exits:
            add_rule(ext, rule)
    paradox_shop = world.get_region('Light World Death Mountain Shop', player)
    if paradox_shop.is_dark_world:
        add_rule(paradox_shop.entrances[0], get_rule_to_add(paradox_shop))
    for ent_name in bunny_impassible_doors:
        bunny_exit = world.get_entrance(ent_name, player)
        if bunny_exit.parent_region.is_dark_world:
            add_rule(bunny_exit, get_rule_to_add(bunny_exit.parent_region))
    # NOTE(review): `x not in bunny_impassible_doors` compares Door objects with a set
    # of door-name strings — confirm Door.__eq__/__hash__ make this meaningful.
    doors_to_check = [x for x in world.doors if x.player == player and x not in bunny_impassible_doors]
    doors_to_check = [x for x in doors_to_check if x.type in [DoorType.Normal, DoorType.Interior] and not x.blocked]
    for door in doors_to_check:
        room = world.get_room(door.roomIndex, player)
        if door.entrance.parent_region.is_dark_world and room.kind(door) in [DoorKind.Dashable, DoorKind.Bombable, DoorKind.Hidden]:
            add_rule(door.entrance, get_rule_to_add(door.entrance.parent_region))
    # Add requirements for all locations that are actually in the dark world, except those available to the bunny, including dungeon revival
    for entrance in world.get_entrances():
        if entrance.player == player and entrance.parent_region.is_dark_world:
            if world.logic[player] == 'owglitches':
                if entrance.connected_region.type == RegionType.Dungeon:
                    if entrance.connected_region.name in OWGSets.get_invalid_bunny_revival_dungeons():
                        add_rule(entrance, get_rule_to_add(entrance.connected_region, None, entrance))
                    continue
                if entrance.connected_region.name == 'Turtle Rock (Entrance)':
                    add_rule(world.get_entrance('Turtle Rock Entrance Gap', player), get_rule_to_add(entrance.connected_region, None, entrance))
                if entrance.name in OWGSets.get_invalid_mirror_bunny_entrances_dw():
                    continue
            for location in entrance.connected_region.locations:
                # NOTE(review): under owglitches this branch is unreachable (the
                # entrance loop already `continue`d on that condition) — kept as-is.
                if world.logic[player] == 'owglitches' and entrance.name in OWGSets.get_invalid_mirror_bunny_entrances_dw():
                    add_rule(location, get_rule_to_add(entrance.connected_region, location, entrance))
                    continue
                if location.name in bunny_accessible_locations:
                    continue
                add_rule(location, get_rule_to_add(entrance.connected_region, location))
def set_inverted_bunny_rules(world, player):
    """Add Moon Pearl (bunny) logic for an inverted world.

    Mirror image of ``set_bunny_rules``: in inverted mode the player is a
    bunny in the *light* world, so light-world regions, exits, doors and
    locations get rules requiring the Moon Pearl unless a bunny-usable
    alternative (or an OWG superbunny/revival trick) exists.
    """
    # regions for the exits of multi-entrace caves/drops that bunny cannot pass
    # Note spiral cave may be technically passible, but it would be too absurd to require since OHKO mode is a thing.
    bunny_impassable_caves = ['Bumper Cave', 'Two Brothers House', 'Hookshot Cave', 'Skull Woods First Section (Right)', 'Skull Woods First Section (Left)', 'Skull Woods First Section (Top)', 'Turtle Rock (Entrance)', 'Turtle Rock (Second Section)', 'Turtle Rock (Big Chest)', 'Skull Woods Second Section (Drop)',
                              'Turtle Rock (Eye Bridge)', 'Sewers', 'Pyramid', 'Spiral Cave (Top)', 'Desert Palace Main (Inner)', 'Fairy Ascension Cave (Drop)', 'The Sky']
    bunny_accessible_locations = ['Link\'s Uncle', 'Sahasrahla', 'Sick Kid', 'Lost Woods Hideout', 'Lumberjack Tree', 'Checkerboard Cave', 'Potion Shop', 'Spectacle Rock Cave', 'Pyramid', 'Hype Cave - Generous Guy', 'Peg Cave', 'Bumper Cave Ledge', 'Dark Blacksmith Ruins', 'Bombos Tablet Ledge', 'Ether Tablet', 'Purple Chest']

    def path_to_access_rule(path, entrance):
        # Entrance must be reachable and every access rule along the path must hold.
        return lambda state: state.can_reach(entrance) and all(rule_func(state) for rule_func in path)

    def options_to_access_rule(options):
        # Any one of the collected option rules is sufficient.
        return lambda state: any(rule_func(state) for rule_func in options)

    def get_rule_to_add(region, location=None, connecting_entrance=None):
        # In OWG, a location can potentially be superbunny-mirror accessible or
        # bunny revival accessible.
        if world.logic[player] == 'owglitches':
            if region.name in OWGSets.get_invalid_bunny_revival_dungeons():
                return lambda state: state.has_Mirror(player) or state.has_Pearl(player)
            if not any([
                None not in [location, connecting_entrance] and location.name in OWGSets.get_superbunny_accessible_locations() and connecting_entrance.name not in OWGSets.get_invalid_mirror_bunny_entrances_lw(),
                not region.is_dark_world]):
                return lambda state: state.has_Pearl(player)
        else:
            if not region.is_dark_world:
                return lambda state: state.has_Pearl(player)
        # in this case we are mixed region.
        # we collect possible options.

        # The base option is having the moon pearl
        possible_options = [lambda state: state.has_Pearl(player)]

        # We will search entrances recursively until we find
        # one that leads to an exclusively dark world region
        # for each such entrance a new option is added that consist of:
        #  a) being able to reach it, and
        #  b) being able to access all entrances from there to `region`
        seen = {region}
        queue = deque([(region, [])])
        while queue:
            (current, path) = queue.popleft()
            for entrance in current.entrances:
                new_region = entrance.parent_region
                if new_region in seen:
                    continue
                new_path = path + [entrance.access_rule]
                seen.add(new_region)
                if not new_region.is_dark_world:
                    # For OWG, establish superbunny and revival rules.
                    if world.logic[player] == 'owglitches' and entrance.name not in OWGSets.get_invalid_mirror_bunny_entrances_lw():
                        for sb_location in entrance.connected_region.locations:
                            if sb_location.name in OWGSets.get_superbunny_accessible_locations():
                                # BUG FIX: the old code used the rule factory itself in a
                                # boolean context (always truthy, so the path was never
                                # checked) and late-bound the loop variables. Build the
                                # rule eagerly and invoke it on the state.
                                sb_rule = path_to_access_rule(new_path, entrance)
                                possible_options.append(
                                    lambda state, rule=sb_rule: rule(state) and state.has_Mirror(player))
                    else:
                        continue
                if new_region.is_light_world:
                    queue.append((new_region, new_path))
                else:
                    # we have reached pure dark world, so we have a new possible option
                    possible_options.append(path_to_access_rule(new_path, entrance))
        return options_to_access_rule(possible_options)

    # Add requirements for bunny-impassible caves if they occur in the light world
    for region in [world.get_region(name, player) for name in bunny_impassable_caves]:
        if not region.is_light_world:
            continue
        rule = get_rule_to_add(region)
        for ext in region.exits:
            add_rule(ext, rule)
    paradox_shop = world.get_region('Light World Death Mountain Shop', player)
    if paradox_shop.is_light_world:
        add_rule(paradox_shop.entrances[0], get_rule_to_add(paradox_shop))
    for ent_name in bunny_impassible_doors:
        bunny_exit = world.get_entrance(ent_name, player)
        if bunny_exit.parent_region.is_light_world:
            add_rule(bunny_exit, get_rule_to_add(bunny_exit.parent_region))
    # NOTE(review): `x not in bunny_impassible_doors` compares Door objects with a set
    # of door-name strings — confirm Door.__eq__/__hash__ make this meaningful.
    doors_to_check = [x for x in world.doors if x.player == player and x not in bunny_impassible_doors]
    doors_to_check = [x for x in doors_to_check if x.type in [DoorType.Normal, DoorType.Interior] and not x.blocked]
    for door in doors_to_check:
        room = world.get_room(door.roomIndex, player)
        if door.entrance.parent_region.is_light_world and room.kind(door) in [DoorKind.Dashable, DoorKind.Bombable, DoorKind.Hidden]:
            add_rule(door.entrance, get_rule_to_add(door.entrance.parent_region))
    # Add requirements for all locations that are actually in the light world, except those available to the bunny, including dungeon revival
    for entrance in world.get_entrances():
        if entrance.player == player and entrance.parent_region.is_light_world:
            if world.logic[player] == 'owglitches':
                if entrance.connected_region.type == RegionType.Dungeon:
                    if entrance.connected_region.name in OWGSets.get_invalid_bunny_revival_dungeons():
                        add_rule(entrance, get_rule_to_add(entrance.connected_region, None, entrance))
                    continue
                if entrance.connected_region.name == 'Turtle Rock (Entrance)':
                    add_rule(world.get_entrance('Turtle Rock Entrance Gap', player), get_rule_to_add(entrance.connected_region, None, entrance))
                if entrance.name in OWGSets.get_invalid_mirror_bunny_entrances_lw():
                    continue
            for location in entrance.connected_region.locations:
                # NOTE(review): under owglitches this branch is unreachable (the
                # entrance loop already `continue`d on that condition) — kept as-is.
                if world.logic[player] == 'owglitches' and entrance.name in OWGSets.get_invalid_mirror_bunny_entrances_lw():
                    add_rule(location, get_rule_to_add(entrance.connected_region, location, entrance))
                    continue
                if location.name in bunny_accessible_locations:
                    continue
                add_rule(location, get_rule_to_add(entrance.connected_region, location))
# Door-randomizer entrance names that a bunny (no Moon Pearl) cannot use:
# bomb/dash/hidden passages, push blocks, hook paths, blue barriers, gaps,
# water traversal, etc. Consumed by set_bunny_rules / set_inverted_bunny_rules
# above to attach Pearl (or alternative) requirements to these doors.
bunny_impassible_doors = {
    'Hyrule Dungeon Armory S', 'Hyrule Dungeon Armory ES', 'Sewers Secret Room Push Block', 'Sewers Pull Switch S',
    'Eastern Lobby N', 'Eastern Courtyard Ledge W', 'Eastern Courtyard Ledge E', 'Eastern Pot Switch SE',
    'Eastern Map Balcony Hook Path', 'Eastern Stalfos Spawn ES', 'Eastern Stalfos Spawn NW',
    'Eastern Hint Tile Push Block', 'Eastern Darkness S', 'Eastern Darkness NE', 'Eastern Darkness Up Stairs',
    'Eastern Attic Start WS', 'Eastern Single Eyegore NE', 'Eastern Duo Eyegores NE', 'Desert Main Lobby Left Path',
    'Desert Main Lobby Right Path', 'Desert Left Alcove Path', 'Desert Right Alcove Path', 'Desert Compass NW',
    'Desert West Lobby NW', 'Desert Back Lobby NW', 'Desert Four Statues NW', 'Desert Four Statues ES',
    'Desert Beamos Hall WS', 'Desert Beamos Hall NE', 'Desert Wall Slide NW', 'Hera Lobby Down Stairs',
    'Hera Lobby Key Stairs', 'Hera Lobby Up Stairs', 'Hera Tile Room EN', 'Hera Tridorm SE', 'Hera Beetles WS',
    'Hera 4F Down Stairs', 'Tower Gold Knights SW', 'Tower Dark Maze EN', 'Tower Dark Pits ES', 'Tower Dark Archers WN',
    'Tower Red Spears WN', 'Tower Red Guards EN', 'Tower Red Guards SW', 'Tower Circle of Pots NW', 'Tower Altar NW',
    'PoD Left Cage SW', 'PoD Middle Cage SE', 'PoD Pit Room Bomb Hole', 'PoD Pit Room Block Path N',
    'PoD Pit Room Block Path S', 'PoD Stalfos Basement Warp', 'PoD Arena Main SW', 'PoD Arena Main Crystal Path',
    'PoD Arena Bonk Path', 'PoD Arena Crystal Path', 'PoD Sexy Statue NW', 'PoD Map Balcony Drop Down',
    'PoD Mimics 1 NW', 'PoD Warp Hint Warp', 'PoD Falling Bridge Path N', 'PoD Falling Bridge Path S',
    'PoD Mimics 2 NW', 'PoD Bow Statue Down Ladder', 'PoD Dark Pegs Up Ladder', 'PoD Dark Pegs WN',
    'PoD Turtle Party ES', 'PoD Turtle Party NW', 'PoD Callback Warp', 'Swamp Lobby Moat', 'Swamp Entrance Moat',
    'Swamp Trench 1 Approach Swim Depart', 'Swamp Trench 1 Approach Key', 'Swamp Trench 1 Key Approach',
    'Swamp Trench 1 Key Ledge Depart', 'Swamp Trench 1 Departure Approach', 'Swamp Trench 1 Departure Key',
    'Swamp Hub Hook Path', 'Swamp Compass Donut Push Block',
    'Swamp Shortcut Blue Barrier', 'Swamp Trench 2 Pots Blue Barrier', 'Swamp Trench 2 Pots Wet',
    'Swamp Trench 2 Departure Wet', 'Swamp West Shallows Push Blocks', 'Swamp West Ledge Hook Path',
    'Swamp Barrier Ledge Hook Path', 'Swamp Attic Left Pit', 'Swamp Attic Right Pit', 'Swamp Push Statue NW',
    'Swamp Push Statue NE', 'Swamp Drain Right Switch', 'Swamp Waterway NE', 'Swamp Waterway N', 'Swamp Waterway NW',
    'Skull Pot Circle WN', 'Skull Pot Circle Star Path', 'Skull Pull Switch S', 'Skull Big Chest N',
    'Skull Big Chest Hookpath', 'Skull 2 East Lobby NW', 'Skull Back Drop Star Path', 'Skull 2 West Lobby NW',
    'Skull 3 Lobby EN', 'Skull Star Pits SW', 'Skull Star Pits ES', 'Skull Torch Room WN', 'Skull Vines NW',
    'Thieves Conveyor Maze EN', 'Thieves Triple Bypass EN', 'Thieves Triple Bypass SE', 'Thieves Triple Bypass WN',
    'Thieves Hellway Blue Barrier', 'Thieves Hellway Crystal Blue Barrier', 'Thieves Attic ES',
    'Thieves Basement Block Path', 'Thieves Blocked Entry Path', 'Thieves Conveyor Bridge Block Path',
    'Thieves Conveyor Block Path', 'Ice Lobby WS', 'Ice Cross Left Push Block', 'Ice Cross Bottom Push Block Left',
    'Ice Cross Bottom Push Block Right', 'Ice Cross Right Push Block Top', 'Ice Cross Right Push Block Bottom',
    'Ice Cross Top Push Block Bottom', 'Ice Cross Top Push Block Right', 'Ice Bomb Drop Hole', 'Ice Pengator Switch WS',
    'Ice Pengator Switch ES', 'Ice Big Key Push Block', 'Ice Stalfos Hint SE', 'Ice Bomb Jump EN',
    'Ice Pengator Trap NE', 'Ice Hammer Block ES', 'Ice Tongue Pull WS', 'Ice Freezors Bomb Hole', 'Ice Tall Hint WS',
    'Ice Hookshot Ledge Path', 'Ice Hookshot Balcony Path', 'Ice Many Pots SW', 'Ice Many Pots WS',
    'Ice Crystal Right Blue Hole', 'Ice Crystal Left Blue Barrier', 'Ice Big Chest Landing Push Blocks',
    'Ice Backwards Room Hole', 'Ice Switch Room SE', 'Ice Antechamber NE', 'Ice Antechamber Hole', 'Mire Lobby Gap',
    'Mire Post-Gap Gap', 'Mire 2 NE', 'Mire Hub Upper Blue Barrier', 'Mire Hub Lower Blue Barrier',
    'Mire Hub Right Blue Barrier', 'Mire Hub Top Blue Barrier', 'Mire Hub Switch Blue Barrier N',
    'Mire Hub Switch Blue Barrier S', 'Mire Falling Bridge WN',
    'Mire Map Spike Side Blue Barrier', 'Mire Map Spot Blue Barrier', 'Mire Crystal Dead End Left Barrier',
    'Mire Crystal Dead End Right Barrier', 'Mire Cross ES', 'Mire Hidden Shooters Block Path S',
    'Mire Hidden Shooters Block Path N', 'Mire Left Bridge Hook Path', 'Mire Fishbone Blue Barrier',
    'Mire South Fish Blue Barrier', 'Mire Tile Room NW', 'Mire Compass Blue Barrier', 'Mire Attic Hint Hole',
    'Mire Dark Shooters SW', 'Mire Crystal Mid Blue Barrier', 'Mire Crystal Left Blue Barrier', 'TR Main Lobby Gap',
    'TR Lobby Ledge Gap', 'TR Hub SW', 'TR Hub SE', 'TR Hub ES', 'TR Hub EN', 'TR Hub NW', 'TR Hub NE', 'TR Torches NW',
    'TR Pokey 2 EN', 'TR Pokey 2 ES', 'TR Twin Pokeys SW', 'TR Twin Pokeys EN', 'TR Big Chest Gap',
    'TR Big Chest Entrance Gap', 'TR Lazy Eyes ES', 'TR Tongue Pull WS', 'TR Tongue Pull NE', 'TR Dark Ride Up Stairs',
    'TR Dark Ride SW', 'TR Crystal Maze Forwards Path', 'TR Crystal Maze Blue Path', 'TR Crystal Maze Cane Path',
    'TR Final Abyss South Stairs', 'TR Final Abyss NW', 'GT Hope Room EN', 'GT Blocked Stairs Block Path',
    'GT Bob\'s Room Hole', 'GT Speed Torch SE', 'GT Speed Torch South Path', 'GT Speed Torch North Path',
    'GT Crystal Conveyor NE', 'GT Crystal Conveyor WN', 'GT Conveyor Cross EN', 'GT Conveyor Cross WN',
    'GT Hookshot East-North Path', 'GT Hookshot East-South Path', 'GT Hookshot North-East Path',
    'GT Hookshot North-South Path', 'GT Hookshot South-East Path', 'GT Hookshot South-North Path',
    'GT Hookshot Platform Blue Barrier', 'GT Hookshot Entry Blue Barrier', 'GT Double Switch Blue Path',
    'GT Double Switch Key Blue Path', 'GT Double Switch Blue Barrier', 'GT Double Switch Transition Blue',
    'GT Firesnake Room Hook Path', 'GT Falling Bridge WN', 'GT Falling Bridge WS', 'GT Ice Armos NE', 'GT Ice Armos WS',
    'GT Crystal Paths SW', 'GT Mimics 1 NW', 'GT Mimics 1 ES', 'GT Mimics 2 WS', 'GT Mimics 2 NE',
    'GT Hidden Spikes EN', 'GT Cannonball Bridge SE', 'GT Gauntlet 1 WN', 'GT Gauntlet 2 EN', 'GT Gauntlet 2 SW',
    'GT Gauntlet 3 NW', 'GT Gauntlet 3 SW', 'GT Gauntlet 4 NW', 'GT Gauntlet 4 SW', 'GT Gauntlet 5 NW',
    'GT Gauntlet 5 WS', 'GT Lanmolas 2 ES', 'GT Lanmolas 2 NW', 'GT Wizzrobes 1 SW', 'GT Wizzrobes 2 SE',
    'GT Wizzrobes 2 NE', 'GT Torch Cross ES', 'GT Falling Torches NE', 'GT Moldorm Gap', 'GT Validation Block Path'
}
def add_key_logic_rules(world, player):
    """Install the computed key logic for every dungeon of ``player``.

    For each dungeon's key logic this:
      * attaches an advanced small-key rule to every door with a door rule
        (OR'ing in the alternate rule when one exists),
      * forbids the big key in big-key-restricted locations (except entries of
        ``key_only_locations``) and small keys in small-key-restricted ones,
      * requires the big key on big-key doors and big-key chests.
    """
    key_logic = world.key_logic[player]
    # The dungeon name (dict key) is not needed, only the logic values.
    for d_logic in key_logic.values():
        for door_name, keys in d_logic.door_rules.items():
            spot = world.get_entrance(door_name, player)
            add_rule(spot, create_advanced_key_rule(d_logic, player, keys))
            if keys.opposite:
                add_rule(spot, create_advanced_key_rule(d_logic, player, keys.opposite), 'or')
        for location in d_logic.bk_restricted:
            # Locations listed in key_only_locations are exempt from the big-key
            # restriction — presumably they can only ever hold keys; confirm.
            if location.name not in key_only_locations:
                forbid_item(location, d_logic.bk_name, player)
        for location in d_logic.sm_restricted:
            forbid_item(location, d_logic.small_key_name, player)
        for door in d_logic.bk_doors:
            add_rule(world.get_entrance(door.name, player), create_rule(d_logic.bk_name, player))
        for chest in d_logic.bk_chests:
            add_rule(world.get_location(chest.name, player), create_rule(d_logic.bk_name, player))
def create_rule(item_name, player):
    """Build an access rule satisfied when the state holds ``item_name`` for ``player``."""
    def rule(state):
        return state.has(item_name, player)
    return rule
def create_key_rule(small_key_name, player, keys):
    """Build an access rule requiring at least ``keys`` copies of the small key."""
    def rule(state):
        return state.has_key(small_key_name, player, keys)
    return rule
def create_key_rule_allow_small(small_key_name, player, keys, location):
    """Small-key rule that may also count the key sitting in ``location`` itself.

    Satisfied with ``keys`` small keys, or with ``keys - 1`` when ``location``
    is known to contain this dungeon's small key.
    """
    target = location.name

    def rule(state):
        return (state.has_key(small_key_name, player, keys)
                or (item_name(state, target, player) == (small_key_name, player)
                    and state.has_key(small_key_name, player, keys - 1)))

    return rule
def create_key_rule_bk_exception(small_key_name, big_key_name, player, keys, bk_keys, bk_locs):
    """Small-key rule whose required count drops to ``bk_keys`` when the big key
    turns out to be placed in one of ``bk_locs``.
    """
    # Precompute the (location, player) pairs once; the original rebuilt them
    # (via zip) on every rule evaluation.
    bk_pairs = [(chest.name, player) for chest in bk_locs]

    def rule(state):
        big_key_inside = item_in_locations(state, big_key_name, player, bk_pairs)
        needed = bk_keys if big_key_inside else keys
        return state.has_key(small_key_name, player, needed)

    return rule
def create_key_rule_bk_exception_or_allow(small_key_name, big_key_name, player, keys, location, bk_keys, bk_locs):
    """Combination of the 'allow own small key' and 'big-key exception' rules.

    Satisfied when:
      * the big key is NOT in ``bk_locs`` and ``keys`` small keys are held, or
      * ``location`` holds this small key and ``keys - 1`` are held, or
      * the big key IS in ``bk_locs`` and ``bk_keys`` small keys are held.
    """
    target = location.name
    bk_pairs = [(chest.name, player) for chest in bk_locs]

    def rule(state):
        big_key_inside = item_in_locations(state, big_key_name, player, bk_pairs)
        if not big_key_inside and state.has_key(small_key_name, player, keys):
            return True
        if (item_name(state, target, player) == (small_key_name, player)
                and state.has_key(small_key_name, player, keys - 1)):
            return True
        return big_key_inside and state.has_key(small_key_name, player, bk_keys)

    return rule
def create_advanced_key_rule(key_logic, player, rule):
    """Pick the right key-rule factory for a door rule.

    Dispatches on whether the rule allows the door's own small key and whether
    a big-key-dependent alternate key count exists.
    """
    small = key_logic.small_key_name
    if rule.alternate_small_key is None:
        if rule.allow_small:
            return create_key_rule_allow_small(small, player, rule.small_key_num, rule.small_location)
        return create_key_rule(small, player, rule.small_key_num)
    if rule.allow_small:
        return create_key_rule_bk_exception_or_allow(small, key_logic.bk_name, player, rule.small_key_num,
                                                     rule.small_location, rule.alternate_small_key,
                                                     rule.alternate_big_key_loc)
    return create_key_rule_bk_exception(small, key_logic.bk_name, player, rule.small_key_num,
                                        rule.alternate_small_key, rule.alternate_big_key_loc)
| 81.942319 | 437 | 0.690565 |
79542c7772eac13f7480a501a9dc7e7b492ba824 | 51,624 | py | Python | my_happy_pandas/core/arrays/sparse/array.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | my_happy_pandas/core/arrays/sparse/array.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | my_happy_pandas/core/arrays/sparse/array.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | """
SparseArray data structure
"""
from collections import abc
import numbers
import operator
from typing import Any, Callable, Union
import warnings
import numpy as np
from my_happy_pandas._libs import lib
import my_happy_pandas._libs.sparse as splib
from my_happy_pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex
from my_happy_pandas._libs.tslibs import NaT
from my_happy_pandas._typing import Scalar
import my_happy_pandas.compat as compat
from my_happy_pandas.compat.numpy import function as nv
from my_happy_pandas.errors import PerformanceWarning
from my_happy_pandas.core.dtypes.cast import (
astype_nansafe,
construct_1d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
)
from my_happy_pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_integer,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from my_happy_pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from my_happy_pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
import my_happy_pandas.core.algorithms as algos
from my_happy_pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from my_happy_pandas.core.arrays.sparse.dtype import SparseDtype
from my_happy_pandas.core.base import PandasObject
import my_happy_pandas.core.common as com
from my_happy_pandas.core.construction import extract_array, sanitize_array
from my_happy_pandas.core.indexers import check_array_indexer
from my_happy_pandas.core.missing import interpolate_2d
from my_happy_pandas.core.nanops import check_below_min_count
import my_happy_pandas.core.ops as ops
from my_happy_pandas.core.ops.common import unpack_zerodim_and_defer
import my_happy_pandas.io.formats.printing as printing
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass="SparseArray")
def _get_fill(arr: "SparseArray") -> np.ndarray:
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(
    left: "SparseArray", right: "SparseArray", op: Callable, name: str
) -> Any:
    """
    Perform a binary operation between two arrays.
    Parameters
    ----------
    left : Union[SparseArray, ndarray]
    right : Union[SparseArray, ndarray]
    op : Callable
        The binary operation to perform
    name str
        Name of the callable.
    Returns
    -------
    SparseArray
    """
    if name.startswith("__"):
        # For lookups in _libs.sparse we need non-dunder op name
        name = name[2:-2]
    # dtype used to find corresponding sparse method
    ltype = left.dtype.subtype
    rtype = right.dtype.subtype
    if not is_dtype_equal(ltype, rtype):
        # Promote both operands to a common subtype before operating.
        subtype = find_common_type([ltype, rtype])
        ltype = SparseDtype(subtype, left.fill_value)
        rtype = SparseDtype(subtype, right.fill_value)
        # TODO(GH-23092): pass copy=False. Need to fix astype_nansafe
        left = left.astype(ltype)
        right = right.astype(rtype)
        dtype = ltype.subtype
    else:
        dtype = ltype
    # dtype the result must have
    result_dtype = None
    if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
        # At least one side is fully dense: operate on densified arrays.
        with np.errstate(all="ignore"):
            result = op(left.to_dense(), right.to_dense())
            fill = op(_get_fill(left), _get_fill(right))
        # Reuse the index of whichever side has no gaps.
        if left.sp_index.ngaps == 0:
            index = left.sp_index
        else:
            index = right.sp_index
    elif left.sp_index.equals(right.sp_index):
        # Identical sparsity structure: operate pairwise on sp_values only.
        with np.errstate(all="ignore"):
            result = op(left.sp_values, right.sp_values)
            fill = op(_get_fill(left), _get_fill(right))
        index = left.sp_index
    else:
        # Mismatched sparsity: dispatch to the compiled sparse kernels.
        if name[0] == "r":
            # Reflected op: swap operands and use the forward kernel.
            left, right = right, left
            name = name[1:]
        if name in ("and", "or", "xor") and dtype == "bool":
            opname = f"sparse_{name}_uint8"
            # to make template simple, cast here
            left_sp_values = left.sp_values.view(np.uint8)
            right_sp_values = right.sp_values.view(np.uint8)
            result_dtype = bool
        else:
            opname = f"sparse_{name}_{dtype}"
            left_sp_values = left.sp_values
            right_sp_values = right.sp_values
        sparse_op = getattr(splib, opname)
        with np.errstate(all="ignore"):
            result, index, fill = sparse_op(
                left_sp_values,
                left.sp_index,
                left.fill_value,
                right_sp_values,
                right.sp_index,
                right.fill_value,
            )
    if result_dtype is None:
        result_dtype = result.dtype
    return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
    """
    wrap op result to have correct dtype
    """
    # "__eq__" -> "eq", etc.
    op_name = name[2:-2] if name.startswith("__") else name
    if op_name in {"eq", "ne", "lt", "gt", "le", "ge"}:
        # Comparison ops always yield booleans.
        dtype = bool
    fill_value = lib.item_from_zerodim(fill_value)
    if is_bool_dtype(dtype):
        # fill_value may be np.bool_; normalize to the builtin bool.
        fill_value = bool(fill_value)
    return SparseArray(
        data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype
    )
class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The fill value is potentially specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> from my_happy_pandas.arrays import SparseArray
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
"""
_subtyp = "sparse_array" # register ABCSparseArray
_deprecations = PandasObject._deprecations | frozenset(["get_values"])
_sparse_index: SparseIndex
    def __init__(
        self,
        data,
        sparse_index=None,
        index=None,
        fill_value=None,
        kind="integer",
        dtype=None,
        copy=False,
    ):
        """Build a SparseArray; see the class docstring for parameter semantics."""
        # fill_value precedence: explicit arg > SparseDtype > existing SparseArray.
        if fill_value is None and isinstance(dtype, SparseDtype):
            fill_value = dtype.fill_value
        if isinstance(data, type(self)):
            # disable normal inference on dtype, sparse_index, & fill_value
            if sparse_index is None:
                sparse_index = data.sp_index
            if fill_value is None:
                fill_value = data.fill_value
            if dtype is None:
                dtype = data.dtype
            # TODO: make kind=None, and use data.kind?
            data = data.sp_values
        # Handle use-provided dtype
        if isinstance(dtype, str):
            # Two options: dtype='int', regular numpy dtype
            # or dtype='Sparse[int]', a sparse dtype
            try:
                dtype = SparseDtype.construct_from_string(dtype)
            except TypeError:
                dtype = pandas_dtype(dtype)
        if isinstance(dtype, SparseDtype):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype
        if index is not None and not is_scalar(data):
            # NOTE(review): a TypeError would be more precise than bare Exception.
            raise Exception("must only pass scalars with an index")
        if is_scalar(data):
            if index is not None:
                if data is None:
                    data = np.nan
            # Broadcast the scalar to the length implied by index/sparse_index.
            if index is not None:
                npoints = len(index)
            elif sparse_index is None:
                npoints = 1
            else:
                npoints = sparse_index.length
            dtype = infer_dtype_from_scalar(data)[0]
            data = construct_1d_arraylike_from_scalar(data, npoints, dtype)
        if dtype is not None:
            dtype = pandas_dtype(dtype)
        # TODO: disentangle the fill_value dtype inference from
        # dtype inference
        if data is None:
            # TODO: What should the empty dtype be? Object or float?
            data = np.array([], dtype=dtype)
        if not is_array_like(data):
            try:
                # probably shared code in sanitize_series
                data = sanitize_array(data, index=None)
            except ValueError:
                # NumPy may raise a ValueError on data like [1, []]
                # we retry with object dtype here.
                if dtype is None:
                    dtype = object
                    data = np.atleast_1d(np.asarray(data, dtype=dtype))
                else:
                    raise
        if copy:
            # TODO: avoid double copy when dtype forces cast.
            data = data.copy()
        if fill_value is None:
            fill_value_dtype = data.dtype if dtype is None else dtype
            if fill_value_dtype is None:
                fill_value = np.nan
            else:
                fill_value = na_value_for_dtype(fill_value_dtype)
        if isinstance(data, type(self)) and sparse_index is None:
            sparse_index = data._sparse_index
            sparse_values = np.asarray(data.sp_values, dtype=dtype)
        elif sparse_index is None:
            # Infer the sparse structure from dense data via make_sparse.
            data = extract_array(data, extract_numpy=True)
            if not isinstance(data, np.ndarray):
                # EA
                if is_datetime64tz_dtype(data.dtype):
                    warnings.warn(
                        f"Creating SparseArray from {data.dtype} data "
                        "loses timezone information. Cast to object before "
                        "sparse to retain timezone information.",
                        UserWarning,
                        stacklevel=2,
                    )
                    data = np.asarray(data, dtype="datetime64[ns]")
                data = np.asarray(data)
            sparse_values, sparse_index, fill_value = make_sparse(
                data, kind=kind, fill_value=fill_value, dtype=dtype
            )
        else:
            # Caller provided the sparse index: data must already be sp_values.
            sparse_values = np.asarray(data, dtype=dtype)
            if len(sparse_values) != sparse_index.npoints:
                raise AssertionError(
                    f"Non array-like type {type(sparse_values)} must "
                    "have the same length as the index"
                )
        self._sparse_index = sparse_index
        self._sparse_values = sparse_values
        self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(
cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype
) -> "SparseArray":
new = object.__new__(cls)
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
    @classmethod
    def from_spmatrix(cls, data):
        """
        Create a SparseArray from a scipy.sparse matrix.
        .. versionadded:: 0.25.0
        Parameters
        ----------
        data : scipy.sparse.sp_matrix
            This should be a SciPy sparse matrix where the size
            of the second dimension is 1. In other words, a
            sparse matrix with a single column.
        Returns
        -------
        SparseArray
        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.coo_matrix((4, 1))
        >>> pd.arrays.SparseArray.from_spmatrix(mat)
        [0.0, 0.0, 0.0, 0.0]
        Fill: 0.0
        IntIndex
        Indices: array([], dtype=int32)
        """
        length, ncol = data.shape
        if ncol != 1:
            raise ValueError(f"'data' must have a single column, not '{ncol}'")
        # our sparse index classes require that the positions be strictly
        # increasing. So we need to sort loc, and arr accordingly.
        data = data.tocsc()
        data.sort_indices()
        arr = data.data
        idx = data.indices
        # The fill value is zero in the matrix's own dtype.
        zero = np.array(0, dtype=arr.dtype).item()
        dtype = SparseDtype(arr.dtype, zero)
        index = IntIndex(length, idx)
        return cls._simple_new(arr, index, dtype)
    def __array__(self, dtype=None, copy=True) -> np.ndarray:
        """Densify to an ndarray, placing ``fill_value`` in the gap positions."""
        fill_value = self.fill_value
        if self.sp_index.ngaps == 0:
            # Compat for na dtype and int values.
            return self.sp_values
        if dtype is None:
            # Can NumPy represent this type?
            # If not, `np.result_type` will raise. We catch that
            # and return object.
            if is_datetime64_any_dtype(self.sp_values.dtype):
                # However, we *do* special-case the common case of
                # a datetime64 with pandas NaT.
                if fill_value is NaT:
                    # Can't put pd.NaT in a datetime64[ns]
                    fill_value = np.datetime64("NaT")
            try:
                dtype = np.result_type(self.sp_values.dtype, type(fill_value))
            except TypeError:
                dtype = object
        # Start from a fill-value-filled buffer, then scatter the sparse values.
        out = np.full(self.shape, fill_value, dtype=dtype)
        out[self.sp_index.to_int_index().indices] = self.sp_values
        return out
def __setitem__(self, key, value):
# I suppose we could allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item assignment via setitem"
raise TypeError(msg)
    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy=False):
        # ExtensionArray interface; `copy` is accepted but not forwarded.
        return cls(scalars, dtype=dtype)
    @classmethod
    def _from_factorized(cls, values, original):
        # Rebuild from factorized values, keeping the original's dtype.
        return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
    @property
    def sp_index(self) -> SparseIndex:
        """
        The SparseIndex containing the location of non- ``fill_value`` points.
        """
        return self._sparse_index
    @property
    def sp_values(self) -> np.ndarray:
        """
        An ndarray containing the non- ``fill_value`` values.
        Examples
        --------
        >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
        >>> s.sp_values
        array([1, 2])
        """
        # Dense storage of only the stored (non-fill) entries.
        return self._sparse_values
    @property
    def dtype(self):
        """The SparseDtype carrying both the subtype and the fill value."""
        return self._dtype
    @property
    def fill_value(self):
        """
        Elements in `data` that are `fill_value` are not stored.
        For memory savings, this should be the most common value in the array.
        """
        return self.dtype.fill_value
    @fill_value.setter
    def fill_value(self, value):
        # The fill value lives on the SparseDtype, so rebuild the dtype.
        self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self) -> str:
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if isinstance(self.sp_index, IntIndex):
return "integer"
else:
return "block"
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
    def __len__(self) -> int:
        # Logical length, including the fill-value positions.
        return self.sp_index.length
    @property
    def _null_fill_value(self):
        # True when the fill value is NA-like, as determined by the dtype.
        return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return isna(fill_value)
else:
return self.fill_value == fill_value
    @property
    def nbytes(self) -> int:
        # Memory of stored values plus the sparse index; fill positions cost nothing.
        return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
    @property
    def npoints(self) -> int:
        """
        The number of non- ``fill_value`` points.
        Examples
        --------
        >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
        >>> s.npoints
        3
        """
        # Delegates straight to the sparse index.
        return self.sp_index.npoints
    def isna(self):
        """Boolean SparseArray marking which stored values are NA."""
        # If null fill value, we want SparseDtype[bool, true]
        # to preserve the same memory usage.
        dtype = SparseDtype(bool, self._null_fill_value)
        return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)
    def fillna(self, value=None, method=None, limit=None):
        """
        Fill missing values with `value`.
        Parameters
        ----------
        value : scalar, optional
        method : str, optional
            .. warning::
               Using 'method' will result in high memory use,
               as all `fill_value` methods will be converted to
               an in-memory ndarray
        limit : int, optional
        Returns
        -------
        SparseArray
        Notes
        -----
        When `value` is specified, the result's ``fill_value`` depends on
        ``self.fill_value``. The goal is to maintain low-memory use.
        If ``self.fill_value`` is NA, the result dtype will be
        ``SparseDtype(self.dtype, fill_value=value)``. This will preserve
        amount of memory used before and after filling.
        When ``self.fill_value`` is not NA, the result dtype will be
        ``self.dtype``. Again, this preserves the amount of memory used.
        """
        # Exactly one of value/method must be given.
        if (method is None and value is None) or (
            method is not None and value is not None
        ):
            raise ValueError("Must specify one of 'method' or 'value'.")
        elif method is not None:
            msg = "fillna with 'method' requires high memory usage."
            warnings.warn(msg, PerformanceWarning)
            filled = interpolate_2d(np.asarray(self), method=method, limit=limit)
            return type(self)(filled, fill_value=self.fill_value)
        else:
            # NOTE(review): `limit` is only honored on the `method` path above.
            new_values = np.where(isna(self.sp_values), value, self.sp_values)
            if self._null_fill_value:
                # This is essentially just updating the dtype.
                new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
            else:
                new_dtype = self.dtype
            return self._simple_new(new_values, self._sparse_index, new_dtype)
    def shift(self, periods=1, fill_value=None):
        """Shift values by ``periods`` positions, padding with ``fill_value``."""
        if not len(self) or periods == 0:
            return self.copy()
        if isna(fill_value):
            fill_value = self.dtype.na_value
        # Widen the subtype up-front if the pad value does not fit.
        subtype = np.result_type(fill_value, self.dtype.subtype)
        if subtype != self.dtype.subtype:
            # just coerce up front
            arr = self.astype(SparseDtype(subtype, self.fill_value))
        else:
            arr = self
        empty = self._from_sequence(
            [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype
        )
        # Concatenate the pad block on the leading or trailing side.
        if periods > 0:
            a = empty
            b = arr[:-periods]
        else:
            a = arr[abs(periods) :]
            b = empty
        return arr._concat_same_type([a, b])
    def _first_fill_value_loc(self):
        """
        Get the location of the first missing value.
        Returns
        -------
        int
        """
        # No gaps at all -> no fill-value position exists.
        if len(self) == 0 or self.sp_index.npoints == len(self):
            return -1
        indices = self.sp_index.to_int_index().indices
        if not len(indices) or indices[0] > 0:
            return 0
        # First gap is where consecutive stored indices jump by >= 2.
        diff = indices[1:] - indices[:-1]
        return np.searchsorted(diff, 2) + 1
def unique(self):
uniques = list(algos.unique(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
uniques.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(uniques, dtype=self.dtype)
    def _values_for_factorize(self):
        # Still override this for hash_pandas_object
        # Densified values plus the NA sentinel (the fill value).
        return np.asarray(self), self.fill_value
    def factorize(self, na_sentinel=-1):
        """Factorize via densification; returns (dense codes, sparse uniques)."""
        # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
        # The sparsity on this is backwards from what Sparse would want. Want
        # ExtensionArray.factorize -> Tuple[EA, EA]
        # Given that we have to return a dense array of codes, why bother
        # implementing an efficient factorize?
        codes, uniques = algos.factorize(np.asarray(self), na_sentinel=na_sentinel)
        uniques = SparseArray(uniques, dtype=self.dtype)
        return codes, uniques
    def value_counts(self, dropna=True):
        """
        Returns a Series containing counts of unique values.
        Parameters
        ----------
        dropna : boolean, default True
            Don't include counts of NaN, even if NaN is in sp_values.
        Returns
        -------
        counts : Series
        """
        from my_happy_pandas import Index, Series
        keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna)
        # Gap positions all carry the fill value; fold them into the counts.
        fcounts = self.sp_index.ngaps
        if fcounts > 0:
            if self._null_fill_value and dropna:
                # NA fill value and dropna: gaps are simply discarded.
                pass
            else:
                if self._null_fill_value:
                    mask = isna(keys)
                else:
                    mask = keys == self.fill_value
                if mask.any():
                    counts[mask] += fcounts
                else:
                    # Fill value not among sp_values: add it as a new key.
                    keys = np.insert(keys, 0, self.fill_value)
                    counts = np.insert(counts, 0, fcounts)
        if not isinstance(keys, ABCIndexClass):
            keys = Index(keys)
        result = Series(counts, index=keys)
        return result
# --------
# Indexing
# --------
    def __getitem__(self, key):
        """Index with an int, slice, array, boolean mask, or 1-tuple thereof."""
        # avoid mypy issues when importing at the top-level
        from my_happy_pandas.core.indexing import check_bool_indexer
        if isinstance(key, tuple):
            if len(key) > 1:
                raise IndexError("too many indices for array.")
            key = key[0]
        if is_integer(key):
            return self._get_val_at(key)
        elif isinstance(key, tuple):
            # Only reachable when the unwrapped 1-tuple element was itself a
            # tuple; densify and index.
            data_slice = self.to_dense()[key]
        elif isinstance(key, slice):
            # special case to preserve dtypes
            if key == slice(None):
                return self.copy()
            # TODO: this logic is surely elsewhere
            # TODO: this could be more efficient
            indices = np.arange(len(self), dtype=np.int32)[key]
            return self.take(indices)
        else:
            # TODO: I think we can avoid densifying when masking a
            # boolean SparseArray with another. Need to look at the
            # key's fill_value for True / False, and then do an intersection
            # on the indices of the sp_values.
            if isinstance(key, SparseArray):
                if is_bool_dtype(key):
                    key = key.to_dense()
                else:
                    key = np.asarray(key)
            key = check_array_indexer(self, key)
            if com.is_bool_indexer(key):
                key = check_bool_indexer(self, key)
                return self.take(np.arange(len(key), dtype=np.int32)[key])
            elif hasattr(key, "__len__"):
                return self.take(key)
            else:
                raise ValueError(f"Cannot slice with '{key}'")
        # Only the tuple branch above falls through to here.
        return type(self)(data_slice, kind=self.kind)
    def _get_val_at(self, loc):
        """Return the scalar at position ``loc`` (supports negative indices)."""
        n = len(self)
        if loc < 0:
            loc += n
        if loc >= n or loc < 0:
            raise IndexError("Out of bounds access")
        # -1 means the position is a gap, i.e. holds the fill value.
        sp_loc = self.sp_index.lookup(loc)
        if sp_loc == -1:
            return self.fill_value
        else:
            val = self.sp_values[sp_loc]
            val = com.maybe_box_datetimelike(val, self.sp_values.dtype)
            return val
def take(self, indices, allow_fill=False, fill_value=None) -> "SparseArray":
if is_scalar(indices):
raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = np.array([], dtype="object")
kwargs = {"dtype": self.dtype}
elif allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {"dtype": self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind, **kwargs)
    def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
        """Take where -1 in ``indices`` means "insert ``fill_value``"."""
        if fill_value is None:
            fill_value = self.dtype.na_value
        if indices.min() < -1:
            raise ValueError(
                "Invalid value in 'indices'. Must be between -1 "
                "and the length of the array."
            )
        if indices.max() >= len(self):
            raise IndexError("out of bounds value in 'indices'.")
        if len(self) == 0:
            # Empty... Allow taking only if all empty
            if (indices == -1).all():
                dtype = np.result_type(self.sp_values, type(fill_value))
                taken = np.empty_like(indices, dtype=dtype)
                taken.fill(fill_value)
                return taken
            else:
                raise IndexError("cannot do a non-empty take from an empty axes.")
        # sp_indexer may be -1 for two reasons
        # 1.) we took for an index of -1 (new)
        # 2.) we took a value that was self.fill_value (old)
        sp_indexer = self.sp_index.lookup_array(indices)
        new_fill_indices = indices == -1
        old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
        if self.sp_index.npoints == 0 and old_fill_indices.all():
            # We've looked up all valid points on an all-sparse array.
            taken = np.full(
                sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype
            )
        elif self.sp_index.npoints == 0:
            # Avoid taking from the empty self.sp_values
            _dtype = np.result_type(self.dtype.subtype, type(fill_value))
            taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
        else:
            taken = self.sp_values.take(sp_indexer)
            # Fill in two steps.
            # Old fill values
            # New fill values
            # potentially coercing to a new dtype at each stage.
            m0 = sp_indexer[old_fill_indices] < 0
            m1 = sp_indexer[new_fill_indices] < 0
            result_type = taken.dtype
            if m0.any():
                result_type = np.result_type(result_type, type(self.fill_value))
                taken = taken.astype(result_type)
                taken[old_fill_indices] = self.fill_value
            if m1.any():
                result_type = np.result_type(result_type, type(fill_value))
                taken = taken.astype(result_type)
                taken[new_fill_indices] = fill_value
        return taken
    def _take_without_fill(self, indices) -> Union[np.ndarray, "SparseArray"]:
        """Take where negative ``indices`` count from the end (numpy semantics)."""
        to_shift = indices < 0
        indices = indices.copy()
        n = len(self)
        if (indices.max() >= n) or (indices.min() < -n):
            if n == 0:
                raise IndexError("cannot do a non-empty take from an empty axes.")
            else:
                raise IndexError("out of bounds value in 'indices'.")
        if to_shift.any():
            # Normalize negative indices to their positive equivalents.
            indices[to_shift] += n
        if self.sp_index.npoints == 0:
            # edge case in take...
            # I think just return
            out = np.full(
                indices.shape,
                self.fill_value,
                dtype=np.result_type(type(self.fill_value)),
            )
            arr, sp_index, fill_value = make_sparse(out, fill_value=self.fill_value)
            return type(self)(arr, sparse_index=sp_index, fill_value=fill_value)
        sp_indexer = self.sp_index.lookup_array(indices)
        taken = self.sp_values.take(sp_indexer)
        # Positions that hit a gap (-1) must be patched with the fill value.
        fillable = sp_indexer < 0
        if fillable.any():
            # TODO: may need to coerce array to fill value
            result_type = np.result_type(taken, type(self.fill_value))
            taken = taken.astype(result_type)
            taken[fillable] = self.fill_value
        return taken
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)
def copy(self):
values = self.sp_values.copy()
return self._simple_new(values, self.sp_index, self.dtype)
    @classmethod
    def _concat_same_type(cls, to_concat):
        """Concatenate SparseArrays, merging their sparse indices by offset."""
        # NOTE(review): the fill_value of the first array wins for the result.
        fill_value = to_concat[0].fill_value
        values = []
        length = 0
        if to_concat:
            sp_kind = to_concat[0].kind
        else:
            sp_kind = "integer"
        if sp_kind == "integer":
            indices = []
            for arr in to_concat:
                # Shift each array's stored positions past the preceding arrays.
                idx = arr.sp_index.to_int_index().indices.copy()
                idx += length  # TODO: wraparound
                length += arr.sp_index.length
                values.append(arr.sp_values)
                indices.append(idx)
            data = np.concatenate(values)
            indices = np.concatenate(indices)
            sp_index = IntIndex(length, indices)
        else:
            # when concatenating block indices, we don't claim that you'll
            # get an identical index as concating the values and then
            # creating a new index. We don't want to spend the time trying
            # to merge blocks across arrays in `to_concat`, so the resulting
            # BlockIndex may have more blocs.
            blengths = []
            blocs = []
            for arr in to_concat:
                idx = arr.sp_index.to_block_index()
                values.append(arr.sp_values)
                blocs.append(idx.blocs.copy() + length)
                blengths.append(idx.blengths)
                length += arr.sp_index.length
            data = np.concatenate(values)
            blocs = np.concatenate(blocs)
            blengths = np.concatenate(blengths)
            sp_index = BlockIndex(length, blocs, blengths)
        return cls(data, sparse_index=sp_index, fill_value=fill_value)
    def astype(self, dtype=None, copy=True):
        """
        Change the dtype of a SparseArray.
        The output will always be a SparseArray. To convert to a dense
        ndarray with a certain dtype, use :meth:`numpy.asarray`.
        Parameters
        ----------
        dtype : np.dtype or ExtensionDtype
            For SparseDtype, this changes the dtype of
            ``self.sp_values`` and the ``self.fill_value``.
            For other dtypes, this only changes the dtype of
            ``self.sp_values``.
        copy : bool, default True
            Whether to ensure a copy is made, even if not necessary.
        Returns
        -------
        SparseArray
        Examples
        --------
        >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
        >>> arr
        [0, 0, 1, 2]
        Fill: 0
        IntIndex
        Indices: array([2, 3], dtype=int32)
        >>> arr.astype(np.dtype('int32'))
        [0, 0, 1, 2]
        Fill: 0
        IntIndex
        Indices: array([2, 3], dtype=int32)
        Using a NumPy dtype with a different kind (e.g. float) will coerce
        just ``self.sp_values``.
        >>> arr.astype(np.dtype('float64'))
        ... # doctest: +NORMALIZE_WHITESPACE
        [0.0, 0.0, 1.0, 2.0]
        Fill: 0.0
        IntIndex
        Indices: array([2, 3], dtype=int32)
        Use a SparseDtype if you wish to be change the fill value as well.
        >>> arr.astype(SparseDtype("float64", fill_value=np.nan))
        ... # doctest: +NORMALIZE_WHITESPACE
        [nan, nan, 1.0, 2.0]
        Fill: nan
        IntIndex
        Indices: array([2, 3], dtype=int32)
        """
        dtype = self.dtype.update_dtype(dtype)
        subtype = dtype._subtype_with_str
        # TODO copy=False is broken for astype_nansafe with int -> float, so cannot
        # passthrough copy keyword: https://github.com/pandas-dev/pandas/issues/34456
        sp_values = astype_nansafe(self.sp_values, subtype, copy=True)
        # If astype_nansafe returned the same buffer, honor the copy request.
        if sp_values is self.sp_values and copy:
            sp_values = sp_values.copy()
        return self._simple_new(sp_values, self.sp_index, dtype)
    def map(self, mapper):
        """
        Map categories using input correspondence (dict, Series, or function).
        Parameters
        ----------
        mapper : dict, Series, callable
            The correspondence from old values to new.
        Returns
        -------
        SparseArray
            The output array will have the same density as the input.
            The output fill value will be the result of applying the
            mapping to ``self.fill_value``
        Examples
        --------
        >>> arr = pd.arrays.SparseArray([0, 1, 2])
        >>> arr.map(lambda x: x + 10)
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)
        >>> arr.map({0: 10, 1: 11, 2: 12})
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)
        >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
        [10, 11, 12]
        Fill: 10
        IntIndex
        Indices: array([1, 2], dtype=int32)
        """
        # this is used in apply.
        # We get hit since we're an "is_extension_type" but regular extension
        # types are not hit. This may be worth adding to the interface.
        if isinstance(mapper, ABCSeries):
            mapper = mapper.to_dict()
        if isinstance(mapper, abc.Mapping):
            # Dict-like: unmapped sp_values become None; fill_value keeps itself.
            fill_value = mapper.get(self.fill_value, self.fill_value)
            sp_values = [mapper.get(x, None) for x in self.sp_values]
        else:
            fill_value = mapper(self.fill_value)
            sp_values = [mapper(x) for x in self.sp_values]
        return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value)
    def to_dense(self):
        """
        Convert SparseArray to a NumPy array.
        Returns
        -------
        arr : NumPy array
        """
        return np.asarray(self, dtype=self.sp_values.dtype)
    # Alias kept for internal/backwards compatibility.
    _internal_get_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if isinstance(state, tuple):
            # Compat for pandas < 0.24.0
            nd_state, (fill_value, sp_index) = state
            # Rebuild the values array from the pickled ndarray state.
            sparse_values = np.array([])
            sparse_values.__setstate__(nd_state)
            self._sparse_values = sparse_values
            self._sparse_index = sp_index
            self._dtype = SparseDtype(sparse_values.dtype, fill_value)
        else:
            self.__dict__.update(state)
def nonzero(self):
if self.fill_value == 0:
return (self.sp_index.to_int_index().indices,)
else:
return (self.sp_index.to_int_index().indices[self.sp_values != 0],)
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
    def _reduce(self, name: str, skipna: bool = True, **kwargs):
        """Dispatch a named reduction (e.g. 'sum', 'mean') to the matching method."""
        method = getattr(self, name, None)
        if method is None:
            raise TypeError(f"cannot perform {name} with type {self.dtype}")
        # NOTE(review): skipna=True uses self directly — presumably because the
        # reduction methods already operate on _valid_sp_values; confirm.
        if skipna:
            arr = self
        else:
            arr = self.dropna()
        # we don't support these kwargs.
        # They should only be present when called via pandas, so do it here.
        # instead of in `any` / `all` (which will raise if they're present,
        # thanks to nv.validate
        kwargs.pop("filter_type", None)
        kwargs.pop("numeric_only", None)
        kwargs.pop("op", None)
        return getattr(arr, name)(**kwargs)
    def all(self, axis=None, *args, **kwargs):
        """
        Tests whether all elements evaluate True
        Returns
        -------
        all : bool
        See Also
        --------
        numpy.all
        """
        nv.validate_all(args, kwargs)
        values = self.sp_values
        # If any position is a gap, the fill value participates in the test.
        if len(values) != len(self) and not np.all(self.fill_value):
            return False
        # NOTE(review): returns numpy bool_ here, while `any` returns a
        # builtin bool via .item() — inconsistent return types.
        return values.all()
    def any(self, axis=0, *args, **kwargs):
        """
        Tests whether at least one of elements evaluate True
        Returns
        -------
        any : bool
        See Also
        --------
        numpy.any
        """
        nv.validate_any(args, kwargs)
        values = self.sp_values
        # A truthy fill value in any gap position short-circuits to True.
        if len(values) != len(self) and np.any(self.fill_value):
            return True
        return values.any().item()
    def sum(self, axis: int = 0, min_count: int = 0, *args, **kwargs) -> Scalar:
        """
        Sum of non-NA/null values
        Parameters
        ----------
        axis : int, default 0
            Not Used. NumPy compatibility.
        min_count : int, default 0
            The required number of valid values to perform the summation. If fewer
            than ``min_count`` valid values are present, the result will be the missing
            value indicator for subarray type.
        *args, **kwargs
            Not Used. NumPy compatibility.
        Returns
        -------
        scalar
        """
        nv.validate_sum(args, kwargs)
        valid_vals = self._valid_sp_values
        sp_sum = valid_vals.sum()
        if self._null_fill_value:
            # NA fill: gaps contribute nothing to the sum.
            if check_below_min_count(valid_vals.shape, None, min_count):
                return na_value_for_dtype(self.dtype.subtype, compat=False)
            return sp_sum
        else:
            # Non-NA fill: every gap contributes one fill_value, and counts
            # toward min_count.
            nsparse = self.sp_index.ngaps
            if check_below_min_count(valid_vals.shape, None, min_count - nsparse):
                return na_value_for_dtype(self.dtype.subtype, compat=False)
            return sp_sum + self.fill_value * nsparse
    def cumsum(self, axis=0, *args, **kwargs):
        """
        Cumulative sum of non-NA/null values.
        When performing the cumulative summation, any non-NA/null values will
        be skipped. The resulting SparseArray will preserve the locations of
        NaN values, but the fill value will be `np.nan` regardless.
        Parameters
        ----------
        axis : int or None
            Axis over which to perform the cumulative summation. If None,
            perform cumulative summation over flattened array.
        Returns
        -------
        cumsum : SparseArray
        """
        nv.validate_cumsum(args, kwargs)
        if axis is not None and axis >= self.ndim:  # Mimic ndarray behaviour.
            raise ValueError(f"axis(={axis}) out of bounds")
        if not self._null_fill_value:
            # Non-NA fill values participate in the running total: densify.
            return SparseArray(self.to_dense()).cumsum()
        # NA fill: gaps stay NA, so cumsum only over the stored values.
        return SparseArray(
            self.sp_values.cumsum(),
            sparse_index=self.sp_index,
            fill_value=self.fill_value,
        )
    def mean(self, axis=0, *args, **kwargs):
        """
        Mean of non-NA/null values
        Returns
        -------
        mean : float
        """
        nv.validate_mean(args, kwargs)
        valid_vals = self._valid_sp_values
        sp_sum = valid_vals.sum()
        ct = len(valid_vals)
        if self._null_fill_value:
            # NA fill: gaps excluded from both numerator and denominator.
            return sp_sum / ct
        else:
            # Non-NA fill: each gap contributes fill_value and one count.
            nsparse = self.sp_index.ngaps
            return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
    def transpose(self, *axes) -> "SparseArray":
        """
        Returns the SparseArray.
        """
        # 1-D: transposing is a no-op.
        return self
    @property
    def T(self) -> "SparseArray":
        """
        Returns the SparseArray.
        """
        # 1-D: transposing is a no-op.
        return self
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
_HANDLED_TYPES = (np.ndarray, numbers.Number)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """NumPy ufunc protocol: apply unary ufuncs sparsely, densify otherwise."""
        out = kwargs.get("out", ())
        for x in inputs + out:
            if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
                return NotImplemented
        # for binary ops, use our custom dunder methods
        result = ops.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result
        if len(inputs) == 1:
            # No alignment necessary.
            sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
            fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
            if isinstance(sp_values, tuple):
                # multiple outputs. e.g. modf
                arrays = tuple(
                    self._simple_new(
                        sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
                    )
                    for sp_value, fv in zip(sp_values, fill_value)
                )
                return arrays
            elif is_scalar(sp_values):
                # e.g. reductions
                return sp_values
            return self._simple_new(
                sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
            )
        # Fallback: densify every input and apply the ufunc elementwise.
        result = getattr(ufunc, method)(*[np.asarray(x) for x in inputs], **kwargs)
        if out:
            if len(out) == 1:
                out = out[0]
            return out
        if type(result) is tuple:
            return tuple(type(self)(x) for x in result)
        elif method == "at":
            # no return value
            return None
        else:
            return type(self)(result)
    def __abs__(self):
        # np.abs on an object defining __array_ufunc__ routes back through it.
        return np.abs(self)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
    @classmethod
    def _create_unary_method(cls, op) -> Callable[["SparseArray"], "SparseArray"]:
        """Build a unary dunder (e.g. ``__neg__``) that applies ``op`` sparsely:
        to the stored values and, separately, to the fill value."""
        def sparse_unary_method(self) -> "SparseArray":
            # Apply op to the fill value via a 0-d array, then unbox the scalar.
            fill_value = op(np.array(self.fill_value)).item()
            values = op(self.sp_values)
            dtype = SparseDtype(values.dtype, fill_value)
            return cls._simple_new(values, self.sp_index, dtype)
        name = f"__{op.__name__}__"
        return compat.set_function_name(sparse_unary_method, name, cls)
    @classmethod
    def _create_arithmetic_method(cls, op):
        """Build an arithmetic dunder (e.g. ``__add__``) for the class.
        Sparse/sparse and sparse/scalar operands keep a sparse result;
        any other array-like is first wrapped into a SparseArray.
        """
        op_name = op.__name__
        @unpack_zerodim_and_defer(op_name)
        def sparse_arithmetic_method(self, other):
            if isinstance(other, SparseArray):
                return _sparse_array_op(self, other, op, op_name)
            elif is_scalar(other):
                with np.errstate(all="ignore"):
                    # Compute the new fill value and the new sparse values
                    # separately; warnings (div-by-zero etc.) are suppressed.
                    fill = op(_get_fill(self), np.asarray(other))
                    result = op(self.sp_values, other)
                if op_name == "divmod":
                    # divmod yields two outputs; wrap each one separately.
                    left, right = result
                    lfill, rfill = fill
                    return (
                        _wrap_result(op_name, left, self.sp_index, lfill),
                        _wrap_result(op_name, right, self.sp_index, rfill),
                    )
                return _wrap_result(op_name, result, self.sp_index, fill)
            else:
                other = np.asarray(other)
                with np.errstate(all="ignore"):
                    # TODO: look into _wrap_result
                    if len(self) != len(other):
                        raise AssertionError(
                            (f"length mismatch: {len(self)} vs. {len(other)}")
                        )
                    if not isinstance(other, SparseArray):
                        dtype = getattr(other, "dtype", None)
                        other = SparseArray(
                            other, fill_value=self.fill_value, dtype=dtype
                        )
                    return _sparse_array_op(self, other, op, op_name)
        name = f"__{op.__name__}__"
        return compat.set_function_name(sparse_arithmetic_method, name, cls)
    @classmethod
    def _create_comparison_method(cls, op):
        """Build a comparison/logical dunder (e.g. ``__lt__``, ``__and__``)
        returning a boolean SparseArray."""
        op_name = op.__name__
        if op_name in {"and_", "or_"}:
            # operator.and_/or_ -> dunder names __and__/__or__.
            op_name = op_name[:-1]
        @unpack_zerodim_and_defer(op_name)
        def cmp_method(self, other):
            if not is_scalar(other) and not isinstance(other, type(self)):
                # convert list-like to ndarray
                other = np.asarray(other)
            if isinstance(other, np.ndarray):
                # TODO: make this more flexible than just ndarray...
                if len(self) != len(other):
                    raise AssertionError(
                        f"length mismatch: {len(self)} vs. {len(other)}"
                    )
                other = SparseArray(other, fill_value=self.fill_value)
            if isinstance(other, SparseArray):
                return _sparse_array_op(self, other, op, op_name)
            else:
                # Scalar comparison: operate on fill value and values apart.
                with np.errstate(all="ignore"):
                    fill_value = op(self.fill_value, other)
                    result = op(self.sp_values, other)
                return type(self)(
                    result,
                    sparse_index=self.sp_index,
                    fill_value=fill_value,
                    dtype=np.bool_,
                )
        name = f"__{op.__name__}__"
        return compat.set_function_name(cmp_method, name, cls)
@classmethod
def _add_unary_ops(cls):
cls.__pos__ = cls._create_unary_method(operator.pos)
cls.__neg__ = cls._create_unary_method(operator.neg)
cls.__invert__ = cls._create_unary_method(operator.invert)
    @classmethod
    def _add_comparison_ops(cls):
        """Attach logical dunders, then defer to the base implementation."""
        cls.__and__ = cls._create_comparison_method(operator.and_)
        cls.__or__ = cls._create_comparison_method(operator.or_)
        # NOTE(review): xor is routed through the *arithmetic* factory,
        # unlike and/or above -- looks intentional, but worth confirming.
        cls.__xor__ = cls._create_arithmetic_method(operator.xor)
        super()._add_comparison_ops()
# ----------
# Formatting
# -----------
def __repr__(self) -> str:
pp_str = printing.pprint_thing(self)
pp_fill = printing.pprint_thing(self.fill_value)
pp_index = printing.pprint_thing(self.sp_index)
return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
    def _formatter(self, boxed=False):
        # Defer to the formatter from the GenericArrayFormatter calling us.
        # This will infer the correct formatter from the dtype of the values.
        # (Returning None is how an ExtensionArray requests that behavior.)
        return None
# Wire up the operator dunders generated by the classmethod factories above.
SparseArray._add_arithmetic_ops()
SparseArray._add_comparison_ops()
SparseArray._add_unary_ops()
def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy=False):
    """
    Convert ndarray to sparse format
    Parameters
    ----------
    arr : ndarray
    kind : {'block', 'integer'}
    fill_value : NaN or another value
    dtype : np.dtype, optional
    copy : bool, default False
        NOTE(review): currently unused -- see TODO at the bottom.
    Returns
    -------
    (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
    """
    assert isinstance(arr, np.ndarray)
    if arr.ndim > 1:
        raise TypeError("expected dimension <= 1 data")
    if fill_value is None:
        # Default the fill value to the NA sentinel for this dtype.
        fill_value = na_value_for_dtype(arr.dtype)
    if isna(fill_value):
        mask = notna(arr)
    else:
        # cast to object comparison to be safe
        if is_string_dtype(arr.dtype):
            arr = arr.astype(object)
        if is_object_dtype(arr.dtype):
            # element-wise equality check method in numpy doesn't treat
            # each element type, eg. 0, 0.0, and False are treated as
            # same. So we have to check the both of its type and value.
            mask = splib.make_mask_object_ndarray(arr, fill_value)
        else:
            mask = arr != fill_value
    length = len(arr)
    if length != len(mask):
        # the arr is a SparseArray
        indices = mask.sp_index.indices
    else:
        indices = mask.nonzero()[0].astype(np.int32)
    index = _make_index(length, indices, kind)
    sparsified_values = arr[mask]
    if dtype is not None:
        sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
    # TODO: copy
    return sparsified_values, index, fill_value
def _make_index(length, indices, kind):
    """Construct a sparse index of the requested kind.
    ``kind`` may be the string 'block'/'integer' or an existing
    BlockIndex/IntIndex instance, whose type is then reused.
    """
    if kind == "block" or isinstance(kind, BlockIndex):
        block_locs, block_lengths = splib.get_blocks(indices)
        return BlockIndex(length, block_locs, block_lengths)
    if kind == "integer" or isinstance(kind, IntIndex):
        return IntIndex(length, indices)
    raise ValueError("must be block or integer type")  # pragma: no cover
| 32.735574 | 88 | 0.565086 |
79542d79d9a4dfa000e0273e3ab37dc2f5112df2 | 10,849 | py | Python | salt/modules/macports.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 3 | 2016-09-03T06:26:42.000Z | 2019-06-30T13:04:53.000Z | salt/modules/macports.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | salt/modules/macports.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2021-12-02T15:30:00.000Z | 2021-12-02T15:30:00.000Z | # -*- coding: utf-8 -*-
'''
Support for MacPorts under Mac OSX.
This module has some caveats.
1. Updating the database of available ports is quite resource-intensive.
However, `refresh=True` is the default for all operations that need an
up-to-date copy of available ports. Consider `refresh=False` when you are
sure no db update is needed.
2. In some cases MacPorts doesn't always realize when another copy of itself
is running and will gleefully tromp all over the available ports database.
This makes MacPorts behave in undefined ways until a fresh complete
copy is retrieved.
Because of 1 and 2 it is possible to get the salt-minion into a state where
`salt mac-machine pkg./something/` won't want to return. Use
`salt-run jobs.active`
on the master to check for potentially long-running calls to `port`.
Finally, ports database updates are always handled with `port selfupdate`
as opposed to `port sync`. This makes sense in the MacPorts user community
but may confuse experienced Linux admins as Linux package managers
don't upgrade the packaging software when doing a package database update.
In other words `salt mac-machine pkg.refresh_db` is more like
`apt-get update; apt-get upgrade dpkg apt-get` than simply `apt-get update`.
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
import re
# Import salt libs
import salt.utils
from salt.exceptions import (
CommandExecutionError
)
log = logging.getLogger(__name__)
LIST_ACTIVE_ONLY = True
__virtualname__ = 'pkg'
def __virtual__():
    '''
    Load this module only on Mac OS systems where the MacPorts ``port``
    binary is available on the PATH.
    '''
    has_port = salt.utils.which('port')
    if has_port and __grains__['os'] == 'MacOS':
        return __virtualname__
    return False
def _list(query=''):
    '''
    Run ``port list <query>`` and return a dict of {package: version}.
    Raises CommandExecutionError, carrying the command output, on failure.
    '''
    ret = {}
    cmd = 'port list {0}'.format(query)
    call = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
    if call['retcode'] != 0:
        # Collect whatever output is available for the error message.
        comment = ''
        if 'stderr' in call:
            comment += call['stderr']
        if 'stdout' in call:
            comment += call['stdout']
        raise CommandExecutionError(
            '{0}'.format(comment)
        )
    else:
        out = call['stdout']
    for line in out.splitlines():
        try:
            # Each line holds at least: name, version, category; the first
            # character of the version field (the '@' marker) is dropped.
            name, version_num, category = re.split(r'\s+', line.lstrip())[0:3]
            version_num = version_num[1:]
        except ValueError:
            # Skip lines that do not have the expected three fields.
            continue
        ret[name] = version_num
    return ret
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the packages currently installed in a dict::
        {'<package_name>': '<version>'}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_pkgs
    '''
    versions_as_list = salt.utils.is_true(versions_as_list)
    # 'removed', 'purge_desired' not yet implemented or not applicable
    if any([salt.utils.is_true(kwargs.get(x))
            for x in ('removed', 'purge_desired')]):
        return {}
    if 'pkg.list_pkgs' in __context__:
        # Serve from the per-run cache when available.
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        else:
            ret = copy.deepcopy(__context__['pkg.list_pkgs'])
            __salt__['pkg_resource.stringify'](ret)
            return ret
    ret = {}
    cmd = ['port', 'installed']
    out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
    for line in out.splitlines():
        try:
            # Fields: name, version (leading '@' dropped), activation flag.
            name, version_num, active = re.split(r'\s+', line.lstrip())[0:3]
            version_num = version_num[1:]
        except ValueError:
            continue
        # Honour the module-level switch to report active ports only.
        if not LIST_ACTIVE_ONLY or active == '(active)':
            __salt__['pkg_resource.add_pkg'](ret, name, version_num)
    __salt__['pkg_resource.sort_pkglist'](ret)
    # Cache the raw (lists-of-versions) result for later calls.
    __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
def version(*names, **kwargs):
    '''
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3>
    '''
    # Thin wrapper: delegate to the shared pkg_resource helper.
    return __salt__['pkg_resource.version'](*names, **kwargs)
def latest_version(*names, **kwargs):
    '''
    Return the latest version of the named package available for upgrade or
    installation
    Options:
    refresh
        Update ports with ``port selfupdate``
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package1> <package2> <package3>
    '''
    if salt.utils.is_true(kwargs.get('refresh', True)):
        refresh_db()
    available = _list(' '.join(names)) or {}
    installed = __salt__['pkg.list_pkgs']() or {}
    ret = {}
    for k, v in available.items():
        # Report the available version only when it is an actual upgrade
        # (or the package is not installed at all); otherwise ''.
        if k not in installed or salt.utils.compare_versions(ver1=installed[k], oper='<', ver2=v):
            ret[k] = v
        else:
            ret[k] = ''
    # Return a string if only one package name passed
    if len(names) == 1:
        return ret[names[0]]
    return ret
# available_version is being deprecated
available_version = latest_version
def remove(name=None, pkgs=None, **kwargs):
    '''
    Removes packages with ``port uninstall``.
    name
        The name of the package to be deleted.
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    .. versionadded:: 0.16.0
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    pkg_params = __salt__['pkg_resource.parse_targets'](name,
                                                        pkgs,
                                                        **kwargs)[0]
    old = list_pkgs()
    # Only try to uninstall ports that are actually installed.
    targets = [x for x in pkg_params if x in old]
    if not targets:
        return {}
    cmd = ['port', 'uninstall']
    cmd.extend(targets)
    __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    # Drop the cached package list so the post-state is re-queried.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    return salt.utils.compare_dicts(old, new)
def install(name=None, refresh=False, pkgs=None, **kwargs):
    '''
    Install the passed package(s) with ``port install``
    name
        The name of the formula to be installed. Note that this parameter is
        ignored if "pkgs" is passed.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <package name>
    version
        Specify a version to pkg to install. Ignored if pkgs is specified.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <package name>
            salt '*' pkg.install git-core version='1.8.5.5'
    variant
        Specify a variant to pkg to install. Ignored if pkgs is specified.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <package name>
            salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre'
    Multiple Package Installation Options:
    pkgs
        A list of formulas to install. Must be passed as a python list.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install pkgs='["foo","bar"]'
            salt '*' pkg.install pkgs='["foo@1.2","bar"]'
            salt '*' pkg.install pkgs='["foo@1.2+ssl","bar@2.3"]'
    Returns a dict containing the new package names and versions::
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.install 'package package package'
    '''
    pkg_params, pkg_type = \
        __salt__['pkg_resource.parse_targets'](name,
                                               pkgs,
                                               {})
    if salt.utils.is_true(refresh):
        refresh_db()
    # Handle version kwarg for a single package target
    if pkgs is None:
        version_num = kwargs.get('version')
        variant_spec = kwargs.get('variant')
        # Build the MacPorts spec suffix, e.g. "@1.8.5.5+doc+pcre".
        spec = None
        if version_num:
            spec = (spec or '') + '@' + version_num
        if variant_spec:
            spec = (spec or '') + variant_spec
        pkg_params = {name: spec}
    if pkg_params is None or len(pkg_params) == 0:
        return {}
    # One "name[@version][+variants]" token per requested port.
    formulas_array = []
    for pname, pparams in pkg_params.items():
        formulas_array.append(pname + (pparams or ''))
    old = list_pkgs()
    cmd = ['port', 'install']
    cmd.extend(formulas_array)
    __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
    # Invalidate the cached package list before re-querying.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    return salt.utils.compare_dicts(old, new)
def list_upgrades(refresh=True):
    '''
    Check whether or not an upgrade is available for all packages
    Options:
    refresh
        Update ports with ``port selfupdate``
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_upgrades
    '''
    if refresh:
        refresh_db()
    # ``port list outdated`` yields {package: candidate_version}.
    return _list('outdated')
def upgrade_available(pkg, refresh=True):
    '''
    Check whether or not an upgrade is available for a given package
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade_available <package name>
    '''
    outdated = list_upgrades(refresh=refresh)
    return pkg in outdated
def refresh_db():
    '''
    Update the ports database with ``port selfupdate``.
    Raises CommandExecutionError, carrying the command output, when the
    command exits non-zero.
    '''
    call = __salt__['cmd.run_all']('port selfupdate', output_loglevel='trace')
    if call['retcode'] != 0:
        # Collect both streams for the error message, matching _list().
        comment = ''
        if 'stderr' in call:
            comment += call['stderr']
        if 'stdout' in call:
            comment += call['stdout']
        raise CommandExecutionError(
            '{0}'.format(comment)
        )
def upgrade(refresh=True):  # pylint: disable=W0613
    '''
    Run a full upgrade using MacPorts 'port upgrade outdated'
    Options:
    refresh
        Update ports with ``port selfupdate``
    Return a dict containing the new package names and versions::
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.upgrade
    '''
    # NOTE: the previous implementation built a {'changes', 'result',
    # 'comment'} dict that was never used; it has been removed.
    if refresh:
        refresh_db()
    old = list_pkgs()
    cmd = ['port', 'upgrade', 'outdated']
    __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    # Invalidate the cached package list before re-querying.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    return salt.utils.compare_dicts(old, new)
| 26.079327 | 102 | 0.606968 |
79542db3c22af7e3161cc18f898a7c1c064d9aa7 | 923 | py | Python | utils/data_test.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
] | 385 | 2018-07-02T22:21:25.000Z | 2022-03-28T13:12:47.000Z | utils/data_test.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
] | 102 | 2018-08-01T10:40:40.000Z | 2022-03-16T10:32:44.000Z | utils/data_test.py | libai2019/dataset-api | 2f793821864f32bd210c17060a70682488bb74e0 | [
"Apache-2.0"
] | 98 | 2018-07-12T18:36:42.000Z | 2022-03-20T04:38:03.000Z | import cv2
import data
import utils as uts
from collections import OrderedDict
def stereo_rectify_test():
image_name = 'test/180602_015140124'
dataset = data.ApolloScape(scale=1.0, use_stereo=True)
images = OrderedDict([])
image_size = dataset._data_config['image_size']
for cam_name in ['Camera_5', 'Camera_6']:
images[cam_name] = cv2.imread('%s_%s.jpg' % (image_name, cam_name))
images[cam_name] = cv2.resize(
images[cam_name], (image_size[1], image_size[0]))
images[cam_name] = dataset.stereo_rectify(images[cam_name], cam_name)
for cam_name in ['Camera_5', 'Camera_6']:
images[cam_name + '_crop'] = uts.crop_image(images[cam_name],
dataset._data_config['stereo_crop'])
uts.plot_images(images, layout=[2, 2])
if __name__ == '__main__':
stereo_rectify_test()
print('test data pass')
| 32.964286 | 88 | 0.646804 |
79542dd8b4470adf5b21cc1fbfedd3366e36aebf | 723 | py | Python | code/matching_track/train_eval/merge_score2.py | seungkee/2nd-place-solution-to-facebook-image-similarity-matching-track | 716667bf416239f448e4ea2730a2cc5146536719 | [
"Apache-2.0"
] | 13 | 2021-12-07T17:15:10.000Z | 2022-02-23T08:45:58.000Z | code/matching_track/train_eval/merge_score2.py | seungkee/2nd-place-solution-to-facebook-image-similarity-matching-track | 716667bf416239f448e4ea2730a2cc5146536719 | [
"Apache-2.0"
] | 2 | 2021-12-16T14:33:10.000Z | 2021-12-28T07:15:33.000Z | code/matching_track/train_eval/merge_score2.py | seungkee/2nd-place-solution-to-facebook-image-similarity-matching-track | 716667bf416239f448e4ea2730a2cc5146536719 | [
"Apache-2.0"
] | 4 | 2021-12-08T07:52:28.000Z | 2022-03-29T05:50:38.000Z | import pandas as pd
import numpy as np
p1='repo/0/matching-1009-from-1008-from-1001-nochange'
p2='repo/1/matching-1009-from-1008-from-1001-nochange'
p3='repo/2/matching-1009-from-1008-from-1001-nochange_000002'
df1_1=pd.read_csv(p1+'/final_cand_n.csv_halfeval.csv')
df2_1=pd.read_csv(p2+'/final_cand_n.csv_halfeval.csv')
df3_1=pd.read_csv(p3+'/final_cand_n.csv_halfeval.csv')
df1_1['score']=(np.array(df1_1['score'])+np.array(df2_1['score'])+np.array(df3_1['score']))/3.0
df=df1_1.drop_duplicates(subset=['query_id','reference_id','score'],keep='last').reset_index(drop=True)
idx = df.groupby(['query_id'])['score'].transform(max) == df['score']
df = df[idx].reset_index(drop=True)
df.to_csv(f'final.csv',index=False)
| 38.052632 | 103 | 0.745505 |
79542e7dfdfa76a3856956ca5007f52889a74838 | 1,205 | py | Python | application.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 1 | 2017-04-26T18:51:43.000Z | 2017-04-26T18:51:43.000Z | application.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null | application.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | null | null | null | import os, sys
from pprint import pprint
sys.path.append('lib')
import webapp2
import modules
import settings
# Read the landing page once at import time; the handle is closed promptly
# instead of being leaked by an anonymous open().read().
with open('home.html') as _home_fid:
    HOMEPAGE = _home_fid.read()
class MainPage(webapp2.RequestHandler):
    """Serve the pre-loaded dashboard landing page for GET /."""
    def get(self):
        self.response.write(HOMEPAGE)
# URL routing: the dashboard page, JSON APIs, and task-queue worker endpoints
# for the GitHub-issues and StackOverflow modules.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/api/github/', modules.github_issues.api.Index),
    ('/api/github/snapshots/', modules.github_issues.api.Snapshots),
    ('/api/github/latest/', modules.github_issues.api.Latest),
    ('/api/github/snapshot_status/', modules.github_issues.api.SnapshotStatus),
    ('/tasks/github/take_snapshot/', modules.github_issues.tasks.SnapshotWorker),
    ('/tasks/github/delete/', modules.github_issues.tasks.DeleteAllWorker),
    ('/api/stackoverflow/', modules.stackoverflow.api.Index),
    ('/api/stackoverflow/snapshot_status/', modules.stackoverflow.api.SnapshotStatus),
    ('/tasks/stackoverflow/take_snapshot/', modules.stackoverflow.tasks.SnapshotWorker)
])
])
# Select the settings profile from the APPENGINE_CONF environment variable.
if os.getenv('APPENGINE_CONF') == 'DEV':
    # Development settings.
    app.config = settings.Development
elif os.getenv('APPENGINE_CONF') == 'TEST':
    # Testing settings.
    app.config = settings.Testing
else:
    # Default to production settings.
    app.config = settings.Production
| 28.690476 | 87 | 0.721162 |
79542f8437840752c8d1cbc972dd03aa9cb4e3ca | 4,247 | py | Python | core/loss.py | verages/YOLOv4_light | b8f707a7ab5c2f3b2fd58d34e287e6b28a625641 | [
"MIT"
] | null | null | null | core/loss.py | verages/YOLOv4_light | b8f707a7ab5c2f3b2fd58d34e287e6b28a625641 | [
"MIT"
] | 1 | 2022-02-10T00:11:51.000Z | 2022-02-10T00:11:51.000Z | core/loss.py | verages/YOLOv4_light | b8f707a7ab5c2f3b2fd58d34e287e6b28a625641 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Brief: loss相关
from core.ious import box_ciou, box_iou
from nets.yolo import yolo_head
import config.config as cfg
import tensorflow as tf
from tensorflow.keras import losses
def smooth_labels(y_true, e):
    """Apply label smoothing: q'(y|x) = (1 - e) * q(y|x) + e / K.
    The one-hot target distribution q(y|x) is mixed with a uniform
    distribution u(y) = 1 / K (K = number of classes, the size of the last
    axis) using weight ``e``, which injects a small amount of label noise.
    :param y_true: target tensor; last axis holds the K class scores
    :param e: smoothing factor, a float in [0, 1]
    :return: smoothed target tensor
    """
    num_classes = tf.cast(tf.shape(y_true)[-1], dtype=tf.float32)
    eps = tf.constant(e, dtype=tf.float32)
    return y_true * (1.0 - eps) + eps / num_classes
def focal_loss(y_true, y_pred, alpha=1, gamma=2):
    """
    Focal-loss weighting (Lin et al.) that balances positive/negative
    samples and down-weights easy examples.
    With pt = p if y == 1 else (1 - p), the full focal loss is
    FL(pt) = -alpha * (1 - pt)^gamma * log(pt).
    NOTE: this function returns only the modulating weight
    alpha * |y_true - y_pred|^gamma; the cross-entropy (log) term is
    multiplied in by the caller (see the confidence loss in YoloLoss).
    :param y_true: ground-truth tensor
    :param y_pred: prediction tensor
    :param alpha: balancing factor in [0, 1]
    :param gamma: focusing exponent
    :return: element-wise focal weight
    """
    return alpha * tf.pow(tf.abs(y_true - y_pred), gamma)
def YoloLoss(anchors, label_smooth=cfg.label_smooth):
    """Return a Keras-compatible loss closure for one YOLO output scale.
    :param anchors: anchor boxes for this detection scale
    :param label_smooth: label-smoothing factor; falsy value disables it
    """
    def compute_loss(y_true, y_pred):
        """
        Compute the total loss for one scale: CIoU box regression loss +
        focal-weighted confidence loss + classification loss.
        :param y_true: ground-truth tensor for this scale
        :param y_pred: raw network output for this scale
        :return: scalar total loss
        """
        # 1. Decode y_pred -> boxes, objectness and per-class scores.
        # y_pred: (batch_size, grid, grid, anchors * (x, y, w, h, obj, ...cls))
        pred_box, grid = yolo_head(y_pred, anchors, calc_loss=True)
        pred_conf = y_pred[..., 4:5]
        pred_class = y_pred[..., 5:]
        true_conf = y_true[..., 4:5]
        true_class = y_true[..., 5:]
        if label_smooth:
            true_class = smooth_labels(true_class, label_smooth)
        # Scale (2 - w*h) gives small boxes a larger share of the total
        # loss; this coefficient is a tunable hyper-parameter.
        box_loss_scale = 2 - y_true[..., 2:3] * y_true[..., 3:4]
        # Build the negative-sample (ignore) mask, starting from an empty
        # dynamically-sized TensorArray.
        ignore_mask = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True)
        true_conf_bool = tf.cast(true_conf, tf.bool)
        # Compute ignore_mask for every image in the batch.
        def loop_body(b, ignore_mask):
            # Only entries where true_conf_bool is True carry valid boxes;
            # true_box ends up with shape [box_num, 4].
            true_box = tf.boolean_mask(y_true[b, ..., 0:4], true_conf_bool[b, ..., 0])
            # IoU between predicted boxes and ground-truth boxes
            # (normalized xywh image fractions).
            iou = box_iou(pred_box[b], true_box)
            # Best-matching prediction IoU for each true box.
            best_iou = tf.reduce_max(iou, axis=-1)
            # Predictions whose best IoU exceeds the threshold are excluded
            # from the loss, which balances positive/negative samples.
            ignore_mask = ignore_mask.write(b, tf.cast(best_iou < cfg.ignore_thresh, tf.float32))
            return b + 1, ignore_mask
        batch_size = tf.shape(y_pred)[0]
        # tf.while_loop args: 1) condition (b < batch_size), 2) loop body,
        # 3) initial loop variables.
        _, ignore_mask = tf.while_loop(lambda b, ignore_mask: b < batch_size, loop_body, [0, ignore_mask])
        # Stack the per-image masks into a single tensor.
        ignore_mask = ignore_mask.stack()
        ignore_mask = tf.expand_dims(ignore_mask, -1)  # add trailing axis for the loss math, e.g. (b,13,13,3,1,1)
        # CIoU regression loss over ground-truth boxes.
        raw_true_box = y_true[..., 0:4]
        ciou = box_ciou(pred_box, raw_true_box)
        ciou_loss = true_conf * box_loss_scale * (1 - ciou)
        # Where a box exists, take cross-entropy against confidence 1.
        # Where no box exists and best_iou < ignore_thresh, the cell counts
        # as a negative sample; the threshold caps the number of negatives.
        conf_loss = tf.nn.sigmoid_cross_entropy_with_logits(true_conf, pred_conf)
        respond_bbox = true_conf
        respond_bgd = (1 - true_conf) * ignore_mask
        # Focal weighting of the confidence loss.
        conf_focal = focal_loss(true_conf, pred_conf)
        confidence_loss = conf_focal * (respond_bbox * conf_loss + respond_bgd * conf_loss)
        # Classification loss, only for cells that contain an object.
        class_loss = true_conf * tf.nn.sigmoid_cross_entropy_with_logits(true_class, pred_class)
        # Average each term over the batch.
        location_loss = tf.reduce_sum(ciou_loss) / tf.cast(batch_size, tf.float32)
        confidence_loss = tf.reduce_sum(confidence_loss) / tf.cast(batch_size, tf.float32)
        class_loss = tf.reduce_sum(class_loss) / tf.cast(batch_size, tf.float32)
        return location_loss + confidence_loss + class_loss
    return compute_loss
| 34.811475 | 106 | 0.626089 |
79542f85c7a6de0c6a63179a84f1ffff7b8b8fc9 | 4,389 | py | Python | textsemantics/textrank/pagerank_weighted.py | PrimozGodec/text-semantics | 194b0bce7adcc8937a30643959681f0b175927ab | [
"MIT"
] | 11 | 2021-01-27T07:43:33.000Z | 2021-12-18T11:58:00.000Z | textsemantics/textrank/pagerank_weighted.py | PrimozGodec/text-semantics | 194b0bce7adcc8937a30643959681f0b175927ab | [
"MIT"
] | 32 | 2020-11-24T12:42:46.000Z | 2021-12-06T12:01:22.000Z | textsemantics/textrank/pagerank_weighted.py | PrimozGodec/text-semantics | 194b0bce7adcc8937a30643959681f0b175927ab | [
"MIT"
] | 3 | 2020-11-10T15:29:16.000Z | 2020-11-28T11:42:52.000Z | """
Module was removed from gensim - this is a fixed copy.
This module calculate PageRank [1]_ based on wordgraph.
.. [1] https://en.wikipedia.org/wiki/PageRank
Examples
--------
Calculate Pagerank for words
.. sourcecode:: pycon
>>> from textsemantics.textrank.keywords import get_graph
>>> from textsemantics.textrank.pagerank_weighted import pagerank_weighted
>>> graph = get_graph("The road to hell is paved with good intentions.")
>>> # result will looks like {'good': 0.70432858653171504, 'hell': 0.051128871128006126, ...}
>>> result = pagerank_weighted(graph)
Build matrix from graph
.. sourcecode:: pycon
>>> from textsemantics.textrank.pagerank_weighted import build_adjacency_matrix
>>> build_adjacency_matrix(graph).todense()
matrix([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
import numpy
from scipy.linalg import eig
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import eigs
from six.moves import range
def pagerank_weighted(graph, damping=0.85):
    """Get dictionary of `graph` nodes and its ranks.
    Parameters
    ----------
    graph : :class:`~textsemantics.textrank.graph.Graph`
        Given graph.
    damping : float
        Damping parameter, optional
    Returns
    -------
    dict
        Nodes of `graph` as keys, its ranks as values.
    """
    # Transition matrix scaled by the damping factor.
    coeff_adjacency_matrix = build_adjacency_matrix(graph, coeff=damping)
    # Teleportation term: uniform probability over all nodes.
    probabilities = (1 - damping) / float(len(graph))
    pagerank_matrix = coeff_adjacency_matrix.toarray()
    # trying to minimize memory allocations
    pagerank_matrix += probabilities
    vec = principal_eigenvector(pagerank_matrix.T)
    # Because pagerank_matrix is positive, vec is always real (i.e. not complex)
    return process_results(graph, vec.real)
def build_adjacency_matrix(graph, coeff=1):
    """Get matrix representation of given `graph`.
    Each row is normalized by the node's total outgoing edge weight and
    scaled by ``coeff``; zero-weight edges are skipped.
    Parameters
    ----------
    graph : :class:`~textsemantics.textrank.graph.Graph`
        Given graph.
    coeff : float
        Matrix values coefficient, optional.
    Returns
    -------
    :class:`scipy.sparse.csr_matrix`, shape = [n, n]
        Adjacency matrix of given `graph`, n is number of nodes.
    """
    nodes = graph.nodes()
    node_count = len(nodes)
    node_index = {node: idx for idx, node in enumerate(nodes)}
    rows, cols, weights = [], [], []
    for i, current_node in enumerate(nodes):
        neighbors = graph.neighbors(current_node)
        total_weight = sum(graph.edge_weight((current_node, nb)) for nb in neighbors)
        for neighbor in neighbors:
            edge_weight = float(graph.edge_weight((current_node, neighbor)))
            if edge_weight != 0.0:
                rows.append(i)
                cols.append(node_index[neighbor])
                weights.append(coeff * edge_weight / total_weight)
    return csr_matrix((weights, (rows, cols)), shape=(node_count, node_count))
def principal_eigenvector(a):
    """Get eigenvector of square matrix `a`.
    Parameters
    ----------
    a : numpy.ndarray, shape = [n, n]
        Given matrix.
    Returns
    -------
    numpy.ndarray, shape = [n, ]
        Eigenvector associated with the dominant eigenvalue of `a`.
    """
    # `eigs` computes just the single eigenvector we need, but it cannot
    # handle matrices with dim < 3 (see #441, #438), so small matrices
    # fall back to the dense solver and we pick the eigenvalue of largest
    # magnitude ourselves.
    if len(a) < 3:
        eigenvalues, eigenvectors = eig(a)
        dominant = numpy.abs(eigenvalues).argmax()
        return eigenvectors[:, dominant]
    eigenvalues, eigenvectors = eigs(a, k=1)
    return eigenvectors[:, 0]
def process_results(graph, vec):
    """Pair `graph` nodes with the absolute values of an eigenvector.
    Helper for :func:`~textsemantics.textrank.pagerank_weighted.pagerank_weighted`.
    Parameters
    ----------
    graph : :class:`~textsemantics.textrank.graph.Graph`
        Given graph.
    vec : numpy.ndarray, shape = [n, ]
        Given eigenvector, n is number of nodes of `graph`.
    Returns
    -------
    dict
        Graph nodes as keys, corresponding elements of eigenvector as values.
    """
    return {node: abs(vec[i]) for i, node in enumerate(graph.nodes())}
| 27.43125 | 99 | 0.622465 |
79542fd45f9ff1535d363a88a69d9245293a3837 | 25,305 | py | Python | biopal/io/data_io.py | rpitonak/BioPAL | 08c57b3ba2d8e5a06105f930b1067c2541636bb6 | [
"MIT"
] | null | null | null | biopal/io/data_io.py | rpitonak/BioPAL | 08c57b3ba2d8e5a06105f930b1067c2541636bb6 | [
"MIT"
] | null | null | null | biopal/io/data_io.py | rpitonak/BioPAL | 08c57b3ba2d8e5a06105f930b1067c2541636bb6 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: BioPAL <biopal@esa.int>
# SPDX-License-Identifier: MIT
import os
import io
import struct
import logging
import operator
import numpy as np
from scipy.interpolate import interp2d
from biopal.io.xml_io import raster_info
from biopal.utility.constants import EPSG_CODE_LLA
from arepytools.io.productfolder import ProductFolder
from arepytools.timing.precisedatetime import PreciseDateTime
from osgeo import (
gdal,
osr,
)
###############################################################################
# Fields to write and read the Biomass Header in the Binary Files:#############
_STRING_ENCODING = "utf-8"
_date_str_STR_LEN = 33
_date_str_FORMAT = "{}s".format(_date_str_STR_LEN)
_GEO_CORNERS_FORMAT = "ffff" # lon_min, lon_max, lat_min, lat_max
_UNIQUE_ACQ_ID_STR_LEN = 47
_UNIQUE_ACQ_ID_FORMAT = "{}s".format(_UNIQUE_ACQ_ID_STR_LEN)
_RESOLUTIONS_FORMAT = "ff" # resolution_m_slant_rg, resolution_m_az
_SENSOR_VELOCITY_FORMAT = "f" # sensor_velocity
_HEADER_FORMAT = (
_date_str_FORMAT + _GEO_CORNERS_FORMAT + _UNIQUE_ACQ_ID_FORMAT + _RESOLUTIONS_FORMAT + _SENSOR_VELOCITY_FORMAT
)
_HEADER_FORMAT_SIZE = struct.calcsize(_HEADER_FORMAT)
###############################################################################
def getBiomassHeaderOffsetSize(stack_composition):
    # The header size is fixed by _HEADER_FORMAT; `stack_composition` is
    # accepted for interface compatibility but not used.
    return _HEADER_FORMAT_SIZE
def writeBiomassHeader(
    product_folder,
    channel_idx,
    date_str,
    lon_min,
    lon_max,
    lat_min,
    lat_max,
    unique_acq_id,
    resolution_m_slant_rg,
    resolution_m_az,
    sensor_velocity,
):
    """Pack the BIOMASS header fields and write them at the start of the
    raster file of the given product-folder channel.
    ``date_str`` must be a UTC string of exactly _date_str_STR_LEN
    characters (not a PreciseDateTime object). The channel's RasterInfo
    must have been created with a header offset of _HEADER_FORMAT_SIZE
    bytes (see getBiomassHeaderOffsetSize); a ValueError is raised (and
    logged) otherwise.
    """
    if not isinstance(date_str, str):
        error_message = "date_str should be an Utc string, not a PreciseDateTime object"
        logging.error(error_message)
        raise ValueError(error_message)
    if len(date_str) != _date_str_STR_LEN:
        error_message = "date_str has a different length from an Utc date"
        logging.error(error_message)
        raise ValueError(error_message)
    # encode all the strings with the _STRING_ENCODING format
    encoded_date_str = date_str.encode(_STRING_ENCODING)
    encoded_unique_acq_id = unique_acq_id.encode(_STRING_ENCODING)
    # fill the struct with all data to write
    packed_data = struct.pack(
        _HEADER_FORMAT,
        encoded_date_str,
        lon_min,
        lon_max,
        lat_min,
        lat_max,
        encoded_unique_acq_id,
        resolution_m_slant_rg,
        resolution_m_az,
        sensor_velocity,
    )
    raster_info_read = (
        product_folder.get_channel(channel_idx).metadata.get_metadata_channels(0).get_element("RasterInfo")
    )
    if raster_info_read.header_offset_bytes != _HEADER_FORMAT_SIZE:
        error_message = "Incompatible header offset size, please follow this flow: step 1 -> getBiomassHeaderOffsetSize; step 2 -> append_channel to product_folder with offset from step 1; step 3 -> execute this function"
        logging.error(error_message)
        raise ValueError(error_message)
    # Write the packed header at offset 0 of the channel's raster file.
    raster_file = os.path.join(product_folder.pf_dir_path, raster_info_read.file_name)
    with open(raster_file, "wb") as raster_fid:
        raster_fid.write(packed_data)
def readBiomassHeader(product_folder, channel_idx):
    """Read the BIOMASS binary header of one product-folder channel.
    Resolves the channel's raster file through its RasterInfo metadata and
    delegates the actual parsing to readBiomassHeader_core; returns the
    same field tuple.
    """
    data_channel_obj = product_folder.get_channel(channel_idx)
    metadata_obj = data_channel_obj.metadata
    metadatachannel_obj = metadata_obj.get_metadata_channels(0)
    ri = metadatachannel_obj.get_element("RasterInfo")
    raster_file = os.path.join(product_folder.pf_dir_path, ri.file_name)
    (
        date_str,
        lon_min,
        lon_max,
        lat_min,
        lat_max,
        unique_acq_id,
        resolution_m_slant_rg,
        resolution_m_az,
        sensor_velocity,
    ) = readBiomassHeader_core(raster_file)
    return (
        date_str,
        lon_min,
        lon_max,
        lat_min,
        lat_max,
        unique_acq_id,
        resolution_m_slant_rg,
        resolution_m_az,
        sensor_velocity,
    )
def readBiomassHeader_core(raster_file):
    """Unpack the BIOMASS header from the first _HEADER_FORMAT_SIZE bytes
    of ``raster_file``.
    Returns the tuple (date_str, lon_min, lon_max, lat_min, lat_max,
    unique_acq_id, resolution_m_slant_rg, resolution_m_az,
    sensor_velocity), with the strings decoded from _STRING_ENCODING.
    """
    # get the product folder RasterInfo and retrive the raster_file name
    # read the raster file (just the header)
    with open(raster_file, "br") as fid:
        packed_data = fid.read(_HEADER_FORMAT_SIZE)
    (
        encoded_date_str,
        lon_min,
        lon_max,
        lat_min,
        lat_max,
        encoded_unique_acq_id,
        resolution_m_slant_rg,
        resolution_m_az,
        sensor_velocity,
    ) = struct.unpack(_HEADER_FORMAT, packed_data)
    # Decode the fixed-length byte strings back to text.
    date_str = encoded_date_str.decode(_STRING_ENCODING)
    unique_acq_id = encoded_unique_acq_id.decode(_STRING_ENCODING)
    return (
        date_str,
        lon_min,
        lon_max,
        lat_min,
        lat_max,
        unique_acq_id,
        resolution_m_slant_rg,
        resolution_m_az,
        sensor_velocity,
    )
def read_data(folder, pf_name):
    """Read a polarimetric data stack from a product folder.

    Each channel holds one polarization (identified via "SwathInfo"). The
    cross-polar channels hv and vh are combined into a single "vh" entry
    (summed and divided by sqrt(2); NOTE(review): the original inline
    comment said "(vh+hv)/2" -- confirm the sqrt(2) scaling is intended).

    Args:
        folder: base directory containing the product folder.
        pf_name: product folder name.

    Returns:
        (data_read, num_samples, num_lines, pixel_spacing_slant_rg,
         pixel_spacing_az, carrier_frequency_hz, range_bandwidth_hz,
         master_id, lines_start_utc)
        where data_read maps polarization ("hh", "vh", "vv") to a matrix.

    Raises:
        ValueError: if required metadata is missing/malformed or any of
            the four polarizations hh, hv, vh, vv is absent.
    """
    data_pf_name = os.path.join(folder, pf_name)
    pf = ProductFolder(data_pf_name, "r")
    number_of_pols = pf.get_number_channels()
    data_read = {}
    polid_found = []
    for pol_channel_idx in range(number_of_pols):
        # prepare the metadata elements
        data_channel_obj = pf.get_channel(pol_channel_idx)
        metadata_obj = data_channel_obj.metadata
        metadatachannel_obj = metadata_obj.get_metadata_channels(0)

        # get the ID of the master acquisition (DataSetInfo is fetched once;
        # it also carries the carrier frequency):
        di = metadatachannel_obj.get_element("DataSetInfo")
        if not di:
            raise ValueError("data product folder should contain the DataSetInfo to retrive the MASTER ID")
        if di.description.find("Master_swath_") != 0:
            raise ValueError(
                'DataSetInfo description not recognized: it should be a string as "Master_swath_IdOfTheMaster"'
            )
        master_id = di.description[13:]
        carrier_frequency_hz = di.fc_hz

        # Raster Info
        ri = metadatachannel_obj.get_element("RasterInfo")
        num_samples = ri.samples
        num_lines = ri.lines
        pixel_spacing_slant_rg = ri.samples_step
        pixel_spacing_az = ri.lines_step
        lines_start_utc = str(ri.lines_start)

        # SwathInfo
        si = metadatachannel_obj.get_element("SwathInfo")
        if not si:
            raise ValueError("data product folder should contain the SwathInfo to retrive the polarization")
        pol_id = si.polarization.name
        polid_found.append(pol_id)

        # Sampling constants
        sc = metadatachannel_obj.get_element("SamplingConstants")
        range_bandwidth_hz = sc.brg_hz

        # hv and vh are merged into a single "vh" entry
        if pol_id in ("hv", "vh"):
            if "vh" in data_read:
                # the other cross-polar channel is already stored: combine them
                data_read["vh"] = (data_read["vh"] + pf.read_data(pol_channel_idx).transpose()) / np.sqrt(2)
            else:
                # first cross-polar channel encountered: store it as-is
                data_read["vh"] = pf.read_data(pol_channel_idx).transpose()
        else:
            data_read[pol_id] = pf.read_data(pol_channel_idx).transpose()

    # All four polarizations must be present (the two original error
    # branches raised the same message, so they are merged here).
    if len(polid_found) < 4 or not {"hh", "hv", "vh", "vv"}.issubset(polid_found):
        raise ValueError(
            "Input data stack {} should contain #4 polarizations, hh, hv, vh, vv, only {} found ".format(
                pf_name, len(polid_found)
            )
        )
    return (
        data_read,
        num_samples,
        num_lines,
        pixel_spacing_slant_rg,
        pixel_spacing_az,
        carrier_frequency_hz,
        range_bandwidth_hz,
        master_id,
        lines_start_utc,
    )
def read_auxiliary_single_channel(folder, pf_name):
    """Read a single-channel auxiliary product (e.g. Incidence_Angle or
    Reference_height) and return it as a plain matrix (no dictionary).

    Returns None, with a warning logged, when the product folder is missing.
    """
    aux_pf_path = os.path.join(folder, pf_name)
    if not os.path.exists(aux_pf_path):
        logging.warning("Path " + aux_pf_path + " does not exist.")
        return None
    pf = ProductFolder(aux_pf_path, "r")
    num_channels = pf.get_number_channels()
    if num_channels > 1:
        raise ValueError(
            "Input auxiliary data is supposed to have just one channel, and not # {}".format(num_channels)
        )
    return pf.read_data(0).transpose()
def read_auxiliary_multi_channels(folder, pf_name, valid_acq_id_to_read=None, read_raster_info=False):
    """Read a multi-channel auxiliary product (e.g. KZ, off-nadir angles).

    The product folder is expected to contain "N" channels, one per
    acquisition in a stack; the acquisition name is read from the
    SwathInfo "Swath" field.

    Args:
        folder: base directory containing the product folder.
        pf_name: product folder name.
        valid_acq_id_to_read: optional collection of acquisition ids to
            keep; None means every channel is read.
        read_raster_info: when True, also return a raster_info object built
            from the last matching channel's RasterInfo.

    Returns:
        dict {acquisition_id: matrix}, or None (with a warning logged) when
        the path does not exist. When read_raster_info is True, a
        (data, raster_info) tuple is returned instead; raster_info is None
        when no channel matched (the original code raised UnboundLocalError
        in that case).
    """
    data_pf_name = os.path.join(folder, pf_name)
    data_read = None
    # Initialized up-front: the original left this unbound when no channel
    # matched valid_acq_id_to_read, crashing when read_raster_info=True.
    raster_info_obj = None
    if os.path.exists(data_pf_name):
        pf = ProductFolder(data_pf_name, "r")
        number_of_acq = pf.get_number_channels()
        data_read = {}
        for channel_idx in range(number_of_acq):
            # prepare the metadata elements
            data_channel_obj = pf.get_channel(channel_idx)
            metadata_obj = data_channel_obj.metadata
            metadatachannel_obj = metadata_obj.get_metadata_channels(0)
            # SwathInfo
            si = metadatachannel_obj.get_element("SwathInfo")
            if not si:
                raise ValueError("Input KZ and off_nadir should contain the SwathInfo to retrive the Swath ID")
            if valid_acq_id_to_read is None or (si.swath in valid_acq_id_to_read):
                data_read[si.swath] = pf.read_data(channel_idx).transpose()
                # Raster Info
                ri = metadatachannel_obj.get_element("RasterInfo")
                num_samples = ri.samples
                num_lines = ri.lines
                pixel_spacing_slant_rg = ri.samples_step
                pixel_spacing_az = ri.lines_step
                raster_info_obj = raster_info(
                    num_samples, num_lines, pixel_spacing_slant_rg, pixel_spacing_az, None, None, None, None, None,
                )
    else:
        logging.warning("Path " + data_pf_name + " does not exist.")
    if read_raster_info:
        return data_read, raster_info_obj
    else:
        return data_read
def read_ecef_grid(folder, pf_name):
    """Read an ECEF grid product.

    The product folder must contain exactly 3 channels holding the X, Y and
    Z coordinates. The coordinate name is parsed from the DataSetInfo
    Description field, which must match exactly a string like
    "Auxiliary data: X ECEF GRID [m]" (character 16 is the coordinate
    letter).

    Returns:
        dict keyed by coordinate id ("X", "Y" or "Z"), or None (with a
        warning logged) when the product folder does not exist.

    Raises:
        ValueError: wrong channel count, missing DataSetInfo, or an
            unparsable description.
    """
    data_pf_name = os.path.join(folder, pf_name)
    if os.path.exists(data_pf_name):
        pf = ProductFolder(data_pf_name, "r")
        number_of_coords = pf.get_number_channels()
        if number_of_coords != 3:
            raise ValueError(
                "Input ECEF GRID should contain #3 channels with X,Y and Z coordinates: #{} channels have been found.".format(
                    number_of_coords
                )
            )
        coordinates_read = {}
        for coord_channel_idx in range(number_of_coords):
            # prepare the metadata elements
            data_channel_obj = pf.get_channel(coord_channel_idx)
            metadata_obj = data_channel_obj.metadata
            metadatachannel_obj = metadata_obj.get_metadata_channels(0)
            # DataSetInfo
            di = metadatachannel_obj.get_element("DataSetInfo")
            if not di:
                raise ValueError("Input ECEF GRID should contain the DataSetInfo to retrive the Description")
            coord_id = di.description[16]
            # membership test replaces the original chained negations
            if coord_id not in ("X", "Y", "Z"):
                raise ValueError(
                    'Cannot retrive coordinate name from DataSetInfo description: description should be a string as: "Auxiliary data: X ECEF GRID [m]", instead it is: "'
                    + di.description
                    + '"'
                )
            coordinates_read[coord_id] = pf.read_data(coord_channel_idx).transpose()
    else:
        coordinates_read = None
        logging.warning("Path " + data_pf_name + " does not exist.")
    return coordinates_read
def tandemx_search_fnf_tiles(geographic_boundaries):
    """Determine which TanDEM-X FNF tiles intersect a lon/lat bounding box.

    Args:
        geographic_boundaries: namedlist with four fields: lon_min, lon_max,
            lat_min and lat_max (degrees).

    Returns:
        Three parallel lists: tile-name strings ("TDM_FNF_20_<lat><lon>"),
        GDAL-style 6-element geotransforms, and [lon, lat] tile extents in
        degrees.
    """
    # geographic_boundaries:
    # is a namedlist with four fields: lon_min, lon_max, lat_min and lat_max
    lon_raster_min = geographic_boundaries.lon_min
    lon_raster_max = geographic_boundaries.lon_max
    lat_raster_min = geographic_boundaries.lat_min
    lat_raster_max = geographic_boundaries.lat_max
    fnf_string_list = []
    geotransform_list = []
    tile_extent_lonlat_list = []
    # Tiles are always 1 deg tall; their width (and lon pixel spacing)
    # grows towards the poles, set band-by-band below.
    tile_extent_lat = 1 # deg
    pixel_spacing_lat = 1.8 / 3600 # deg
    # Lower-left latitude of every candidate tile row, offset by half a pixel.
    lat_start = np.arange(-89, 89, tile_extent_lat) + pixel_spacing_lat / 2
    tile_extent_lon_list = np.zeros(lat_start.shape, dtype=int)
    tile_extent_lon_list[np.logical_and(lat_start >= -89, lat_start < -80)] = 4 # deg
    tile_extent_lon_list[np.logical_and(lat_start >= -80, lat_start < -60)] = 2
    tile_extent_lon_list[np.logical_and(lat_start >= -60, lat_start < 60)] = 1
    tile_extent_lon_list[np.logical_and(lat_start >= 60, lat_start < 80)] = 2
    tile_extent_lon_list[np.logical_and(lat_start >= 80, lat_start < 89)] = 4
    pixel_spacing_lon_list = np.zeros(lat_start.shape, dtype=float)
    pixel_spacing_lon_list[np.logical_and(lat_start >= -89, lat_start < -80)] = 6.4 / 3600 # deg
    pixel_spacing_lon_list[np.logical_and(lat_start >= -80, lat_start < -60)] = 3.6 / 3600
    pixel_spacing_lon_list[np.logical_and(lat_start >= -60, lat_start < 60)] = 1.8 / 3600
    pixel_spacing_lon_list[np.logical_and(lat_start >= 60, lat_start < 80)] = 3.6 / 3600
    pixel_spacing_lon_list[np.logical_and(lat_start >= 80, lat_start < 89)] = 6.4 / 3600
    # Tile row names run S89..S01 then N00..N88, matching lat_start order.
    lat_tiles = ["S{:02d}".format(89 - l) for l in range(0, 89, tile_extent_lat)] + [
        "N{:02d}".format(l) for l in range(0, 89, tile_extent_lat)
    ]
    # Clip the candidate rows to the ones overlapping the requested box.
    lat_first_tile = np.max(lat_start[lat_raster_min > lat_start])
    lat_first_tile = np.where(lat_start == lat_first_tile)[0][0]
    lat_last_tile = np.min(lat_start[lat_raster_max <= lat_start])
    lat_last_tile = np.where(lat_start == lat_last_tile)[0][0]
    lat_start = lat_start[lat_first_tile:lat_last_tile]
    tile_extent_lon_list = tile_extent_lon_list[lat_first_tile:lat_last_tile]
    pixel_spacing_lon_list = pixel_spacing_lon_list[lat_first_tile:lat_last_tile]
    lat_tiles = lat_tiles[lat_first_tile:lat_last_tile]
    for lat_idx in np.arange(len(lat_start)):
        pixel_spacing_lon = pixel_spacing_lon_list[lat_idx]
        tile_extent_lon = tile_extent_lon_list[lat_idx]
        # Candidate tile columns for this latitude band (width varies per band).
        lon_start = np.arange(-180, 180, tile_extent_lon) - pixel_spacing_lon / 2
        lon_tiles = ["W{:03d}".format(180 - l) for l in range(0, 180, tile_extent_lon)] + [
            "E{:03d}".format(l) for l in range(0, 180, tile_extent_lon)
        ]
        # Clip the columns to the ones overlapping the requested box.
        lon_first_tile = np.max(lon_start[lon_raster_min > lon_start])
        lon_first_tile = np.where(lon_start == lon_first_tile)[0][0]
        lon_last_tile = np.min(lon_start[lon_raster_max <= lon_start])
        lon_last_tile = np.where(lon_start == lon_last_tile)[0][0]
        lon_start = lon_start[lon_first_tile:lon_last_tile]
        lon_tiles = lon_tiles[lon_first_tile:lon_last_tile]
        for lon_idx in np.arange(len(lon_start)):
            fnf_string = "TDM_FNF_20_" + lat_tiles[lat_idx] + lon_tiles[lon_idx]
            # GDAL geotransform: origin is the tile's upper-left corner,
            # with a negative lat step (north-up raster).
            geotransform = [
                lon_start[lon_idx],
                pixel_spacing_lon,
                0.0,
                lat_start[lat_idx] + tile_extent_lat,
                0.0,
                -pixel_spacing_lat,
            ]
            tile_extent_lon_lat = [tile_extent_lon, tile_extent_lat]
            fnf_string_list.append(fnf_string)
            geotransform_list.append(geotransform)
            tile_extent_lonlat_list.append(tile_extent_lon_lat)
    return fnf_string_list, geotransform_list, tile_extent_lonlat_list
def tandemx_fnf_read(fnf_catalogue, geographic_boundaries):
    """Load the TanDEM-X forest/non-forest (FNF) tiles covering an area.

    Args:
        fnf_catalogue: root folder of the TDM FNF catalogue.
        geographic_boundaries: namedlist with four fields: lon_min, lon_max,
            lat_min and lat_max (degrees).

    Returns:
        (fnf_tile_loaded_list, date_time, fnf_loaded_geotransform_list)
        where date_time is the most recent acquisition date found in the
        tiles' "_INF.txt" auxiliary files.
    """
    # Three-letter month tags used by the PreciseDateTime UTC string format
    # (replaces the original 12-branch elif chain, which also left month_str
    # unbound for an out-of-range month number).
    month_abbreviations = {
        1: "JAN", 2: "FEB", 3: "MAR", 4: "APR", 5: "MAY", 6: "JUN",
        7: "JUL", 8: "AUG", 9: "SEP", 10: "OCT", 11: "NOV", 12: "DEC",
    }
    fnf_string_list, geotransform_list, tile_extent_lonlat_list = tandemx_search_fnf_tiles(geographic_boundaries)
    fnf_tile_loaded_list = []
    fnf_loaded_geotransform_list = []
    # Most recent acquisition date over all tiles. The original reset this
    # to [] on every aux-file line, so max() compared a list against a
    # PreciseDateTime and previous dates were lost.
    date_time = None
    for tile_idx in np.arange(len(fnf_string_list)):
        fnf_path = os.path.join(fnf_catalogue, fnf_string_list[tile_idx], "FNF", fnf_string_list[tile_idx] + ".tiff")
        fnf_aux_inf_file_path = os.path.join(
            fnf_catalogue, fnf_string_list[tile_idx], "AUXFILES", fnf_string_list[tile_idx] + "_INF.txt",
        )
        input_image_driver = gdal.Open(fnf_path, 0)
        if input_image_driver is not None:
            Ny = input_image_driver.RasterYSize
            Nx = input_image_driver.RasterXSize
            fnf_mask = input_image_driver.ReadAsArray(0, 0, Nx, Ny)
            fnf_geotransform = input_image_driver.GetGeoTransform()
            # The tile geotransform must match the one predicted by
            # tandemx_search_fnf_tiles (up to floating-point epsilon).
            diff_list = list(map(operator.sub, list(fnf_geotransform), geotransform_list[tile_idx]))
            values_are_different = [coord_value for coord_value in diff_list if abs(coord_value) > np.finfo(float).eps]
            if not values_are_different:
                fnf_tile_loaded_list.append(fnf_mask)
                fnf_loaded_geotransform_list.append(fnf_geotransform)
            else:
                logging.warning("Error: inconsistency for tile" + fnf_string_list[tile_idx] + "\n")
        else:
            logging.warning("Error: tile" + fnf_string_list[tile_idx] + "not found \n")
        input_image_driver = None
        for line in io.open(fnf_aux_inf_file_path, newline="\r\n"):
            # line[12:16] is the year, line[17:19] the month, line[20:22] the day.
            month_str = month_abbreviations[int(line[17:19])]
            utc_string = line[20:22] + "-" + month_str + "-" + line[12:16] + " 00:00:00.000000000000"
            parsed_date = PreciseDateTime().set_from_utc_string(utc_string)
            # keep the most recent date across all lines and all tiles
            date_time = parsed_date if date_time is None else max(date_time, parsed_date)
    return fnf_tile_loaded_list, date_time, fnf_loaded_geotransform_list
def tandemx_fnf_write(out_fnf_path, fnf_raster, lon_raster, lat_raster):
    """Resample an FNF raster onto the TanDEM-X tiling and write GeoTIFF tiles.

    Args:
        out_fnf_path: output catalogue root; one "<tile>/FNF/" folder is
            created per intersecting tile.
        fnf_raster: forest/non-forest matrix sampled on (lat_raster, lon_raster).
        lon_raster, lat_raster: geographic axes of fnf_raster (degrees).
    """
    from collections import namedtuple

    lat_raster_min = np.min(lat_raster)
    lon_raster_min = np.min(lon_raster)
    lat_raster_max = np.max(lat_raster)
    lon_raster_max = np.max(lon_raster)
    # tandemx_search_fnf_tiles takes a single object exposing
    # lon_min/lon_max/lat_min/lat_max fields; the original code wrongly
    # passed four positional scalars, which raised a TypeError.
    _Boundaries = namedtuple("_Boundaries", ["lon_min", "lon_max", "lat_min", "lat_max"])
    fnf_string_list, geotransform_list, tile_extent_lonlat_list = tandemx_search_fnf_tiles(
        _Boundaries(lon_raster_min, lon_raster_max, lat_raster_min, lat_raster_max)
    )
    for tile_idx in np.arange(len(fnf_string_list)):
        fnf_path = os.path.join(out_fnf_path, fnf_string_list[tile_idx], "FNF", fnf_string_list[tile_idx] + ".tiff")
        directory = os.path.dirname(fnf_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Tile sampling axes derived from the tile geotransform and extent;
        # the extra step then clip keeps the end point inside the tile.
        lon_out = np.arange(
            geotransform_list[tile_idx][0],
            geotransform_list[tile_idx][0] + tile_extent_lonlat_list[tile_idx][0] + geotransform_list[tile_idx][1],
            geotransform_list[tile_idx][1],
        )
        lat_out = np.arange(
            geotransform_list[tile_idx][3],
            geotransform_list[tile_idx][3] - tile_extent_lonlat_list[tile_idx][1] + geotransform_list[tile_idx][5],
            geotransform_list[tile_idx][5],
        )
        lon_out = lon_out[lon_out <= geotransform_list[tile_idx][0] + tile_extent_lonlat_list[tile_idx][0]]
        lat_out = lat_out[lat_out >= geotransform_list[tile_idx][3] - tile_extent_lonlat_list[tile_idx][1]]
        # Interpolate onto the tile grid, then round back to integer classes.
        raster_interp = interp2d(lon_raster, lat_raster, fnf_raster, fill_value=0)
        raster_out = np.round(raster_interp(lon_out, lat_out))
        Nx, Ny = raster_out.shape
        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)
        driver = gdal.GetDriverByName("GTiff")
        outdata = driver.Create(fnf_path, Ny, Nx, 1, gdal.GDT_Byte, ["COMPRESS=LZW"])
        outdata.SetGeoTransform(geotransform_list[tile_idx])
        outdata.SetProjection(srs.ExportToWkt())
        outdata.GetRasterBand(1).WriteArray(raster_out)
        outdata.FlushCache()  # saves to disk
        outdata = None
def _apply_srs(outdata, projection):
    """Set the dataset projection: the given WKT, or EPSG_CODE_LLA by default."""
    if projection:
        outdata.SetProjection(projection)
    else:
        srs = osr.SpatialReference()
        # int() replaces np.int, which was removed from modern NumPy.
        srs.ImportFromEPSG(int(EPSG_CODE_LLA[5:]))
        outdata.SetProjection(srs.ExportToWkt())


def tiff_formatter(
    data_in, out_fname, geotransform, gdal_data_format, projection=None, multi_layers_tiff=False, time_tag=None,
):
    """Format and save input data as GEO-TIFF.

    Args:
        data_in: a single matrix, or a list of matrices / .npy file paths.
        out_fname: output base name; an existing .tif/.tiff extension is stripped.
        geotransform: GDAL geotransform (or a list of them, one per layer,
            when writing separate tiffs).
        gdal_data_format: GDAL data type for the raster bands.
        projection: optional WKT projection; defaults to EPSG_CODE_LLA.
        multi_layers_tiff: when data_in is a list, write one multi-band tiff
            instead of one tiff per layer.
        time_tag: optional value stored as TIFFTAG_DATETIME metadata.

    Returns:
        The written file name, or the list of file names in the
        one-tiff-per-layer case.
    """
    # endswith() replaces the original substring test, which also truncated
    # names that merely *contained* ".tif"/".tiff".
    if out_fname.endswith(".tiff"):
        out_fname = out_fname[:-5]
    elif out_fname.endswith(".tif"):
        out_fname = out_fname[:-4]
    if isinstance(data_in, list) and multi_layers_tiff:
        # write multi layer data in same tiff
        if isinstance(geotransform[0], list):
            geotransform = geotransform[0]
        out_tiff_fname = out_fname + ".tif"
        num_layers = len(data_in)
        # layers may be passed as .npy file paths or as in-memory arrays
        if isinstance(data_in[0], str):
            data_temp = np.load(data_in[0])
            Nx, Ny = data_temp.shape
            del data_temp
        else:
            Nx, Ny = data_in[0].shape
        driver = gdal.GetDriverByName("GTiff")
        outdata = driver.Create(out_tiff_fname, Ny, Nx, num_layers, gdal_data_format)
        if time_tag:
            outdata.SetMetadata({"time_tag": time_tag}, "TIFFTAG_DATETIME")
        _apply_srs(outdata, projection)
        outdata.SetGeoTransform(geotransform)
        for idx, data in enumerate(data_in):
            if isinstance(data, str):
                outdata.GetRasterBand(idx + 1).WriteArray(np.load(data))
            else:
                outdata.GetRasterBand(idx + 1).WriteArray(data)
        outdata.FlushCache()  # saves to disk
        outdata = None
    elif isinstance(data_in, list):
        # write each data in a different tiff
        out_tiff_fname = []
        for idx, data in enumerate(data_in):
            out_tiff_fname.append(out_fname + "_fnftile" + str(idx) + ".tif")
            # formats and saves the input data in GEO-TIFF
            Nx, Ny = data.shape
            driver = gdal.GetDriverByName("GTiff")
            outdata = driver.Create(out_tiff_fname[idx], Ny, Nx, 1, gdal_data_format)
            if time_tag:
                outdata.SetMetadata({"time_tag": time_tag}, "TIFFTAG_DATETIME")
            _apply_srs(outdata, projection)
            outdata.SetGeoTransform(geotransform[idx])
            outdata.GetRasterBand(1).WriteArray(data)
            outdata.FlushCache()  # saves to disk
            outdata = None
    else:
        # write the single input data to tiff
        out_tiff_fname = out_fname + ".tif"
        # formats and saves the input data in GEO-TIFF
        Nx, Ny = data_in.shape
        driver = gdal.GetDriverByName("GTiff")
        outdata = driver.Create(out_tiff_fname, Ny, Nx, 1, gdal_data_format)
        if time_tag:
            outdata.SetMetadata({"time_tag": time_tag}, "TIFFTAG_DATETIME")
        _apply_srs(outdata, projection)
        outdata.SetGeoTransform(geotransform)
        outdata.GetRasterBand(1).WriteArray(data_in)
        outdata.FlushCache()  # saves to disk
        outdata = None
    return out_tiff_fname
| 36.047009 | 221 | 0.646947 |
795430ed4d70e6a4bd5510c05dfc35c9139a14b0 | 639 | py | Python | test.py | segfault87/PyFCM | aa158d3ec220b5776e993b4051fd44dc7ec96d6e | [
"MIT"
] | null | null | null | test.py | segfault87/PyFCM | aa158d3ec220b5776e993b4051fd44dc7ec96d6e | [
"MIT"
] | null | null | null | test.py | segfault87/PyFCM | aa158d3ec220b5776e993b4051fd44dc7ec96d6e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'olucurious'
from pyfcm import FCMNotification
from pprint import pprint
# Configure the service with a real FCM server key before running this demo.
push_service = FCMNotification(api_key="<server key>")
registration_id = "<device registration_id>"
message = "Hope you're having fun this weekend, don't forget to check today's news"
# Single-device push. The original omitted message_body here even though
# `message` was prepared for it, sending an empty notification.
result = push_service.notify_single_device(registration_id=registration_id, message_body=message)
pprint(result)
# Multi-device push (the same token repeated three times, for demonstration).
result = push_service.notify_multiple_devices(
    registration_ids=[registration_id, registration_id, registration_id],
    message_body=message,
)
pprint(result)
# Topic push to every subscriber of "global".
result = push_service.notify_topic_subscribers(topic_name="global", message_body=message)
pprint(result)
| 42.6 | 113 | 0.816901 |
7954323e58893ff29ef1e74338adf928bedfb954 | 647 | py | Python | binary_search.py | daneebee/python_algorithms | 7d9da23b95ee97d1c32be61ea8c5187684b06d67 | [
"MIT"
] | null | null | null | binary_search.py | daneebee/python_algorithms | 7d9da23b95ee97d1c32be61ea8c5187684b06d67 | [
"MIT"
] | null | null | null | binary_search.py | daneebee/python_algorithms | 7d9da23b95ee97d1c32be61ea8c5187684b06d67 | [
"MIT"
def binary_search(search_val, search_list):
    """Return True if *search_val* occurs in sorted *search_list*, else False.

    Fixes the original midpoint logic, which could recurse forever: e.g.
    binary_search(2, [1, 2]) picked midpoint 0 and recursed on the
    unchanged list. The midpoint element is now excluded from the upper
    half, so each recursive call strictly shrinks the range, and an
    explicit empty-list base case replaces the single-element check.
    """
    if not search_list:
        return False
    midpoint = len(search_list) // 2
    if search_val == search_list[midpoint]:
        return True
    if search_val > search_list[midpoint]:
        # midpoint already ruled out: search strictly above it
        return binary_search(search_val, search_list[midpoint + 1:])
    return binary_search(search_val, search_list[:midpoint])
def main():
    """Demonstrate binary_search on a small, sorted sample list."""
    sample_values = [7, 1, 16, 100, 5, 8, 101, 2, 6, 1560]
    print(binary_search(101, sorted(sample_values)))


if __name__ == "__main__":
    main()
79543255e3772eea4573d3a3fdb131311366f726 | 2,497 | py | Python | esprima/compat.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 384 | 2015-01-06T15:09:23.000Z | 2022-02-25T19:56:44.000Z | esprima/compat.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 222 | 2015-01-06T19:11:08.000Z | 2022-02-16T06:46:39.000Z | esprima/compat.py | gustavopinto/entente | 19b65d8cafd77c198c9c441f4f5e01503360309b | [
"BSD-2-Clause"
] | 86 | 2015-01-16T09:50:31.000Z | 2022-02-25T13:27:14.000Z | # -*- coding: utf-8 -*-
# Copyright JS Foundation and other contributors, https://js.foundation/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import sys
# True when running under Python 3.x; selects the compatibility shims below.
PY3 = sys.version_info >= (3, 0)
if PY3:
    # Python 3:
    # Alias the removed Python 2 builtins to their Python 3 counterparts.
    basestring = str
    long = int
    xrange = range
    unicode = str
    uchr = chr
    def uord(ch):
        """Return the code point of the first character of *ch*."""
        return ord(ch[0])
else:
    # Python 2: re-export the builtins so callers can import them from here.
    basestring = basestring
    long = long
    xrange = xrange
    unicode = unicode
    try:
        # Python 2 UCS4:
        # Wide builds accept astral code points natively; probing with
        # unichr(0x10000) raises ValueError on narrow (UCS2) builds.
        unichr(0x10000)
        uchr = unichr
        def uord(ch):
            """Return the code point of the first character of *ch*."""
            return ord(ch[0])
    except ValueError:
        # Python 2 UCS2:
        # Narrow builds cap unichr at 0xFFFF, so emulate astral characters
        # with UTF-16 surrogate pairs.
        def uchr(code):
            """Return the character for *code*, as a surrogate pair when needed."""
            # UTF-16 Encoding
            if code <= 0xFFFF:
                return unichr(code)
            cu1 = ((code - 0x10000) >> 10) + 0xD800
            cu2 = ((code - 0x10000) & 1023) + 0xDC00
            return unichr(cu1) + unichr(cu2)
        def uord(ch):
            """Return the code point at the start of *ch*, joining surrogate pairs."""
            cp = ord(ch[0])
            # High surrogate: combine with the following low surrogate, if present.
            if cp >= 0xD800 and cp <= 0xDBFF:
                second = ord(ch[1])
                if second >= 0xDC00 and second <= 0xDFFF:
                    first = cp
                    cp = (first - 0xD800) * 0x400 + second - 0xDC00 + 0x10000
            return cp
| 34.205479 | 78 | 0.647978 |
795432a5d52bda31693254ba0946b2d0d4e4e6b9 | 1,286 | py | Python | stacker/hooks/ecs.py | chrishenry/stacker | 4fc631ebb139012e72f11b4cf9277f9ccac18aac | [
"BSD-2-Clause"
] | null | null | null | stacker/hooks/ecs.py | chrishenry/stacker | 4fc631ebb139012e72f11b4cf9277f9ccac18aac | [
"BSD-2-Clause"
] | null | null | null | stacker/hooks/ecs.py | chrishenry/stacker | 4fc631ebb139012e72f11b4cf9277f9ccac18aac | [
"BSD-2-Clause"
] | null | null | null | # A lot of this code exists to deal w/ the broken ECS connect_to_region
# function, and will be removed once this pull request is accepted:
# https://github.com/boto/boto/pull/3143
import logging
logger = logging.getLogger(__name__)
from boto.regioninfo import get_regions
from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
def regions():
    """Return the RegionInfo list for the 'ec2containerservice' endpoint."""
    return get_regions(
        "ec2containerservice", connection_cls=EC2ContainerServiceConnection)
def connect_to_region(region_name, **kw_params):
    """Open an ECS connection to *region_name*; None if the region is unknown."""
    matching = next(
        (candidate for candidate in regions() if candidate.name == region_name),
        None)
    if matching is None:
        return None
    return matching.connect(**kw_params)
def create_clusters(region, namespace, mappings, parameters, **kwargs):
    """Creates ECS clusters.

    Expects a "clusters" argument, which should contain a list of cluster
    names (or a single name string) to create. Returns False when the
    argument is missing, True after creating every cluster.
    """
    conn = connect_to_region(region)
    if "clusters" not in kwargs:
        logger.error("setup_clusters hook missing \"clusters\" argument")
        return False
    clusters = kwargs["clusters"]
    # A bare string means a single cluster name.
    if isinstance(clusters, basestring):
        clusters = [clusters]
    for cluster in clusters:
        logger.debug("Creating ECS cluster: %s", cluster)
        conn.create_cluster(cluster)
    return True
| 28.577778 | 73 | 0.702955 |
795432bc1f94ed7466b1622f2c494f068e3a49f4 | 4,712 | py | Python | segmenter/evaluators/tasks.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/evaluators/tasks.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | segmenter/evaluators/tasks.py | brandongk/segmenter | dbc042d31dc74f1abdc87ae10a6be78ba38ddb91 | [
"Unlicense"
] | null | null | null | import argparse
from launcher import Task
from segmenter.evaluators import Evaluators
from segmenter.jobs import BaseJob
from segmenter.models import FoldWeightFinders
import itertools
import os
class EvaluateTask(BaseJob):
    """Job that evaluates trained models across classes, folds and aggregators.

    NOTE(review): relies on attributes provided by BaseJob (self.args,
    self.classes, self.data_dir, self.output_dir) -- confirm in BaseJob.
    """
    # CLI sub-command name under which this task is registered.
    name = 'evaluate'
    def __init__(self, args):
        """Resolve evaluator and fold-weight-finder implementations from CLI args."""
        super().__init__(args)
        self.evaluator = Evaluators.get(args["evaluator"])
        self.weight_finder = FoldWeightFinders.get(args["weight_finder"])
    @staticmethod
    def arguments(parser) -> None:
        """Register the 'evaluate' sub-command and its options on *parser*."""
        command_parser = parser.add_parser(EvaluateTask.name,
                                           help='Evaluate a model.')
        BaseJob.arguments(command_parser)
        command_parser.add_argument("--evaluator",
                                    type=str,
                                    default="metric",
                                    choices=Evaluators.choices(),
                                    help='the evaluation to perform.')
        command_parser.add_argument("--classes",
                                    type=str,
                                    help='the clases to evaluate',
                                    required=False,
                                    nargs='+')
        command_parser.add_argument("--folds",
                                    type=str,
                                    help='the folds to evaluate.',
                                    required=False,
                                    nargs='+')
        command_parser.add_argument("--aggregators",
                                    type=str,
                                    help='the aggregators to evaluate.',
                                    required=False,
                                    nargs='+')
        command_parser.add_argument(
            "--weight-finder",
            type=str,
            default="organized",
            choices=FoldWeightFinders.choices(),
            help='the strategy for finding fold weights')
    @staticmethod
    def arguments_to_cli(args) -> str:
        """Serialize a parsed-args dict back into a CLI argument string."""
        # Optional list arguments are emitted only when present; absent ones
        # contribute empty strings to the join.
        args = " ".join([
            args["dataset"],
            "--evaluator {}".format(args["evaluator"]),
            "--weight-finder {}".format(args["weight_finder"]),
            "--classes {}".format(" ".join(args["classes"]))
            if args["classes"] is not None else "",
            "--folds {}".format(" ".join(args["folds"]))
            if args["folds"] is not None else "",
            "--aggregators {}".format(" ".join(args["aggregators"]))
            if args["aggregators"] is not None else "",
        ])
        return args
    def execute(self) -> None:
        """Run the evaluator for every job config / class / fold / aggregator combo."""
        # Imported lazily, presumably to avoid import cycles at module load time.
        from segmenter.aggregators import Aggregators
        from segmenter.config import config_from_dir
        super(EvaluateTask, self).execute()
        # Every sub-directory of the output dir is treated as a job-config hash.
        job_configs = [
            d for d in os.listdir(self.output_dir)
            if os.path.isdir(os.path.join(self.output_dir, d))
        ]
        # Optionally restrict evaluation to the classes named on the CLI.
        if self.args["classes"] is not None:
            self.classes = list(
                filter(lambda c: c in self.args["classes"], self.classes))
        for job_hash in job_configs:
            job_config, job_hash = config_from_dir(
                os.path.join(self.output_dir, job_hash))
            # FOLDS == 0 means a single un-folded run ("all").
            folds = ["all"] if job_config["FOLDS"] == 0 else [
                "fold{}".format(o) for o in range(job_config["FOLDS"])
            ]
            if job_config["BOOST_FOLDS"] > 0:
                # Boosted runs append a "bN" suffix to every fold name.
                boost_folds = [
                    "b{}".format(o)
                    for o in list(range(0, job_config["BOOST_FOLDS"] + 1))
                ]
                # NOTE(review): this reads self.folds, while only the local
                # variable `folds` is built above -- confirm BaseJob defines
                # self.folds; otherwise this likely should be `folds`.
                folds = [
                    "".join(o)
                    for o in itertools.product(*[self.folds, boost_folds])
                ]
            if self.args["folds"] is not None:
                folds = list(filter(lambda c: c in self.args["folds"], folds))
            # Hyper-parameter-search runs only evaluate the first fold.
            if job_config["SEARCH"]:
                folds = ["fold0"]
            # With a single fold there is nothing to aggregate.
            if len(folds) <= 1:
                aggregators = ["dummy"]
            else:
                aggregators = Aggregators.choices()
            if self.args["aggregators"] is not None:
                aggregators = list(
                    filter(lambda c: c in self.args["aggregators"],
                           aggregators))
            for clazz in self.classes:
                self.evaluator(clazz,
                               job_config,
                               job_hash,
                               self.data_dir,
                               self.output_dir,
                               self.weight_finder,
                               folds=folds,
                               aggregators=aggregators).execute()
tasks = [EvaluateTask]
| 37.396825 | 78 | 0.471774 |
795433cd8ccf9f22479107e9c786b4a5abb560f8 | 5,054 | py | Python | week2-wireshark/pyshark_test.py | sharkwheels/Independet_study_2017 | bc3974a784c1d264b10a8584e9c5000d5d20496a | [
"MIT"
] | null | null | null | week2-wireshark/pyshark_test.py | sharkwheels/Independet_study_2017 | bc3974a784c1d264b10a8584e9c5000d5d20496a | [
"MIT"
] | null | null | null | week2-wireshark/pyshark_test.py | sharkwheels/Independet_study_2017 | bc3974a784c1d264b10a8584e9c5000d5d20496a | [
"MIT"
] | null | null | null | import pyshark
import collections
import time
import operator
import logging
import serial
import struct
import socket
### LOGGING AND SERIAL SETUP ###################################################
# Log WARNING and above to a local file, overwritten on every run.
logging.basicConfig(level=logging.WARNING,
                    format='%(asctime)s %(levelname)s %(message)s',
                    filename='pysharktest.log',
                    filemode='w')
logging.debug('A debug message')
#logging.info('Some information')
logging.warning('A shot across the bows')
### SOCKET #########################################################################
# Connect to a local server (see sendToSocket below) and announce ourselves.
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 5000 # Reserve a port for your service.
try:
    s.connect((host, port))
    time.sleep(2)
    s.send(b"pyshark says what up!\n")
except socket.error as e:
    print(e,s)
### DATA #########################################################################
# Parse the capture file; only_summaries keeps just the summary columns
# (source, destination, protocol, info, ...), which is all this script uses.
capture = pyshark.FileCapture('steam2.pcapng',only_summaries=True)
def makeDicts():
    """Build a list of per-packet summary dicts from the module-level capture."""
    return [
        {"source": pkt.source, "destination": pkt.destination, "protocol": pkt.protocol, "info": pkt.info}
        for pkt in capture
    ]
### UTILITY FUNCTIONS #########################################################################
def sortMaxtoMin(allTheStuff, keyToUse, cN):
    """Sort incoming data from most active to least; return the top cN items.

    Args:
        allTheStuff: list of dicts (one per packet summary).
        keyToUse: dict key whose values are tallied (e.g. "source").
        cN: number of top entries to return; <= 0 or larger than the number
            of distinct values means "return all of them".

    Returns:
        list of (value, count) tuples sorted by count, descending.
    """
    rawSources = [entry[keyToUse] for entry in allTheStuff]
    print(len(rawSources))
    # Counter already holds all the tallies; the original re-counted every
    # distinct value with list.count, which was quadratic in packet count.
    counts = collections.Counter(rawSources)
    sortedIps = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
    if cN <= 0 or cN > len(sortedIps):
        cN = len(sortedIps)
    return sortedIps[:cN]
def formatPrinter(title, listOfThings):
    """Print a titled list of (label, count) pairs, followed by a separator."""
    print(title)
    for label, count in listOfThings:
        print("{0}: {1}".format(label, count))
    print("----------------")
    print(" ")
def sendToSocket(title, listOfThings):
    """Print each (label, count) pair and push it over the module socket *s*.

    Sleeps between sends so the receiving side can display entries one by one.
    """
    print(title)
    time.sleep(2)
    for label, count in listOfThings:
        time.sleep(1)
        formatted = "{0}: {1}".format(label, count)
        print(formatted)
        s.send((formatted + "\n").encode())
# Summarize the capture: top talkers by protocol, source IP and info string.
allTheStuff = makeDicts()
protocols = sortMaxtoMin(allTheStuff,"protocol",10) ## sourceList, the key you're looking for, how many reps above?
activeIP = sortMaxtoMin(allTheStuff,"source",5)
infoTest = sortMaxtoMin(allTheStuff,"info",5)
formatPrinter("Active IPs",activeIP)
formatPrinter("Common Protocols",protocols)
formatPrinter("info",infoTest)
# Forward the protocol summary to the listening socket server.
sendToSocket("Protocols",protocols)
### THE SHIT I AIN'T USING #########################################################################
def getProtocols():
    """Count packets for a fixed set of common protocols in the capture.

    Returns a list of (protocol, count) tuples, most frequent first.
    """
    prots = [
        "SSDP",
        "TCP",
        "TLSv1.2",
        "DNS",
        "QUIC",
        "MDNS",
        "UDP",
        "CDP",
        "ARP",
        "IGMPv2",
        "ICMPv6",
        "DHCPv6",
        "NETBios",
        "NBNS",
    ]
    capProts = [pkt.protocol for pkt in capture]
    toSort = {name: capProts.count(name) for name in prots}
    return sorted(toSort.items(), key=operator.itemgetter(1), reverse=True)
## trying to make data into a dict
def getData(protocol):
    """Collect source/destination/info summaries for packets of one protocol."""
    return [
        {"source": pkt.source, "destination": pkt.destination, "info": pkt.info}
        for pkt in capture
        if pkt.protocol == protocol
    ]
def getTheInfo(infoItem):
    """Print every packet's info string, then a count for each known info type."""
    options = [
        "Server Hello",
        "Application Data",
        "Change Cipher Spec",
        "Encrypted Handshake Message",
        "Server Key Exchange",
        "Client Key Exchange",
        "Encrypted Alert",
        "New Session Ticket",
        "Hello Request",
        "Ignored Unknown Record",
        "Certificate",
        "M-SEARCH * HTTP/1.1 ",
        "[TCP Keep-Alive]",
        "[TCP Dup ACK 5230#1]",
        "[TCP segment of a reassembled PDU]",
        "[TCP Window Update]",
        "[TCP Retransmission]",
        "[TCP Keep-Alive ACK] ",
    ]
    allTheInfo = []
    for entry in infoItem:
        print(entry["info"])
        allTheInfo.append(entry["info"])
    for option in options:
        print(option, ": ", allTheInfo.count(option))
"""
cap0 = capture[0]
print(dir(cap0))
print(cap0.info)
print(cap0.length)
print(cap0.protocol)
print(cap0.no)
print(cap0.source)
print(cap0.destination)
print(cap0.summary_line)
print(cap0.time)
"""
## datasets to work with
"""
ipSSDP = getData("SSDP") #this is always search
ipTCP = getData("TCP") # this has a lot of itneresting things
ipTLSV = getData("TLSv1.2") #weird
ipDNS = getData("DNS") #hmm
ipQUIC = getData("QUIC") # kind of boring on the high level
ipMDNS = getData("MDNS")
print("Protocols")
print("----------------")
print("SSDP: ",len(ipSSDP))
print("TCP: ",len(ipTCP))
print("TLSV: ",len(ipTLSV))
print("DNS: ",len(ipDNS))
print("QUIC: ",len(ipQUIC))
print("MDNS: ", len(ipMDNS))
print("----------------")
""" | 24.77451 | 117 | 0.625049 |
7954350c032225faca564e7b44c8b7f102adc1d3 | 929 | py | Python | src/build/android/pylib/output/local_output_manager_test.py | kiss2u/naiveproxy | 724caf7f3c8bc2d2d0dcdf090e97429a3c88a85a | [
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | src/build/android/pylib/output/local_output_manager_test.py | kiss2u/naiveproxy | 724caf7f3c8bc2d2d0dcdf090e97429a3c88a85a | [
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | src/build/android/pylib/output/local_output_manager_test.py | kiss2u/naiveproxy | 724caf7f3c8bc2d2d0dcdf090e97429a3c88a85a | [
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | #! /usr/bin/env vpython
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import tempfile
import shutil
import unittest
from pylib.base import output_manager
from pylib.base import output_manager_test_case
from pylib.output import local_output_manager
class LocalOutputManagerTest(output_manager_test_case.OutputManagerTestCase):
  """Exercises LocalOutputManager against a throwaway scratch directory."""

  def setUp(self):
    scratch_dir = tempfile.mkdtemp()
    self._output_dir = scratch_dir
    self._output_manager = local_output_manager.LocalOutputManager(scratch_dir)

  def testUsableTempFile(self):
    archived = self._output_manager._CreateArchivedFile(
        'test_file', 'test_subdir', output_manager.Datatype.TEXT)
    self.assertUsableTempFile(archived)

  def tearDown(self):
    # Remove the scratch directory created in setUp.
    shutil.rmtree(self._output_dir)
if __name__ == '__main__':
unittest.main()
| 26.542857 | 77 | 0.771798 |
7954356bde0683b99ed74793c46a2c73e6b2a00c | 3,085 | py | Python | impacket/dcerpc/wkssvc.py | rackerlabs/impacket | 327acaeab5289da6e99cd82fe0b88db329566e66 | [
"Apache-1.1"
] | 1 | 2015-05-23T00:07:36.000Z | 2015-05-23T00:07:36.000Z | impacket/impacket/dcerpc/wkssvc.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | null | null | null | impacket/impacket/dcerpc/wkssvc.py | Aliced3645/DataCenterMarketing | 67bc485e73cf538498a89b28465afb822717affb | [
"Apache-2.0"
] | 1 | 2021-07-25T23:46:43.000Z | 2021-07-25T23:46:43.000Z | # Copyright (c) 2003-2012 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id: wkssvc.py 529 2012-04-29 21:39:46Z bethus@gmail.com $
#
# Author: Alberto Solino
#
# Description:
# WKSSVC interface implementation.
#
from impacket.structure import Structure
from impacket import dcerpc
from impacket.dcerpc import ndrutils
from impacket.uuid import uuidtup_to_bin
MSRPC_UUID_WKSSVC = uuidtup_to_bin(('6BFFD098-A112-3610-9833-46C3F87E345A','1.0'))
class WKSTA_TRANSPORT_INFO_0(Structure):
    # Wire layout of one WKSTA_TRANSPORT_INFO_0 record: a network transport
    # bound to the workstation service.  All fields are little-endian DWORDs;
    # the two string members are parsed separately in NetrWkstaTransportEnum
    # (NDR puts conformant strings after the fixed-size headers).
    structure = (
        ('UnUsed','<L'),
        ('NumberOfRemoteConnections','<L'),
        ('RefId1','<L'),
        ('RefId2','<L'),
        ('IsRoutableTransport','<L'),
        # ('TransportName',':',ndrutils.NDRStringW),
        # ('TransportAddress',':',ndrutils.NDRStringW),
    )
class WKSSVCNetrWkstaTransportEnum(Structure):
    # Request structure for the NetrWkstaTransportEnum call (opnum 5) of the
    # WKSSVC interface: enumerate the transports of a workstation.
    opnum = 5
    alignment = 4
    structure = (
        ('ServerName',':',ndrutils.NDRUniqueStringW),
        ('TransportInfo','20s'),
        ('MaxBuffer','<L=0xffffffff'),   # let the server return everything
        ('refId','<L=1'),
        ('ResumeHandle','<L=0'),
    )
class WKSSVCNetrWkstaTransportEnumResponse(Structure):
    # Response structure for NetrWkstaTransportEnum.  'Array' holds the raw
    # bytes of the transport records; its length is everything except the
    # 40 bytes of surrounding fixed-size fields.
    structure = (
        ('Level','<L'),
        ('Case','<L'),
        ('refId','<L'),
        ('Count','<L'),
        ('refId2','<L'),
        ('MaxCount','<L'),
        ('ArrayLen','_-Array','len(self.rawData)-40'),
        ('Array',':'),
        ('TotalEntries','<L'),
        ('refId3','<L'),
        ('ResumeHandle','<L'),
        ('ErrorCode','<L')
    )
class DCERPCWksSvc:
    """Thin client for the WKSSVC interface over an open DCE-RPC binding."""

    def __init__(self, dcerpc):
        # dcerpc: an already-connected DCE-RPC transport (provides call/recv).
        self._dcerpc = dcerpc

    def doRequest(self, request, noAnswer = 0, checkReturn = 1):
        """Send *request* over the binding.

        With noAnswer set, return immediately after sending.  Otherwise the
        reply is received and returned; when checkReturn is set, a non-zero
        status in the last four bytes of the answer raises an Exception.
        """
        self._dcerpc.call(request.opnum, request)
        if noAnswer:
            return
        else:
            answer = self._dcerpc.recv()
            if checkReturn and answer[-4:] != '\x00\x00\x00\x00':
                # Bug fix: the original used the Python-2-only statement form
                # "raise Exception, 'msg'"; the call form below is equivalent
                # in Python 2 and also valid in Python 3.
                raise Exception('DCE-RPC call returned an error.')
            return answer

    def NetrWkstaTransportEnum( self, serverName ):
        """Enumerate the network transports of *serverName*.

        Returns the parsed WKSSVCNetrWkstaTransportEnumResponse with its
        'Array' member replaced by a list of WKSTA_TRANSPORT_INFO_0 records,
        each augmented with 'TransportName' and 'TransportAddress'.
        """
        transportEnum = WKSSVCNetrWkstaTransportEnum()
        transportEnum['ServerName'] = ndrutils.NDRUniqueStringW()
        transportEnum['ServerName']['Data'] = (serverName+'\x00').encode('utf-16le')
        transportEnum['TransportInfo'] = '\x00'*8 + '\x04\x00\x04\x00' + '\x00'*8
        data = self.doRequest(transportEnum, checkReturn = 1)
        ans = WKSSVCNetrWkstaTransportEnumResponse(data)
        data = ans['Array']
        transportList = []
        # First pass: the fixed-size header of each transport record.
        for i in range(ans['Count']):
            ll = WKSTA_TRANSPORT_INFO_0(data)
            transportList.append(ll)
            data = data[len(ll):]
        # Second pass: the deferred NDR strings belonging to each record.
        for i in range(ans['Count']):
            transName = ndrutils.NDRStringW(data)
            transportList[i]['TransportName'] = transName
            data = data[len(transName):]
            transAddress = ndrutils.NDRStringW(data)
            transportList[i]['TransportAddress'] = transAddress
            data = data[len(transAddress):]
        ans['Array'] = transportList
        return ans
| 31.479592 | 82 | 0.611994 |
7954359167c3721cd1fa69432123553e62904767 | 8,922 | py | Python | affiliates/facebook/migrations/0013_auto__add_field_facebookuser_full_name__add_field_facebookuser_first_n.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 15 | 2015-01-01T07:17:44.000Z | 2020-11-09T06:28:29.000Z | affiliates/facebook/migrations/0013_auto__add_field_facebookuser_full_name__add_field_facebookuser_first_n.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 16 | 2015-02-25T23:17:27.000Z | 2015-08-20T10:28:18.000Z | affiliates/facebook/migrations/0013_auto__add_field_facebookuser_full_name__add_field_facebookuser_first_n.py | glogiotatidis/affiliates | 34d0ded8e24be9dd207d6419a5157dc8ce34bc06 | [
"BSD-3-Clause"
] | 12 | 2015-01-17T20:57:03.000Z | 2019-11-03T15:04:31.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the full_name/first_name/last_name/locale
    columns to the facebook_facebookuser table."""

    def forwards(self, orm):
        """Apply the migration: add the four new FacebookUser columns."""
        # Adding field 'FacebookUser.full_name'
        db.add_column('facebook_facebookuser', 'full_name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
                      keep_default=False)
        # Adding field 'FacebookUser.first_name'
        db.add_column('facebook_facebookuser', 'first_name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
                      keep_default=False)
        # Adding field 'FacebookUser.last_name'
        db.add_column('facebook_facebookuser', 'last_name',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True),
                      keep_default=False)
        # Adding field 'FacebookUser.locale'
        db.add_column('facebook_facebookuser', 'locale',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the four columns added by forwards()."""
        # Deleting field 'FacebookUser.full_name'
        db.delete_column('facebook_facebookuser', 'full_name')
        # Deleting field 'FacebookUser.first_name'
        db.delete_column('facebook_facebookuser', 'first_name')
        # Deleting field 'FacebookUser.last_name'
        db.delete_column('facebook_facebookuser', 'last_name')
        # Deleting field 'FacebookUser.locale'
        db.delete_column('facebook_facebookuser', 'locale')

    # Frozen ORM snapshot used by South at migration time; auto-generated —
    # do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook.facebookaccountlink': {
            'Meta': {'object_name': 'FacebookAccountLink'},
            'activation_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'affiliates_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'account_links'", 'to': "orm['auth.User']"}),
            'facebook_user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'_account_link'", 'unique': 'True', 'to': "orm['facebook.FacebookUser']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'facebook.facebookbanner': {
            'Meta': {'object_name': 'FacebookBanner'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Banner'", 'unique': 'True', 'max_length': '255'})
        },
        'facebook.facebookbannerinstance': {
            'Meta': {'object_name': 'FacebookBannerInstance'},
            'banner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['facebook.FacebookBanner']"}),
            'can_be_an_ad': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'custom_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'total_clicks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'banner_instance_set'", 'to': "orm['facebook.FacebookUser']"})
        },
        'facebook.facebookbannerlocale': {
            'Meta': {'object_name': 'FacebookBannerLocale'},
            'banner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locale_set'", 'to': "orm['facebook.FacebookBanner']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('affiliates.base.models.LocaleField', [], {'default': "'en-US'", 'max_length': '32'})
        },
        'facebook.facebookclickstats': {
            'Meta': {'object_name': 'FacebookClickStats'},
            'banner_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['facebook.FacebookBannerInstance']"}),
            'clicks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'hour': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 8, 23, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'facebook.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'leaderboard_position': ('django.db.models.fields.IntegerField', [], {'default': '2147483647'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'total_clicks': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['facebook']
| 66.088889 | 182 | 0.579242 |
795435a262c0457ffd3cfceac5e38cd7a38c686e | 12,232 | py | Python | trkm/sequencer/faker.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | trkm/sequencer/faker.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | trkm/sequencer/faker.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | import configparser
import sys
from datetime import datetime, timedelta
import statistics, random
class RecordWrapper:
    """Read-only view over one sample of a generated (fake) activity."""

    def __init__(self, name, time, idx, data):
        self.name = name        # identifier of the generating source
        self.time = time        # timestamp of this sample
        self._index = idx       # position within the generated sequence
        self._record = data     # raw field dict backing the properties

    @property
    def hr(self):
        return self._record['hr']

    @property
    def distance(self):
        return self._record['total_distance']

    @property
    def speed(self):
        return self._record['speed']

    @property
    def cadence(self):
        return self._record['cadence']

    @property
    def temperature(self):
        # No temperature channel is generated; always absent.
        return None
class Fragment:
    """A piecewise-linear sequence of values over an integer index range.

    A fragment starts out as a straight line from ``start`` to ``end`` over
    ``length`` steps.  ``divide`` recursively splits it into child fragments
    (kept in ``_parts``) so that individual points can be displaced, and
    ``force`` clamps a sub-range to a constant value.  Looked-up values are
    clipped to the optional ``min``/``max`` bounds.
    """
    def __init__(self, length, start, end, min=None, max=None, starting_at=None):
        self._length = length
        if start < 0:
            raise Exception("Start %f" % start)
        if end < 0:
            raise Exception("End %f" % end)
        self.starting_at = starting_at or 0
        self._start = start
        self._end = end
        self._min = min
        self._max = max
        self._parts = None   # list of child Fragments once divided
        self._step = None    # cached per-index slope while undivided
    def init_cache(self):
        """Lazily compute the slope of the (undivided) linear segment."""
        if self._parts is None:
            if self._step is None:
                self._step = (self._end - self._start) / len(self)
    def __getitem__(self, at):
        """Value at index ``at`` (negative indices count from the end)."""
        if at < 0:
            at += len(self)
        if self._parts is None:
            self.init_cache()
            v = self._start + self._step * at
            if self._min is not None:
                v = max(v, self._min)
            if self._max is not None:
                v = min(v, self._max)
            return v
        (elt, at, _) = self.element_at(at)
        if elt is not None:
            return elt[at]
        # Index past the end of all parts: fall back to the last value.
        return self[-1]
    def element_at(self, at):
        """Locate the child fragment containing local index ``at``.

        Returns ``(child, index_within_child, child_position)`` or a triple
        of Nones when undivided or out of range.
        """
        if self._parts is None:
            return (None, None, None)
        for (i, elt) in enumerate(self._parts):
            if at < len(elt):
                return (elt, at, i)
            else:
                at -= len(elt)
        return (None, None, None)
    def __len__(self):
        if self._parts:
            return sum(map(len, self._parts))
        else:
            return self._length
    def divide(self, at, displacement=0, absolute=None):
        """Split at index ``at``, moving the split point by ``displacement``
        or pinning it to ``absolute`` when given.  ``at`` equal to 0 or to
        the fragment length adjusts the matching endpoint instead.
        """
        if at == 0:
            if absolute is not None:
                # Bug fix: this branch (and the mirror one below) used the
                # no-op comparison ``==`` where an assignment was intended,
                # so 'absolute' was silently discarded at the endpoints.
                self._start = absolute
            else:
                self._start += displacement
        elif at == self._length:
            if absolute is not None:
                self._end = absolute
            else:
                self._end += displacement
        elif self._parts is None:
            if absolute is not None:
                p = absolute
            else:
                step = (self._end - self._start) / len(self)
                p = self._start + step * at + displacement
            self._parts = [
                Fragment(at, self._start, p,
                         min=self._min, max=self._max,
                         starting_at = self.starting_at),
                Fragment(self._length - at, p, self._end,
                         min=self._min, max=self._max,
                         starting_at = self.starting_at + at)
            ]
        else:
            (elt, at, i) = self.element_at(at)
            if elt and at != 0:
                elt.divide(at, displacement, absolute)
            # if at == 0 and i > 0:
            #     self._parts[i-1].divide(len(self._parts[i-1]), displacement, absolute)
    def force(self, starting_at, length, value):
        """Set ``length`` indices starting at ``starting_at`` to ``value``."""
        if starting_at > self._length:
            pass
        elif starting_at <= 0 and length >= self._length:
            # Whole fragment covered: collapse it to a constant.
            self._start = value
            self._end = value
            self._parts = None
            self._step = None
        else:
            length = min(length, self._length - starting_at)
            (s_elt, s_at, _) = self.element_at(starting_at)
            if s_elt is None:
                self.divide(starting_at)
            (e_elt, e_at, _) = self.element_at(starting_at + length)
            if e_elt is None:
                self.divide(starting_at + length)
            for elt in self._parts:
                if starting_at < len(elt):
                    l = min(length, len(elt) - starting_at)
                    # Bug fix: propagate the requested ``value`` instead of a
                    # hard-coded 0 (all current callers pass 0, so behaviour
                    # for them is unchanged).
                    elt.force(starting_at, l, value)
                    if l >= length:
                        break
                    length -= l
                    starting_at = 0
                else:
                    starting_at -= len(elt)
    def __repr__(self):
        if self._parts is None:
            return ("Fragment[%r:%ds, %.2f, %.2f]"
                    % (self.starting_at, self._length, self._start, self._end))
        else:
            return ("Fragments %r:%ds[%s]"
                    % (self.starting_at, len(self), ", ".join(map(repr, self._parts))))
class Faker:
    """Generates a plausible fake training session (speed, cadence, heart
    rate, accumulated distance) from an INI config file, using
    midpoint-displacement noise over a Fragment of per-second speeds.
    Iterating a Faker yields one RecordWrapper per simulated second.
    """
    def __init__(self, name):
        # *name* is the path of the INI file describing the session.
        self.name = name
        self.config = configparser.ConfigParser(interpolation=None, strict=True,
                                                empty_lines_in_values=True)
        self.config.read(self.name)
    def parse_range(self, s, parser=int):
        """Parse a comma-separated string into a (first, last) tuple."""
        l = list(map(parser, s.split(',')))
        return (l[0], l[-1])
    def error(self, msg):
        """Print *msg* and abort the whole process."""
        print(msg)
        sys.exit(1)
    def displacement(self, val, lo, hi):
        """Random offset from *val*, drawn triangularly within [lo, hi]."""
        return random.triangular(lo, hi, val) - val
    def displace_midpoint(self, route, start, end, bounds, displacement_reduction):
        """Midpoint displacement: split [start, end) at a random point, nudge
        the route there by up to +/-bounds, and recurse with shrinking bounds
        until segments fall below min_frag_len.
        """
        if end - start < self.min_frag_len:
            return
        at = int(random.triangular(start, end, (start + end) / 2))
        v = route[at]
        lo = v - bounds
        hi = v + bounds
        route.divide(at, self.displacement(v, lo, hi))
        new_bounds = bounds * displacement_reduction
        self.displace_midpoint(route, start, at, new_bounds, displacement_reduction)
        self.displace_midpoint(route, at, end, new_bounds, displacement_reduction)
    def add_pause(self, route, at, lead_in, length, lead_out):
        """Zero out *length* seconds of the route around index *at*, splitting
        the fragments so that short lead-in/lead-out ramps pin the original
        values at the edges.  Returns the (mutated) route.
        """
        start = max(0, at - int(length / 2))
        end = min(len(route), start + length)
        p1 = start
        p2 = end
        leadin_start = max(0, start - lead_in)
        leadout_end = min(end + lead_out, len(route))
        # Remember the original values at the ramp edges so the divides
        # below can pin them in place.
        x_start_v = route[leadin_start]
        x_end_v = route[leadout_end]
        if start > 0:
            p1 = leadin_start
            route.divide(leadin_start, absolute=x_start_v)
        if end < len(route):
            p2 = leadout_end
            route.divide(leadout_end, absolute=x_end_v)
        if start > 0:
            route.divide(start, 0)
        else:
            leadin_start = None
        if end < len(route):
            route.divide(end, absolute=0)
            route.divide(leadout_end)
        else:
            leadout_end = None
        # for i in range(p1, p2+1):
        #     print("Pause of %d going at %d: %r" % (length, i, route[i]))
        route.force(start, length, 0)
        # for i in range(p1, p2+1):
        #     print("Pause of %d went at %d: %r" % (length, i, route[i]))
        return route
    def print_route(self, route):
        """Debug helper: dump every (index, value) pair of a route."""
        for n in range(0, len(route)):
            print("%5d: %.2f" % (n, route[n]))
    # def squash(self, route, correction_factor, c_med, c_min, c_max, w_med, w_min, w_max):
    #     # keeping shape
    #     f_lo = (w_med - w_min) / ((c_med - c_min) * correction_factor)
    #     f_hi = (w_max - w_med) / ((c_max - c_med) * correction_factor)
    #     for (i, v) in enumerate(route):
    #         if v < c_med:
    #             route[i] = c_med - ((c_med - v) * f_lo)
    #         elif v > c_med:
    #             route[i] = c_med + ((v - c_med) * f_hi)
    #     return route
    def route(self, length, avg_speed, speed_range, pauses=[]):
        """Build the per-second speed profile of a *length*-second session,
        averaging roughly *avg_speed* and clamped to *speed_range*.

        NOTE(review): ``pauses=[]`` is a mutable default; it is only read
        here, never mutated, so it is currently harmless, but ``pauses=()``
        would be more defensive.
        """
        base = 1000
        displacement_bounds = 500
        decay_power = 1
        displacement_reduction = 1 / (2 ** decay_power)
        hi = base + displacement_bounds
        lo = base - displacement_bounds
        # Random endpoints around the arbitrary working level of 1000; the
        # profile is rescaled to the requested average further down.
        start = 1000 + self.displacement(1000, lo, hi)
        end = 1000 + self.displacement(1000, lo, hi)
        route = Fragment(length, start, end)
        self.displace_midpoint(route, 0, length,
                               displacement_bounds,
                               displacement_reduction)
        # Draw a Weibull-distributed position for each configured pause.
        pp = sorted(map(lambda _: int(random.weibullvariate(length, 1.5)), pauses))
        #print("BEFORE-APU: %r" % route)
        for (i, p) in enumerate(pp):
            self.add_pause(route, p, length=pauses[i], lead_in=2, lead_out=2)
        #print("AFTER-APU: %r" % route)
        r0 = list(map(lambda i: route[i], range(0, length)))
        min_v = min(r0)
        max_v = max(r0)
        m = statistics.mean(r0)
        # Scale factor mapping the working level onto the requested average.
        f = avg_speed / m
        # if min_v * f < speed_range[0] or max_v * f > speed_range[1]:
        #     r0 = self.squash(r0, f, m, min_v, max_v, avg_speed, *speed_range)
        #     m2 = statistics.mean(r0)
        #     print("Squashed, m0: %r, m2: %r" % (m, m2))
        #r = list(map(lambda s: min(speed_range[1], max(speed_range[0], s * f)), r0))
        #mr = statistics.mean(r)
        #print("Cut, m0: %r, m2: %r" % (m, mr))
        # Rescale and clamp; zero entries (pauses) stay zero.
        return [ min(max(s * f, speed_range[0]),
                     speed_range[1]) if s
                 else 0
                 for s in r0 ]
    def all(self):
        """Generator yielding one RecordWrapper per simulated second."""
        cfg = self.config['training']
        cadence_range = self.parse_range(cfg['cadence'])
        speed_range = self.parse_range(cfg['speed'], parser=float)
        time_range = self.parse_range(cfg['time'],
                                      parser=(lambda s:
                                              datetime.strptime(s.strip(),
                                                                '%Y-%m-%d %H:%M:%S%z')))
        base_hr = int(cfg['base_heart_rate'])
        hr_range = self.parse_range(cfg['heart_rate'])
        hr_effect_lasting = int(cfg['hr_effect_lasting'])
        hr_effect_delay = int(cfg['hr_effect_delay'])
        # Heart-rate response slopes: below resp. within the cadence range.
        hr_factor0 = (hr_range[0] - base_hr) / (cadence_range[0])
        hr_factor = (hr_range[1] - hr_range[0]) / (cadence_range[1] - cadence_range[0])
        pauses = list(map(int, cfg['pauses'].split(',')))
        # from km to meters
        total_distance = float(cfg['distance']) * 1000
        total_time = (time_range[1] - time_range[0]).seconds
        avg_speed = (total_distance / 1000) / (total_time / 3600)
        cadence_acc_factor = (
            (cadence_range[1] - cadence_range[0])
            / (speed_range[1] - speed_range[0]))
        if not speed_range[0] <= avg_speed <= speed_range[1]:
            self.error("Required average speed %f is not in permitted range %f - %f"
                       % (avg_speed, *speed_range))
        self.min_frag_len = 5 # seconds
        route = self.route(total_time, avg_speed, speed_range, pauses)
        distance_so_far = 0
        # Heart rate reacts to cadence with a delay, modelled via a sliding
        # window of recent cadence samples.
        hr_effect = hr_effect_delay + hr_effect_lasting
        cadence_log = [ 0 ] * hr_effect
        prev_t = 0
        for t in range(0, total_time):
            speed = route[t]
            dist = speed * 1000 / 3600 * (t - prev_t)
            cadence = (cadence_range[0]
                       + (speed - speed_range[0]) * cadence_acc_factor)
            cadence_log = cadence_log[1:] + [ cadence ]
            cm = statistics.mean(cadence_log[0:hr_effect_lasting])
            if cm >= cadence_range[0]:
                hr = hr_range[0] + (cm - cadence_range[0]) * hr_factor
            else:
                hr = base_hr + hr_factor0 * cm
            distance_so_far += dist
            hr = round(hr)
            cadence = round(cadence)
            # print("At %d, speed: %.2f, dist: %.2f, total dist: %.2f, cadence: %.2f, cm: %.2f, hr: %.2f"
            #       % (t, speed, dist, distance_so_far, cadence, cm, hr))
            data = {
                'hr': hr,
                'total_distance': distance_so_far,
                'speed': speed,
                'cadence': cadence
            }
            prev_t = t
            yield RecordWrapper(self.name,
                                time_range[0] + timedelta(seconds=t), t, data)
    def __iter__(self):
        self._g = self.all()
        return self
    def __next__(self):
        return next(self._g)
| 33.420765 | 105 | 0.514797 |
795435ad6d2408b39d8020f103b042f3a6f24ba2 | 1,596 | py | Python | experiments/visualisation/plot_outliers.py | KoDa-project/pykoda | a7460e5bf4d39b9cd3793efbbdbb341bc1e751c0 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T10:00:26.000Z | 2022-02-21T10:00:26.000Z | experiments/visualisation/plot_outliers.py | KoDa-project/pykoda | a7460e5bf4d39b9cd3793efbbdbb341bc1e751c0 | [
"BSD-3-Clause"
] | null | null | null | experiments/visualisation/plot_outliers.py | KoDa-project/pykoda | a7460e5bf4d39b9cd3793efbbdbb341bc1e751c0 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pykoda
def plot_outliers(company, date, n_sigma: float = 5.0):
    """Plot stations whose median delay exceeds ``n_sigma`` times the
    overall median delay, as two maps (delayed departures and delayed
    arrivals)."""
    # Trip updates between 09:00 and 23:00, joined with the static feed so
    # stop coordinates (stop_lon/stop_lat) are available.
    df = pykoda.datautils.get_data_range(feed='TripUpdates', company=company, start_date=date,
                                         start_hour=9, end_hour=23, merge_static=True)
    # Outlier thresholds: n_sigma times the overall median delay (NaN-safe).
    departure_threshold = n_sigma * np.nanmedian(df.departure_delay.values)
    arrival_threshold = n_sigma * np.nanmedian(df.arrival_delay.values)
    # Per-stop medians; the @-names in query() refer to the locals above.
    df_dep = df.groupby('stop_id').median().query('departure_delay > @departure_threshold')
    df_arr = df.groupby('stop_id').median().query('arrival_delay > @arrival_threshold')
    pykoda.plotutils.setup_mpl()
    # Two stacked map axes in the SWEREF99 projection with shared extents.
    fig, ax = plt.subplots(2, 1, subplot_kw=dict(projection=pykoda.geoutils.SWEREF99), sharex=True, sharey=True,
                           figsize=(6, 1.6 * 6))
    plt.sca(ax[0])
    # Colour encodes the median delay in minutes (seconds / 60).
    plt.scatter(df_dep.stop_lon, df_dep.stop_lat, c=df_dep.departure_delay / 60, vmin=0)
    plt.title('Stations with delayed departures')
    plt.colorbar(label='Delay [m]')
    plt.sca(ax[1])
    plt.scatter(df_arr.stop_lon, df_arr.stop_lat, c=df_arr.arrival_delay / 60, vmin=0)
    plt.colorbar(label='Delay [m]')
    plt.title('Stations with delayed arrivals')
    # Add base maps
    pykoda.plotutils.add_basemap(9, ax[0])
    pykoda.plotutils.add_basemap(9, ax[1])
if __name__ == '__main__':
    # Demo invocation: plot delay outliers for one company feed and show
    # the resulting figures interactively.
    COMPANY = 'otraf'
    DATE = '2020_08_21'
    plot_outliers(COMPANY, DATE)
    plt.show()
| 36.272727 | 112 | 0.685464 |
795435b5a6a8db5c17602aaefc0b0c4320860f7e | 863 | py | Python | src/apps/climsoft/schemas/data_form_schema.py | opencdms/opencdms-api | f1ed6e1d883025a8658746fe457e0c975718c7be | [
"MIT"
] | 3 | 2020-12-01T09:25:18.000Z | 2022-02-14T23:57:34.000Z | src/apps/climsoft/schemas/data_form_schema.py | opencdms/opencdms-api | f1ed6e1d883025a8658746fe457e0c975718c7be | [
"MIT"
] | 11 | 2021-12-05T10:09:00.000Z | 2022-02-17T08:11:22.000Z | src/apps/climsoft/schemas/data_form_schema.py | opencdms/opencdms-api | f1ed6e1d883025a8658746fe457e0c975718c7be | [
"MIT"
] | 2 | 2021-03-10T19:03:05.000Z | 2021-12-11T08:36:04.000Z | from typing import List
from pydantic import BaseModel, constr
from common_schemas import Response
class CreateDataForm(BaseModel):
    """Request schema for creating a Climsoft data-entry form definition."""
    form_name: constr(max_length=250)
    order_num: int
    table_name: constr(max_length=255)
    description: str
    selected: bool
    val_start_position: int
    val_end_position: int
    elem_code_location: constr(max_length=255)
    sequencer: constr(max_length=50)
    entry_mode: bool
class UpdateDataForm(BaseModel):
    """Request schema for updating a data form.

    Same fields as CreateDataForm except form_name, which is omitted —
    presumably because it acts as the immutable identifier (TODO confirm
    against the router/service layer).
    """
    order_num: int
    table_name: constr(max_length=255)
    description: str
    selected: bool
    val_start_position: int
    val_end_position: int
    elem_code_location: constr(max_length=255)
    sequencer: constr(max_length=50)
    entry_mode: bool
class DataForm(CreateDataForm):
    """Read schema: orm_mode lets pydantic build instances from ORM rows."""
    class Config:
        orm_mode = True
class DataFormResponse(Response):
    """Standard response envelope whose result is a list of DataForm."""
    result: List[DataForm]
| 21.04878 | 46 | 0.736964 |
795436d5c747326d907b3a94003d54c5b17b6197 | 1,404 | py | Python | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/azure_firewall_application_rule_protocol_py3.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/azure_firewall_application_rule_protocol_py3.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2019_02_01/models/azure_firewall_application_rule_protocol_py3.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureFirewallApplicationRuleProtocol(Model):
"""Properties of the application rule protocol.
:param protocol_type: Protocol type. Possible values include: 'Http',
'Https'
:type protocol_type: str or
~azure.mgmt.network.v2019_02_01.models.AzureFirewallApplicationRuleProtocolType
:param port: Port number for the protocol, cannot be greater than 64000.
This field is optional.
:type port: int
"""
_validation = {
'port': {'maximum': 64000, 'minimum': 0},
}
_attribute_map = {
'protocol_type': {'key': 'protocolType', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(self, *, protocol_type=None, port: int=None, **kwargs) -> None:
super(AzureFirewallApplicationRuleProtocol, self).__init__(**kwargs)
self.protocol_type = protocol_type
self.port = port
| 35.1 | 84 | 0.61396 |
795436da07e18a9690199ef52095bd67968cb8ee | 391 | py | Python | dtc/enums/order_status_enum.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 15 | 2020-04-26T05:25:53.000Z | 2022-02-11T19:38:42.000Z | dtc/enums/order_status_enum.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 2 | 2021-01-08T19:58:08.000Z | 2021-11-29T06:08:48.000Z | dtc/enums/order_status_enum.py | jseparovic/python-ws-dtc-client | fd3952cdaf7ab8c9d5a26ccf53b5e9acb3a9ea0f | [
"Apache-2.0"
] | 4 | 2020-11-23T13:38:01.000Z | 2021-12-27T13:21:06.000Z |
class OrderStatusEnum:
    """Integer order-status codes for the DTC protocol.

    Plain constant namespace (not a stdlib ``enum.Enum``), so the values
    can be compared directly against integers decoded from the wire.
    """
    ORDER_STATUS_UNSPECIFIED = 0
    ORDER_STATUS_ORDER_SENT = 1
    ORDER_STATUS_PENDING_OPEN = 2
    ORDER_STATUS_PENDING_CHILD = 3
    ORDER_STATUS_OPEN = 4
    ORDER_STATUS_PENDING_CANCEL_REPLACE = 5
    ORDER_STATUS_PENDING_CANCEL = 6
    ORDER_STATUS_FILLED = 7
    ORDER_STATUS_CANCELED = 8
    ORDER_STATUS_REJECTED = 9
    ORDER_STATUS_PARTIALLY_FILLED = 10
| 27.928571 | 43 | 0.764706 |
795436e2e29f775b977cda9c4a2a40882a129526 | 2,874 | py | Python | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232246.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232246.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/ScopesClosuresAndDecorators/.history/Decoraators2_20210716232246.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | """ Decorator Parametors
In the previous videos we saw some built-in decorators that can handle some arguments:
@wraps(fn) @lru_cache(maxsize=256) <\
def inner(): def factorial(n): \
... ... \>function call
This should look quite different from the decorators we have been creating and using:
@timed <----------- no function call
def Fibonacci(n):
...
"""
from symbol import parameters
from time import perf_counter
from unittest import result
def timed(fn):
    """Decorator that reports the average execution time of *fn*.

    The wrapped function is executed 10 times per call; the mean elapsed
    time in seconds is printed and the result of the last run is returned.
    (The repetition count is still hard-coded — parameterising it is the
    subject of the notes below.)
    """
    from time import perf_counter

    def inner(*args, **kwargs):
        # Fixed the original draft's NameErrors: the parameters were typo'd
        # as *arhs/**kwarrgs while the body used *args/**kwargs, and the
        # accumulator was initialised as total_elapse but updated as
        # total_elapsed.
        total_elapsed = 0
        for i in range(10): # hardcoded value 10 # need to pass as a parameter
            start = perf_counter()
            result = fn(*args, **kwargs)
            total_elapsed += (perf_counter() - start)
        avg_elapsed = total_elapsed / 10
        print(avg_elapsed)
        return result

    return inner
"""
@timed
def my_func(): or my_func = timed(my_func)
...
One approach to passing the hard-coded repetition count (the 10 above) as a parameter
/ < extra parameter
def timed(fn, reps):
from time import perf_counter
def inner(*args, **kwargs):
total_elapsed = 0 / free variable
for i in range(reps): <
start = perf_counter()
result = fn(*ars, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / reps
print(avg_elapsed)
return result
return inner
my_func = timed(my_func, 10)
# Rethinking the solution
@timed
def my_func(): my_func = timed solution(my_func)
...
So, timed is a function that returns that inner closure that contains our original function
In order for this to work as intended:
@timed(10)
def my_func():
...
dec = timed(10) # will need to return our original timed decorator when called
dec = timed(10) # timed(10) returns a decorator
@dec
def my_func():
...
# Nested closures to the rescue!
def timed(fn): # Timed is basically a decorator == it only takes a single parameter
from time import perf_counter
def inner(*args, **kwargs):
total_elapsed = 0 / free variable bound to reps in outer
for i in range(reps): <
start = perf_counter()
result = fn(*ars, **kwargs)
total_elapsed += (perf_counter() - start)
avg_elapsed = total_elapsed / reps
print(avg_elapsed)
return result
return inner
return timed # cslling outer(n) returns our originsl decorator withrepd det to n (free variable)
my_func = outer(10)(my_func) or @outer(10)
def my_func():
...
"""
| 26.127273 | 103 | 0.573765 |
795437bff3b25a7c6e4261d1d6d7954ab7dc68c6 | 4,537 | py | Python | tempest/api/compute/v3/images/test_images_oneserver.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | 3 | 2015-03-03T15:43:06.000Z | 2016-10-24T06:12:40.000Z | tempest/api/compute/v3/images/test_images_oneserver.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/v3/images/test_images_oneserver.py | rcbops-qe/tempest | 88960aa32c473b64072671541a136dbae41b1d4c | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ImagesOneServerV3Test(base.BaseV3ComputeTest):
    """Snapshot-image tests that reuse a single shared test server.

    The server is created once in ``setUpClass``; each test verifies it is
    still ACTIVE in ``setUp`` and rebuilds it if not.
    """

    def setUp(self):
        # NOTE(afazekas): Normally we use the same server with all test cases,
        # but if it has an issue, we build a new one
        super(ImagesOneServerV3Test, self).setUp()
        # Check if the server is in a clean state after test
        try:
            self.servers_client.wait_for_server_status(self.server_id,
                                                       'ACTIVE')
        except Exception:
            LOG.exception('server %s timed out to become ACTIVE. rebuilding'
                          % self.server_id)
            # Rebuild server if cannot reach the ACTIVE state
            # Usually it means the server had a serious accident
            self.__class__.server_id = self.rebuild_server(self.server_id)

    def tearDown(self):
        """Terminate test instances created after a test is executed."""
        self.server_check_teardown()
        super(ImagesOneServerV3Test, self).tearDown()

    @classmethod
    def setUpClass(cls):
        # Skips the whole class when the glance image service is unavailable.
        super(ImagesOneServerV3Test, cls).setUpClass()
        cls.client = cls.images_client
        if not CONF.service_available.glance:
            skip_msg = ("%s skipped as glance is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        try:
            resp, server = cls.create_test_server(wait_until='ACTIVE')
            cls.server_id = server['id']
        except Exception:
            # Clean up the half-built class fixtures before re-raising,
            # otherwise the server would leak.
            cls.tearDownClass()
            raise

    def _get_default_flavor_disk_size(self, flavor_id):
        # Return the 'disk' size reported for the given flavor.
        resp, flavor = self.flavors_client.get_flavor_details(flavor_id)
        return flavor['disk']

    @test.attr(type='smoke')
    def test_create_delete_image(self):
        # Create a new image
        name = data_utils.rand_name('image')
        meta = {'image_type': 'test'}
        resp, body = self.servers_client.create_image(self.server_id,
                                                      name, meta)
        self.assertEqual(202, resp.status)
        image_id = data_utils.parse_image_id(resp['location'])
        self.client.wait_for_image_status(image_id, 'active')

        # Verify the image was created correctly
        resp, image = self.client.get_image_meta(image_id)
        self.assertEqual(name, image['name'])
        self.assertEqual('test', image['properties']['image_type'])

        resp, original_image = self.client.get_image_meta(self.image_ref)

        # Verify minRAM is the same as the original image
        self.assertEqual(image['min_ram'], original_image['min_ram'])

        # Verify minDisk is the same as the original image or the flavor size
        flavor_disk_size = self._get_default_flavor_disk_size(self.flavor_ref)
        self.assertIn(str(image['min_disk']),
                      (str(original_image['min_disk']), str(flavor_disk_size)))

        # Verify the image was deleted correctly
        resp, body = self.client.delete_image(image_id)
        self.assertEqual('200', resp['status'])
        self.client.wait_for_resource_deletion(image_id)

    @test.attr(type=['gate'])
    def test_create_image_specify_multibyte_character_image_name(self):
        # prefix character is:
        # http://www.fileformat.info/info/unicode/char/1F4A9/index.htm
        utf8_name = data_utils.rand_name(u'\xF0\x9F\x92\xA9')
        resp, body = self.servers_client.create_image(self.server_id,
                                                      utf8_name)
        image_id = data_utils.parse_image_id(resp['location'])
        self.addCleanup(self.client.delete_image, image_id)
        self.assertEqual('202', resp['status'])
7954392561f4fa26365e94acf03a824a254895ef | 1,824 | py | Python | tests/test_oas_couple.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 1 | 2021-11-22T18:15:09.000Z | 2021-11-22T18:15:09.000Z | tests/test_oas_couple.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 3 | 2021-05-10T18:46:16.000Z | 2021-06-01T16:51:48.000Z | tests/test_oas_couple.py | RogerEMO/srd | 40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5 | [
"MIT"
] | 1 | 2021-05-05T17:20:06.000Z | 2021-05-05T17:20:06.000Z | import pytest
from math import isclose
import sys
sys.path.append('/Users/pyann/Dropbox (CEDIA)/srd/Model')
import srd
from srd import oas
year = 2019
@pytest.mark.parametrize('age, inc_oas', [(58, 0), (62, 0), (70, 7000)])
def test_age_oas(age, inc_oas):
    """Both same-age spouses should receive the parametrized OAS amount."""
    spouse_a, spouse_b = srd.Person(age=age), srd.Person(age=age)
    household = srd.Hhold(spouse_a, second=spouse_b, prov='qc')
    oas.program(year).file(household)
    assert all(isclose(p.inc_oas, inc_oas, rel_tol=0.1) for p in household.sp)
@pytest.mark.parametrize('net_inc, inc_oas', [(60000, 7000), (80000, 6000),
                                              (150000, 0)])
def test_net_inc_oas(net_inc, inc_oas):
    """OAS should be progressively clawed back as net income rises."""
    spouses = [srd.Person(age=70, othtax=net_inc) for _ in range(2)]
    household = srd.Hhold(spouses[0], second=spouses[1], prov='qc')
    oas.program(year).file(household)
    assert all(isclose(p.inc_oas, inc_oas, rel_tol=0.1) for p in household.sp)
@pytest.mark.parametrize('inc_non_work, inc_gis', [(0, 6000), (10000, 12 * 95.41),
                                                   (20000, 0)])
def test_inc_non_work_inc_gis(inc_non_work, inc_gis):
    """GIS should shrink with the couple's non-work income (first spouse checked)."""
    first = srd.Person(age=70, othtax=inc_non_work)
    second = srd.Person(age=70, othtax=inc_non_work)
    household = srd.Hhold(first, second=second, prov='qc')
    oas.program(year).file(household)
    assert isclose(first.inc_gis, inc_gis, rel_tol=0.1)
@pytest.mark.parametrize('age, allow_couple', [(58, 0), (62, 15000), (67, 0)])
def test_age_allow_couple(age, allow_couple):
    """Spouse allowance is paid only for the 62-year-old case in the data."""
    senior = srd.Person(age=70)
    younger = srd.Person()
    # Age is set after construction, mirroring the original scenario setup.
    younger.age = age
    household = srd.Hhold(senior, second=younger, prov='qc')
    oas.program(year).file(household)
    assert isclose(younger.allow_couple, allow_couple, rel_tol=0.2)
79543ac4aeb94007f65bf2944ff1b6b6db01dbce | 3,207 | py | Python | tag_recommender/multilabel_classification.py | h4iku/tag-recom | 3acdeeed14ff11329ef724d30d99300d53ffc0f3 | [
"MIT"
] | 1 | 2022-01-04T05:51:38.000Z | 2022-01-04T05:51:38.000Z | tag_recommender/multilabel_classification.py | h4iku/tag-recom | 3acdeeed14ff11329ef724d30d99300d53ffc0f3 | [
"MIT"
] | null | null | null | tag_recommender/multilabel_classification.py | h4iku/tag-recom | 3acdeeed14ff11329ef724d30d99300d53ffc0f3 | [
"MIT"
] | null | null | null | import json
import pickle
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.calibration import CalibratedClassifierCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import MinMaxScaler, MultiLabelBinarizer
from sklearn.svm import LinearSVC
from datasets import DATASET
class FeatureSelector(BaseEstimator, TransformerMixin):
    """Selecting appropriate feature set in the pipeline.

    ``key`` names one of the question's text attributes ('title', 'body',
    'codes'), which is joined into a single string per question; any other
    key yields the boolean meta-feature triple per question.
    """

    # Attributes that are word lists to be joined into one string.
    _TEXT_KEYS = ('title', 'body', 'codes')

    def __init__(self, key):
        self.key = key

    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, data):
        if self.key in self._TEXT_KEYS:
            return [' '.join(getattr(question, self.key)) for question in data]
        return [[question.has_code, question.has_link, question.has_math]
                for question in data]
def multilabel_clf(train_set, test_set):
    """Multilabel Classification using LinearSVM.

    Trains a one-vs-rest, probability-calibrated LinearSVC over a union of
    tf-idf features (title 1-3-grams, body, code blocks) plus the boolean
    meta features, then predicts tag probabilities for ``test_set``.

    Returns a tuple ``(tags_order, results)``: ``tags_order`` is the list of
    tag names in classifier-column order, and ``results`` holds, for each
    test question, the per-tag probabilities rescaled to [0, 1].
    """
    train_tags = [q.tags for q in train_set]

    # Classes need to be binarized for the classifier
    mlb = MultiLabelBinarizer()
    train_labels = mlb.fit_transform(train_tags)

    classifier = Pipeline([
        ('feats', FeatureUnion([
            ('title_ngram', Pipeline([
                ('title', FeatureSelector('title')),
                ('title_tfidf', TfidfVectorizer(
                    ngram_range=(1, 3), sublinear_tf=True))
            ])),
            ('body_ngram', Pipeline([
                ('body', FeatureSelector('body')),
                ('body_tfidf', TfidfVectorizer(sublinear_tf=True))
            ])),
            ('codes_ngram', Pipeline([
                ('codes', FeatureSelector('codes')),
                ('codes_tfidf', TfidfVectorizer(sublinear_tf=True))
            ])),
            ('meta_feats', FeatureSelector('meta'))
        ])),
        # CalibratedClassifierCV wraps LinearSVC so predict_proba exists.
        ('clf', OneVsRestClassifier(CalibratedClassifierCV(LinearSVC(), cv=3)))
    ])

    classifier.fit(train_set, train_labels)

    # Getting probabilities for all tags in each test questions
    probas = classifier.predict_proba(test_set)
    tags_order = mlb.classes_.tolist()

    # Rescale each question's probability vector to [0, 1] independently.
    min_max_scaler = MinMaxScaler()
    results = []
    for proba in probas:
        prob = np.array([float(p)
                         for p in proba]).reshape(-1, 1)
        normalized_proba = np.concatenate(
            min_max_scaler.fit_transform(prob)
        ).tolist()
        results.append(normalized_proba)

    return tags_order, results
def main():
    """Load the pickled train/test folds, classify, and persist the output."""
    with DATASET.train_set.open('rb') as fh:
        train_questions = pickle.load(fh)
    with DATASET.test_set.open('rb') as fh:
        test_questions = pickle.load(fh)

    tags_order, predicted = multilabel_clf(train_questions, test_questions)

    with open(DATASET.fold_root / 'tags_order.json', 'w') as fh:
        json.dump(tags_order, fh)
    with open(DATASET.fold_root / 'mul_clf_proba.pickle', 'wb') as fh:
        pickle.dump(predicted, fh, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == '__main__':
    main()
| 32.07 | 79 | 0.637356 |
79543afc4b69509596d0071915a987f7d3f07fa6 | 284 | py | Python | src/django_delta_logger/__init__.py | rennat/django_delta_logger | 5bb2a2edd8258e9c146a515886b2bf0e0df00365 | [
"MIT"
] | null | null | null | src/django_delta_logger/__init__.py | rennat/django_delta_logger | 5bb2a2edd8258e9c146a515886b2bf0e0df00365 | [
"MIT"
] | null | null | null | src/django_delta_logger/__init__.py | rennat/django_delta_logger | 5bb2a2edd8258e9c146a515886b2bf0e0df00365 | [
"MIT"
] | null | null | null | __VERSION__ = '0.1.1'
from enum import Enum
import json
from django.core.exceptions import ImproperlyConfigured
default_app_config = 'django_delta_logger.apps.DjangoDeltaLoggerConfig'
DOES_NOT_EXIST = object()
DeltaEventType = Enum('DeltaEventType', 'CREATED UPDATED DELETED')
| 18.933333 | 71 | 0.806338 |
79543affc7c074bf1486ae70fc9e5bae48ecb5c7 | 3,581 | py | Python | docs/generate_api_docs.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | docs/generate_api_docs.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | docs/generate_api_docs.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
def generate_docs():
    """Build docs/build/modules_api.rst plus one doc directory per module.

    For every package under malcolm/modules this copies (or creates) its
    docs directory, generates any missing ``*_api.rst`` automodule pages and
    an ``index.rst``, and adds the module's index to the top-level toctree.
    """
    build_dir = os.path.join(repo_root, "docs", "build")
    # Start from a clean build directory on every run.
    if os.path.isdir(build_dir):
        shutil.rmtree(build_dir)
    os.mkdir(build_dir)
    # open the .rst file
    fname = os.path.join(repo_root, "docs", "build", "modules_api.rst")
    with open(fname, "w") as api_docs:
        # add header
        api_docs.write("malcolm.modules\n")
        api_docs.write("===============\n\n")
        api_docs.write(".. module:: malcolm.modules\n\n")
        modules_root = os.path.join(repo_root, "malcolm", "modules")
        # Add the toctree
        api_docs.write(".. toctree::\n")
        api_docs.write("    :maxdepth: 1\n\n")
        # Add entries for each module
        for modulename in sorted(os.listdir(modules_root)):
            module_root = os.path.join(modules_root, modulename)
            if not os.path.isdir(module_root):
                continue
            # Copy the docs dir if it exists
            docs_build = os.path.join(repo_root, "docs", "build", modulename)
            docs_dir = os.path.join(module_root, "docs")
            if os.path.isdir(docs_dir):
                shutil.copytree(docs_dir, docs_build)
                # Hand-written pages already shipped with this module.
                documents = [
                    x[:-4] for x in os.listdir(docs_dir) if x.endswith(".rst")]
            else:
                os.mkdir(docs_build)
                documents = []
            dirs = sorted(os.listdir(module_root))
            # Make any parameters and defines docs
            for fname in ["parameters.py", "defines.py"]:
                docname = "%s_api" % fname[:-3]
                if fname in dirs and docname not in documents:
                    # Make document for module
                    section = "malcolm.modules.%s.%s" % (modulename, fname[:-3])
                    make_automodule_doc(section, docs_build)
                    documents.append(docname)
            for dirname in ["blocks", "includes", "controllers", "parts",
                            "infos", "vmetas"]:
                docname = "%s_api" % dirname
                if dirname in dirs and docname not in documents:
                    # Make document for module
                    section = "malcolm.modules.%s.%s" % (modulename, dirname)
                    make_automodule_doc(section, docs_build)
                    documents.append(docname)
            # Make the index if it doesn't exist
            if documents and "index" not in documents:
                make_index_doc(modulename, docs_build, documents)
            # Add to top level page
            if documents:
                api_docs.write("    %s/index\n" % modulename)
def make_automodule_doc(section, docs_build):
    """Write a minimal ``automodule`` .rst page for *section* into *docs_build*.

    The file is named after the last dotted component of *section*, with an
    ``_api.rst`` suffix.
    """
    docname = section.rsplit(".")[-1]
    page = [
        docname,
        "=" * len(docname),
        "",
        ".. automodule:: %s" % section,
        "    :members:",
    ]
    with open(os.path.join(docs_build, docname + "_api.rst"), "w") as f:
        f.write("\n".join(page) + "\n")
def make_index_doc(modulename, docs_build, doc_dirs):
    """Write an ``index.rst`` for *modulename* listing *doc_dirs* in a toctree."""
    header = [
        modulename,
        "=" * len(modulename),
        "",
        ".. module:: malcolm.modules.%s" % modulename,
        "",
        ".. toctree::",
        "    :maxdepth: 1",
        "    :caption: malcolm.modules.%s" % modulename,
        "",
    ]
    entries = ["    %s" % doc for doc in doc_dirs]
    with open(os.path.join(docs_build, "index.rst"), "w") as f:
        f.write("\n".join(header + entries) + "\n")


if __name__ == "__main__":
    generate_docs()
| 38.923913 | 80 | 0.547892 |
79543bfde1782f21d17a28d90e59435c93b21b87 | 8,285 | py | Python | sdk/lusid_notifications/models/create_sms_notification.py | finbourne/notifications-sdk-python-preview | 2368e05445c74dc248afc1c98efa9f2ca895de3b | [
"MIT"
] | null | null | null | sdk/lusid_notifications/models/create_sms_notification.py | finbourne/notifications-sdk-python-preview | 2368e05445c74dc248afc1c98efa9f2ca895de3b | [
"MIT"
] | null | null | null | sdk/lusid_notifications/models/create_sms_notification.py | finbourne/notifications-sdk-python-preview | 2368e05445c74dc248afc1c98efa9f2ca895de3b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
FINBOURNE Notifications API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.1.317
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_notifications.configuration import Configuration
class CreateSmsNotification(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    openapi_types = {
        'description': 'str',
        'body': 'str',
        'recipients': 'list[str]'
    }

    attribute_map = {
        'description': 'description',
        'body': 'body',
        'recipients': 'recipients'
    }

    required_map = {
        'description': 'required',
        'body': 'required',
        'recipients': 'required'
    }

    def __init__(self, description=None, body=None, recipients=None, local_vars_configuration=None):  # noqa: E501
        """CreateSmsNotification - a model defined in OpenAPI

        :param description: The summary of the services provided by the notification (required)
        :type description: str
        :param body: The body of the SMS (required)
        :type body: str
        :param recipients: The phone numbers to which the SMS will be sent to (E.164 format) (required)
        :type recipients: list[str]
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._description = None
        self._body = None
        self._recipients = None
        self.discriminator = None

        # Assigning through the properties triggers client-side validation.
        self.description = description
        self.body = body
        self.recipients = recipients

    @property
    def description(self):
        """Gets the description of this CreateSmsNotification.  # noqa: E501

        The summary of the services provided by the notification  # noqa: E501

        :return: The description of this CreateSmsNotification.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this CreateSmsNotification.

        The summary of the services provided by the notification  # noqa: E501

        :param description: The description of this CreateSmsNotification.  # noqa: E501
        :type description: str
        """
        # Validation (when enabled): required, 1..512 chars, any content.
        if self.local_vars_configuration.client_side_validation and description is None:  # noqa: E501
            raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                description is not None and len(description) > 512):
            raise ValueError("Invalid value for `description`, length must be less than or equal to `512`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                description is not None and len(description) < 1):
            raise ValueError("Invalid value for `description`, length must be greater than or equal to `1`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                description is not None and not re.search(r'^[\s\S]*$', description)):  # noqa: E501
            raise ValueError(r"Invalid value for `description`, must be a follow pattern or equal to `/^[\s\S]*$/`")  # noqa: E501

        self._description = description

    @property
    def body(self):
        """Gets the body of this CreateSmsNotification.  # noqa: E501

        The body of the SMS  # noqa: E501

        :return: The body of this CreateSmsNotification.  # noqa: E501
        :rtype: str
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this CreateSmsNotification.

        The body of the SMS  # noqa: E501

        :param body: The body of this CreateSmsNotification.  # noqa: E501
        :type body: str
        """
        # Validation (when enabled): required, 1..1024 chars, any content.
        if self.local_vars_configuration.client_side_validation and body is None:  # noqa: E501
            raise ValueError("Invalid value for `body`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                body is not None and len(body) > 1024):
            raise ValueError("Invalid value for `body`, length must be less than or equal to `1024`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                body is not None and len(body) < 1):
            raise ValueError("Invalid value for `body`, length must be greater than or equal to `1`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                body is not None and not re.search(r'^[\s\S]*$', body)):  # noqa: E501
            raise ValueError(r"Invalid value for `body`, must be a follow pattern or equal to `/^[\s\S]*$/`")  # noqa: E501

        self._body = body

    @property
    def recipients(self):
        """Gets the recipients of this CreateSmsNotification.  # noqa: E501

        The phone numbers to which the SMS will be sent to (E.164 format)  # noqa: E501

        :return: The recipients of this CreateSmsNotification.  # noqa: E501
        :rtype: list[str]
        """
        return self._recipients

    @recipients.setter
    def recipients(self, recipients):
        """Sets the recipients of this CreateSmsNotification.

        The phone numbers to which the SMS will be sent to (E.164 format)  # noqa: E501

        :param recipients: The recipients of this CreateSmsNotification.  # noqa: E501
        :type recipients: list[str]
        """
        if self.local_vars_configuration.client_side_validation and recipients is None:  # noqa: E501
            raise ValueError("Invalid value for `recipients`, must not be `None`")  # noqa: E501

        self._recipients = recipients

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models; models whose to_dict takes a
        # `serialize` flag are forwarded it.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit JSON wire names instead of attribute names.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateSmsNotification):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CreateSmsNotification):
            return True

        return self.to_dict() != other.to_dict()
79543d9c15bdd168f225c179bb1b62dd40f972b2 | 5,923 | py | Python | toontown/battle/FireCogPanel.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | null | null | null | toontown/battle/FireCogPanel.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 1 | 2021-06-08T17:16:48.000Z | 2021-06-08T17:16:48.000Z | toontown/battle/FireCogPanel.py | SuperM0use24/TT-CL-Edition | fdad8394f0656ae122b687d603f72afafd220c65 | [
"MIT"
] | 3 | 2021-06-03T05:36:36.000Z | 2021-06-22T15:07:31.000Z | from toontown.toonbase.ToontownBattleGlobals import *
from toontown.toonbase import ToontownGlobals
from direct.fsm import StateData
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattleBase
from direct.gui.DirectGui import *
from panda3d.core import *
from toontown.toonbase import TTLocalizer
class FireCogPanel(StateData.StateData):
    """Battle GUI panel for choosing which cog to fire using pink slips.

    One arrow button is shown per targetable cog; a button is disabled (red
    text) when the local toon cannot afford that cog's pink-slip cost.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('ChooseAvatarPanel')

    def __init__(self, doneEvent):
        self.notify.debug('Init choose panel...')
        StateData.StateData.__init__(self, doneEvent)
        self.numAvatars = 0
        self.chosenAvatar = 0
        self.toon = 0
        self.loaded = 0

    def load(self):
        """Build the panel GUI (frame, per-cog buttons, back button) once."""
        gui = loader.loadModel('phase_3.5/models/gui/battle_gui')
        self.frame = DirectFrame(relief=None, image=gui.find('**/BtlPick_TAB'), image_color=Vec4(1, 0.2, 0.2, 1))
        self.frame.hide()
        self.statusFrame = DirectFrame(parent=self.frame, relief=None, image=gui.find('**/ToonBtl_Status_BG'), image_color=Vec4(0.5, 0.9, 0.5, 1), pos=(0.611, 0, 0))
        self.textFrame = DirectFrame(parent=self.frame, relief=None, image=gui.find('**/PckMn_Select_Tab'), image_color=Vec4(1, 1, 0, 1), image_scale=(1.0, 1.0, 2.0), text='', text_fg=Vec4(0, 0, 0, 1), text_pos=(0, 0.02, 0), text_scale=TTLocalizer.FCPtextFrame, pos=(-0.013, 0, 0.013))
        self.textFrame['text'] = TTLocalizer.FireCogTitle % localAvatar.getPinkSlips()
        self.avatarButtons = []
        for i in xrange(4):
            button = DirectButton(parent=self.frame, relief=None, text='', text_fg=Vec4(0, 0, 0, 1), text_scale=0.067, text_pos=(0, -0.015, 0), textMayChange=1, image_scale=(1.0, 1.0, 1.0), image=(gui.find('**/PckMn_Arrow_Up'), gui.find('**/PckMn_Arrow_Dn'), gui.find('**/PckMn_Arrow_Rlvr')), command=self.__handleAvatar, extraArgs=[i])
            button.setScale(1, 1, 1)
            button.setPos(0, 0, 0.2)
            self.avatarButtons.append(button)

        self.backButton = DirectButton(parent=self.frame, relief=None, image=(gui.find('**/PckMn_BackBtn'), gui.find('**/PckMn_BackBtn_Dn'), gui.find('**/PckMn_BackBtn_Rlvr')), pos=(-0.647, 0, 0.006), scale=1.05, text=TTLocalizer.TownBattleChooseAvatarBack, text_scale=0.05, text_pos=(0.01, -0.012), text_fg=Vec4(0, 0, 0.8, 1), command=self.__handleBack)
        gui.removeNode()
        self.loaded = 1

    def unload(self):
        if self.loaded:
            self.frame.destroy()
            del self.frame
            del self.statusFrame
            del self.textFrame
            del self.avatarButtons
            del self.backButton
            self.loaded = 0

    def enter(self, numAvatars, localNum=None, luredIndices=None, trappedIndices=None, track=None, fireCosts=None, immuneIndices=None):
        """Show the panel with one button per targetable cog.

        Lured cogs are invalid targets for TRAP/LURE tracks, trapped cogs for
        TRAP, and immune cogs always.  ``fireCosts`` maps button index to the
        pink-slip cost of firing that cog.
        """
        if not self.loaded:
            self.load()
        self.frame.show()
        invalidTargets = []
        if not self.toon:
            # Truthiness checks also cover the default None values, which the
            # previous len(...) > 0 checks would have crashed on.
            if luredIndices and (track == BattleBase.TRAP or track == BattleBase.LURE):
                invalidTargets += luredIndices
            if trappedIndices and track == BattleBase.TRAP:
                invalidTargets += trappedIndices
            if immuneIndices:
                invalidTargets += immuneIndices
        self.__placeButtons(numAvatars, invalidTargets, localNum, fireCosts)

    def exit(self):
        self.frame.hide()

    def __handleBack(self):
        doneStatus = {'mode': 'Back'}
        messenger.send(self.doneEvent, [doneStatus])

    def __handleAvatar(self, avatar):
        doneStatus = {'mode': 'Avatar',
         'avatar': avatar}
        messenger.send(self.doneEvent, [doneStatus])

    def adjustCogs(self, numAvatars, luredIndices, trappedIndices, track):
        """Refresh buttons after the cog lineup changes mid-battle."""
        invalidTargets = []
        if len(luredIndices) > 0:
            if track == BattleBase.TRAP or track == BattleBase.LURE:
                invalidTargets += luredIndices
        if len(trappedIndices) > 0:
            if track == BattleBase.TRAP:
                invalidTargets += trappedIndices
        # BUG FIX: this call used to omit the required fireCosts argument and
        # raised a TypeError; __placeButtons now defaults fireCosts to None.
        self.__placeButtons(numAvatars, invalidTargets, None)

    def adjustToons(self, numToons, localNum):
        """Refresh buttons after the toon lineup changes mid-battle."""
        # Same latent TypeError fix as adjustCogs (fireCosts now optional).
        self.__placeButtons(numToons, [], localNum)

    def __placeButtons(self, numAvatars, invalidTargets, localNum, fireCosts=None):
        """Show, enable/disable and position the per-cog buttons.

        When ``fireCosts`` is None (the adjust* refresh paths), cost checking
        is skipped and every shown button stays enabled.
        """
        canfire = 0
        for i in xrange(4):
            if numAvatars > i and i not in invalidTargets and i != localNum:
                self.avatarButtons[i].show()
                self.avatarButtons[i]['text'] = ''
                if fireCosts is None or fireCosts[i] <= localAvatar.getPinkSlips():
                    self.avatarButtons[i]['state'] = DGG.NORMAL
                    self.avatarButtons[i]['text_fg'] = (0, 0, 0, 1)
                    canfire = 1
                else:
                    # Too expensive: keep visible but disabled, in red.
                    self.avatarButtons[i]['state'] = DGG.DISABLED
                    self.avatarButtons[i]['text_fg'] = (1.0, 0, 0, 1)
            else:
                self.avatarButtons[i].hide()

        # Title reflects whether at least one cog can currently be fired.
        if canfire:
            self.textFrame['text'] = TTLocalizer.FireCogTitle % localAvatar.getPinkSlips()
        else:
            self.textFrame['text'] = TTLocalizer.FireCogLowTitle % localAvatar.getPinkSlips()

        # Spread the visible buttons horizontally around the panel centre.
        if numAvatars == 1:
            self.avatarButtons[0].setX(0)
        elif numAvatars == 2:
            self.avatarButtons[0].setX(0.2)
            self.avatarButtons[1].setX(-0.2)
        elif numAvatars == 3:
            self.avatarButtons[0].setX(0.4)
            self.avatarButtons[1].setX(0.0)
            self.avatarButtons[2].setX(-0.4)
        elif numAvatars == 4:
            self.avatarButtons[0].setX(0.6)
            self.avatarButtons[1].setX(0.2)
            self.avatarButtons[2].setX(-0.2)
            self.avatarButtons[3].setX(-0.6)
        else:
            self.notify.error('Invalid number of avatars: %s' % numAvatars)
| 45.914729 | 354 | 0.609657 |
79543e96cbadc0cd310634a83503f67f468d4dcc | 1,472 | py | Python | ae_python/property.py | Kalbra/after-effects-python | bdcaf395ba2b5d4c33d1109921a30cc00bad3775 | [
"MIT"
] | 2 | 2021-01-27T21:04:04.000Z | 2022-02-06T04:58:04.000Z | ae_python/property.py | Kalbra/after-effects-python | bdcaf395ba2b5d4c33d1109921a30cc00bad3775 | [
"MIT"
] | null | null | null | ae_python/property.py | Kalbra/after-effects-python | bdcaf395ba2b5d4c33d1109921a30cc00bad3775 | [
"MIT"
] | null | null | null | class Property:
def __init__(self, default_value):
# If the type is property so this class not a subclass will be created the values will be transfer.
if type(default_value) == Property:
self.default_value = default_value.default_value
self.value_stack = default_value.value_stack
else:
self.default_value = default_value
# The value stack is 3D array to set values at given times. This is needed to make animations. The array
# subarray is in the following format: [time, value]
self.value_stack = []
"""
With this method you can set a keyframe at a given time, so it is possible to make animation or changes over time.
:parameter time: The time when the keyframe will set.
:parameter value: The value you want to set.
"""
def setValueAtTime(self, time, value):
self.value_stack.append([time, value])
def __str__(self):
return str(self.default_value)
"""
The get item attribute. Used to return an element of an array.
:returns: Element of array, identified by number like normal.
"""
def __getitem__(self, item):
return self.default_value[int(item)]
"""
This method checks if a value.
:returns: True if the value is none else false.
"""
def isNone(self):
if self.default_value == None:
return True
else:
return False
| 32.711111 | 118 | 0.634511 |
79543eba331f0cf4356b7a81393e7bd643c0d96b | 4,867 | py | Python | configs/top_down/darkpose/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark_plus.py | saccadic/mmpose | b90aa3aa865c3246a6ab95536fe6607d48224e66 | [
"Apache-2.0"
] | null | null | null | configs/top_down/darkpose/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark_plus.py | saccadic/mmpose | b90aa3aa865c3246a6ab95536fe6607d48224e66 | [
"Apache-2.0"
] | null | null | null | configs/top_down/darkpose/coco-wholebody/hrnet_w48_coco_wholebody_384x288_dark_plus.py | saccadic/mmpose | b90aa3aa865c3246a6ab95536fe6607d48224e66 | [
"Apache-2.0"
] | null | null | null | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup=None,
# warmup='linear',
# warmup_iters=500,
# warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=133,
dataset_joints=133,
dataset_channel=[
list(range(133)),
],
inference_channel=list(range(133)))
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/top_down/'
'hrnet/hrnet_w48_coco_384x288_dark-741844ba_20200812.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384))),
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=48,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='unbiased',
shift_heatmap=True,
modulate_kernel=11),
loss_pose=dict(type='JointsMSELoss', use_target_weight=True))
data_cfg = dict(
image_size=[288, 384],
heatmap_size=[72, 96],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=3, unbiased_encoding=True),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
train=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_train_v1.0.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoWholeBodyDataset',
ann_file=f'{data_root}/annotations/coco_wholebody_val_v1.0.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 28.461988 | 79 | 0.596055 |
79543f78b42f29602eb148c40831bbc7845911e6 | 1,334 | py | Python | packages/sklearn/_bak/svm/nodes/svm___NuSVC0/svm___NuSVC0.py | frecklebars/Ryven | 86a8c06effc47897d0b8fbbd1fa8580a957f9515 | [
"MIT"
] | 18 | 2021-01-18T09:52:41.000Z | 2022-03-22T10:48:44.000Z | packages/sklearn/_bak/svm/nodes/svm___NuSVC0/svm___NuSVC0.py | frecklebars/Ryven | 86a8c06effc47897d0b8fbbd1fa8580a957f9515 | [
"MIT"
] | null | null | null | packages/sklearn/_bak/svm/nodes/svm___NuSVC0/svm___NuSVC0.py | frecklebars/Ryven | 86a8c06effc47897d0b8fbbd1fa8580a957f9515 | [
"MIT"
] | 3 | 2021-01-18T09:49:42.000Z | 2022-03-22T10:48:47.000Z | from NIENV import *
# API METHODS --------------
# self.main_widget
# self.update_shape()
# Ports
# self.input(index)
# self.set_output_val(index, val)
# self.exec_output(index)
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', target='global')
# self.log_message('that\'s not good', target='error')
# --------------------------
from sklearn.svm import NuSVC
class NuSVC_NodeInstance(NodeInstance):
    """Ryven node wrapping scikit-learn's NuSVC classifier.

    Inputs: 0 = exec trigger, 1 = optional dict of NuSVC parameters,
    2 = training samples X, 3 = training labels y.
    On trigger, a classifier is fitted, published on output 1, and exec
    output 0 fires.
    """

    def __init__(self, params):
        super(NuSVC_NodeInstance, self).__init__(params)

    def update_event(self, input_called=-1):
        # Only react when the exec input (index 0) fired.
        if input_called == 0:
            clf = NuSVC()
            # Fetch the optional parameter dict once instead of twice,
            # and use the idiomatic identity check (was: "!= None").
            params = self.input(1)
            if params is not None:
                clf.set_params(**params)
            X = self.input(2)
            y = self.input(3)
            clf.fit(X, y)
            self.set_output_val(1, clf)
            self.exec_output(0)

    def get_data(self):
        # No custom state to serialize.
        return {}

    def set_data(self, data):
        pass

    def removing(self):
        pass
| 23.403509 | 83 | 0.595952 |
79543fc29aaf4c3a43026b403fced3814c7b412b | 1,571 | py | Python | pypy/tool/pytest/test/test_astrewrite.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | null | null | null | pypy/tool/pytest/test/test_astrewrite.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | null | null | null | pypy/tool/pytest/test/test_astrewrite.py | hollmmax/zig | d80baa5a5fcbc82b3e2294b398edc20a98737a52 | [
"MIT"
] | null | null | null | from pytest import raises
from pypy.tool.pytest.astrewriter import ast_rewrite
def get_assert_explanation(space, src):
    """Compile *src* with assert rewriting, run it, and return the
    explanation string carried by the raised AssertionError.

    The code is executed in a fresh module dict; the first item of the
    exception's ``args`` tuple (the rendered explanation) is returned.
    """
    fn = "?"  # placeholder filename for the compiled code object
    w_code = ast_rewrite.rewrite_asserts(space, src, fn)
    w_d = space.newdict(module=True)
    excinfo = space.raises_w(space.w_AssertionError, space.exec_, w_code, w_d, w_d)
    return space.text_w(space.getitem(space.getattr(excinfo.value.get_w_value(space), space.newtext("args")), space.newint(0)))
def test_simple(space):
    # Simple comparison between a global and a function-local name:
    # both operand values must appear in the explanation.
    # NOTE(review): in-string indentation reconstructed -- confirm against
    # the original file.
    src = """
x = 1
def f():
    y = 2
    assert x == y
f()
"""
    expl = get_assert_explanation(space, src)
    assert expl == 'assert 1 == 2'
def test_call(space):
    # A call on the left-hand side is evaluated and reported with a
    # "+ where" clause naming the call.
    # NOTE(review): in-string indentation reconstructed -- confirm against
    # the original file.
    src = """
x = 1
def g():
    return 15
def f():
    y = 2
    assert g() == x + y
f()
"""
    expl = get_assert_explanation(space, src)
    assert expl == 'assert 15 == (1 == 2)\n + where 15 = g()'
def test_list(space):
    # List displays have their elements substituted with actual values.
    src = """
x = 1
y = 2
assert [1, 1, x] == [1, 1, y]
"""
    expl = get_assert_explanation(space, src)
    # diff etc disabled for now
    assert expl == 'assert [1, 1, 1] == [1, 1, 2]'
def test_boolop(space):
    # Boolean operators: when the first comparison holds, both operands are
    # shown; when it fails, evaluation short-circuits and only the failing
    # comparison is reported.
    src = "x = 1; y = 2; assert x == 1 and y == 3"
    expl = get_assert_explanation(space, src)
    assert expl == 'assert (1 == 1 and 2 == 3)'
    src = "x = 1; y = 2; assert x == 2 and y == 3"
    expl = get_assert_explanation(space, src)
    assert expl == 'assert (1 == 2)'
def test_attribute(space):
    # Attribute access is resolved and reported with the owning object.
    # NOTE(review): in-string indentation reconstructed -- confirm against
    # the original file.
    src = """
class A:
    x = 1
def f():
    a = A
    assert a.x == 2
f()
"""
    expl = get_assert_explanation(space, src)
    assert expl == "assert 1 == 2\n + where 1 = <class 'A'>.x"
| 24.169231 | 127 | 0.59198 |
79544097aa291b961e17f90271907d5818c21ee0 | 2,205 | py | Python | src/turbo_allauth/tests/settings.py | danjac/django-turbo-allauth | dde7b5a9aede029198f7feef41fad1160ddf9aed | [
"MIT"
] | 2 | 2021-04-05T07:26:45.000Z | 2021-09-18T08:59:19.000Z | src/turbo_allauth/tests/settings.py | danjac/django-turbo-allauth | dde7b5a9aede029198f7feef41fad1160ddf9aed | [
"MIT"
] | null | null | null | src/turbo_allauth/tests/settings.py | danjac/django-turbo-allauth | dde7b5a9aede029198f7feef41fad1160ddf9aed | [
"MIT"
] | null | null | null | # Standard Library
import pathlib
# Minimal Django settings used only by the turbo_allauth test suite.

SECRET_KEY = "seekret"

# In-memory-ish sqlite database; nothing persists between runs.
DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "mem_db"},
}

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        # Templates shipped next to this settings module.
        "DIRS": [pathlib.Path(__file__).parent.absolute() / "templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "debug": False,
            "builtins": [],
            "libraries": {},
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.i18n",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    }
]

# django-allauth (with the Google provider) plus the app under test.
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.sites",
    "turbo_allauth",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "turbo_allauth.tests.testapp.apps.TestAppConfig",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sites.middleware.CurrentSiteMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.locale.LocaleMiddleware",
    "turbo_response.middleware.TurboStreamMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.gzip.GZipMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "testapp.urls"

LOGIN_REDIRECT_URL = "/"

# Required by django.contrib.sites / allauth.
SITE_ID = 1

SOCIALACCOUNT_PROVIDERS = {
    "google": {
        "SCOPE": ["profile", "email",],
        "AUTH_PARAMS": {"access_type": "online",},
    }
}
| 31.056338 | 74 | 0.650794 |
795440c03952e5b640a7538371706b087ab2ce21 | 784 | py | Python | class5_4.py | wchunhao2000/pyneta | 3c03dff486e3a777e3e6cca4e0de5b823cf64256 | [
"Apache-2.0"
] | null | null | null | class5_4.py | wchunhao2000/pyneta | 3c03dff486e3a777e3e6cca4e0de5b823cf64256 | [
"Apache-2.0"
] | null | null | null | class5_4.py | wchunhao2000/pyneta | 3c03dff486e3a777e3e6cca4e0de5b823cf64256 | [
"Apache-2.0"
] | null | null | null | from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment

# Raise on any undefined variable instead of rendering it silently.
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader("./templates/class5/exercise4")

# Five VRF definitions (blue1 .. blue5) with both address families enabled.
my_vrfs = [
    {
        "vrf_name": "blue{}".format(index),
        "rd_number": "100:{}".format(index),
        "ipv4_af": True,
        "ipv6_af": True,
    }
    for index in range(1, 6)
]

j2_vars = {"my_vrfs": my_vrfs}

template_file = "ios_vrf.j2"
template = env.get_template(template_file)
cfg = template.render(**j2_vars)
print(cfg)
| 39.2 | 82 | 0.67602 |
795440cdb62cf2c870e6ff4211462ff15e00102c | 2,863 | py | Python | addic7ed_cli/version.py | BenoitZugmeyer/addic7ed-cli | 12005ed1f25fc167210e75a004a9fa9998f88065 | [
"MIT"
] | 58 | 2015-01-06T17:09:41.000Z | 2022-01-08T20:27:55.000Z | addic7ed_cli/version.py | BenoitZugmeyer/addic7ed-cli | 12005ed1f25fc167210e75a004a9fa9998f88065 | [
"MIT"
] | 27 | 2015-02-18T19:49:08.000Z | 2020-04-04T21:06:32.000Z | addic7ed_cli/version.py | BenoitZugmeyer/addic7ed-cli | 12005ed1f25fc167210e75a004a9fa9998f88065 | [
"MIT"
] | 12 | 2015-05-11T19:16:22.000Z | 2019-11-08T16:59:05.000Z |
import re
import zipfile
import io
import shutil
from addic7ed_cli.util import parse_release
from addic7ed_cli.error import FatalError
from addic7ed_cli.request import session
from addic7ed_cli.language import iso639_3_codes
class Version(object):
def __init__(self, id, language_id, version, url, language, release, infos,
completeness, hearing_impaired):
self.id = id
self.language_id = language_id
self.version = version
self.url = url
self.language = language
self.release = release
self.infos = infos
self.completeness = completeness
self.release_hash = parse_release(infos) | parse_release(release)
self.hearing_impaired = hearing_impaired
self.weight = 0
@property
def iso639_language(self):
return iso639_3_codes[self.language]
def __eq__(self, other):
return self.url == other.url and self.language == other.language
def match_languages(self, languages):
if not languages:
return
l = float(len(languages))
weight = 0
for index, language in enumerate(languages):
if language.lower() in self.language.lower():
weight += (l - index) / l
self.weight += weight
def match_release(self, release):
if not release:
return
self.weight += len(release & self.release_hash) / float(len(release))
def match_completeness(self, completeness):
match = re.match('(\d+\.?\d+)', self.completeness)
weight = float(match.group(1)) / 100 if match else 1
self.weight += weight
def match_hearing_impaired(self, hearing_impaired):
if hearing_impaired == self.hearing_impaired:
self.weight += 0.1
def __str__(self):
return '{language} - {release} {infos} {completeness} {hi}' \
.format(hi='HI' if self.hearing_impaired else '',
**self.__dict__)
def download(self, filename):
content = session.get(self.url).content
if content[:9] == '<!DOCTYPE':
raise FatalError('Daily Download count exceeded.')
with open(filename, 'wb') as fp:
fp.write(content)
@staticmethod
def multidownload(files):
data = [
('multishow[]',
'{0.language_id}/{0.id}/{0.version}'.format(version))
for (version, _) in files
]
result = session.post('/downloadmultiple.php', data=data)
z = zipfile.ZipFile(io.BytesIO(result.content))
zipfilenames = (n for n in z.namelist() if n.endswith('.srt'))
for (filename, zipfilename) in zip((filename for (_, filename) in files), zipfilenames):
with open(filename, 'wb') as output:
shutil.copyfileobj(z.open(zipfilename), output)
| 31.119565 | 96 | 0.615788 |
795441e4c24c3f556118a23495662c52c0dcc16a | 142 | py | Python | userpreferences/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | userpreferences/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | userpreferences/urls.py | georgiawang5332/meatFoodManager | 204b5b7aa2a3c5a6c9cc38077a10a72c0c69f140 | [
"MIT"
] | null | null | null | from . import views
from django.urls import path

# Namespace used for URL reversing, e.g. "userpreferences:preferences".
app_name = 'userpreferences'

urlpatterns = [
    # App root -> preferences index view.
    path('', views.index, name="preferences")
]
| 15.777778 | 43 | 0.71831 |
79544246a3026b18054233b70616e122b25c253a | 891 | py | Python | palindrome.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | palindrome.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | palindrome.py | xiaomiwujiecao/effectivePythonNote | 772e81864b171dcc19b6bcb1b31fc61bcfa1b9fe | [
"MIT"
] | null | null | null | # encoding=utf-8
# def palinedrome(word):
# """Return True if the given word is a palindrome """
# return word == word[::-1]
#
# print(repr(palinedrome.__doc__))
class Player(object):
    """Represents a player of the game.

    Subclasses may override the 'trick' method to provide
    custom animations for the player's movement depending
    on their power level, etc.

    Public attributes:
    - power: Unused power-ups (float between 0 and 1).
    - coins: Coins found during the level (integer).
    """
def find_anagrams(word, dictionary):
    """Find all anagrams for a word.

    This function only runs as fast as the test for
    membership in the 'dictionary' container.
    It will be slow if the dictionary is a list and fast if it's
    a set.

    Note: candidates are generated via permutations, so the cost also
    grows factorially with len(word); intended for short words.

    Args:
        word: String of the target word.
        dictionary: Container with all strings that are known words.

    Returns:
        Sorted list of anagrams that were found.
        Empty if none were found.
    """
    from itertools import permutations

    # Deduplicate permutations first so repeated letters in 'word' do not
    # cause duplicate results or redundant membership tests.
    candidates = {''.join(letters) for letters in permutations(word)}
    return sorted(candidate for candidate in candidates if candidate in dictionary)
| 26.205882 | 62 | 0.721661 |
79544281d1bfbe2a732aa57be51aa8aa5ba177f7 | 5,572 | py | Python | setup.py | xarkes/pydis | 1036274b2827884cab04a0816daa721759dc48c2 | [
"MIT"
] | 12 | 2018-06-17T14:48:28.000Z | 2022-01-17T16:30:52.000Z | setup.py | xarkes/pydis | 1036274b2827884cab04a0816daa721759dc48c2 | [
"MIT"
] | 4 | 2018-09-08T22:10:32.000Z | 2021-02-09T05:43:43.000Z | setup.py | xarkes/pydis | 1036274b2827884cab04a0816daa721759dc48c2 | [
"MIT"
] | 4 | 2018-09-01T23:56:39.000Z | 2019-11-19T06:24:35.000Z | import os
import re
import sys
import shutil
import subprocess
from setuptools import setup
from setuptools.command.develop import develop
from distutils.util import get_platform
from distutils.command.build import build
package_dir = os.path.dirname(os.path.abspath(__file__))
if sys.platform == 'darwin':
library_name = 'libZydis.dylib'
elif sys.platform in ('cygwin', 'win32'):
library_name = 'Zydis.dll'
else:
library_name = 'libZydis.so'
def cmake_build(source_dir, library_name, clean_build=False, build_dir=os.path.join(package_dir, 'build'),
                dest_dir=os.path.join(package_dir, 'lib'), debug_build=False):
    """Configure and build the shared library with CMake, then move it into *dest_dir*.

    Returns True early when the library already exists and no clean build
    was requested.  NOTE: the build_dir/dest_dir defaults are computed once
    at import time from package_dir.
    """
    release_mode = 'Debug' if debug_build else 'Release'
    on_windows = sys.platform in ('cygwin', 'win32')
    # MSVC-style generators place artifacts in a per-configuration subfolder.
    if on_windows:
        library_path = os.path.join(build_dir, release_mode, library_name)
    else:
        library_path = os.path.join(build_dir, library_name)
    if clean_build:
        shutil.rmtree(build_dir)
    elif os.path.exists(library_path):
        # The library is already built.
        return True
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    build_options = ['-DBUILD_SHARED_LIBS=ON']
    # If the python interpreter is 64bit tell cmake to build the 64bit module.
    if on_windows and 'amd64' in get_platform():
        build_options += ['-A', 'x64']
    # Configure, then build the selected configuration.
    subprocess.check_call(['cmake', os.path.abspath(source_dir)] + build_options, cwd=build_dir)
    subprocess.check_call(['cmake', '--build', '.', '--config', release_mode], cwd=build_dir)
    if not os.path.exists(library_path):
        raise Exception(f'Unable to find library after building at {library_path}')
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    # Replace any previously installed copy of the library.
    dest_file = os.path.join(dest_dir, library_name)
    if os.path.exists(dest_file):
        os.unlink(dest_file)
    shutil.move(library_path, dest_file)
def build_zydis(command, debug_build):
    """Ensure the Zydis submodule is initialized and its shared library is
    built into pydis/lib; announces and skips when already present."""
    library_path = os.path.join(package_dir, 'pydis', 'lib')
    library = os.path.join(library_path, library_name)
    # Fetch the Zydis sources on first use.
    if not os.path.exists(os.path.join(package_dir, 'zydis')):
        subprocess.check_call(['git', 'submodule', 'init'], cwd=package_dir)
    if not os.path.exists(library):
        cmake_build(os.path.join(package_dir, 'zydis'), library_name, dest_dir=library_path,
                    debug_build=debug_build)
    else:
        command.announce('Zydis already built')
class DevelopCommand(develop):
    """'develop' command that builds Zydis (debug mode) before installing."""

    def run(self):
        self.execute(build_zydis, (self, True), msg='Building Zydis')
        develop.run(self)
class BuildCommand(build):
    """'build' command that builds Zydis (release mode) first."""

    def run(self):
        self.execute(build_zydis, (self, False), msg='Building Zydis')
        return build.run(self)
def set_wheel_tags(at_index):
    """
    Insert explicit --plat-name / --python-tag arguments into sys.argv
    right after the 'bdist_wheel' argument at *at_index*, unless the
    caller already provided them.

    See:
    https://www.python.org/dev/peps/pep-0425/
    https://www.python.org/dev/peps/pep-0491/#file-name-convention

    and for macs:
    https://github.com/MacPython/wiki/wiki/Spinning-wheels

    If the wheel is not supported on the platform you can debug why by looking
    at the result of:
    python3 -c 'from pip._internal import pep425tags; print(pep425tags.get_supported())

    The result is all the valid tag combinations your platform supports.
    """
    if '--plat-name' not in sys.argv:
        sys.argv.insert(at_index + 1, '--plat-name')
        # Wheel platform tags use underscores instead of '-' and '.'.
        platform_name = get_platform()
        platform_name = platform_name.replace('-', '_').replace('.', '_')
        # https://www.python.org/dev/peps/pep-0513/
        if 'linux' in platform_name:
            platform_name = platform_name.replace('linux', 'manylinux1')
        sys.argv.insert(at_index + 2, platform_name)

    if '--python-tag' not in sys.argv:
        # Currently this is only tested on CPython
        # Since ctypes is used it may not work on other python interpreters.
        sys.argv.insert(at_index + 1, '--python-tag')
        sys.argv.insert(at_index + 2, 'cp36')
def get_version():
    """Return the version declared as ``__version__ = '...'`` in pydis/__init__.py.

    Falls back to '0.0' when no such line exists.  The previous
    implementation called ``.group(1)`` directly on the result of
    ``re.search`` and therefore raised AttributeError instead of
    returning the intended '0.0' default when the pattern was absent.
    """
    with open(os.path.join('pydis', '__init__.py')) as f:
        match = re.search(r"__version__ = '(.*?)'", f.read())
    return match.group(1) if match else '0.0'
def setup_package():
    """Assemble the package metadata and invoke setuptools' setup()."""
    # When building a wheel, pin the platform/python tags first.
    try:
        bdist_index = sys.argv.index('bdist_wheel')
        set_wheel_tags(bdist_index)
    except ValueError:
        pass

    with open('README.md') as readme:
        long_description = readme.read()

    setup(name='py-dis',
          author='Kyle',
          author_email='kyle@novogen.org',
          description='Python bindings for Zydis library',
          long_description=long_description,
          long_description_content_type='text/markdown',
          version=get_version(),
          packages=['pydis'],
          python_requires='>=3.6',
          license='MIT',
          scripts=['scripts/pydisinfo'],
          # Hook the Zydis build into the standard commands.
          cmdclass={
              'build': BuildCommand,
              'develop': DevelopCommand
          },
          # Ship the prebuilt shared library inside the package.
          package_data={'pydis': [os.path.join('lib', library_name)]},
          classifiers=(
              'Development Status :: 5 - Production/Stable',
              'Intended Audience :: Developers',
              'License :: OSI Approved :: MIT License',
              'Programming Language :: C',
              'Programming Language :: Python :: 3.6',
              'Programming Language :: Python :: Implementation :: CPython',
              'Topic :: Software Development :: Disassemblers',
              'Operating System :: MacOS',
              'Operating System :: Unix',
              'Operating System :: Microsoft'
          ))


if __name__ == '__main__':
    setup_package()
| 32.970414 | 106 | 0.639986 |
7954433bfd68739f04acb0cbb067e03c180bb3c4 | 4,987 | py | Python | src/models/VGG19.py | harveyslash/Deep-Image-Analogy-TF | 9bda06fbe3a5786217a3db112d2f162573b1dd90 | [
"MIT"
] | 4 | 2018-02-27T21:43:42.000Z | 2021-08-22T14:42:47.000Z | src/models/VGG19.py | harveyslash/Deep-Image-Analogy-TF | 9bda06fbe3a5786217a3db112d2f162573b1dd90 | [
"MIT"
] | null | null | null | src/models/VGG19.py | harveyslash/Deep-Image-Analogy-TF | 9bda06fbe3a5786217a3db112d2f162573b1dd90 | [
"MIT"
] | 2 | 2018-02-25T21:50:08.000Z | 2018-11-26T23:42:32.000Z | import time
import os
from src.PatchMatch import PatchMatchOrig
import torchvision.models as models
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torch.utils.model_zoo as model_zoo
import cv2
import torchvision
from torchvision import transforms
from torchvision.utils import make_grid
from PIL import Image
from collections import OrderedDict
from PIL import Image
# import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import ReduceLROnPlateau
class FeatureExtractor(nn.Sequential):
    """Sequential container whose forward pass collects and returns the
    output of every registered layer rather than only the last one."""

    def __init__(self):
        super(FeatureExtractor, self).__init__()

    def add_layer(self, name, layer):
        # Readable alias over nn.Module.add_module.
        self.add_module(name, layer)

    def forward(self, x):
        outputs = []
        for _, layer in self._modules.items():
            x = layer(x)
            outputs.append(x)
        return outputs
class VGG19:
    """Pretrained VGG-19 wrapped in a FeatureExtractor for Deep Image Analogy.

    Every layer's activation is exposed; max-pooling layers are replaced by
    average pooling and ReLUs are made out-of-place so intermediate tensors
    survive the forward pass.
    """

    def __init__(self, use_cuda=True):
        self.cnn_temp = models.vgg19(pretrained=True).features
        self.model = FeatureExtractor()  # the new Feature extractor module network
        conv_counter = 1
        relu_counter = 1
        batn_counter = 1
        block_counter = 1
        self.use_cuda = use_cuda

        # Re-register each VGG layer under a descriptive name such as
        # "conv_3_2__12" (block 3, conv 2, original index 12).
        for i, layer in enumerate(list(self.cnn_temp)):
            if isinstance(layer, nn.Conv2d):
                name = "conv_" + str(block_counter) + "_" + str(conv_counter) + "__" + str(i)
                conv_counter += 1
                self.model.add_layer(name, layer)

            if isinstance(layer, nn.ReLU):
                name = "relu_" + str(block_counter) + "_" + str(relu_counter) + "__" + str(i)
                relu_counter += 1
                # Out-of-place ReLU so earlier activations are not overwritten.
                self.model.add_layer(name, nn.ReLU(inplace=False))

            if isinstance(layer, nn.MaxPool2d):
                name = "pool_" + str(block_counter) + "__" + str(i)
                # A pooling layer ends the block: reset per-block counters.
                batn_counter = relu_counter = conv_counter = 1
                block_counter += 1
                # Average pooling instead of the original max pooling.
                self.model.add_layer(name, nn.AvgPool2d((2, 2)))

            if isinstance(layer, nn.BatchNorm2d):
                name = "batn_" + str(block_counter) + "_" + str(batn_counter) + "__" + str(i)
                batn_counter += 1
                self.model.add_layer(name, layer)

        if use_cuda:
            self.model.cuda()

    def forward_subnet(self, input_tensor, start_layer, end_layer):
        """Run only the layers whose index lies in [start_layer, end_layer]."""
        for i, layer in enumerate(list(self.model)):
            if i >= start_layer and i <= end_layer:
                input_tensor = layer(input_tensor)
        return input_tensor

    def get_features(self, img_tensor):
        """Return every layer's activation as a channel-last numpy array."""
        if self.use_cuda:
            img_tensor = img_tensor.cuda()

        features = self.model(img_tensor)
        # squeeze() drops the batch dimension; transpose -> (H, W, C).
        features = [i.data.squeeze().cpu().numpy().transpose(1, 2, 0) for i in features]
        return np.array(features)

    def get_deconvoluted_feat(self, feat, feat_layer_num, iters=13):
        """Invert a feature map one pyramid level via gradient descent.

        A random input is optimized so that running it through the layers
        between the previous level and *feat_layer_num* reproduces *feat*;
        the optimized input is returned channel-last.
        NOTE(review): the start/end layer indices below presumably match the
        layer ordering produced by __init__ -- confirm before changing them.
        """
        def cn_last(th_array):
            # channel-first -> channel-last
            return th_array.transpose(1, 2, 0)

        def cn_first(th_array):
            # channel-last -> channel-first
            return th_array.transpose(2, 0, 1)

        feat = cn_first(feat)
        feat = torch.from_numpy(feat).float()

        scale = 2
        if feat_layer_num == 5:
            start_layer, end_layer = 21, 29
            noise = np.random.uniform(size=(1, 512, 28 * scale, 28 * scale), low=0, high=1)
        elif feat_layer_num == 4:
            start_layer, end_layer = 12, 20
            noise = np.random.uniform(size=(1, 256, 56 * scale, 56 * scale), low=0, high=1)
        elif feat_layer_num == 3:
            start_layer, end_layer = 7, 11
            noise = np.random.uniform(size=(1, 128, 112 * scale, 112 * scale), low=0, high=1)
        elif feat_layer_num == 2:
            start_layer, end_layer = 2, 6
            noise = np.random.uniform(size=(1, 64, 224 * scale, 224 * scale), low=0, high=1)
        else:
            print("Invalid layer number")

        # noise = Variable(torch.from_numpy(noise).float())  # use this if you want custom noise
        noise = torch.randn(noise.shape).float()

        if self.use_cuda:
            noise = noise.cuda()
            feat = feat.cuda()

        noise = Variable(noise, requires_grad=True)
        feat = Variable(feat)

        optimizer = optim.Adam([noise], lr=1, weight_decay=1)
        loss_hist = []
        for i in range(1, iters):
            optimizer.zero_grad()
            output = self.forward_subnet(input_tensor=noise, start_layer=start_layer, end_layer=end_layer)
            # Minimize the squared L2 distance to the target feature map.
            diff = output - feat
            norm = torch.norm(diff, p=2)
            loss_value = norm ** 2
            loss_value.backward()
            optimizer.step()
            # Keep the reconstruction inside the valid activation range.
            noise.data.clamp_(0., 1.)
            loss_hist.append(loss_value.cpu().data.numpy())

        # plt.plot(loss_hist)
        # plt.show()

        noise.data.clamp_(0., 1.)
        noise_cpu = noise.cpu().data.squeeze().numpy()
        del feat
        del noise
        return cn_last(noise_cpu)
| 32.594771 | 104 | 0.596752 |
795443b86e788ab1e4007c1e6c784a392b6e6736 | 19,701 | py | Python | .modules/.sqlmap/lib/techniques/union/use.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.sqlmap/lib/techniques/union/use.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.sqlmap/lib/techniques/union/use.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import binascii
import re
import time
import xml.etree.ElementTree
from extra.safe2bin.safe2bin import safecharencode
from lib.core.agent import agent
from lib.core.bigarray import BigArray
from lib.core.common import arrayizeValue
from lib.core.common import Backend
from lib.core.common import calculateDeltaSeconds
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import extractRegexResult
from lib.core.common import firstNotNone
from lib.core.common import flattenValue
from lib.core.common import getConsoleWidth
from lib.core.common import getPartRun
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import incrementCounter
from lib.core.common import initTechnique
from lib.core.common import isListLike
from lib.core.common import isNoneValue
from lib.core.common import isNumPosStrValue
from lib.core.common import listToStrValue
from lib.core.common import parseUnionPage
from lib.core.common import removeReflectiveValues
from lib.core.common import singleTimeDebugMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import unArrayizeValue
from lib.core.common import wasLastResponseDBMSError
from lib.core.convert import htmlunescape
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import queries
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import DBMS
from lib.core.enums import HTTP_HEADER
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapSyntaxException
from lib.core.settings import MAX_BUFFERED_PARTIAL_UNION_LENGTH
from lib.core.settings import NULL
from lib.core.settings import SQL_SCALAR_REGEX
from lib.core.settings import TURN_OFF_RESUME_INFO_LIMIT
from lib.core.settings import UNICODE_ENCODING
from lib.core.threads import getCurrentThreadData
from lib.core.threads import runThreads
from lib.core.unescaper import unescaper
from lib.request.connect import Connect as Request
from lib.utils.progress import ProgressBar
from thirdparty.odict.odict import OrderedDict
def _oneShotUnionUse(expression, unpack=True, limited=False):
    """Run *expression* through a single UNION-query request and return the
    raw marker-delimited output, using the session hashDB as a cache.
    """
    retVal = hashDBRetrieve("%s%s" % (conf.hexConvert or False, expression), checkConf=True)  # as UNION data is stored raw unconverted

    threadData = getCurrentThreadData()
    threadData.resumed = retVal is not None

    if retVal is None:
        vector = kb.injection.data[PAYLOAD.TECHNIQUE.UNION].vector

        if not kb.rowXmlMode:
            # Regular mode: wrap the query so its output appears between the
            # kb.chars.start/stop markers in the response.
            injExpression = unescaper.escape(agent.concatQuery(expression, unpack))
            kb.unionDuplicates = vector[7]
            kb.forcePartialUnion = vector[8]
            query = agent.forgeUnionQuery(injExpression, vector[0], vector[1], vector[2], vector[3], vector[4], vector[5], vector[6], None, limited)
            where = PAYLOAD.WHERE.NEGATIVE if conf.limitStart or conf.limitStop else vector[6]
        else:
            # "FOR XML" row mode: the expression is forged as-is.
            where = vector[6]
            query = agent.forgeUnionQuery(expression, vector[0], vector[1], vector[2], vector[3], vector[4], vector[5], vector[6], None, False)

        payload = agent.payload(newValue=query, where=where)

        # Perform the request
        page, headers, _ = Request.queryPage(payload, content=True, raise404=False)

        incrementCounter(PAYLOAD.TECHNIQUE.UNION)

        if not kb.rowXmlMode:
            # Parse the returned page to get the exact UNION-based
            # SQL injection output
            def _(regex):
                # Look in the page body first, then in the response headers.
                return firstNotNone(
                    extractRegexResult(regex, removeReflectiveValues(page, payload), re.DOTALL | re.IGNORECASE),
                    extractRegexResult(regex, removeReflectiveValues(listToStrValue((_ for _ in headers.headers if not _.startswith(HTTP_HEADER.URI)) if headers else None), payload, True), re.DOTALL | re.IGNORECASE)
                )

            # Automatically patching last char trimming cases
            if kb.chars.stop not in (page or "") and kb.chars.stop[:-1] in (page or ""):
                warnMsg = "automatically patching output having last char trimmed"
                singleTimeWarnMessage(warnMsg)
                page = page.replace(kb.chars.stop[:-1], kb.chars.stop)

            retVal = _("(?P<result>%s.*%s)" % (kb.chars.start, kb.chars.stop))
        else:
            output = extractRegexResult(r"(?P<result>(<row.+?/>)+)", page)
            if output:
                try:
                    root = xml.etree.ElementTree.fromstring("<root>%s</root>" % output.encode(UNICODE_ENCODING))
                    retVal = ""
                    for column in kb.dumpColumns:
                        # Treat a column as Base64-encoded only when every row
                        # value both looks like and decodes as Base64.
                        base64 = True
                        for child in root:
                            value = child.attrib.get(column, "").strip()
                            if value and not re.match(r"\A[a-zA-Z0-9+/]+={0,2}\Z", value):
                                base64 = False
                                break

                            try:
                                value.decode("base64")
                            except binascii.Error:
                                base64 = False
                                break

                        if base64:
                            for child in root:
                                child.attrib[column] = child.attrib.get(column, "").decode("base64") or NULL

                    for child in root:
                        row = []
                        for column in kb.dumpColumns:
                            row.append(child.attrib.get(column, NULL))
                        retVal += "%s%s%s" % (kb.chars.start, kb.chars.delimiter.join(row), kb.chars.stop)
                except:
                    # Malformed XML or decode failure: keep retVal as-is.
                    pass
                else:
                    retVal = getUnicode(retVal)

        if retVal is not None:
            retVal = getUnicode(retVal, kb.pageEncoding)

            # Special case when DBMS is Microsoft SQL Server and error message is used as a result of UNION injection
            if Backend.isDbms(DBMS.MSSQL) and wasLastResponseDBMSError():
                retVal = htmlunescape(retVal).replace("<br>", "\n")

            hashDBWrite("%s%s" % (conf.hexConvert or False, expression), retVal)
        elif not kb.rowXmlMode:
            # No full markers found: report a likely server-side truncation.
            trimmed = _("%s(?P<result>.*?)<" % (kb.chars.start))

            if trimmed:
                warnMsg = "possible server trimmed output detected "
                warnMsg += "(probably due to its length and/or content): "
                warnMsg += safecharencode(trimmed)
                logger.warn(warnMsg)
    else:
        # Cache hit: still refresh the duplicates flag from the vector.
        vector = kb.injection.data[PAYLOAD.TECHNIQUE.UNION].vector
        kb.unionDuplicates = vector[7]

    return retVal
def configUnion(char=None, columns=None):
    """Store the UNION filler character and column range into kb/conf."""

    def _set_union_char(value):
        # Ignore non-string input, leaving kb.uChar untouched.
        if not isinstance(value, basestring):
            return
        kb.uChar = value
        if conf.uChar is not None:
            replacement = conf.uChar if conf.uChar.isdigit() else "'%s'" % conf.uChar.strip("'")
            kb.uChar = value.replace("[CHAR]", replacement)

    def _set_union_cols(value):
        if not isinstance(value, basestring):
            return

        value = value.replace(" ", "")

        if "-" in value:
            lower, upper = value.split("-")
        else:
            lower = upper = value

        if not (lower.isdigit() and upper.isdigit()):
            raise SqlmapSyntaxException("--union-cols must be a range of integers")

        conf.uColsStart, conf.uColsStop = int(lower), int(upper)

        if conf.uColsStart > conf.uColsStop:
            raise SqlmapSyntaxException("--union-cols range has to be from lower to higher number of columns")

    _set_union_char(char)
    _set_union_cols(conf.uCols or columns)
def unionUse(expression, unpack=True, dump=False):
"""
This function tests for an UNION SQL injection on the target
URL then call its subsidiary function to effectively perform an
UNION SQL injection on the affected URL
"""
initTechnique(PAYLOAD.TECHNIQUE.UNION)
abortedFlag = False
count = None
origExpr = expression
startLimit = 0
stopLimit = None
value = None
width = getConsoleWidth()
start = time.time()
_, _, _, _, _, expressionFieldsList, expressionFields, _ = agent.getFields(origExpr)
# Set kb.partRun in case the engine is called from the API
kb.partRun = getPartRun(alias=False) if conf.api else None
if Backend.isDbms(DBMS.MSSQL) and kb.dumpColumns:
kb.rowXmlMode = True
_ = "(%s FOR XML RAW, BINARY BASE64)" % expression
output = _oneShotUnionUse(_, False)
value = parseUnionPage(output)
kb.rowXmlMode = False
if expressionFieldsList and len(expressionFieldsList) > 1 and "ORDER BY" in expression.upper():
# Removed ORDER BY clause because UNION does not play well with it
expression = re.sub(r"(?i)\s*ORDER BY\s+[\w,]+", "", expression)
debugMsg = "stripping ORDER BY clause from statement because "
debugMsg += "it does not play well with UNION query SQL injection"
singleTimeDebugMessage(debugMsg)
# We have to check if the SQL query might return multiple entries
# if the technique is partial UNION query and in such case forge the
# SQL limiting the query output one entry at a time
# NOTE: we assume that only queries that get data from a table can
# return multiple entries
if value is None and (kb.injection.data[PAYLOAD.TECHNIQUE.UNION].where == PAYLOAD.WHERE.NEGATIVE or kb.forcePartialUnion or (dump and (conf.limitStart or conf.limitStop)) or "LIMIT " in expression.upper()) and " FROM " in expression.upper() and ((Backend.getIdentifiedDbms() not in FROM_DUMMY_TABLE) or (Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and not expression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]))) and not re.search(SQL_SCALAR_REGEX, expression, re.I):
expression, limitCond, topLimit, startLimit, stopLimit = agent.limitCondition(expression, dump)
if limitCond:
# Count the number of SQL query entries output
countedExpression = expression.replace(expressionFields, queries[Backend.getIdentifiedDbms()].count.query % ('*' if len(expressionFieldsList) > 1 else expressionFields), 1)
if " ORDER BY " in countedExpression.upper():
_ = countedExpression.upper().rindex(" ORDER BY ")
countedExpression = countedExpression[:_]
output = _oneShotUnionUse(countedExpression, unpack)
count = unArrayizeValue(parseUnionPage(output))
if isNumPosStrValue(count):
if isinstance(stopLimit, int) and stopLimit > 0:
stopLimit = min(int(count), int(stopLimit))
else:
stopLimit = int(count)
infoMsg = "used SQL query returns "
infoMsg += "%d entries" % stopLimit
logger.info(infoMsg)
elif count and (not isinstance(count, basestring) or not count.isdigit()):
warnMsg = "it was not possible to count the number "
warnMsg += "of entries for the SQL query provided. "
warnMsg += "sqlmap will assume that it returns only "
warnMsg += "one entry"
logger.warn(warnMsg)
stopLimit = 1
elif (not count or int(count) == 0):
if not count:
warnMsg = "the SQL query provided does not "
warnMsg += "return any output"
logger.warn(warnMsg)
else:
value = [] # for empty tables
return value
if isNumPosStrValue(count) and int(count) > 1:
threadData = getCurrentThreadData()
try:
threadData.shared.limits = iter(xrange(startLimit, stopLimit))
except OverflowError:
errMsg = "boundary limits (%d,%d) are too large. Please rerun " % (startLimit, stopLimit)
errMsg += "with switch '--fresh-queries'"
raise SqlmapDataException(errMsg)
numThreads = min(conf.threads, (stopLimit - startLimit))
threadData.shared.value = BigArray()
threadData.shared.buffered = []
threadData.shared.counter = 0
threadData.shared.lastFlushed = startLimit - 1
threadData.shared.showEta = conf.eta and (stopLimit - startLimit) > 1
if threadData.shared.showEta:
threadData.shared.progress = ProgressBar(maxValue=(stopLimit - startLimit))
if stopLimit > TURN_OFF_RESUME_INFO_LIMIT:
kb.suppressResumeInfo = True
debugMsg = "suppressing possible resume console info because of "
debugMsg += "large number of rows. It might take too long"
logger.debug(debugMsg)
try:
def unionThread():
threadData = getCurrentThreadData()
while kb.threadContinue:
with kb.locks.limit:
try:
threadData.shared.counter += 1
num = threadData.shared.limits.next()
except StopIteration:
break
if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE):
field = expressionFieldsList[0]
elif Backend.isDbms(DBMS.ORACLE):
field = expressionFieldsList
else:
field = None
limitedExpr = agent.limitQuery(num, expression, field)
output = _oneShotUnionUse(limitedExpr, unpack, True)
if not kb.threadContinue:
break
if output:
with kb.locks.value:
if all(_ in output for _ in (kb.chars.start, kb.chars.stop)):
items = parseUnionPage(output)
if threadData.shared.showEta:
threadData.shared.progress.progress(threadData.shared.counter)
if isListLike(items):
# in case that we requested N columns and we get M!=N then we have to filter a bit
if len(items) > 1 and len(expressionFieldsList) > 1:
items = [item for item in items if isListLike(item) and len(item) == len(expressionFieldsList)]
items = [_ for _ in flattenValue(items)]
if len(items) > len(expressionFieldsList):
filtered = OrderedDict()
for item in items:
key = re.sub(r"[^A-Za-z0-9]", "", item).lower()
if key not in filtered or re.search(r"[^A-Za-z0-9]", item):
filtered[key] = item
items = filtered.values()
items = [items]
index = None
for index in xrange(1 + len(threadData.shared.buffered)):
if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
break
threadData.shared.buffered.insert(index or 0, (num, items))
else:
index = None
if threadData.shared.showEta:
threadData.shared.progress.progress(threadData.shared.counter)
for index in xrange(1 + len(threadData.shared.buffered)):
if index < len(threadData.shared.buffered) and threadData.shared.buffered[index][0] >= num:
break
threadData.shared.buffered.insert(index or 0, (num, None))
items = output.replace(kb.chars.start, "").replace(kb.chars.stop, "").split(kb.chars.delimiter)
while threadData.shared.buffered and (threadData.shared.lastFlushed + 1 >= threadData.shared.buffered[0][0] or len(threadData.shared.buffered) > MAX_BUFFERED_PARTIAL_UNION_LENGTH):
threadData.shared.lastFlushed, _ = threadData.shared.buffered[0]
if not isNoneValue(_):
threadData.shared.value.extend(arrayizeValue(_))
del threadData.shared.buffered[0]
if conf.verbose == 1 and not (threadData.resumed and kb.suppressResumeInfo) and not threadData.shared.showEta:
_ = ','.join("\"%s\"" % _ for _ in flattenValue(arrayizeValue(items))) if not isinstance(items, basestring) else items
status = "[%s] [INFO] %s: %s" % (time.strftime("%X"), "resumed" if threadData.resumed else "retrieved", _ if kb.safeCharEncode else safecharencode(_))
if len(status) > width:
status = "%s..." % status[:width - 3]
dataToStdout("%s\n" % status)
runThreads(numThreads, unionThread)
if conf.verbose == 1:
clearConsoleLine(True)
except KeyboardInterrupt:
abortedFlag = True
warnMsg = "user aborted during enumeration. sqlmap "
warnMsg += "will display partial output"
logger.warn(warnMsg)
finally:
for _ in sorted(threadData.shared.buffered):
if not isNoneValue(_[1]):
threadData.shared.value.extend(arrayizeValue(_[1]))
value = threadData.shared.value
kb.suppressResumeInfo = False
if not value and not abortedFlag:
output = _oneShotUnionUse(expression, unpack)
value = parseUnionPage(output)
duration = calculateDeltaSeconds(start)
if not kb.bruteMode:
debugMsg = "performed %d queries in %.2f seconds" % (kb.counters[PAYLOAD.TECHNIQUE.UNION], duration)
logger.debug(debugMsg)
return value
| 47.586957 | 495 | 0.566316 |
795445cfa92335606e79b3a9b81b347359a3d920 | 3,481 | py | Python | astacus/node/uploader.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 19 | 2020-06-22T12:17:59.000Z | 2022-02-18T00:12:17.000Z | astacus/node/uploader.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 7 | 2020-06-24T05:16:20.000Z | 2022-02-28T07:35:31.000Z | astacus/node/uploader.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 2 | 2020-09-05T21:23:08.000Z | 2022-02-17T15:02:37.000Z | """
Copyright (c) 2020 Aiven Ltd
See LICENSE for details
"""
from .snapshotter import hash_hexdigest_readable, Snapshotter
from astacus.common import exceptions, utils
from astacus.common.progress import Progress
from astacus.common.storage import ThreadLocalStorage
import logging
logger = logging.getLogger(__name__)
class Uploader(ThreadLocalStorage):
    """Uploads snapshotted file content, addressed by hexdigest, to object storage."""

    def write_hashes_to_storage(
        self, *, snapshotter: Snapshotter, hashes, parallel: int, progress: Progress, still_running_callback=lambda: True
    ):
        """Upload the content behind each hash in *hashes* using *parallel* worker threads.

        Returns a ``(total_size, stored_size)`` tuple accumulated over
        successful uploads. *still_running_callback* is polled after each
        result; returning False aborts the remaining work.
        """
        # (note: 'hash' here shadows the builtin of the same name)
        todo = set(hash.hexdigest for hash in hashes)
        progress.start(len(todo))
        # Byte counters; mutated only from _result_cb, which runs in the
        # 'main' thread (see comment there), so no locking is needed.
        sizes = {"total": 0, "stored": 0}

        def _upload_hexdigest_in_thread(hexdigest):
            # Worker-thread body: returns (progress_callback, size, stored_size).
            storage = self.local_storage
            assert hexdigest
            files = snapshotter.hexdigest_to_snapshotfiles.get(hexdigest, [])
            for snapshotfile in files:
                path = snapshotter.dst / snapshotfile.relative_path
                if not path.is_file():
                    logger.warning("%s disappeared post-snapshot", path)
                    continue
                # Verify the on-disk content still matches the snapshotted
                # hash before spending bandwidth on the upload.
                with snapshotfile.open_for_reading(snapshotter.dst) as f:
                    current_hexdigest = hash_hexdigest_readable(f)
                if current_hexdigest != snapshotfile.hexdigest:
                    logger.info("Hash of %s changed before upload", snapshotfile.relative_path)
                    continue
                try:
                    with snapshotfile.open_for_reading(snapshotter.dst) as f:
                        upload_result = storage.upload_hexdigest_from_file(hexdigest, f)
                except exceptions.TransientException as ex:
                    # Do not pollute logs with transient exceptions
                    logger.debug("Transient exception uploading %r: %r", path, ex)
                    return progress.upload_failure, 0, 0
                except exceptions.AstacusException:
                    # Report failure - whole step will be retried later
                    logger.exception("Exception uploading %r", path)
                    return progress.upload_failure, 0, 0
                # Re-check after upload: if the file changed mid-upload, the
                # stored object is stale, so delete it and try the next
                # candidate file carrying the same hexdigest.
                with snapshotfile.open_for_reading(snapshotter.dst) as f:
                    current_hexdigest = hash_hexdigest_readable(f)
                if current_hexdigest != snapshotfile.hexdigest:
                    logger.info("Hash of %s changed after upload", snapshotfile.relative_path)
                    storage.delete_hexdigest(hexdigest)
                    continue
                return progress.upload_success, upload_result.size, upload_result.stored_size

            # We didn't find single file with the matching hexdigest.
            # Report it as missing but keep uploading other files.
            return progress.upload_missing, 0, 0

        def _result_cb(*, map_in, map_out):
            # progress callback in 'main' thread
            progress_callback, total, stored = map_out
            sizes["total"] += total
            sizes["stored"] += stored
            progress_callback(map_in)  # hexdigest
            return still_running_callback()

        # Process hashes in descending order of (first) file size.
        sorted_todo = sorted(todo, key=lambda hexdigest: -snapshotter.hexdigest_to_snapshotfiles[hexdigest][0].file_size)
        if not utils.parallel_map_to(
            fun=_upload_hexdigest_in_thread, iterable=sorted_todo, result_callback=_result_cb, n=parallel
        ):
            progress.add_fail()

        return sizes["total"], sizes["stored"]
| 44.628205 | 121 | 0.629704 |
795445e4f0541ac5b3c6c85230108d640ffb3ab6 | 756 | py | Python | extensions/dice.py | ooxxe04/ConfessionBot-2.0 | 6ead21e38d6d28aed0f44d3e8ef4bd14f49f84b5 | [
"MIT"
] | null | null | null | extensions/dice.py | ooxxe04/ConfessionBot-2.0 | 6ead21e38d6d28aed0f44d3e8ef4bd14f49f84b5 | [
"MIT"
] | null | null | null | extensions/dice.py | ooxxe04/ConfessionBot-2.0 | 6ead21e38d6d28aed0f44d3e8ef4bd14f49f84b5 | [
"MIT"
] | 1 | 2020-03-05T11:30:18.000Z | 2020-03-05T11:30:18.000Z | import discord
from discord.ext import commands
import random
class Dice(commands.Cog):
    """Simple dice rolling command extension, could be treated like another example."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(aliases=['roll'])
    async def dice(self, ctx: commands.Context, *numbers):
        """Rolls one or many n-sided dice (defaults to a single d6, capped at 8 dice)."""
        if not numbers:
            numbers = ['6']
        elif len(numbers) > 8:
            # Cap the dice count so a single command can't spam a huge reply.
            numbers = numbers[:8]
        rolls = []
        for i, n in enumerate(numbers):
            # Only positive integer side counts are rolled; anything else is
            # silently skipped. The explicit "> 0" guard fixes a crash in the
            # original code, where '0' produced an empty range and
            # random.choice raised an uncaught IndexError.
            if n.isdigit() and int(n) > 0:
                result = random.randint(1, int(n))
                rolls.append(self.bot.babel(ctx, 'dice', 'roll_result', i=i + 1, r=result))
        # NOTE(review): if every argument is invalid this replies with an
        # empty string, which Discord rejects - consider localized feedback.
        await ctx.reply('\n'.join(rolls))
def setup(bot):
    # Standard discord.py extension entry point: register the cog on load.
    bot.add_cog(Dice(bot))
| 29.076923 | 84 | 0.637566 |
795446cba04763c7a7aa8e75f727f5ec107744be | 739 | py | Python | var/spack/repos/builtin/packages/perl-libwww-perl/package.py | whitfin/spack | aabd2be31a511d0e00c1017f7311a421659319d9 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/perl-libwww-perl/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/perl-libwww-perl/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlLibwwwPerl(PerlPackage):
    """The libwww-perl collection is a set of Perl modules which provides
    a simple and consistent application programming interface to the
    World-Wide Web. The main focus of the library is to provide classes and
    functions that allow you to write WWW clients."""

    homepage = "https://github.com/libwww-perl/libwww-perl"
    url = "http://search.cpan.org/CPAN/authors/id/O/OA/OALDERS/libwww-perl-6.33.tar.gz"

    # Known release; second argument is the tarball checksum (32 hex chars,
    # Spack's legacy md5 form).
    version('6.33', '2e15c1c789ac9036c99d094e47e3da23')
| 38.894737 | 92 | 0.744249 |
795446de83b3b669abf384ca34176a1f052c94fd | 6,656 | py | Python | test/functional/feature_maxuploadtarget.py | asuka431/fujicoin-22.0 | 9e338be2116022f0d18df153298b8881e6145f04 | [
"MIT"
] | null | null | null | test/functional/feature_maxuploadtarget.py | asuka431/fujicoin-22.0 | 9e338be2116022f0d18df153298b8881e6145f04 | [
"MIT"
] | null | null | null | test/functional/feature_maxuploadtarget.py | asuka431/fujicoin-22.0 | 9e338be2116022f0d18df153298b8881e6145f04 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Baricoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BaricoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
    """P2P stub that tallies how many times each block is received."""

    def __init__(self):
        super().__init__()
        # Maps block sha256 (int) -> number of times that block was delivered.
        self.block_receive_map = defaultdict(int)

    def on_inv(self, message):
        # Ignore inv announcements; this test requests blocks via getdata only.
        pass

    def on_block(self, message):
        block = message.block
        block.calc_sha256()
        self.block_receive_map[block.sha256] += 1
class MaxUploadTest(BaricoinTestFramework):
    """Exercise -maxuploadtarget accounting: serving of old blocks is cut off
    once the target is reached, recent blocks are always served, the upload
    counters reset after 24 hours, and the 'download' peer permission
    bypasses the limit entirely."""

    def set_test_params(self):
        """Single node, clean chain, 800MB/day upload target."""
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[
            "-maxuploadtarget=800",
            "-acceptnonstdtxn=1",
            "-peertimeout=9999",  # bump because mocktime might cause a disconnect otherwise
        ]]
        self.supports_cli = False

        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []

    def skip_test_if_missing_module(self):
        """mine_large_block spends wallet utxos, so a wallet is required."""
        self.skip_if_no_wallet()

    def run_test(self):
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)

        # Generate some old blocks
        self.nodes[0].generate(130)

        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []

        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))

        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)

        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)

        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)

        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)

        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)

        # p2p_conns[0] will test what happens if we just keep requesting the
        # the same big old block too many times (expect: disconnect)

        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))

        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size

        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_and_ping(getdata_request)
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)

        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for _ in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")

        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_and_ping(getdata_request)
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)

        self.log.info("Peer 1 able to repeatedly download new block")

        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)

        self.log.info("Peer 1 disconnected after trying to download old block")

        self.log.info("Advancing system time on node to clear counters...")

        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_and_ping(getdata_request)
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)

        self.log.info("Peer 2 able to download old block")

        self.nodes[0].disconnect_p2ps()

        self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
        self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1"])

        # Reconnect to self.nodes[0]
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())

        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
        for i in range(20):
            peer.send_and_ping(getdata_request)
            assert_equal(peer.block_receive_map[big_new_block], i+1)

        getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
        peer.send_and_ping(getdata_request)

        self.log.info("Peer still connected after trying to download old block (download permission)")
        peer_info = self.nodes[0].getpeerinfo()
        assert_equal(len(peer_info), 1)  # node is still connected
        assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
    # Run the functional test via the framework's standard entry point.
    MaxUploadTest().main()
| 39.856287 | 102 | 0.679387 |
795447b97a2ed1e48488c2441ac6683c4b7cc472 | 9,508 | py | Python | pycom/lib/uasyncio/core.py | o-gent/aero_one | 1815fb798937724c3694c7119639fcf628d0e57b | [
"MIT"
] | 1 | 2019-03-22T18:49:33.000Z | 2019-03-22T18:49:33.000Z | pycom/lib/uasyncio/core.py | o-gent/aero_one | 1815fb798937724c3694c7119639fcf628d0e57b | [
"MIT"
] | null | null | null | pycom/lib/uasyncio/core.py | o-gent/aero_one | 1815fb798937724c3694c7119639fcf628d0e57b | [
"MIT"
] | null | null | null | import utime as time
import utimeq
import ucollections
# The generator type, obtained from a throwaway generator lambda; used to tell
# coroutines apart from plain callbacks in the run queue.
type_gen = type((lambda: (yield))())

# Module-wide debug flag and logger; populated by set_debug().
DEBUG = 0
log = None
def set_debug(val):
    """Toggle scheduler debug logging; lazily creates the module logger."""
    global DEBUG, log
    DEBUG = val
    if not val:
        return
    import logging
    log = logging.getLogger("uasyncio.core")
class CancelledError(Exception):
    """Raised inside a coroutine that has been cancelled via cancel()."""
    pass
class TimeoutError(CancelledError):
    """Raised in a coroutine whose wait_for()/wait_for_ms() deadline expired.

    Subclasses CancelledError so a timeout also counts as cancellation.
    """
    pass
class EventLoop:
    """Cooperative scheduler driving callbacks and coroutines from a run
    queue (runq) and a time-ordered wait queue (waitq)."""

    def __init__(self, runq_len=16, waitq_len=16):
        # runq holds ready-to-run callbacks/coroutines; waitq holds
        # (time, callback, args) entries ordered by due time.
        self.runq = ucollections.deque((), runq_len, True)
        self.waitq = utimeq.utimeq(waitq_len)
        # Current task being run. Task is a top-level coroutine scheduled
        # in the event loop (sub-coroutines executed transparently by
        # yield from/await, event loop "doesn't see" them).
        self.cur_task = None

    def time(self):
        """Current scheduler time in ms (wrapping; compare via ticks_diff)."""
        return time.ticks_ms()

    def create_task(self, coro):
        """Schedule a coroutine to run as soon as possible."""
        # CPython 3.4.2
        self.call_later_ms(0, coro)
        # CPython asyncio incompatibility: we don't return Task object

    def call_soon(self, callback, *args):
        """Queue a callback or coroutine for the next scheduler pass."""
        if __debug__ and DEBUG:
            log.debug("Scheduling in runq: %s", (callback, args))
        self.runq.append(callback)
        # Coroutines are resumed rather than called, so args are queued as a
        # second runq item only for plain callables.
        if not isinstance(callback, type_gen):
            self.runq.append(args)

    def call_later(self, delay, callback, *args):
        """Schedule callback after *delay* seconds."""
        self.call_at_(time.ticks_add(self.time(), int(delay * 1000)), callback, args)

    def call_later_ms(self, delay, callback, *args):
        """Schedule callback after *delay* milliseconds (0 means 'soon')."""
        if not delay:
            return self.call_soon(callback, *args)
        self.call_at_(time.ticks_add(self.time(), delay), callback, args)

    def call_at_(self, time, callback, args=()):
        # Note: the 'time' parameter deliberately shadows the module-level
        # 'time' import inside this method.
        if __debug__ and DEBUG:
            log.debug("Scheduling in waitq: %s", (time, callback, args))
        self.waitq.push(time, callback, args)

    def wait(self, delay):
        # Default wait implementation, to be overriden in subclasses
        # with IO scheduling
        if __debug__ and DEBUG:
            log.debug("Sleeping for: %s", delay)
        time.sleep_ms(delay)

    def run_forever(self):
        """Main scheduler loop; returns only when a StopLoop syscall is seen."""
        # Reusable 3-slot buffer filled in place by waitq.pop() (no per-pop
        # allocation).
        cur_task = [0, 0, 0]
        while True:
            # Expire entries in waitq and move them to runq
            tnow = self.time()
            while self.waitq:
                t = self.waitq.peektime()
                delay = time.ticks_diff(t, tnow)
                if delay > 0:
                    break
                self.waitq.pop(cur_task)
                if __debug__ and DEBUG:
                    log.debug("Moving from waitq to runq: %s", cur_task[1])
                self.call_soon(cur_task[1], *cur_task[2])

            # Process runq
            # Snapshot the queue length so items scheduled while we run are
            # handled on the next outer pass, not this one.
            l = len(self.runq)
            if __debug__ and DEBUG:
                log.debug("Entries in runq: %d", l)
            while l:
                cb = self.runq.popleft()
                l -= 1
                args = ()
                if not isinstance(cb, type_gen):
                    # Plain callback: its args were queued as the next item.
                    args = self.runq.popleft()
                    l -= 1
                    if __debug__ and DEBUG:
                        log.info("Next callback to run: %s", (cb, args))
                    cb(*args)
                    continue

                if __debug__ and DEBUG:
                    log.info("Next coroutine to run: %s", (cb, args))
                self.cur_task = cb
                delay = 0
                try:
                    # NOTE(review): 'is ()' relies on the empty tuple being a
                    # singleton; equality comparison would be safer.
                    if args is ():
                        ret = next(cb)
                    else:
                        ret = cb.send(*args)
                    if __debug__ and DEBUG:
                        log.info("Coroutine %s yield result: %s", cb, ret)
                    # Dispatch on what the coroutine yielded.
                    if isinstance(ret, SysCall1):
                        arg = ret.arg
                        if isinstance(ret, SleepMs):
                            delay = arg
                        elif isinstance(ret, IORead):
                            cb.pend_throw(False)
                            self.add_reader(arg, cb)
                            continue
                        elif isinstance(ret, IOWrite):
                            cb.pend_throw(False)
                            self.add_writer(arg, cb)
                            continue
                        elif isinstance(ret, IOReadDone):
                            self.remove_reader(arg)
                        elif isinstance(ret, IOWriteDone):
                            self.remove_writer(arg)
                        elif isinstance(ret, StopLoop):
                            return arg
                        else:
                            assert False, "Unknown syscall yielded: %r (of type %r)" % (ret, type(ret))
                    elif isinstance(ret, type_gen):
                        self.call_soon(ret)
                    elif isinstance(ret, int):
                        # Delay
                        delay = ret
                    elif ret is None:
                        # Just reschedule
                        pass
                    elif ret is False:
                        # Don't reschedule
                        continue
                    else:
                        assert False, "Unsupported coroutine yield value: %r (of type %r)" % (ret, type(ret))
                except StopIteration as e:
                    if __debug__ and DEBUG:
                        log.debug("Coroutine finished: %s", cb)
                    continue
                except CancelledError as e:
                    if __debug__ and DEBUG:
                        log.debug("Coroutine cancelled: %s", cb)
                    continue
                # Currently all syscalls don't return anything, so we don't
                # need to feed anything to the next invocation of coroutine.
                # If that changes, need to pass that value below.
                if delay:
                    self.call_later_ms(delay, cb)
                else:
                    self.call_soon(cb)

            # Wait until next waitq task or I/O availability
            delay = 0
            if not self.runq:
                # -1 means "wait indefinitely" unless waitq gives a deadline.
                delay = -1
                if self.waitq:
                    tnow = self.time()
                    t = self.waitq.peektime()
                    delay = time.ticks_diff(t, tnow)
                    if delay < 0:
                        delay = 0
            self.wait(delay)

    def run_until_complete(self, coro):
        """Run *coro* to completion, then stop the loop."""
        def _run_and_stop():
            yield from coro
            yield StopLoop(0)
        self.call_soon(_run_and_stop())
        self.run_forever()

    def stop(self):
        """Ask run_forever() to exit on its next pass."""
        self.call_soon((lambda: (yield StopLoop(0)))())

    def close(self):
        # Nothing to release in the base loop; kept for API compatibility.
        pass
class SysCall:
    """Base class for scheduler syscalls: objects yielded by coroutines to
    request a service from the event loop."""

    def __init__(self, *args):
        self.args = args

    def handle(self):
        # Loop-side handling hook; base syscalls are dispatched by type in
        # run_forever() instead.
        raise NotImplementedError
# Optimized syscall with 1 arg
class SysCall1(SysCall):

    def __init__(self, arg):
        # Single positional argument stored directly, avoiding the tuple
        # allocation that the SysCall base would make.
        self.arg = arg
# Marker syscalls: run_forever() dispatches on their concrete type; 'arg'
# carries the associated value.
class StopLoop(SysCall1):
    # arg: value that run_forever() returns.
    pass

class IORead(SysCall1):
    # arg: stream handed to add_reader() for readability polling.
    pass

class IOWrite(SysCall1):
    # arg: stream handed to add_writer() for writability polling.
    pass

class IOReadDone(SysCall1):
    # arg: stream handed to remove_reader().
    pass

class IOWriteDone(SysCall1):
    # arg: stream handed to remove_writer().
    pass
# Singleton event loop and the class used to create it on first use.
# _event_loop_class is presumably reassigned by IO-capable variants of this
# module -- TODO confirm against the full uasyncio package.
_event_loop = None
_event_loop_class = EventLoop

def get_event_loop(runq_len=16, waitq_len=16):
    """Return the global event loop, creating it on first call.

    The queue-size arguments take effect only on the call that actually
    creates the loop.
    """
    global _event_loop
    if _event_loop is None:
        _event_loop = _event_loop_class(runq_len, waitq_len)
    return _event_loop
def sleep(secs):
    """Coroutine pausing the caller for *secs* seconds (truncated to ms)."""
    delay_ms = int(secs * 1000)
    yield delay_ms
# Implementation of sleep_ms awaitable with zero heap memory usage
class SleepMs(SysCall1):
    """Singleton awaitable: ``yield from sleep_ms(n)`` hands the delay to the
    scheduler via .arg (see the SleepMs branch in EventLoop.run_forever)."""

    def __init__(self):
        # v: delay set by __call__, pending delivery; arg: delay exposed to
        # the scheduler on first __next__.
        self.v = None
        self.arg = None

    def __call__(self, arg):
        self.v = arg
        #print("__call__")
        return self

    def __iter__(self):
        #print("__iter__")
        return self

    def __next__(self):
        if self.v is not None:
            # First resumption: publish the delay and yield self as the syscall.
            #print("__next__ syscall enter")
            self.arg = self.v
            self.v = None
            return self
        #print("__next__ syscall exit")
        # Second resumption: finish by raising the shared StopIteration
        # instance; clearing its traceback avoids keeping old frames alive.
        _stop_iter.__traceback__ = None
        raise _stop_iter

# Shared singletons so sleeping allocates nothing per call.
_stop_iter = StopIteration()
sleep_ms = SleepMs()
def cancel(coro):
    """Request cancellation of a scheduled coroutine.

    Uses MicroPython's generator pend_throw() extension: the CancelledError
    is raised at the coroutine's next resumption.
    """
    prev = coro.pend_throw(CancelledError())
    # presumably False means the coroutine is not parked in the wait queue,
    # so it must be rescheduled explicitly to receive the exception --
    # TODO confirm against MicroPython's pend_throw semantics.
    if prev is False:
        _event_loop.call_soon(coro)
class TimeoutObj:
    """Mutable handle linking a running coroutine to its timeout callback;
    .coro is reset to None when the coroutine completes, disarming the
    timeout (see wait_for_ms)."""

    def __init__(self, coro):
        self.coro = coro
def wait_for_ms(coro, timeout):
    """Run *coro* with a deadline of *timeout* milliseconds.

    If the deadline fires first, a TimeoutError is pended into the coroutine.
    This function is itself a coroutine and must be awaited/yielded from.
    """

    def waiter(coro, timeout_obj):
        # Wrap the target coroutine so the timeout can be disarmed on success.
        res = yield from coro
        if __debug__ and DEBUG:
            log.debug("waiter: cancelling %s", timeout_obj)
        timeout_obj.coro = None
        return res

    def timeout_func(timeout_obj):
        # Invoked by the event loop at the deadline; a None coro means the
        # waiter already finished and there is nothing to cancel.
        if timeout_obj.coro:
            if __debug__ and DEBUG:
                log.debug("timeout_func: cancelling %s", timeout_obj.coro)
            prev = timeout_obj.coro.pend_throw(TimeoutError())
            #print("prev pend", prev)
            # presumably False means the coroutine is not queued anywhere and
            # needs an explicit reschedule to receive the exception -- TODO
            # confirm against MicroPython's pend_throw semantics.
            if prev is False:
                _event_loop.call_soon(timeout_obj.coro)

    timeout_obj = TimeoutObj(_event_loop.cur_task)
    _event_loop.call_later_ms(timeout, timeout_func, timeout_obj)
    return (yield from waiter(coro, timeout_obj))
def wait_for(coro, timeout):
    """Like wait_for_ms(), but with the timeout given in seconds."""
    timeout_ms = int(timeout * 1000)
    return wait_for_ms(coro, timeout_ms)
def coroutine(f):
    """No-op decorator kept for CPython asyncio compatibility; in uasyncio
    any generator function is already usable as a coroutine."""
    return f
#
# The functions below are deprecated in uasyncio, and provided only
# for compatibility with CPython asyncio
#
def ensure_future(coro, loop=_event_loop):
    """Schedule *coro* on the global event loop and return it.

    NOTE(review): the *loop* parameter is ignored (the module-level
    _event_loop is always used) and its default is bound at import time,
    when _event_loop is still None; it exists only for CPython API shape.
    """
    _event_loop.call_soon(coro)
    # CPython asyncio incompatibility: we don't return Task object
    return coro
# CPython asyncio incompatibility: Task is a function, not a class (for efficiency)
def Task(coro, loop=_event_loop):
# Same as async()
_event_loop.call_soon(coro) | 30.184127 | 109 | 0.532604 |
795447e73fa487496d45a4e047c7a14d5ade4ba6 | 322 | py | Python | discordRoll.py | rustikles/EZ-diceRoll | d2c69ac8dbcab2b6d5fb6312fe0802ba41b7625f | [
"BSD-3-Clause"
] | null | null | null | discordRoll.py | rustikles/EZ-diceRoll | d2c69ac8dbcab2b6d5fb6312fe0802ba41b7625f | [
"BSD-3-Clause"
] | null | null | null | discordRoll.py | rustikles/EZ-diceRoll | d2c69ac8dbcab2b6d5fb6312fe0802ba41b7625f | [
"BSD-3-Clause"
] | null | null | null | import discord
from discord.ext import commands
import random
# Minimal prefix-command bot exposing a single "!roll <number>" command.
bot = commands.Bot(command_prefix='!')

@bot.command()
async def roll(ctx, number):
    """Roll a die with *number* sides and reply with the result."""
    try:
        # int() raises ValueError for non-numeric input, and randint raises
        # ValueError when the upper bound is below 1, so both bad cases land
        # in the handler below.
        arg = random.randint(1, int(number))
    except ValueError:
        await ctx.send("What the fuck is that???")
    else:
        await ctx.send(str(arg))

bot.run('TOKEN')  # 'TOKEN' is a placeholder; supply the real bot token here
| 18.941176 | 46 | 0.680124 |
7954487cab190e70c2dd79a8f6846ee6283c696e | 8,516 | py | Python | docs/conf.py | loanzen/quickbooks-py | c8e1a9857ea66ef9a5a0ed2d83b711a96c7e18b1 | [
"ISC"
] | 5 | 2015-09-03T00:21:28.000Z | 2018-04-30T03:04:43.000Z | docs/conf.py | loanzen/quickbooks-py | c8e1a9857ea66ef9a5a0ed2d83b711a96c7e18b1 | [
"ISC"
] | null | null | null | docs/conf.py | loanzen/quickbooks-py | c8e1a9857ea66ef9a5a0ed2d83b711a96c7e18b1 | [
"ISC"
] | 1 | 2016-05-01T17:26:32.000Z | 2016-05-01T17:26:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# quickbooks-py documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of the current working
# directory (Sphinx runs with the docs/ directory as cwd).
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# NOTE(review): the importable module is named 'quickbook3' although the
# project/repo is 'quickbooks-py' -- confirm this matches the package layout.
import quickbook3

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'QuickBooks Python Client'
copyright = u'2015, Ritesh Kadmawala'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = quickbook3.__version__
# The full version, including alpha/beta/rc tags.
release = quickbook3.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'quickbooks-pydoc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'quickbooks-py.tex',
     u'QuickBooks Python Client Documentation',
     u'Ritesh Kadmawala', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'quickbooks-py',
     u'QuickBooks Python Client Documentation',
     [u'Ritesh Kadmawala'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'quickbooks-py',
     u'QuickBooks Python Client Documentation',
     u'Ritesh Kadmawala',
     'quickbooks-py',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.855072 | 76 | 0.717708 |
7954489603b5accd410e51630eed922b37e30d67 | 2,537 | py | Python | workspace_tools/export/uvision4.py | bygreencn/mbed | 9196548e9a7230011d499556e9d162718febb7eb | [
"Apache-2.0"
] | 1 | 2019-05-07T15:01:19.000Z | 2019-05-07T15:01:19.000Z | workspace_tools/export/uvision4.py | bygreencn/mbed | 9196548e9a7230011d499556e9d162718febb7eb | [
"Apache-2.0"
] | null | null | null | workspace_tools/export/uvision4.py | bygreencn/mbed | 9196548e9a7230011d499556e9d162718febb7eb | [
"Apache-2.0"
] | null | null | null | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import basename
class Uvision4(Exporter):
    """Exporter that renders Keil uVision4 project files for mbed targets.

    For the selected target a ``.uvproj``/``.uvopt`` pair is generated from
    per-target templates; small parts listed in USING_MICROLIB get the
    MicroLib toolchain variant ('uARM') instead of the full 'ARM' one.
    """
    NAME = 'uVision4'
    TARGETS = ['LPC1768', 'LPC11U24', 'KL05Z', 'KL25Z', 'KL46Z', 'K20D5M', 'LPC1347', 'LPC1114', 'LPC11C24', 'LPC4088', 'LPC812', 'NUCLEO_F103RB']
    USING_MICROLIB = ['LPC11U24', 'LPC1114', 'LPC11C24', 'LPC812', 'NUCLEO_F103RB']
    # Maps resource attribute name -> uVision numeric file-type code.
    FILE_TYPES = {
        'c_sources':'1',
        'cpp_sources':'8',
        's_sources':'2'
    }
    # By convention uVision projects do not show header files in the editor:
    # 'headers':'5',
    def get_toolchain(self):
        """Return the ARM toolchain variant for self.target ('uARM' or 'ARM')."""
        return 'uARM' if (self.target in self.USING_MICROLIB) else 'ARM'
    def generate(self):
        """Render the uVision4 project (.uvproj) and options (.uvopt) files."""
        # Bucket the project sources into the three uVision file groups.
        source_files = {
            'mbed': [],
            'hal': [],
            'src': []
        }
        # Fix: items() instead of the Python-2-only iteritems(); also renamed
        # the loop variable so it no longer shadows the builtin `file`.
        for r_type, type_code in Uvision4.FILE_TYPES.items():
            for src_path in getattr(self.resources, r_type):
                entry = {'name': basename(src_path), 'type': type_code, 'path': src_path}
                if src_path.startswith("mbed\\common"):
                    source_files['mbed'].append(entry)
                elif src_path.startswith("mbed\\targets"):
                    source_files['hal'].append(entry)
                else:
                    source_files['src'].append(entry)
        # Drop empty groups so the templates do not emit empty sections.
        source_files = dict([(k, v) for k, v in source_files.items() if len(v) > 0])
        ctx = {
            'name': self.program_name,
            'include_paths': self.resources.inc_dirs,
            'scatter_file': self.resources.linker_script,
            'object_files': self.resources.objects + self.resources.libraries,
            'source_files': source_files.items(),
            'symbols': self.toolchain.get_symbols()
        }
        target = self.target.lower()
        # Project file
        self.gen_file('uvision4_%s.uvproj.tmpl' % target, ctx, '%s.uvproj' % self.program_name)
        self.gen_file('uvision4_%s.uvopt.tmpl' % target, ctx, '%s.uvopt' % self.program_name)
| 37.308824 | 146 | 0.607804 |
79544903ef093a0813e81720a43248ae5f6fcbb9 | 3,279 | py | Python | recipes/retrieve_all_pictures_from_ODF_files_cli.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 18 | 2018-04-19T08:30:48.000Z | 2022-02-14T11:00:27.000Z | recipes/retrieve_all_pictures_from_ODF_files_cli.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 15 | 2018-04-22T00:52:41.000Z | 2021-07-05T10:16:38.000Z | recipes/retrieve_all_pictures_from_ODF_files_cli.py | jdum/odfdo | 2494d0bed39f5a55974643206e9bafeed40f3a6b | [
"Apache-2.0"
] | 6 | 2018-04-22T00:14:12.000Z | 2021-12-06T01:42:07.000Z | #!/usr/bin/env python
"""
Analyse a list of files and directory (recurse), open all ODF documents and copy pictures
from documents in a directory.
"""
import os
import optparse
from hashlib import sha1
import time
from odfdo import Document
# encoding = "UTF8"
default_dest_dir = "my_collected_pictures"
known_images = set()
counter_image = 0
counter_odf = 0
counter_outside = 0
def store_image(path, name, content, dest_dir=default_dest_dir):
    """Write *content* into *dest_dir*, named after its source document.

    The target file name is "<odf-file-with-dots-replaced>_<image-name>";
    trailing underscores are appended until the name is free, so existing
    files are never overwritten. Bumps the module-wide ``counter_image``.
    """
    global counter_image
    doc_tag = os.path.basename(path).replace(".", "_")
    target = os.path.join(dest_dir, "{}_{}".format(doc_tag, name))
    # Never clobber an existing file: extend the name until it is unique.
    while os.path.exists(target):
        target = target + "_"
    with open(target, "wb") as out:
        out.write(content)
    counter_image += 1
def parse_odf_pics(path, dest_dir=default_dest_dir):
    """Open *path* as an ODF document and store every embedded picture.

    Using odfdo for:
    - open possible ODF document: Document (including URI)
    - find images inside the document: body.get_images / image.url

    Files whose extension does not start with "od" and documents that fail
    to open are skipped; images whose URL is not found inside the package
    are reported and counted in the module-wide ``counter_outside``.
    """
    global counter_odf
    global counter_outside
    suffix = os.path.basename(path).split(".")[-1].lower()
    if not suffix.startswith("od"):
        return
    try:
        document = Document(path)
    except Exception:
        # Fix: was a bare `except:`. Keep the best-effort skip for unreadable
        # files, but no longer swallow SystemExit/KeyboardInterrupt.
        return
    counter_odf += 1
    for image in document.body.get_images():
        image_url = image.url
        if not image_url:
            continue
        try:
            image_content = document.get_part(image_url)
        except KeyError:
            # The manifest references a part that is not in the package.
            print("- not found inside document:", path)
            print("  image URL:", image_url)
            counter_outside += 1
            continue
        image_name = image_url.split("/")[-1]
        # Only store each distinct image once (deduplicated by SHA-1).
        if check_known(image_content):
            store_image(path, image_name, image_content, dest_dir)
def check_known(content):
    """Return True the first time *content* is seen, False on repeats.

    Identity is tracked by SHA-1 digest in the module-level
    ``known_images`` set.
    """
    digest = sha1(content).digest()
    if digest not in known_images:
        known_images.add(digest)
        return True
    return False
if __name__ == "__main__":
usage = "usage: %prog [options] <ODF documents dir>"
description = "Retrieve images from several ODF sources."
parser = optparse.OptionParser(usage, description=description)
parser.add_option(
"-d",
"--directory",
dest="directory",
help="write images in DIRECTORY",
action="store",
type="string",
)
options, sources = parser.parse_args()
if not sources:
print("Need some ODF source !")
parser.print_help()
exit(0)
if options.directory:
output_directory = options.directory
else:
output_directory = default_dest_dir
if not os.path.exists(output_directory):
os.mkdir(output_directory)
t0 = time.time()
for source in sources:
if os.path.isdir(source):
for root, dirs, files in os.walk(source):
for name in files:
parse_odf_pics(os.path.join(root, name), output_directory)
else:
parse_odf_pics(source, output_directory)
elapsed = int(time.time() - t0)
print(
"%s images copied (%s not found) from %s ODF files to %s in %ss."
% (counter_image, counter_outside, counter_odf, output_directory, elapsed)
)
| 29.017699 | 89 | 0.635255 |
79544975be592888bb2055cf756aa45efe25f091 | 1,897 | py | Python | tests/test_directive_only.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | tests/test_directive_only.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | tests/test_directive_only.py | balabit-deps/balabit-os-7-sphinx | 4e18ca37f4ddddf346c0b30835a544db20887259 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_only_directive
~~~~~~~~~~~~~~~~~~~
Test the only directive with the test root.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from docutils import nodes
from sphinx.util.nodes import process_only_nodes
import pytest
@pytest.mark.sphinx('text', testroot='directive-only')
def test_sectioning(app, status, warning):
    """Build the 'only' test document and verify its section numbering.

    Checks that `only`-directive processing leaves the section tree with
    consistent hierarchical numbers (1., 1.1., ...) and exactly four
    top-level headings.
    """
    def getsects(section):
        # Recursively reduce a doctree node to [title, [subsections...]].
        # Non-section nodes are flattened by recursing into their children.
        if not isinstance(section, nodes.section):
            return [getsects(n) for n in section.children]
        title = section.next_node(nodes.title).astext().strip()
        subsects = []
        children = section.children[:]
        # Breadth-first-ish scan: direct section children are collected,
        # other nodes are expanded in place so nested sections are found.
        while children:
            node = children.pop(0)
            if isinstance(node, nodes.section):
                subsects.append(node)
                continue
            children = list(node.children) + children
        return [title, [getsects(subsect) for subsect in subsects]]
    def testsects(prefix, sects, indent=0):
        # Assert that each section title starts with the expected
        # hierarchical number, then recurse into its subsections.
        title = sects[0]
        parent_num = title.split()[0]
        assert prefix == parent_num, \
            'Section out of place: %r' % title
        for i, subsect in enumerate(sects[1]):
            num = subsect[0].split()[0]
            assert re.match('[0-9]+[.0-9]*[.]', num), \
                'Unnumbered section: %r' % subsect[0]
            testsects(prefix + str(i + 1) + '.', subsect, indent + 4)
    app.builder.build(['only'])
    doctree = app.env.get_doctree('only')
    app.env.apply_post_transforms(doctree, 'only')
    parts = [getsects(n)
             for n in [_n for _n in doctree.children if isinstance(_n, nodes.section)]]
    for i, s in enumerate(parts):
        testsects(str(i + 1) + '.', s, 4)
    assert len(parts) == 4, 'Expected 4 document level headings, got:\n%s' % \
        '\n'.join([p[0] for p in parts])
| 33.280702 | 87 | 0.590406 |
79544aa298a3f632aa43d37543872230660e6df3 | 8,382 | py | Python | source1/qc/qc.py | tltneon/SourceIO | 418224918c2b062a4c78a41d4d65329ba2decb22 | [
"MIT"
] | 199 | 2019-04-02T02:30:58.000Z | 2022-03-30T21:29:49.000Z | source1/qc/qc.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | 113 | 2019-03-03T19:36:25.000Z | 2022-03-31T19:44:05.000Z | source1/qc/qc.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | 38 | 2019-05-15T16:49:30.000Z | 2022-03-22T03:40:43.000Z | import math
from typing import List
import numpy as np
from ..mdl.v49.mdl_file import Mdl
from ..mdl.structs.bone import ProceduralBoneType
from ..mdl.structs.header import StudioHDRFlags
from ..mdl.structs.bodygroup import BodyPartV49
from pathlib import Path
def vector_i_transform(input: List, matrix: List):
    """Apply the inverse of a 3x4 bone transform to a 3D point.

    *matrix* is row-major: rows 0-2 hold the 3x3 rotation part and row 3
    holds the translation. The translation is subtracted first, then the
    point is multiplied by the rotation rows, i.e. ``R @ (p - t)``.

    :param input: 3-component point (any indexable of floats)
    :param matrix: 3x4 transform as four rows of (at least) three floats
    :return: transformed point as a float64 numpy array of shape (3,)
    """
    # Vectorized form of the original per-component loops; only the first
    # three entries of each row/point are used, exactly as before.
    rotation = np.asarray([row[:3] for row in matrix[:3]], dtype=np.float64)
    translation = np.asarray(matrix[3][:3], dtype=np.float64)
    point = np.asarray(input[:3], dtype=np.float64)
    return rotation @ (point - translation)
def generate_qc(mdl: Mdl, buffer, plugin_version="UNKNOWN"):
    """Write a QC (studiomdl compile script) for *mdl* into *buffer*.

    The QC is emitted as plain text: header, $model/$bodygroup commands,
    skin families, misc flags, material paths, jigglebones and a single
    idle sequence.

    :param mdl: parsed Source-engine model container
    :param buffer: writable text stream that receives the QC text
    :param plugin_version: SourceIO version stamped into the header comment
    """
    buffer.write(f"// Created by SourceIO v{plugin_version}\n\n")
    buffer.write(f"$modelname \"{mdl.header.name}\"\n")
    def write_model(bodygroup: BodyPartV49):
        # Emit one $model command for a single-model bodygroup, including
        # eyeball and flex sub-blocks when the model carries them.
        # (`has_eyebals` is the attribute's actual spelling in the MDL structs.)
        model = bodygroup.models[0]
        name = Path(model.name if (model.name and model.name != 'blank') else f"{bodygroup.name}-{model.name}").stem
        buffer.write(f"$model \"{name}\" \"{name}\"")
        if model.has_flexes or model.has_eyebals:
            buffer.write("{\n\n")
            if model.has_eyebals:
                for n, eyeball in enumerate(model.eyeballs):
                    buffer.write('\teyeball')
                    diameter = eyeball.radius * 2
                    angle = round(math.degrees(math.atan(eyeball.z_offset)), 6)
                    iris_scale = 1 / eyeball.iris_scale
                    # Name the first two eyeballs left/right from the sign of
                    # the z-offset angle; fall back to an indexed name.
                    if n == 0 and angle > 0:
                        buffer.write(' "eye_right"')
                    elif n == 1 and angle < 0:
                        buffer.write(' "eye_left"')
                    else:
                        buffer.write(' "eye_{}"'.format(n))
                    bone = mdl.bones[eyeball.bone_index]
                    buffer.write(f" \"{bone.name}\"")
                    # Convert the eyeball origin into the parent bone's space
                    # via the bone's pose-to-bone matrix.
                    pos = vector_i_transform(eyeball.org, bone.pose_to_bone)
                    buffer.write(f" {pos[0]:.4} {pos[1]:.4} {pos[2]:.4}")
                    buffer.write(f" \"{mdl.materials[eyeball.material_id].name}\"")
                    buffer.write(" {}".format(diameter))
                    buffer.write(" {}".format(angle))
                    buffer.write(" \"iris_unused\"")
                    buffer.write(" {}".format(int(iris_scale)))
                    buffer.write("\n")
                buffer.write("\n")
            if model.has_flexes:
                # Flexes are emitted as QC comments only (deduplicated),
                # listing stereo pairs when a partner flex exists.
                all_flexes = []
                for mesh in model.meshes:
                    all_flexes.extend(mesh.flexes)
                for flex in set(all_flexes):
                    flex_name = mdl.flex_names[flex.flex_desc_index]
                    buffer.write('\t//{} {} {}\n'.format('stereo' if flex.partner_index > 0 else 'mono',
                                                        flex_name,
                                                        mdl.flex_names[
                                                            flex.partner_index] if flex.partner_index > 0 else ''))
            buffer.write("}")
        else:
            buffer.write("\n")
    def write_bodygroup(bodygroup: BodyPartV49):
        # Emit a $bodygroup block; mesh-less models become "blank" entries.
        buffer.write(f"$bodygroup \"{bodygroup.name}\" ")
        buffer.write("{\n")
        for model in bodygroup.models:
            if len(model.meshes) == 0:
                buffer.write("\tblank\n")
            else:
                model_name = Path(model.name).stem
                buffer.write(f"\tstudio \"{model_name}\"\n")
        buffer.write("}\n")
    def write_skins():
        # One line per skin family inside the $texturegroup block.
        buffer.write('$texturegroup "skinfamilies"{\n')
        for skin_fam in mdl.skin_groups:
            buffer.write('{')
            for mat in skin_fam:
                mat_name = mat
                buffer.write('"{}" '.format(mat_name))
            buffer.write('}\n')
        buffer.write('}\n\n')
    def write_misc():
        # Surface property, eye deflection/position and header flag toggles.
        buffer.write(f"$surfaceprop \"{mdl.header.surface_prop}\"\n")
        deflection = math.degrees(math.acos(mdl.header.max_eye_deflection))
        buffer.write(f"$maxeyedeflection {deflection:.1f}\n")
        eye_pos = mdl.header.eye_position
        buffer.write(f"$eyeposition {eye_pos[0]:.3} {eye_pos[1]:.3} {eye_pos[2]:.3}\n")
        if mdl.header.flags & StudioHDRFlags.AMBIENT_BOOST:
            buffer.write('$ambientboost\n')
        if mdl.header.flags & StudioHDRFlags.TRANSLUCENT_TWOPASS:
            buffer.write('$mostlyopaque\n')
        if mdl.header.flags & StudioHDRFlags.STATIC_PROP:
            buffer.write('$staticprop\n')
        if mdl.header.flags & StudioHDRFlags.SUBDIVISION_SURFACE:
            buffer.write('$subd\n')
        buffer.write('\n')
    def write_texture_paths():
        # One $cdmaterials line per non-empty path; an empty FIRST path is
        # still emitted (keeps the model root as a search path).
        for n, texture_path in enumerate(mdl.materials_paths):
            if n == 0 and not texture_path:
                buffer.write('$cdmaterials "{}"\n'.format(texture_path))
            elif texture_path:
                buffer.write('$cdmaterials "{}"\n'.format(texture_path))
        buffer.write('\n')
    def write_used_materials():
        # Informational comment block. NOTE(review): "MATERISLS" typo is part
        # of the emitted QC text, so it is left untouched here.
        buffer.write('//USED MATERISLS:\n')
        for texture in mdl.materials:
            buffer.write('\t//{}\n'.format(texture.name))
        buffer.write('\n')
    # Single-model bodygroups become $model commands, multi-model ones
    # become $bodygroup blocks; empty bodygroups are skipped.
    for bodygroup in mdl.body_parts:
        if len(bodygroup.models) == 1:
            write_model(bodygroup)
        elif len(bodygroup.models) > 1:
            write_bodygroup(bodygroup)
    def write_jiggle_bones():
        # Emit a $jigglebone block for every bone with a JIGGLE procedural
        # rule, mirroring the rule's flag-gated constraint fields.
        for bone in mdl.bones:
            if bone.procedural_rule is not None:
                if bone.procedural_rule_type == ProceduralBoneType.JIGGLE:
                    jbone = bone.procedural_rule
                    buffer.write(f"$jigglebone {bone.name} ")
                    buffer.write('{\n')
                    if jbone.flags & jbone.flags.IS_FLEXIBLE:
                        buffer.write("\tis_flexible {\n")
                        buffer.write('\t\tlength {}\n'.format(jbone.length))
                        buffer.write('\t\ttip_mass {}\n'.format(jbone.tip_mass))
                        buffer.write('\t\tpitch_stiffness {}\n'.format(jbone.pitch_stiffness))
                        buffer.write('\t\tpitch_damping {}\n'.format(jbone.pitch_damping))
                        buffer.write('\t\tyaw_stiffness {}\n'.format(jbone.yaw_stiffness))
                        buffer.write('\t\tyaw_damping {}\n'.format(jbone.yaw_damping))
                        if jbone.flags & jbone.flags.HAS_LENGTH_CONSTRAINT:
                            buffer.write('\t\talong_stiffness {}\n'.format(jbone.along_stiffness))
                            buffer.write('\t\talong_damping {}\n'.format(jbone.along_damping))
                        if jbone.flags & jbone.flags.HAS_ANGLE_CONSTRAINT:
                            # NOTE(review): radians->degrees via 180/3.1415
                            # (approximate pi), kept as in the original.
                            buffer.write('\t\tangle_constraint {}\n'.format(round(jbone.angle_limit * 180 / 3.1415, 3)))
                        if jbone.flags & jbone.flags.HAS_PITCH_CONSTRAINT:
                            buffer.write('\t\tpitch_constraint {} {}\n'.format(jbone.min_pitch, jbone.max_pitch))
                            buffer.write('\t\tpitch_friction {}\n'.format(jbone.pitch_friction))
                        if jbone.flags & jbone.flags.HAS_YAW_CONSTRAINT:
                            buffer.write('\t\tyaw_constraint {} {}\n'.format(jbone.min_yaw, jbone.max_yaw))
                            buffer.write('\t\tyaw_friction {}\n'.format(jbone.yaw_friction))
                        buffer.write('\t}\n')
                    if jbone.flags & jbone.flags.IS_RIGID:
                        buffer.write('is_rigid {\n')
                        buffer.write('}\n')
                    buffer.write('}\n\n')
    def write_sequences():
        # Minimal single idle sequence referencing the first model's mesh.
        buffer.write(f"$sequence \"idle\" ")
        buffer.write("{\n")
        file_name = Path(mdl.body_parts[0].models[0].name).stem
        buffer.write(f"\t\"{file_name}\"\n")
        buffer.write("\tactivity \"ACT_DIERAGDOLL\" 1\n")
        buffer.write(f"\tfadein {0.2:.2f}\n")
        buffer.write(f"\tfadeout {0.2:.2f}\n")
        buffer.write("\tfps 30\n")
        buffer.write("}\n")
    buffer.write('\n')
    write_skins()
    write_misc()
    write_texture_paths()
    write_used_materials()
    write_jiggle_bones()
    write_sequences()
79544b939af404fd729292c81efd150c8933f9bb | 1,855 | py | Python | onnx_export.py | christiansafka/ONNX-Inference-AWS-Lambda | 28663ff0b25bb813aba92a37dc7dfba396551373 | [
"MIT"
] | 1 | 2022-03-01T11:39:49.000Z | 2022-03-01T11:39:49.000Z | onnx_export.py | christiansafka/ONNX-Inference-AWS-Lambda | 28663ff0b25bb813aba92a37dc7dfba396551373 | [
"MIT"
] | null | null | null | onnx_export.py | christiansafka/ONNX-Inference-AWS-Lambda | 28663ff0b25bb813aba92a37dc7dfba396551373 | [
"MIT"
] | null | null | null | import torch
import torch.onnx
import onnxruntime
import numpy as np
from efficientnet_pytorch.model import EfficientNetAutoEncoder
# Load the pretrained autoencoder and switch to inference mode so that
# dropout/batch-norm behave deterministically during tracing.
model = EfficientNetAutoEncoder.from_pretrained('efficientnet-b0')
model.eval()
# Tracing input: one 3x224x224 image; only the batch axis is dynamic below.
dummy_input = torch.rand(1, 3, 224, 224)
# # Export the model
dynamic_axes = {'input' : {0 : 'batch_size'},
'output' : {0 : 'batch_size'}}
torch.onnx.export(model, # model being run
##since model is in the cuda mode, input also need to be
dummy_input, # model input (or a tuple for multiple inputs)
"efficientnet_autoencoder.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'],
dynamic_axes=dynamic_axes,
)
def to_numpy(tensor):
    """Convert a torch tensor to a NumPy array on the CPU.

    ``detach()`` is a no-op for tensors that do not require grad, so the
    original ``requires_grad`` branch was redundant; the unconditional
    chain handles both cases identically.
    """
    return tensor.detach().cpu().numpy()
# Sanity check: run the same dummy input through PyTorch and through the
# exported ONNX graph, then compare the latent (second) outputs.
ae_output, latent_fc_output = model(dummy_input)
ort_session = onnxruntime.InferenceSession("efficientnet_autoencoder.onnx")
# compute ONNX Runtime output prediction
ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(dummy_input)}
ort_outs = ort_session.run(None, ort_inputs)
# compare ONNX Runtime and PyTorch results
# (loose tolerances absorb opset-10 operator rounding differences)
np.testing.assert_allclose(to_numpy(latent_fc_output[:]), ort_outs[1][:], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
| 40.326087 | 112 | 0.672237 |
79544b9f2088f5aa0b7657f58614f66b0f6c0d26 | 1,920 | py | Python | phub.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | 1 | 2020-10-23T09:35:36.000Z | 2020-10-23T09:35:36.000Z | phub.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | null | null | null | phub.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | null | null | null |
from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="phub"))
async def _(event):
    """Edit the triggering message through a letter-by-letter animation."""
    if event.fwd_from:
        return
    animation_interval = 0.5
    animation_ttl = range(0, 101)
    await event.edit("phub")
    animation_chars = [
        "P_",
        "PO_",
        "POR_",
        "PORN_",
        "PORNH_",
        "PORNHU_",
        "PORNHUB_",
        "PORNHUB",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Fix: the frame list has 8 entries but `i % 10` indexed up to 9,
        # raising IndexError on the 9th and 10th tick of every cycle.
        await event.edit(animation_chars[i % len(animation_chars)])
@borg.on(admin_cmd(pattern=r"amore"))
async def _(event):
    """Edit the triggering message through a letter-by-letter animation."""
    if event.fwd_from:
        return
    animation_interval = 0.5
    animation_ttl = range(0, 101)
    await event.edit("amore")
    animation_chars = [
        "A_",
        "AM_",
        "AMO_",
        "AMOR_",
        "AMORE_",
        "AMORE❤_",
        ".-.",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Fix: 7 frames but `i % 10` indexed up to 9 -> IndexError.
        await event.edit(animation_chars[i % len(animation_chars)])
from telethon import events
import asyncio
@borg.on(admin_cmd(pattern=r"sexy"))
async def _(event):
    """Edit the triggering message through a letter-by-letter animation."""
    if event.fwd_from:
        return
    animation_interval = 0.5
    animation_ttl = range(0, 101)
    await event.edit("Sexy")
    animation_chars = [
        "S_",
        "SE_",
        "SEX_",
        "SEXY_",
        "SEXY👄_",
        "SEXY👄",
    ]
    for i in animation_ttl:
        await asyncio.sleep(animation_interval)
        # Fix: 6 frames but `i % 10` indexed up to 9 -> IndexError.
        await event.edit(animation_chars[i % len(animation_chars)])
| 14.222222 | 50 | 0.450521 |
79544bfe1647e4df39c71b069e146b332c81d7bc | 3,407 | py | Python | snippets/EM/EM_pipeline_2018_IMPORTANT/deprecated/config_myelin3D.py | michielkleinnijenhuis/EM | f46a9b11298919b359e80d9f23a7e824df1356cb | [
"Apache-2.0"
] | null | null | null | snippets/EM/EM_pipeline_2018_IMPORTANT/deprecated/config_myelin3D.py | michielkleinnijenhuis/EM | f46a9b11298919b359e80d9f23a7e824df1356cb | [
"Apache-2.0"
] | null | null | null | snippets/EM/EM_pipeline_2018_IMPORTANT/deprecated/config_myelin3D.py | michielkleinnijenhuis/EM | f46a9b11298919b359e80d9f23a7e824df1356cb | [
"Apache-2.0"
def config(parameters=None, run_from='', run_upto='', run_only=''):
    """Return the run configuration for the 3D myelin segmentation pipeline.

    Parameters
    ----------
    parameters : dict or None
        Dict to populate with per-stage parameter groups ('ds', 'mm', 'cc',
        'ss', 'cn'); a fresh dict is created when None (default).
    run_from : str
        Name of a pipeline step; every step BEFORE it is switched off.
    run_upto : str
        Name of a pipeline step; it and every step AFTER it are switched off.
    run_only : str
        Name of a pipeline step; only that step stays enabled.

    Returns
    -------
    tuple
        (datadir, datasets, options, parameters)
    """
    # Fix: the original default `parameters={}` was a shared mutable default,
    # so entries added in one call leaked into all later calls.
    if parameters is None:
        parameters = {}

    # dataset
    datadir = '/data/ndcn-fmrib-water-brain/ndcn0180/EM/Myrf_00'
    datasets = ['T4_1']

    # Ordered pipeline steps; the run_from/run_upto/run_only switches below
    # index into this list.
    steps = [
        'normalize_datasets', 'dm3_to_tif', 'register', 'downsample', 'tif_to_h5',
        'datamask', 'myelinmask', 'myelinmask_multi',
        'connected_components', 'connected_components_filter', 'connected_components_mapping',
        'separate_sheaths', 'separate_sheaths_weighted',
        'seg_stats', 'h5_to_nii',
    ]

    # options ('run_eed' is not in `steps`, so the switches never touch it)
    options = {
        'normalize_datasets': False,
        'dm3_to_tif': True,
        'register': True,
        'downsample': True,
        'tif_to_h5': True,
        'run_eed': True,
        'datamask': True,
        'myelinmask': True,
        'myelinmask_multi': False,
        'connected_components': True,
        'connected_components_filter': True,
        'connected_components_mapping': True,
        'separate_sheaths': True,
        'separate_sheaths_weighted': True,
        'seg_stats': True,
        'h5_to_nii': True,
    }
    if run_from:
        idx = steps.index(run_from)
        for step in steps[:idx]:
            options[step] = False
    if run_upto:
        idx = steps.index(run_upto)
        for step in steps[idx:]:
            options[step] = False
    if run_only:
        for step in steps:
            options[step] = run_only == step

    # dataset parameters
    parameters['ds'] = {
        # NOTE(review): the 'dm3dir' value was lost in the source (the key had
        # no value, a syntax error); it presumably points at the directory
        # holding the raw .dm3 acquisitions -- confirm and fill in.
        'dm3dir': '',
        'elsize': [1, 1, 1, 1],
        'axlab': ['z', 'y', 'x', 'c'],
        'datapostfix': '_norm.tif',
        'probspostfix': '_norm_probs.tif',
        'maskpostfix': '_maskDS_manual.nii.gz',
        'threshold': -1,
    }
    # myelin mask parameters
    parameters['mm'] = {
        'lower_threshold': 0.5,
        'min_size': 1000,
        'maskpostfix': '_maskMM_manual.nii.gz',
    }
    # connected component parameters
    parameters['cc'] = {
        'map_propnames': [
            'label',
            'area',
            'eccentricity',
            'euler_number',
            'extent',
            'solidity',
        ],
        'criteria': (1000, 100000, None, 1.0, 0.00, 0, 0.00),
        # FIXME: make criteria into dictionary
        # (min_area,
        #  max_area,
        #  max_intensity_mb,
        #  max_eccentricity,
        #  min_solidity,
        #  min_euler_number,
        #  min_extent) = criteria
    }
    # separate sheaths parameters
    parameters['ss'] = {
        'MAdilation': 100,
        'sigmoidweighting': 0.01,
    }
    # convert-to-nifti parameters
    parameters['cn'] = {
        'xXyY': (1000, 4000, 1000, 4000),  # None
        'dsets': [
            'data',
            'probs',
            'maskDS',
            'maskMM',
            'CC_2D',
            'CC_2D_props/label',
            'CC_2D_props/area',
            'CC_2D_props/eccentricity',
            'CC_2D_props/euler_number',
            'CC_2D_props/extent',
            'CC_2D_props/solidity',
            # 'CC_2D_props/label_remapped',
            'labelMM',
            'labelMM_steps/wsmask',
            'labelMM_steps/distance_simple',
            'labelMM_sw',
        ],
    }

    return datadir, datasets, options, parameters
| 28.391667 | 94 | 0.521867 |
79544d6026e90baa5f4fe262dd6e91c2ca1c6cfe | 1,759 | py | Python | FlaskApp/app.py | pitt-makerspace/ms_lockout | 39337584e6c2b26b30f3297b5ad2563eb6a71eb1 | [
"MIT"
] | null | null | null | FlaskApp/app.py | pitt-makerspace/ms_lockout | 39337584e6c2b26b30f3297b5ad2563eb6a71eb1 | [
"MIT"
] | null | null | null | FlaskApp/app.py | pitt-makerspace/ms_lockout | 39337584e6c2b26b30f3297b5ad2563eb6a71eb1 | [
"MIT"
] | null | null | null | """
Pitt MakerSpace Control Panel Web Interface.
"""
import datetime
from flask import Flask, render_template, redirect, url_for, request
from csv_helper import read_csv
# DATABASE SETUP
# The roster is a CSV file loaded once at import time; requests are served
# from this in-memory copy (restart the app to pick up CSV edits).
CSV_FILE = 'ms_lockout.csv'
DATA = read_csv(CSV_FILE)
# Column 0 of each row is the id matched against incoming uids.
USER_IDS = [col[0] for col in DATA]
# Column index holding the 'yes'/'no' access flag checked by /post.
access_col_idx = 3
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the control-panel landing page."""
    return render_template('index.html')
@app.route('/control')
def control():
    """Serve the device-control page."""
    return render_template('control.html')
@app.route('/usage')
def usage():
    """Serve the usage-statistics page."""
    return render_template('usage.html')
@app.route('/post', methods=['POST'])
def get_post():
    """Handle an access request posted by a device (form fields: device, uid).

    Looks the uid up in the in-memory roster and answers with a plain-text
    verdict: 'yes', 'no', or 'not-found'. Each request is also logged to
    stdout with a timestamp.
    """
    device, uid = request.form['device'], request.form['uid']
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M")
    message = 'Time: %s | Device: %s | UID: %s' % (time, device, uid)
    print(message)
    try:
        user_index = USER_IDS.index(uid)
        print('User found: %i' % user_index)
        if DATA[user_index][access_col_idx] == 'yes':
            print('%s has acess.' % DATA[user_index][1])
            response = 'yes'
        else:
            print('%s does not have acess.' % DATA[user_index][1])
            response = 'no'
    except (ValueError, IndexError):
        # Fix: was a bare `except:`. list.index raises ValueError for an
        # unknown uid; IndexError covers a malformed (too-short) CSV row.
        # A bare except also hid unrelated bugs and KeyboardInterrupt.
        print('User not found!')
        response = 'not-found'
    return response
# route for handling the login page logic
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on valid admin credentials redirect to index.

    NOTE(review): credentials are hard-coded ('admin'/'admin') -- replace
    with a real user store before any production use.
    """
    error = None
    if request.method == 'POST':
        if request.form['username'] != 'admin' or request.form['password'] != 'admin':
            error = 'Invalid Credentials. Please try again.'
        else:
            # Fix: url_for() takes an *endpoint name*, not a URL path;
            # url_for('/') raised werkzeug.routing.BuildError at runtime.
            return redirect(url_for('index'))
    return render_template('login.html', error=error)
if __name__ == '__main__':
    # NOTE(review): binding to 0.0.0.0 with debug=True exposes the Werkzeug
    # debugger (arbitrary code execution) to the whole network -- confirm
    # this configuration is development-only.
    app.run(host='0.0.0.0', debug=True)
| 24.430556 | 86 | 0.617396 |
79544d82dbfe8f45523cdaceacc46a24c18f5808 | 4,076 | py | Python | alipay/aop/api/request/AlipayMarketingCampaignDrawcampWhitelistCreateRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayMarketingCampaignDrawcampWhitelistCreateRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayMarketingCampaignDrawcampWhitelistCreateRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignDrawcampWhitelistCreateModel import AlipayMarketingCampaignDrawcampWhitelistCreateModel
class AlipayMarketingCampaignDrawcampWhitelistCreateRequest(object):
    """Request wrapper for alipay.marketing.campaign.drawcamp.whitelist.create.

    Holds the business payload (as a model object or raw dict) plus the
    common gateway fields, and serializes them into the flat parameter
    dict that the OpenAPI client signs and posts (see ``get_params``).
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain value that the
        # model class converts via from_alipay_dict.
        if isinstance(value, AlipayMarketingCampaignDrawcampWhitelistCreateModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMarketingCampaignDrawcampWhitelistCreateModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (attribute stays unchanged).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        # Lazily create the user-defined-parameter dict on first use.
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Assemble the flat request-parameter dict sent to the gateway.

        biz_model is serialized under the P_BIZ_CONTENT constant key, while
        biz_content (and the optional gateway fields) use literal keys; any
        user-defined params are merged in last and may override entries.
        """
        params = dict()
        params[P_METHOD] = 'alipay.marketing.campaign.drawcamp.whitelist.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        # This API carries no file attachments; multipart payload is empty.
        multipart_params = dict()
        return multipart_params
| 28.110345 | 148 | 0.653582 |
79544e4ee85761da98d177ce17162b89b285b8c6 | 211 | py | Python | hms_tz/hms_tz/doctype/lab_bundle/test_lab_bundle.py | av-dev2/hms_tz | a36dbe8bfacf6a770913b1bfa000d43edd2cd87a | [
"MIT"
] | 5 | 2021-04-20T06:11:25.000Z | 2021-11-18T15:37:25.000Z | hms_tz/hms_tz/doctype/lab_bundle/test_lab_bundle.py | av-dev2/hms_tz | a36dbe8bfacf6a770913b1bfa000d43edd2cd87a | [
"MIT"
] | 90 | 2021-04-05T13:36:34.000Z | 2022-03-31T07:26:25.000Z | hms_tz/hms_tz/doctype/lab_bundle/test_lab_bundle.py | av-dev2/hms_tz | a36dbe8bfacf6a770913b1bfa000d43edd2cd87a | [
"MIT"
] | 10 | 2021-03-26T06:43:20.000Z | 2022-02-18T06:36:58.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Aakvatech and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestLabBundle(unittest.TestCase):
    # Placeholder suite: the Lab Bundle doctype has no unit tests yet.
    pass
| 19.181818 | 48 | 0.763033 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.