hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71d63cc7c9ddb1abda047591af6a10facfba083 | 1,119 | py | Python | Web/Python/paraview/web/camera.py | qiangwushuang/ParaView | f35ed90a6582e9d21924b66a905498f6a38ab4a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2021-07-07T22:53:19.000Z | 2021-07-31T19:29:35.000Z | Web/Python/paraview/web/camera.py | qiangwushuang/ParaView | f35ed90a6582e9d21924b66a905498f6a38ab4a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-11-18T16:50:34.000Z | 2022-01-21T13:31:47.000Z | Web/Python/paraview/web/camera.py | qiangwushuang/ParaView | f35ed90a6582e9d21924b66a905498f6a38ab4a3 | [
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2020-10-02T10:14:35.000Z | 2022-03-10T07:50:22.000Z | from paraview import simple
from vtk.web import camera
def update_camera(viewProxy, cameraData):
    """Apply the camera state stored in *cameraData* to *viewProxy* and render.

    *cameraData* is a dict with 'focalPoint', 'position' and 'viewUp' entries.
    """
    # Copy each incoming dict entry onto the matching camera proxy property.
    viewProxy.CameraViewUp = cameraData['viewUp']
    viewProxy.CameraPosition = cameraData['position']
    viewProxy.CameraFocalPoint = cameraData['focalPoint']
    simple.Render(viewProxy)
def create_spherical_camera(viewProxy, dataHandler, phiValues, thetaValues):
    """Build a vtk.web SphericalCamera seeded from *viewProxy*'s current state."""
    focal = viewProxy.CenterOfRotation
    position = viewProxy.CameraPosition
    view_up = viewProxy.CameraViewUp
    return camera.SphericalCamera(dataHandler, focal, position, view_up,
                                  phiValues, thetaValues)
def create_cylindrical_camera(viewProxy, dataHandler, phiValues, translationValues):
    """Build a vtk.web CylindricalCamera seeded from *viewProxy*'s current state."""
    focal = viewProxy.CenterOfRotation
    position = viewProxy.CameraPosition
    view_up = viewProxy.CameraViewUp
    return camera.CylindricalCamera(dataHandler, focal, position, view_up,
                                    phiValues, translationValues)
def create_cube_camera(viewProxy, dataHandler, viewForward, viewUp, positions):
    """Build a vtk.web CubeCamera.

    *viewProxy* is accepted for interface symmetry with the other factory
    helpers but is not consulted here.
    """
    return camera.CubeCamera(dataHandler, viewForward, viewUp, positions)
def create_stereo_cube_camera(viewProxy, dataHandler, viewForward, viewUp,
                              positions, eyeSeparation=6.5):
    """Build a vtk.web StereoCubeCamera.

    *viewProxy* is unused (kept for interface symmetry); *eyeSeparation*
    defaults to 6.5, matching the vtk.web default.
    """
    return camera.StereoCubeCamera(dataHandler, viewForward, viewUp,
                                   positions, eyeSeparation)
| 53.285714 | 156 | 0.822163 | from paraview import simple
from vtk.web import camera
def update_camera(viewProxy, cameraData):
    """Apply focal point, position and view-up from *cameraData* then render."""
    viewProxy.CameraViewUp = cameraData['viewUp']
    viewProxy.CameraPosition = cameraData['position']
    viewProxy.CameraFocalPoint = cameraData['focalPoint']
    simple.Render(viewProxy)


def create_spherical_camera(viewProxy, dataHandler, phiValues, thetaValues):
    """SphericalCamera factory seeded from the view's current camera state."""
    return camera.SphericalCamera(dataHandler, viewProxy.CenterOfRotation,
                                  viewProxy.CameraPosition,
                                  viewProxy.CameraViewUp,
                                  phiValues, thetaValues)


def create_cylindrical_camera(viewProxy, dataHandler, phiValues, translationValues):
    """CylindricalCamera factory seeded from the view's current camera state."""
    return camera.CylindricalCamera(dataHandler, viewProxy.CenterOfRotation,
                                    viewProxy.CameraPosition,
                                    viewProxy.CameraViewUp,
                                    phiValues, translationValues)


def create_cube_camera(viewProxy, dataHandler, viewForward, viewUp, positions):
    """CubeCamera factory; *viewProxy* is unused but kept for symmetry."""
    return camera.CubeCamera(dataHandler, viewForward, viewUp, positions)


def create_stereo_cube_camera(viewProxy, dataHandler, viewForward, viewUp,
                              positions, eyeSeparation=6.5):
    """StereoCubeCamera factory; *viewProxy* is unused but kept for symmetry."""
    return camera.StereoCubeCamera(dataHandler, viewForward, viewUp,
                                   positions, eyeSeparation)
| true | true |
f71d643bcaead34b7c38c908b32114c9c2c37e08 | 5,835 | py | Python | shell/bii.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 40 | 2015-04-15T09:40:23.000Z | 2022-02-11T11:07:24.000Z | shell/bii.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 19 | 2015-04-15T18:34:53.000Z | 2018-11-17T00:11:05.000Z | shell/bii.py | bowlofstew/client | 0d5ae42aaf9863e3871828b6df06170aad17c560 | [
"MIT"
] | 22 | 2015-04-15T09:45:46.000Z | 2020-09-29T17:04:19.000Z | import sys
import os
import shlex
import traceback
from biicode.client.command.executor import ToolExecutor
from biicode.client.command.tool_catalog import ToolCatalog
from biicode.common.exception import BiiException
from biicode.client.shell.userio import UserIO
from biicode.common.utils.bii_logging import logger
from biicode.client.command.biicommand import BiiCommand
from biicode.client.dev.cpp.cpptoolchain import CPPToolChain
from biicode.client.shell.biistream import BiiOutputStream
from biicode.common.output_stream import OutputStream, INFO
from biicode.client.setups.setup_commands import SetupCommands
from biicode.client.dev.hardware.raspberry_pi.rpitoolchain import RPiToolChain
from biicode.client.dev.hardware.arduino.arduinotoolchain import ArduinoToolChain
from biicode.client.shell.updates_manager import UpdatesStore, UpdatesManager
from biicode.common.model.server_info import ClientVersion
from biicode.client.exception import ObsoleteClient
from biicode.client.conf import BII_RESTURL
from biicode.client.rest.bii_rest_api_client import BiiRestApiClient
from biicode.client.dev.node.nodetoolchain import NodeToolChain
from biicode.client.workspace.bii_paths import BiiPaths
from biicode.client.workspace.hive_disk_image import HiveDiskImage
from biicode.client.workspace.user_cache import UserCache
class Bii(object):
    '''Entry point class for bii executable'''

    def __init__(self, user_io, current_folder, user_biicode_folder):
        self.user_io = user_io
        self.bii_paths = BiiPaths(current_folder, user_biicode_folder)
        self.user_cache = UserCache(self.bii_paths.user_bii_home)
        catalog = ToolCatalog(BiiCommand,
                              tools=[CPPToolChain,
                                     RPiToolChain,
                                     SetupCommands,
                                     ArduinoToolChain,
                                     NodeToolChain])
        self.executor = ToolExecutor(self, catalog)
        self._biiapi = None  # built lazily by the biiapi property

    @property
    def hive_disk_image(self):
        # Recreated on each access: keeping one instance around makes tests
        # hit a "database locked" operational error.
        return HiveDiskImage(self.bii_paths, self.user_cache, self.user_io.out)

    @property
    def biiapi(self):
        # Lazy construction; imports are local to avoid paying them on startup.
        if self._biiapi is None:
            from biicode.client.api.biiapi_proxy import BiiAPIProxy
            from biicode.client.api.biiapi_auth_manager import BiiApiAuthManager
            auth = BiiApiAuthManager(self._restapi, self.user_io,
                                     self.user_cache.localdb)
            self._biiapi = BiiAPIProxy(self.user_cache.localdb, auth,
                                       self.user_io)
        return self._biiapi

    @property
    def _restapi(self):
        return BiiRestApiClient(BII_RESTURL)

    def execute(self, argv):
        '''Executes user provided command. Eg. bii run:cpp'''
        had_errors = False
        try:
            if isinstance(argv, basestring):  # tests may pass a plain string
                argv = shlex.split(argv)
            # The executor only lets unexpected exceptions escape.
            self.executor.execute(argv)
        except (KeyboardInterrupt, SystemExit) as e:
            logger.debug('Execution terminated: %s', e)
            had_errors = True
        except BiiException as e:
            had_errors = True
            self.user_io.out.error(str(e))
        except Exception as e:
            logger.error(traceback.format_exc())
            had_errors = True
            self.user_io.out.error('Unexpected Exception\n %s' % e)
            self.user_io.out.error('Error executing command.\n'
                                   '\tCheck the documentation in http://docs.biicode.com\n'
                                   '\tor ask in the forum http://forum.biicode.com\n')
        return had_errors
def run_main(args, user_io=None, current_folder=None, user_folder=None, biiapi_client=None):
try:
user_folder = user_folder or os.path.expanduser("~")
biicode_folder = os.path.join(user_folder, '.biicode')
current_folder = current_folder or os.getcwd()
user_io = user_io or create_user_io(biicode_folder)
bii = Bii(user_io, current_folder, biicode_folder)
# Update manager doesn't need proxy nor authentication to call get_server_info
biiapi_client = biiapi_client or bii.biiapi
updates_manager = get_updates_manager(biiapi_client, biicode_folder)
try: # Check for updates
updates_manager.check_for_updates(bii.user_io.out)
except ObsoleteClient as e:
bii.user_io.out.error(e.message)
return int(True)
errors = bii.execute(args)
return int(errors)
except OSError as e:
print str(e)
return 1
def create_user_io(biicode_folder):
    """Creates the bii folder and init user_io with outputstream and logfile"""
    try:
        os.makedirs(biicode_folder)
    except:
        pass  # folder already exists (or is uncreatable); proceed regardless
    log_file = os.path.join(biicode_folder, 'bii.log')
    # Enable ANSI colors only when attached to a real terminal.
    if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
        from colorama import init
        init()
        OutputStream.color = True
    return UserIO(sys.stdin, BiiOutputStream(sys.stdout, log_file, level=INFO))
def get_updates_manager(biiapi, biicode_folder):
    """Build an UpdatesManager backed by a version cache file in *biicode_folder*."""
    store_path = os.path.join(biicode_folder, ".remote_version_info")
    updates_store = UpdatesStore(store_path)
    client = ClientVersion(get_current_client_version())
    return UpdatesManager(updates_store, biiapi, client)
def get_current_client_version():
    """Return the running client's version string (imported lazily)."""
    from biicode.common import __version__
    return __version__
def main(args):
    """CLI entry point: run the command and exit with its status code."""
    sys.exit(run_main(args))


if __name__ == '__main__':
    main(sys.argv[1:])
| 39.693878 | 98 | 0.688775 | import sys
import os
import shlex
import traceback
from biicode.client.command.executor import ToolExecutor
from biicode.client.command.tool_catalog import ToolCatalog
from biicode.common.exception import BiiException
from biicode.client.shell.userio import UserIO
from biicode.common.utils.bii_logging import logger
from biicode.client.command.biicommand import BiiCommand
from biicode.client.dev.cpp.cpptoolchain import CPPToolChain
from biicode.client.shell.biistream import BiiOutputStream
from biicode.common.output_stream import OutputStream, INFO
from biicode.client.setups.setup_commands import SetupCommands
from biicode.client.dev.hardware.raspberry_pi.rpitoolchain import RPiToolChain
from biicode.client.dev.hardware.arduino.arduinotoolchain import ArduinoToolChain
from biicode.client.shell.updates_manager import UpdatesStore, UpdatesManager
from biicode.common.model.server_info import ClientVersion
from biicode.client.exception import ObsoleteClient
from biicode.client.conf import BII_RESTURL
from biicode.client.rest.bii_rest_api_client import BiiRestApiClient
from biicode.client.dev.node.nodetoolchain import NodeToolChain
from biicode.client.workspace.bii_paths import BiiPaths
from biicode.client.workspace.hive_disk_image import HiveDiskImage
from biicode.client.workspace.user_cache import UserCache
class Bii(object):
'''Entry point class for bii executable'''
def __init__(self, user_io, current_folder, user_biicode_folder):
self.user_io = user_io
self.bii_paths = BiiPaths(current_folder, user_biicode_folder)
self.user_cache = UserCache(self.bii_paths.user_bii_home)
toolcatalog = ToolCatalog(BiiCommand, tools=[CPPToolChain,
RPiToolChain,
SetupCommands,
ArduinoToolChain,
NodeToolChain])
self.executor = ToolExecutor(self, toolcatalog)
self._biiapi = None
@property
def hive_disk_image(self):
return HiveDiskImage(self.bii_paths, self.user_cache, self.user_io.out)
@property
def biiapi(self):
if self._biiapi is None:
from biicode.client.api.biiapi_proxy import BiiAPIProxy
from biicode.client.api.biiapi_auth_manager import BiiApiAuthManager
auth_manager = BiiApiAuthManager(self._restapi, self.user_io, self.user_cache.localdb)
self._biiapi = BiiAPIProxy(self.user_cache.localdb, auth_manager, self.user_io)
return self._biiapi
@property
def _restapi(self):
return BiiRestApiClient(BII_RESTURL)
def execute(self, argv):
'''Executes user provided command. Eg. bii run:cpp'''
errors = False
try:
if isinstance(argv, basestring):
argv = shlex.split(argv)
self.executor.execute(argv)
except (KeyboardInterrupt, SystemExit) as e:
logger.debug('Execution terminated: %s', e)
errors = True
except BiiException as e:
errors = True
self.user_io.out.error(str(e))
except Exception as e:
tb = traceback.format_exc()
logger.error(tb)
errors = True
self.user_io.out.error('Unexpected Exception\n %s' % e)
self.user_io.out.error('Error executing command.\n'
'\tCheck the documentation in http://docs.biicode.com\n'
'\tor ask in the forum http://forum.biicode.com\n')
return errors
def run_main(args, user_io=None, current_folder=None, user_folder=None, biiapi_client=None):
try:
user_folder = user_folder or os.path.expanduser("~")
biicode_folder = os.path.join(user_folder, '.biicode')
current_folder = current_folder or os.getcwd()
user_io = user_io or create_user_io(biicode_folder)
bii = Bii(user_io, current_folder, biicode_folder)
biiapi_client = biiapi_client or bii.biiapi
updates_manager = get_updates_manager(biiapi_client, biicode_folder)
try: # Check for updates
updates_manager.check_for_updates(bii.user_io.out)
except ObsoleteClient as e:
bii.user_io.out.error(e.message)
return int(True)
errors = bii.execute(args)
return int(errors)
except OSError as e:
print str(e)
return 1
def create_user_io(biicode_folder):
"""Creates the bii folder and init user_io with outputstream and logfile"""
try:
os.makedirs(biicode_folder)
except:
pass
log_file = os.path.join(biicode_folder, 'bii.log')
if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
from colorama import init
init()
OutputStream.color = True
user_io = UserIO(sys.stdin, BiiOutputStream(sys.stdout, log_file, level=INFO))
return user_io
def get_updates_manager(biiapi, biicode_folder):
file_store = os.path.join(biicode_folder, ".remote_version_info")
updates_store = UpdatesStore(file_store)
current_client = ClientVersion(get_current_client_version())
manager = UpdatesManager(updates_store, biiapi, current_client)
return manager
def get_current_client_version():
from biicode.common import __version__ as current_version
return current_version
def main(args):
error = run_main(args)
sys.exit(error)
if __name__ == '__main__':
main(sys.argv[1:])
| false | true |
f71d6480f5768061a17266b485d534816caa4c41 | 3,535 | py | Python | callbacks.py | WHATDOESTHEFOXSAY2U/Colab_Train | 393269fe0fdf6c175926f485e7dfaf653a3686f3 | [
"Apache-2.0"
] | 8 | 2017-09-17T03:20:20.000Z | 2020-10-16T03:07:39.000Z | callbacks.py | navtosh-das/automatic-essay-grader-master | 30fdf2f9f72fbef51447ecc91070189ccca301b2 | [
"Apache-2.0"
] | 4 | 2020-02-26T00:16:29.000Z | 2022-02-26T05:19:48.000Z | callbacks.py | navtosh-das/automatic-essay-grader-master | 30fdf2f9f72fbef51447ecc91070189ccca301b2 | [
"Apache-2.0"
] | 4 | 2017-08-31T20:05:44.000Z | 2018-10-21T20:09:43.000Z | """
Contains custom callbacks.
"""
from constants import minimum_scores, maximum_scores
import constants
import datetime
import json
from keras.callbacks import Callback, ModelCheckpoint
import numpy as np
import os
from sklearn.metrics import cohen_kappa_score
from util import process_data, create_folder
class QWKScore(Callback):
    """Keras callback reporting quadratic weighted kappa (QWK) per essay set.

    After every epoch it predicts scores for each of the eight essay sets,
    de-normalises predictions back into each set's score range, computes
    Cohen's kappa with quadratic weights per set, and a set-size-weighted
    average over all sets. Results are optionally printed and appended to
    ``scores.txt`` inside ``constants.SAVE_DIR``.
    """

    def __init__(self, essays, save_to_file=True, print_to_screen=True):
        # BUGFIX: the original executed a bare ``super()`` expression, which
        # never invokes the parent initialiser, so Callback.__init__ was
        # silently skipped.
        super().__init__()
        self.essays = essays
        self.save_to_file = save_to_file
        self.print_to_screen = print_to_screen

    def on_epoch_end(self, epoch, logs=None):
        # FIX: replaced the mutable default ``logs={}``; Keras always passes
        # a dict, and ``None`` keeps the signature backward-compatible.
        logs = logs or {}
        # For each essay set calculate the QWK score.
        qwk_scores = []
        number_essays = []
        if self.print_to_screen:
            print("\nQWK Scores")
        for essay_set in range(1, 9):
            essays_in_set = self.essays[self.essays['essay_set'] == essay_set]
            X, y = process_data(essays_in_set)
            y_true = essays_in_set['domain1_score'].values

            normalised_prediction = np.array(self.model.predict(X))
            # De-normalise from [0, 1] back to the set's own score range.
            score_span = maximum_scores[essay_set] - minimum_scores[essay_set]
            y_pred = np.around((normalised_prediction * score_span) + minimum_scores[essay_set])

            qwk_score = cohen_kappa_score(y_true, y_pred, weights='quadratic')
            qwk_scores.append(qwk_score)
            number_essays.append(len(essays_in_set))

            if self.print_to_screen:
                print("Set {}: {:.2f}".format(essay_set, qwk_score), end=' ')

        qwk_scores = np.array(qwk_scores)
        number_essays = np.array(number_essays)
        # Average the per-set kappas weighted by set size.
        weighted_qwk_score = np.sum(qwk_scores * number_essays) / np.sum(number_essays)

        if self.print_to_screen:
            print('\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score))

        if self.save_to_file:
            summary = "Epoch " + str(epoch + 1)
            log_values = "\n"
            for key, value in logs.items():
                log_values += "{}: {:.4f}  ".format(key, value)
            individual_qwk_scores = "\n"
            for essay_set in range(8):
                individual_qwk_scores += "Set {}: {:.2f}  ".format(essay_set + 1, qwk_scores[essay_set])
            summary = summary + log_values + individual_qwk_scores
            summary += '\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score)
            summary += '\n\n'
            with open(os.path.join(constants.SAVE_DIR, "scores.txt"), "a") as f:
                f.write(summary)
class SaveModel(ModelCheckpoint):
    """
    Wrapper of Model Checkpoint class.

    Saves checkpoints into a timestamped subfolder of *directory* (also
    published as ``constants.SAVE_DIR``) and dumps the model architecture
    as pretty-printed JSON when training starts.
    """

    def __init__(self, directory, filename, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False, mode='auto',
                 period=1):
        # Make a folder named after the current time so runs never collide.
        now = datetime.datetime.now()
        current_time = "{}_{}_{}_{}_{}_{}".format(now.day, now.month, now.year,
                                                  now.hour, now.minute, now.second)
        constants.SAVE_DIR = os.path.join(directory, current_time)
        create_folder(constants.SAVE_DIR)
        # BUGFIX: ``verbose`` was accepted but never forwarded, so the
        # caller's verbosity setting was silently ignored.
        ModelCheckpoint.__init__(self, os.path.join(constants.SAVE_DIR, filename),
                                 monitor=monitor, verbose=verbose,
                                 save_best_only=save_best_only,
                                 save_weights_only=save_weights_only,
                                 mode=mode, period=period)

    def on_train_begin(self, logs=None):
        # Save the model architecture as human-readable JSON.
        parsed = json.loads(self.model.to_json())
        with open(os.path.join(constants.SAVE_DIR, 'model.txt'), 'w') as file:
            file.write(json.dumps(parsed, indent=4))
| 40.632184 | 193 | 0.648656 |
from constants import minimum_scores, maximum_scores
import constants
import datetime
import json
from keras.callbacks import Callback, ModelCheckpoint
import numpy as np
import os
from sklearn.metrics import cohen_kappa_score
from util import process_data, create_folder
class QWKScore(Callback):
    """Keras callback reporting quadratic weighted kappa (QWK) per essay set
    after every epoch; optionally prints and appends results to scores.txt."""

    def __init__(self, essays, save_to_file=True, print_to_screen=True):
        # BUGFIX: a bare ``super()`` expression never calls __init__; the
        # parent Callback was left uninitialised.
        super().__init__()
        self.essays = essays
        self.save_to_file = save_to_file
        self.print_to_screen = print_to_screen

    def on_epoch_end(self, epoch, logs=None):
        # FIX: mutable default ``logs={}`` replaced with None sentinel.
        logs = logs or {}
        qwk_scores = []
        number_essays = []
        if self.print_to_screen:
            print("\nQWK Scores")
        for essay_set in range(1, 9):
            essays_in_set = self.essays[self.essays['essay_set'] == essay_set]
            X, y = process_data(essays_in_set)
            y_true = essays_in_set['domain1_score'].values
            normalised_prediction = np.array(self.model.predict(X))
            # De-normalise from [0, 1] back to the set's own score range.
            score_span = maximum_scores[essay_set] - minimum_scores[essay_set]
            y_pred = np.around((normalised_prediction * score_span) + minimum_scores[essay_set])
            qwk_score = cohen_kappa_score(y_true, y_pred, weights='quadratic')
            qwk_scores.append(qwk_score)
            number_essays.append(len(essays_in_set))
            if self.print_to_screen:
                print("Set {}: {:.2f}".format(essay_set, qwk_score), end=' ')
        qwk_scores = np.array(qwk_scores)
        number_essays = np.array(number_essays)
        weighted_qwk_score = np.sum(qwk_scores * number_essays) / np.sum(number_essays)
        if self.print_to_screen:
            print('\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score))
        if self.save_to_file:
            summary = "Epoch " + str(epoch + 1)
            log_values = "\n"
            for key, value in logs.items():
                log_values += "{}: {:.4f}  ".format(key, value)
            individual_qwk_scores = "\n"
            for essay_set in range(8):
                individual_qwk_scores += "Set {}: {:.2f}  ".format(essay_set + 1, qwk_scores[essay_set])
            summary = summary + log_values + individual_qwk_scores
            summary += '\nWeighted QWK score: {:.2f}'.format(weighted_qwk_score)
            summary += '\n\n'
            with open(os.path.join(constants.SAVE_DIR, "scores.txt"), "a") as f:
                f.write(summary)


class SaveModel(ModelCheckpoint):
    """ModelCheckpoint wrapper saving into a timestamped folder and dumping
    the model architecture JSON at the start of training."""

    def __init__(self, directory, filename, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False, mode='auto',
                 period=1):
        now = datetime.datetime.now()
        current_time = "{}_{}_{}_{}_{}_{}".format(now.day, now.month, now.year,
                                                  now.hour, now.minute, now.second)
        constants.SAVE_DIR = os.path.join(directory, current_time)
        create_folder(constants.SAVE_DIR)
        # BUGFIX: forward ``verbose``; it was previously dropped.
        ModelCheckpoint.__init__(self, os.path.join(constants.SAVE_DIR, filename),
                                 monitor=monitor, verbose=verbose,
                                 save_best_only=save_best_only,
                                 save_weights_only=save_weights_only,
                                 mode=mode, period=period)

    def on_train_begin(self, logs=None):
        parsed = json.loads(self.model.to_json())
        with open(os.path.join(constants.SAVE_DIR, 'model.txt'), 'w') as file:
            file.write(json.dumps(parsed, indent=4))
| true | true |
f71d64ae1faeafbcfe3e3258855aea0147730782 | 87 | py | Python | fandogh_cli/__init__.py | behroozmirzaie7/fandogh-cli | e23d5c761a85b539b1c5f80bd9c6fd7bd2e5f9f0 | [
"MIT"
] | 131 | 2018-05-14T21:00:40.000Z | 2022-03-29T10:00:54.000Z | fandogh_cli/__init__.py | behroozmirzaie7/fandogh-cli | e23d5c761a85b539b1c5f80bd9c6fd7bd2e5f9f0 | [
"MIT"
] | 130 | 2018-05-14T19:43:18.000Z | 2021-08-28T08:52:04.000Z | fandogh_cli/__init__.py | behroozmirzaie7/fandogh-cli | e23d5c761a85b539b1c5f80bd9c6fd7bd2e5f9f0 | [
"MIT"
] | 37 | 2018-05-15T05:59:56.000Z | 2022-03-08T05:26:54.000Z | VERSION = "1.21.3"
NAME = "fandogh_cli"
if __name__ == "__main__":
print(VERSION)
| 14.5 | 26 | 0.643678 | VERSION = "1.21.3"
NAME = "fandogh_cli"
if __name__ == "__main__":
print(VERSION)
| true | true |
f71d650791f7f46ff04b43e0175f98b5c704dac5 | 7,390 | py | Python | main.py | hujunxianligong/Graph-U-Nets | d1a483400131fbe75a55cff27439585c62c4a575 | [
"MIT"
] | 1 | 2021-01-03T09:23:41.000Z | 2021-01-03T09:23:41.000Z | main.py | hujunxianligong/Graph-U-Nets | d1a483400131fbe75a55cff27439585c62c4a575 | [
"MIT"
] | null | null | null | main.py | hujunxianligong/Graph-U-Nets | d1a483400131fbe75a55cff27439585c62c4a575 | [
"MIT"
] | null | null | null | import sys
import os
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import math
from network import GUNet
from mlp_dropout import MLPClassifier
from sklearn import metrics
from util import cmd_args, load_data
# Make the bundled pytorch_structure2vec s2v_lib importable from this script's
# own directory, regardless of the current working directory.
sys.path.append(
'%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(
os.path.realpath(__file__)))
class Classifier(nn.Module):
    """End-to-end graph classifier: a GUNet embedding network followed by an
    MLP classification head, both configured from the global ``cmd_args``."""

    def __init__(self):
        super(Classifier, self).__init__()
        self.s2v = GUNet(
            latent_dim=cmd_args.latent_dim,
            output_dim=cmd_args.out_dim,
            num_node_feats=cmd_args.feat_dim + cmd_args.attr_dim,
            num_edge_feats=0,
            k=cmd_args.sortpooling_k)
        # out_dim == 0 means "use the embedder's dense output size".
        mlp_input = cmd_args.out_dim if cmd_args.out_dim != 0 else self.s2v.dense_dim
        self.mlp = MLPClassifier(
            input_size=mlp_input, hidden_size=cmd_args.hidden,
            num_class=cmd_args.num_class, with_dropout=cmd_args.dropout)

    def PrepareFeatureLabel(self, batch_graph):
        """Stack node features and labels for a batch of graphs.

        Node tags become one-hot rows; continuous node features (when
        present) are concatenated alongside. Tensors are moved to the GPU
        when running in 'gpu' mode. Returns ``(node_feat, labels)``.
        """
        labels = torch.LongTensor(len(batch_graph))
        n_nodes = 0

        # Presence of tags/features is decided by the first graph; the batch
        # is assumed homogeneous in that respect.
        has_tags = batch_graph[0].node_tags is not None
        has_feats = batch_graph[0].node_features is not None
        tag_list = [] if has_tags else None
        feat_list = [] if has_feats else None

        for i, graph in enumerate(batch_graph):
            labels[i] = graph.label
            n_nodes += graph.num_nodes
            if has_tags:
                tag_list += graph.node_tags
            if has_feats:
                feat_list.append(torch.from_numpy(
                    graph.node_features).type('torch.FloatTensor'))

        if has_tags:
            # One-hot encode the integer tags into an (n_nodes, feat_dim) matrix.
            tag_idx = torch.LongTensor(tag_list).view(-1, 1)
            node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)
            node_tag.scatter_(1, tag_idx, 1)
        if has_feats:
            node_feat = torch.cat(feat_list, 0)

        if has_feats and has_tags:
            # Concatenate one-hot tag encoding with continuous features.
            node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)
        elif has_tags:
            node_feat = node_tag
        elif not has_feats:
            # Neither tags nor features: fall back to a constant feature.
            node_feat = torch.ones(n_nodes, 1)

        if cmd_args.mode == 'gpu':
            node_feat = node_feat.cuda()
            labels = labels.cuda()
        return node_feat, labels

    def forward(self, batch_graph):
        node_feat, labels = self.PrepareFeatureLabel(batch_graph)
        embed = self.s2v(batch_graph, node_feat, None)
        return self.mlp(embed, labels)

    def output_features(self, batch_graph):
        """Return (embedding, labels) without running the MLP head."""
        node_feat, labels = self.PrepareFeatureLabel(batch_graph)
        return self.s2v(batch_graph, node_feat, None), labels
def loop_dataset(g_list, classifier, sample_idxes, optimizer=None,
                 bsize=cmd_args.batch_size):
    """Run one epoch over the graphs selected by *sample_idxes*.

    Trains when *optimizer* is supplied, otherwise evaluates only.
    Returns a numpy array ``[mean_loss, mean_acc, auc]``.
    """
    total_loss = []
    all_targets = []
    all_scores = []
    n_samples = 0
    # In eval mode round the iteration count up so a trailing partial batch
    # is still consumed.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize  # noqa
    pbar = tqdm(range(total_iters), unit='batch')

    for pos in pbar:
        chosen = sample_idxes[pos * bsize: (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in chosen]
        all_targets += [g_list[idx].label for idx in chosen]

        logits, loss, acc = classifier(batch_graph)
        # Positive-class score for binary classification AUC.
        all_scores.append(logits[:, 1].detach())

        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        loss = loss.data.cpu().numpy()
        pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc))
        total_loss.append(np.array([loss, acc]) * len(chosen))
        n_samples += len(chosen)

    if optimizer is None:
        assert n_samples == len(sample_idxes)
    avg_loss = np.sum(np.array(total_loss), 0) / n_samples

    all_scores = torch.cat(all_scores).cpu().numpy()
    fpr, tpr, _ = metrics.roc_curve(np.array(all_targets), all_scores, pos_label=1)
    return np.concatenate((avg_loss, [metrics.auc(fpr, tpr)]))
if __name__ == '__main__':
print(cmd_args)
# Seed every RNG in play for reproducible runs.
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)
train_graphs, test_graphs = load_data()
print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
# A sortpooling_k <= 1 is treated as a percentile of the graph-size
# distribution; translate it into an absolute node count, clamped to >= 10.
if cmd_args.sortpooling_k <= 1:
num_nodes_list = sorted([
g.num_nodes for g in train_graphs + test_graphs])
cmd_args.sortpooling_k = num_nodes_list[
int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]
cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)
print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))
classifier = Classifier()
if cmd_args.mode == 'gpu':
classifier = classifier.cuda()
optimizer = optim.Adam(
classifier.parameters(), lr=cmd_args.learning_rate, amsgrad=True,
weight_decay=0.0008)
train_idxes = list(range(len(train_graphs)))
best_loss = None
max_acc = 0.0
# Train/evaluate for the requested number of epochs, tracking best accuracy.
for epoch in range(cmd_args.num_epochs):
random.shuffle(train_idxes)
classifier.train()
avg_loss = loop_dataset(
train_graphs, classifier, train_idxes, optimizer=optimizer)
if not cmd_args.printAUC:
avg_loss[2] = 0.0
print('\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2]))  # noqa
classifier.eval()
test_loss = loop_dataset(
test_graphs, classifier, list(range(len(test_graphs))))
if not cmd_args.printAUC:
test_loss[2] = 0.0
print('\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2]))  # noqa
max_acc = max(max_acc, test_loss[1])
# Append the best test accuracy (and optionally AUC) to result files.
with open('acc_result_%s.txt' % cmd_args.data, 'a+') as f:
# f.write(str(test_loss[1]) + '\n')
f.write(str(max_acc) + '\n')
if cmd_args.printAUC:
with open('auc_results.txt', 'a+') as f:
f.write(str(test_loss[2]) + '\n')
# Optionally dump the learned graph embeddings, label-first, for reuse.
if cmd_args.extract_features:
features, labels = classifier.output_features(train_graphs)
labels = labels.type('torch.FloatTensor')
np.savetxt('extracted_features_train.txt', torch.cat(
[labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),
'%.4f')
features, labels = classifier.output_features(test_graphs)
labels = labels.type('torch.FloatTensor')
np.savetxt('extracted_features_test.txt', torch.cat(
[labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),
'%.4f')
import os
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import math
from network import GUNet
from mlp_dropout import MLPClassifier
from sklearn import metrics
from util import cmd_args, load_data
sys.path.append(
'%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(
os.path.realpath(__file__)))
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
model = GUNet
self.s2v = model(
latent_dim=cmd_args.latent_dim,
output_dim=cmd_args.out_dim,
num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim,
num_edge_feats=0,
k=cmd_args.sortpooling_k)
out_dim = cmd_args.out_dim
if out_dim == 0:
out_dim = self.s2v.dense_dim
self.mlp = MLPClassifier(
input_size=out_dim, hidden_size=cmd_args.hidden,
num_class=cmd_args.num_class, with_dropout=cmd_args.dropout)
def PrepareFeatureLabel(self, batch_graph):
labels = torch.LongTensor(len(batch_graph))
n_nodes = 0
if batch_graph[0].node_tags is not None:
node_tag_flag = True
concat_tag = []
else:
node_tag_flag = False
if batch_graph[0].node_features is not None:
node_feat_flag = True
concat_feat = []
else:
node_feat_flag = False
for i in range(len(batch_graph)):
labels[i] = batch_graph[i].label
n_nodes += batch_graph[i].num_nodes
if node_tag_flag:
concat_tag += batch_graph[i].node_tags
if node_feat_flag:
tmp = torch.from_numpy(
batch_graph[i].node_features).type('torch.FloatTensor')
concat_feat.append(tmp)
if node_tag_flag:
concat_tag = torch.LongTensor(concat_tag).view(-1, 1)
node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)
node_tag.scatter_(1, concat_tag, 1)
if node_feat_flag:
node_feat = torch.cat(concat_feat, 0)
if node_feat_flag and node_tag_flag:
node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)
elif node_feat_flag is False and node_tag_flag:
node_feat = node_tag
elif node_feat_flag and node_tag_flag is False:
pass
else:
node_feat = torch.ones(n_nodes, 1)
if cmd_args.mode == 'gpu':
node_feat = node_feat.cuda()
labels = labels.cuda()
return node_feat, labels
def forward(self, batch_graph):
node_feat, labels = self.PrepareFeatureLabel(batch_graph)
embed = self.s2v(batch_graph, node_feat, None)
return self.mlp(embed, labels)
def output_features(self, batch_graph):
node_feat, labels = self.PrepareFeatureLabel(batch_graph)
embed = self.s2v(batch_graph, node_feat, None)
return embed, labels
def loop_dataset(g_list, classifier, sample_idxes, optimizer=None,
                 bsize=cmd_args.batch_size):
    """Run one pass over ``g_list`` in minibatches of ``bsize`` graphs.

    Trains when ``optimizer`` is given (backward + step per batch),
    otherwise only evaluates.

    Args:
        g_list: List of graph objects.
        classifier: Model returning (logits, loss, accuracy) per batch.
        sample_idxes: Indices into ``g_list`` to visit (pre-shuffled by caller).
        optimizer: Optional optimizer; its presence switches train/eval mode.
        bsize: Minibatch size (default taken from global ``cmd_args``).

    Returns:
        numpy array ``[avg_loss, avg_accuracy, auc]`` weighted by batch size.
    """
    total_loss = []
    # In eval mode, round up so a trailing partial batch is still visited.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    pbar = tqdm(range(total_iters), unit='batch')
    all_targets = []
    all_scores = []
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in selected_idx]
        targets = [g_list[idx].label for idx in selected_idx]
        all_targets += targets
        logits, loss, acc = classifier(batch_graph)
        # Score of the positive class, kept for the AUC computation below.
        all_scores.append(logits[:, 1].detach())
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        loss = loss.data.cpu().numpy()
        pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc))
        # Weight each batch's metrics by its size for a correct average.
        total_loss.append(np.array([loss, acc]) * len(selected_idx))
        n_samples += len(selected_idx)
    if optimizer is None:
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    all_scores = torch.cat(all_scores).cpu().numpy()
    # FIX: this line was garbled as `ray(all_targets)`, which raised a
    # NameError; it must convert the accumulated labels to a numpy array.
    all_targets = np.array(all_targets)
    fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    avg_loss = np.concatenate((avg_loss, [auc]))
    return avg_loss
if __name__ == '__main__':
    print(cmd_args)
    # Seed every RNG in play so runs are reproducible.
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)
    train_graphs, test_graphs = load_data()
    print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
    if cmd_args.sortpooling_k <= 1:
        # A value in (0, 1] is treated as a quantile of the graph-size
        # distribution: pick the node count at that quantile (minimum 10).
        num_nodes_list = sorted([
            g.num_nodes for g in train_graphs + test_graphs])
        cmd_args.sortpooling_k = num_nodes_list[
            int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]
        cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)
        print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))
    classifier = Classifier()
    if cmd_args.mode == 'gpu':
        classifier = classifier.cuda()
    optimizer = optim.Adam(
        classifier.parameters(), lr=cmd_args.learning_rate, amsgrad=True,
        weight_decay=0.0008)
    train_idxes = list(range(len(train_graphs)))
    # NOTE(review): best_loss is assigned but never used below.
    best_loss = None
    max_acc = 0.0
    for epoch in range(cmd_args.num_epochs):
        # --- training pass ---
        random.shuffle(train_idxes)
        classifier.train()
        avg_loss = loop_dataset(
            train_graphs, classifier, train_idxes, optimizer=optimizer)
        if not cmd_args.printAUC:
            avg_loss[2] = 0.0
        print('\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2]))
        # --- evaluation pass ---
        classifier.eval()
        test_loss = loop_dataset(
            test_graphs, classifier, list(range(len(test_graphs))))
        if not cmd_args.printAUC:
            test_loss[2] = 0.0
        print('\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2]))
        max_acc = max(max_acc, test_loss[1])
    # Append the best observed test accuracy (and, optionally, the final AUC).
    with open('acc_result_%s.txt' % cmd_args.data, 'a+') as f:
        f.write(str(max_acc) + '\n')
    if cmd_args.printAUC:
        with open('auc_results.txt', 'a+') as f:
            f.write(str(test_loss[2]) + '\n')
    if cmd_args.extract_features:
        # Dump "<label> <embedding...>" rows for the train and test splits.
        features, labels = classifier.output_features(train_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_train.txt', torch.cat(
            [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),
            '%.4f')
        features, labels = classifier.output_features(test_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_test.txt', torch.cat(
            [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(),
            '%.4f')
| true | true |
f71d65ca0d14c5df4be828df9a38fc4cf62124f4 | 1,636 | py | Python | python_developer_tools/cv/classes/ResNeXt.py | carlsummer/python_developer_tools | a8c4365b7cc601cda55648cdfd8c0cb1faae132f | [
"Apache-2.0"
] | 32 | 2021-06-21T04:49:48.000Z | 2022-03-29T05:46:59.000Z | python_developer_tools/cv/classes/ResNeXt.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:45:55.000Z | 2021-11-12T03:45:55.000Z | python_developer_tools/cv/classes/ResNeXt.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 10 | 2021-06-03T08:05:05.000Z | 2021-12-13T03:10:42.000Z | import torch
import torch.nn as nn
import torchvision
class ResNeXtBlock(nn.Module):
    """ResNeXt bottleneck block.

    1x1 reduce -> 3x3 grouped convolution (``cardinality`` groups) ->
    1x1 expand, each followed by batch norm, with a residual connection.
    When ``downsampling`` is set, the shortcut is projected by a strided
    1x1 convolution so its shape matches the main branch.
    """
    def __init__(self, in_places, places, stride=1, downsampling=False,
                 expansion=2, cardinality=32):
        super(ResNeXtBlock, self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling
        expanded = places * expansion
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_places, places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(places, places, kernel_size=3, stride=stride,
                      padding=1, bias=False, groups=cardinality),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(places, expanded, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(expanded),
        )
        if self.downsampling:
            # Projection shortcut so channel count / resolution line up.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_places, expanded, kernel_size=1, stride=stride,
                          bias=False),
                nn.BatchNorm2d(expanded),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsampling else x
        out = self.bottleneck(x)
        out = out + shortcut
        return self.relu(out)
if __name__ == '__main__':
    # Quick smoke test: build one block and push a random tensor through it.
    block = ResNeXtBlock(in_places=256, places=128)
    print(block)
    sample = torch.randn(1, 256, 64, 64)
    out = block(sample)
print(out.shape) | 34.083333 | 136 | 0.635697 | import torch
import torch.nn as nn
import torchvision
class ResNeXtBlock(nn.Module):
    """ResNeXt bottleneck block: 1x1 reduce, 3x3 grouped conv, 1x1 expand,
    plus a residual connection (projected when ``downsampling`` is True).
    """
    def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 2, cardinality=32):
        super(ResNeXtBlock,self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling
        # Main branch: 1x1 -> grouped 3x3 (cardinality groups) -> 1x1 expand.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False, groups=cardinality),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places * self.expansion),
        )
        if self.downsampling:
            # Projection shortcut to match the main branch's shape.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride,bias=False),
                nn.BatchNorm2d(places * self.expansion)
            )
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        """Apply the bottleneck, add the (possibly projected) residual, ReLU."""
        residual = x
        out = self.bottleneck(x)
        if self.downsampling:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
if __name__ =='__main__':
    # Smoke test: build one block and run a random input through it.
    model = ResNeXtBlock(in_places=256, places=128)
    print(model)
    input = torch.randn(1,256,64,64)
    out = model(input)
print(out.shape) | true | true |
f71d65e945a2a090fa91fb86ee8b0f75888af76e | 20,918 | py | Python | kedro/io/core.py | jonasrk/kedro | 9fd2abd112c0118b7a001aba7f11fa287d10db18 | [
"Apache-2.0"
] | 1 | 2020-12-30T01:44:36.000Z | 2020-12-30T01:44:36.000Z | kedro/io/core.py | jonasrk/kedro | 9fd2abd112c0118b7a001aba7f11fa287d10db18 | [
"Apache-2.0"
] | null | null | null | kedro/io/core.py | jonasrk/kedro | 9fd2abd112c0118b7a001aba7f11fa287d10db18 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides a set of classes which underpin the data loading and
saving functionality provided by ``kedro.io``.
"""
import abc
import copy
import logging
import os
from collections import namedtuple
from datetime import datetime, timezone
from glob import iglob
from pathlib import Path, PurePath
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
from urllib.parse import urlparse
from warnings import warn
from kedro.utils import load_obj
VERSIONED_FLAG_KEY = "versioned"
VERSION_KEY = "version"
class DataSetError(Exception):
    """``DataSetError`` raised by ``AbstractDataSet`` implementations
    in case of failure of input/output methods.

    ``AbstractDataSet`` implementations should provide instructive
    information in case of failure.
    """
    # Idiom fix: the docstring is a sufficient class body, so the
    # redundant trailing `pass` statement has been removed.
class DataSetNotFoundError(DataSetError):
    """``DataSetNotFoundError`` raised by ``DataCatalog`` class in case of
    trying to use a non-existing data set.
    """
    # Idiom fix: redundant `pass` after the docstring removed.
class DataSetAlreadyExistsError(DataSetError):
    """``DataSetAlreadyExistsError`` raised by ``DataCatalog`` class in case
    of trying to add a data set which already exists in the ``DataCatalog``.
    """
    # Idiom fix: redundant `pass` after the docstring removed.
class VersionNotFoundError(DataSetError):
    """``VersionNotFoundError`` raised by ``AbstractVersionedDataSet`` implementations
    in case of no load versions available for the data set.
    """
    # Idiom fix: redundant `pass` after the docstring removed.
class AbstractDataSet(abc.ABC):
    """``AbstractDataSet`` is the base class for all data set implementations.
    All data set implementations should extend this abstract class
    and implement the methods marked as abstract.

    Example:
    ::

        >>> from kedro.io import AbstractDataSet
        >>> import pandas as pd
        >>>
        >>> class MyOwnDataSet(AbstractDataSet):
        >>>     def __init__(self, param1, param2):
        >>>         self._param1 = param1
        >>>         self._param2 = param2
        >>>
        >>>     def _load(self) -> pd.DataFrame:
        >>>         print("Dummy load: {}".format(self._param1))
        >>>         return pd.DataFrame()
        >>>
        >>>     def _save(self, df: pd.DataFrame) -> None:
        >>>         print("Dummy save: {}".format(self._param2))
        >>>
        >>>     def _describe(self):
        >>>         return dict(param1=self._param1, param2=self._param2)
    """

    @classmethod
    def from_config(
        cls: Type,
        name: str,
        config: Dict[str, Any],
        load_version: str = None,
        save_version: str = None,
    ) -> "AbstractDataSet":
        """Create a data set instance using the configuration provided.

        Args:
            name: Data set name.
            config: Data set config dictionary.
            load_version: Version string to be used for ``load`` operation if
                the data set is versioned. Has no effect on the data set
                if versioning was not enabled.
            save_version: Version string to be used for ``save`` operation if
                the data set is versioned. Has no effect on the data set
                if versioning was not enabled.

        Returns:
            An instance of an ``AbstractDataSet`` subclass.

        Raises:
            DataSetError: When the function fails to create the data set
                from its config.

        """
        # First resolve the concrete dataset class and its constructor kwargs.
        try:
            class_obj, config = parse_dataset_definition(
                config, load_version, save_version
            )
        except Exception as ex:
            raise DataSetError(
                "An exception occurred when parsing config "
                "for DataSet `{}`:\n{}".format(name, str(ex))
            )

        # Then instantiate it, translating constructor failures into
        # instructive DataSetError messages.
        try:
            data_set = class_obj(**config)  # type: ignore
        except TypeError as err:
            raise DataSetError(
                "\n{}.\nDataSet '{}' must only contain "
                "arguments valid for the constructor "
                "of `{}.{}`.".format(
                    str(err), name, class_obj.__module__, class_obj.__qualname__
                )
            )
        except Exception as err:
            raise DataSetError(
                "\n{}.\nFailed to instantiate DataSet "
                "'{}' of type `{}.{}`.".format(
                    str(err), name, class_obj.__module__, class_obj.__qualname__
                )
            )
        return data_set

    @property
    def _logger(self) -> logging.Logger:
        # Logger named after this module, shared by all implementations.
        return logging.getLogger(__name__)

    def get_last_load_version(self) -> Optional[str]:
        """Versioned datasets should override this property to return last loaded
        version"""
        # pylint: disable=no-self-use
        return None  # pragma: no cover

    def load(self) -> Any:
        """Loads data by delegation to the provided load method.

        Returns:
            Data returned by the provided load method.

        Raises:
            DataSetError: When underlying load method raises error.

        """
        self._logger.debug("Loading %s", str(self))
        try:
            return self._load()
        except DataSetError:
            raise
        except Exception as exc:
            # This exception handling is by design as the composed data sets
            # can throw any type of exception.
            message = "Failed while loading data from data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc

    def get_last_save_version(self) -> Optional[str]:
        """Versioned datasets should override this property to return last saved
        version."""
        # pylint: disable=no-self-use
        return None  # pragma: no cover

    def save(self, data: Any) -> None:
        """Saves data by delegation to the provided save method.

        Args:
            data: the value to be saved by provided save method.

        Raises:
            DataSetError: when underlying save method raises error.

        """
        # Explicitly reject None so a silent "empty save" cannot happen.
        if data is None:
            raise DataSetError("Saving `None` to a `DataSet` is not allowed")

        try:
            self._logger.debug("Saving %s", str(self))
            self._save(data)
        except DataSetError:
            raise
        except Exception as exc:
            message = "Failed while saving data to data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc

    def __str__(self):
        # Render as "ClassName(key=value, ...)" based on _describe().
        def _to_str(obj, is_root=False):
            """Returns a string representation where
            1. The root level (i.e. the DataSet.__init__ arguments) are
            formatted like DataSet(key=value).
            2. Dictionaries have the keys alphabetically sorted recursively.
            3. Empty dictionaries and None values are not shown.
            """

            fmt = "{}={}" if is_root else "'{}': {}"  # 1

            if isinstance(obj, dict):
                sorted_dict = sorted(obj.items(), key=lambda pair: str(pair[0]))  # 2

                text = ", ".join(
                    fmt.format(key, _to_str(value))  # 2
                    for key, value in sorted_dict
                    if value or isinstance(value, bool)
                )  # 3

                return text if is_root else "{" + text + "}"  # 1

            # not a dictionary
            return str(obj)

        return "{}({})".format(type(self).__name__, _to_str(self._describe(), True))

    @abc.abstractmethod
    def _load(self) -> Any:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_load` method".format(self.__class__.__name__)
        )

    @abc.abstractmethod
    def _save(self, data: Any) -> None:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_save` method".format(self.__class__.__name__)
        )

    @abc.abstractmethod
    def _describe(self) -> Dict[str, Any]:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_describe` method".format(self.__class__.__name__)
        )

    def exists(self) -> bool:
        """Checks whether a data set's output already exists by calling
        the provided _exists() method.

        Returns:
            Flag indicating whether the output already exists.

        Raises:
            DataSetError: when underlying exists method raises error.

        """
        try:
            self._logger.debug("Checking whether target of %s exists", str(self))
            return self._exists()
        except Exception as exc:
            message = "Failed during exists check for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc

    def _exists(self) -> bool:
        # Default: warn and report "does not exist" for implementations that
        # do not override the existence check.
        self._logger.warning(
            "`exists()` not implemented for `%s`. Assuming output does not exist.",
            self.__class__.__name__,
        )
        return False

    def release(self) -> None:
        """Release any cached data.

        Raises:
            DataSetError: when underlying exists method raises error.

        """
        try:
            self._logger.debug("Releasing %s", str(self))
            self._release()
        except Exception as exc:
            message = "Failed during release for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc

    def _release(self) -> None:
        # Default: nothing to release.
        pass
def generate_timestamp() -> str:
    """Generate the timestamp to be used by versioning.

    Returns:
        String representation of the current UTC timestamp in the
        ``YYYY-MM-DDThh.mm.ss.sssZ`` format used for version directories.

    """
    stamp = datetime.now(tz=timezone.utc)
    millis = stamp.microsecond // 1000
    return "{}.{:03d}Z".format(stamp.strftime("%Y-%m-%dT%H.%M.%S"), millis)
class Version(namedtuple("Version", ["load", "save"])):
    """This namedtuple is used to provide load and save versions for versioned
    data sets. If ``Version.load`` is None, then the latest available version
    is loaded. If ``Version.save`` is None, then save version is formatted as
    YYYY-MM-DDThh.mm.ss.sssZ of the current timestamp.
    """

    # No per-instance __dict__: the namedtuple fields are the whole state.
    __slots__ = ()
# Warning template emitted when a data set was saved under a version that
# differs from the version it will subsequently be loaded with.
CONSISTENCY_WARNING = (
    "Save version `{}` did not match load version `{}` for {}. This is strongly "
    "discouraged due to inconsistencies it may cause between `save` and "
    "`load` operations. Please refrain from setting exact load version for "
    "intermediate data sets where possible to avoid this warning."
)
def parse_dataset_definition(
    config: Dict[str, Any], load_version: str = None, save_version: str = None
) -> Tuple[Type[AbstractDataSet], Dict]:
    """Resolve a catalog entry into a data set class plus constructor kwargs.

    Args:
        config: Data set config dictionary. It *must* contain the `type` key
            with fully qualified class name.
        load_version: Version string used for the ``load`` operation if the
            data set is versioned; ignored otherwise.
        save_version: Version string used for the ``save`` operation if the
            data set is versioned; ignored otherwise.

    Raises:
        DataSetError: If the function fails to parse the configuration provided.

    Returns:
        2-tuple: (Dataset class object, configuration dictionary)
    """
    save_version = save_version or generate_timestamp()
    config = copy.deepcopy(config)

    if "type" not in config:
        raise DataSetError("`type` is missing from DataSet catalog configuration")

    dataset_class = config.pop("type")
    if isinstance(dataset_class, str):
        # A string type is resolved to a class, defaulting to kedro.io.
        try:
            dataset_class = load_obj(dataset_class, "kedro.io")
        except ImportError:
            raise DataSetError(
                "Cannot import module when trying to load type `{}`.".format(
                    dataset_class
                )
            )
        except AttributeError:
            raise DataSetError("Class `{}` not found.".format(dataset_class))

    if not issubclass(dataset_class, AbstractDataSet):
        raise DataSetError(
            "DataSet type `{}.{}` is invalid: all data set types must extend "
            "`AbstractDataSet`.".format(
                dataset_class.__module__, dataset_class.__qualname__
            )
        )

    if VERSION_KEY in config:
        # "version" is reserved: strip it so it is never forwarded to the
        # constructor of an unversioned data set.
        logging.getLogger(__name__).warning(
            "`%s` attribute removed from data set configuration since it is a "
            "reserved word and cannot be directly specified",
            VERSION_KEY,
        )
        del config[VERSION_KEY]

    if config.pop(VERSIONED_FLAG_KEY, False):  # data set is versioned
        config[VERSION_KEY] = Version(load_version, save_version)

    return dataset_class, config
def _local_exists(filepath: str) -> bool:
filepath = Path(filepath)
return filepath.exists() or any(par.is_file() for par in filepath.parents)
def is_remote_path(filepath: str) -> bool:
    """Check if the given path looks like a remote URL (has scheme)."""
    # A Windows drive prefix ("C:\...") would parse as a URL scheme, so
    # drop the drive component before inspecting the path.
    drive_free = os.path.splitdrive(filepath)[1]
    return bool(urlparse(drive_free).scheme)
class AbstractVersionedDataSet(AbstractDataSet, abc.ABC):
    """
    ``AbstractVersionedDataSet`` is the base class for all versioned data set
    implementations. All data sets that implement versioning should extend this
    abstract class and implement the methods marked as abstract.

    Example:
    ::

        >>> from kedro.io import AbstractVersionedDataSet
        >>> import pandas as pd
        >>>
        >>>
        >>> class MyOwnDataSet(AbstractVersionedDataSet):
        >>>     def __init__(self, param1, param2, filepath, version):
        >>>         super().__init__(filepath, version)
        >>>         self._param1 = param1
        >>>         self._param2 = param2
        >>>
        >>>     def _load(self) -> pd.DataFrame:
        >>>         load_path = self._get_load_path()
        >>>         return pd.read_csv(load_path)
        >>>
        >>>     def _save(self, df: pd.DataFrame) -> None:
        >>>         save_path = self._get_save_path()
        >>>         df.to_csv(str(save_path))
        >>>
        >>>     def _exists(self) -> bool:
        >>>         path = self._get_load_path()
        >>>         return path.is_file()
        >>>
        >>>     def _describe(self):
        >>>         return dict(version=self._version, param1=self._param1, param2=self._param2)
    """

    # pylint: disable=abstract-method

    def __init__(
        self,
        filepath: PurePath,
        version: Optional[Version],
        exists_function: Callable[[str], bool] = None,
        glob_function: Callable[[str], List[str]] = None,
    ):
        """Creates a new instance of ``AbstractVersionedDataSet``.

        Args:
            filepath: Path to file.
            version: If specified, should be an instance of
                ``kedro.io.core.Version``. If its ``load`` attribute is
                None, the latest version will be loaded. If its ``save``
                attribute is None, save version will be autogenerated.
            exists_function: Function that is used for determining whether
                a path exists in a filesystem.
            glob_function: Function that is used for finding all paths
                in a filesystem, which match a given pattern.
        """
        self._filepath = filepath
        self._version = version
        # Filesystem hooks default to the local-disk implementations.
        self._exists_function = exists_function or _local_exists
        self._glob_function = glob_function or iglob
        self._last_load_version = None  # type: Optional[str]
        self._last_save_version = None  # type: Optional[str]

    def get_last_load_version(self) -> Optional[str]:
        """Return the version string used by the most recent ``load``."""
        return self._last_load_version

    def _lookup_load_version(self) -> Optional[str]:
        """Resolve the version to load: pinned value, or latest on disk."""
        if not self._version:
            return None
        if self._version.load:
            return self._version.load

        # When load version is unpinned, fetch the most recent existing
        # version from the given path
        pattern = str(self._get_versioned_path("*"))
        version_paths = sorted(self._glob_function(pattern), reverse=True)
        most_recent = next(
            (path for path in version_paths if self._exists_function(path)), None
        )

        if not most_recent:
            raise VersionNotFoundError(
                "Did not find any versions for {}".format(str(self))
            )

        return PurePath(most_recent).parent.name

    def _get_load_path(self) -> PurePath:
        """Return the concrete path that ``_load`` should read from."""
        if not self._version:
            # When versioning is disabled, load from original filepath
            return self._filepath

        load_version = self._last_load_version or self._lookup_load_version()
        return self._get_versioned_path(load_version)  # type: ignore

    def get_last_save_version(self) -> Optional[str]:
        """Return the version string used by the most recent ``save``."""
        return self._last_save_version

    def _lookup_save_version(self) -> Optional[str]:
        """Resolve the version to save: pinned value, or a fresh timestamp."""
        if not self._version:
            return None
        return self._version.save or generate_timestamp()

    def _get_save_path(self) -> PurePath:
        """Return the concrete path that ``_save`` should write to."""
        if not self._version:
            # When versioning is disabled, return original filepath
            return self._filepath

        save_version = self._last_save_version or self._lookup_save_version()
        versioned_path = self._get_versioned_path(save_version)  # type: ignore

        # Refuse to overwrite an existing version directory.
        if self._exists_function(str(versioned_path)):
            raise DataSetError(
                "Save path `{}` for {} must not exist if versioning "
                "is enabled.".format(versioned_path, str(self))
            )

        return versioned_path

    def _get_versioned_path(self, version: str) -> PurePath:
        # Layout: <filepath>/<version>/<filename>.
        return self._filepath / version / self._filepath.name

    def load(self) -> Any:
        self._last_load_version = self._lookup_load_version()
        return super().load()

    def save(self, data: Any) -> None:
        self._last_save_version = self._lookup_save_version()
        super().save(data)

        # Warn if what was just saved is not what a subsequent load would read.
        load_version = self._lookup_load_version()
        if load_version != self._last_save_version:
            warn(
                CONSISTENCY_WARNING.format(
                    self._last_save_version, load_version, str(self)
                )
            )

    def exists(self) -> bool:
        """Checks whether a data set's output already exists by calling
        the provided _exists() method.

        Returns:
            Flag indicating whether the output already exists.

        Raises:
            DataSetError: when underlying exists method raises error.

        """
        self._logger.debug("Checking whether target of %s exists", str(self))
        try:
            return self._exists()
        except VersionNotFoundError:
            # No versions on disk simply means "does not exist".
            return False
        except Exception as exc:
            message = "Failed during exists check for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
| 35.156303 | 96 | 0.612821 |
import abc
import copy
import logging
import os
from collections import namedtuple
from datetime import datetime, timezone
from glob import iglob
from pathlib import Path, PurePath
from typing import Any, Callable, Dict, List, Optional, Tuple, Type
from urllib.parse import urlparse
from warnings import warn
from kedro.utils import load_obj
VERSIONED_FLAG_KEY = "versioned"
VERSION_KEY = "version"
class DataSetError(Exception):
    """Raised by ``AbstractDataSet`` implementations when load/save fails."""
    pass
class DataSetNotFoundError(DataSetError):
    """Raised by ``DataCatalog`` when a non-existing data set is requested."""
    pass
class DataSetAlreadyExistsError(DataSetError):
    """Raised by ``DataCatalog`` when adding a data set that already exists."""
    pass
class VersionNotFoundError(DataSetError):
    """Raised when no load versions are available for a versioned data set."""
    pass
class AbstractDataSet(abc.ABC):
    """Base class for all data set implementations.

    Subclasses must implement ``_load``, ``_save`` and ``_describe``.
    """
    @classmethod
    def from_config(
        cls: Type,
        name: str,
        config: Dict[str, Any],
        load_version: str = None,
        save_version: str = None,
    ) -> "AbstractDataSet":
        """Create a data set instance from a config dict; raises DataSetError
        on parse or instantiation failure."""
        try:
            class_obj, config = parse_dataset_definition(
                config, load_version, save_version
            )
        except Exception as ex:
            raise DataSetError(
                "An exception occurred when parsing config "
                "for DataSet `{}`:\n{}".format(name, str(ex))
            )
        try:
            data_set = class_obj(**config)
        except TypeError as err:
            raise DataSetError(
                "\n{}.\nDataSet '{}' must only contain "
                "arguments valid for the constructor "
                "of `{}.{}`.".format(
                    str(err), name, class_obj.__module__, class_obj.__qualname__
                )
            )
        except Exception as err:
            raise DataSetError(
                "\n{}.\nFailed to instantiate DataSet "
                "'{}' of type `{}.{}`.".format(
                    str(err), name, class_obj.__module__, class_obj.__qualname__
                )
            )
        return data_set
    @property
    def _logger(self) -> logging.Logger:
        # Logger named after this module.
        return logging.getLogger(__name__)
    def get_last_load_version(self) -> Optional[str]:
        """Overridden by versioned datasets to report the last loaded version."""
        return None
    def load(self) -> Any:
        """Load data via ``_load``, wrapping unexpected errors in DataSetError."""
        self._logger.debug("Loading %s", str(self))
        try:
            return self._load()
        except DataSetError:
            raise
        except Exception as exc:
            message = "Failed while loading data from data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
    def get_last_save_version(self) -> Optional[str]:
        """Overridden by versioned datasets to report the last saved version."""
        return None
    def save(self, data: Any) -> None:
        """Save data via ``_save``; rejects ``None`` and wraps errors."""
        if data is None:
            raise DataSetError("Saving `None` to a `DataSet` is not allowed")
        try:
            self._logger.debug("Saving %s", str(self))
            self._save(data)
        except DataSetError:
            raise
        except Exception as exc:
            message = "Failed while saving data to data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
    def __str__(self):
        # Render as "ClassName(key=value, ...)" using _describe(); dict keys
        # are sorted and empty/None values are omitted.
        def _to_str(obj, is_root=False):
            fmt = "{}={}" if is_root else "'{}': {}"
            if isinstance(obj, dict):
                sorted_dict = sorted(obj.items(), key=lambda pair: str(pair[0]))
                text = ", ".join(
                    fmt.format(key, _to_str(value))
                    for key, value in sorted_dict
                    if value or isinstance(value, bool)
                )
                return text if is_root else "{" + text + "}"
            return str(obj)
        return "{}({})".format(type(self).__name__, _to_str(self._describe(), True))
    @abc.abstractmethod
    def _load(self) -> Any:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_load` method".format(self.__class__.__name__)
        )
    @abc.abstractmethod
    def _save(self, data: Any) -> None:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_save` method".format(self.__class__.__name__)
        )
    @abc.abstractmethod
    def _describe(self) -> Dict[str, Any]:
        raise NotImplementedError(
            "`{}` is a subclass of AbstractDataSet and"
            "it must implement the `_describe` method".format(self.__class__.__name__)
        )
    def exists(self) -> bool:
        """Return True when the data set's output exists (via ``_exists``)."""
        try:
            self._logger.debug("Checking whether target of %s exists", str(self))
            return self._exists()
        except Exception as exc:
            message = "Failed during exists check for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
    def _exists(self) -> bool:
        # Default: warn and assume the output does not exist.
        self._logger.warning(
            "`exists()` not implemented for `%s`. Assuming output does not exist.",
            self.__class__.__name__,
        )
        return False
    def release(self) -> None:
        """Release any cached data via ``_release``, wrapping errors."""
        try:
            self._logger.debug("Releasing %s", str(self))
            self._release()
        except Exception as exc:
            message = "Failed during release for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
    def _release(self) -> None:
        # Default: nothing to release.
        pass
def generate_timestamp() -> str:
    """Return the current UTC time as a ``YYYY-MM-DDThh.mm.ss.sssZ`` string."""
    current_ts = datetime.now(tz=timezone.utc)
    fmt = (
        "{d.year:04d}-{d.month:02d}-{d.day:02d}T{d.hour:02d}"
        ".{d.minute:02d}.{d.second:02d}.{ms:03d}Z"
    )
    return fmt.format(d=current_ts, ms=current_ts.microsecond // 1000)
class Version(namedtuple("Version", ["load", "save"])):
    """Load/save version pair for versioned data sets; a None field means
    "latest" (load) or "autogenerate" (save)."""
    __slots__ = ()
# Warning template used when the save version differs from the load version.
CONSISTENCY_WARNING = (
    "Save version `{}` did not match load version `{}` for {}. This is strongly "
    "discouraged due to inconsistencies it may cause between `save` and "
    "`load` operations. Please refrain from setting exact load version for "
    "intermediate data sets where possible to avoid this warning."
)
def parse_dataset_definition(
    config: Dict[str, Any], load_version: str = None, save_version: str = None
) -> Tuple[Type[AbstractDataSet], Dict]:
    """Resolve a catalog config dict into (dataset class, constructor kwargs).

    Raises DataSetError when `type` is missing, cannot be imported, or does
    not extend ``AbstractDataSet``.
    """
    save_version = save_version or generate_timestamp()
    config = copy.deepcopy(config)
    if "type" not in config:
        raise DataSetError("`type` is missing from DataSet catalog configuration")
    class_obj = config.pop("type")
    if isinstance(class_obj, str):
        try:
            class_obj = load_obj(class_obj, "kedro.io")
        except ImportError:
            raise DataSetError(
                "Cannot import module when trying to load type `{}`.".format(class_obj)
            )
        except AttributeError:
            raise DataSetError("Class `{}` not found.".format(class_obj))
    if not issubclass(class_obj, AbstractDataSet):
        raise DataSetError(
            "DataSet type `{}.{}` is invalid: all data set types must extend "
            "`AbstractDataSet`.".format(class_obj.__module__, class_obj.__qualname__)
        )
    if VERSION_KEY in config:
        # remove the "version" key so that it is never passed
        # to the "unversioned" data set constructor
        message = (
            "`%s` attribute removed from data set configuration since it is a "
            "reserved word and cannot be directly specified"
        )
        logging.getLogger(__name__).warning(message, VERSION_KEY)
        del config[VERSION_KEY]
    if config.pop(VERSIONED_FLAG_KEY, False):  # data set is versioned
        config[VERSION_KEY] = Version(load_version, save_version)
    return class_obj, config
def _local_exists(filepath: str) -> bool:
    """True if the path exists, or any of its ancestors is a regular file."""
    filepath = Path(filepath)
    return filepath.exists() or any(par.is_file() for par in filepath.parents)
def is_remote_path(filepath: str) -> bool:
    """Check whether the path looks like a remote URL (has a scheme)."""
    # Get rid of Windows-specific "C:\" start,
    # which is treated as a URL scheme.
    _, filepath = os.path.splitdrive(filepath)
    return bool(urlparse(filepath).scheme)
class AbstractVersionedDataSet(AbstractDataSet, abc.ABC):
    """Base class for versioned data set implementations: each save goes to
    ``<filepath>/<version>/<filename>`` and loads resolve a version first."""
    # pylint: disable=abstract-method
    def __init__(
        self,
        filepath: PurePath,
        version: Optional[Version],
        exists_function: Callable[[str], bool] = None,
        glob_function: Callable[[str], List[str]] = None,
    ):
        """Store the path, optional version pair, and filesystem hooks
        (defaulting to local-disk ``_local_exists`` / ``iglob``)."""
        self._filepath = filepath
        self._version = version
        self._exists_function = exists_function or _local_exists
        self._glob_function = glob_function or iglob
        self._last_load_version = None  # type: Optional[str]
        self._last_save_version = None  # type: Optional[str]
    def get_last_load_version(self) -> Optional[str]:
        """Version string used by the most recent ``load``."""
        return self._last_load_version
    def _lookup_load_version(self) -> Optional[str]:
        """Resolve the version to load: pinned value, or latest on disk."""
        if not self._version:
            return None
        if self._version.load:
            return self._version.load
        # When load version is unpinned, fetch the most recent existing
        # version from the given path
        pattern = str(self._get_versioned_path("*"))
        version_paths = sorted(self._glob_function(pattern), reverse=True)
        most_recent = next(
            (path for path in version_paths if self._exists_function(path)), None
        )
        if not most_recent:
            raise VersionNotFoundError(
                "Did not find any versions for {}".format(str(self))
            )
        return PurePath(most_recent).parent.name
    def _get_load_path(self) -> PurePath:
        """Concrete path that ``_load`` should read from."""
        if not self._version:
            # When versioning is disabled, load from original filepath
            return self._filepath
        load_version = self._last_load_version or self._lookup_load_version()
        return self._get_versioned_path(load_version)  # type: ignore
    def get_last_save_version(self) -> Optional[str]:
        """Version string used by the most recent ``save``."""
        return self._last_save_version
    def _lookup_save_version(self) -> Optional[str]:
        """Resolve the version to save: pinned value, or a fresh timestamp."""
        if not self._version:
            return None
        return self._version.save or generate_timestamp()
    def _get_save_path(self) -> PurePath:
        """Concrete path that ``_save`` should write to; refuses overwrites."""
        if not self._version:
            # When versioning is disabled, return original filepath
            return self._filepath
        save_version = self._last_save_version or self._lookup_save_version()
        versioned_path = self._get_versioned_path(save_version)  # type: ignore
        if self._exists_function(str(versioned_path)):
            raise DataSetError(
                "Save path `{}` for {} must not exist if versioning "
                "is enabled.".format(versioned_path, str(self))
            )
        return versioned_path
    def _get_versioned_path(self, version: str) -> PurePath:
        # Layout: <filepath>/<version>/<filename>.
        return self._filepath / version / self._filepath.name
    def load(self) -> Any:
        """Resolve and remember the load version, then delegate to super()."""
        self._last_load_version = self._lookup_load_version()
        return super().load()
    def save(self, data: Any) -> None:
        """Resolve and remember the save version, save, and warn when the
        saved version would not be the one loaded back."""
        self._last_save_version = self._lookup_save_version()
        super().save(data)
        load_version = self._lookup_load_version()
        if load_version != self._last_save_version:
            warn(
                CONSISTENCY_WARNING.format(
                    self._last_save_version, load_version, str(self)
                )
            )
    def exists(self) -> bool:
        """Existence check; a missing version is reported as "does not exist"."""
        self._logger.debug("Checking whether target of %s exists", str(self))
        try:
            return self._exists()
        except VersionNotFoundError:
            return False
        except Exception as exc:
            message = "Failed during exists check for data set {}.\n{}".format(
                str(self), str(exc)
            )
            raise DataSetError(message) from exc
f71d664600932b67be3bc734c66fe4ff4b43a8de | 17,171 | py | Python | engine/SCons/Tool/rpmutils.py | cctbx/scons | 9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00 | [
"MIT"
] | 1 | 2020-05-28T17:50:54.000Z | 2020-05-28T17:50:54.000Z | engine/SCons/Tool/rpmutils.py | cctbx/scons | 9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00 | [
"MIT"
] | 4 | 2018-07-24T05:46:04.000Z | 2018-08-07T06:10:45.000Z | engine/SCons/Tool/rpmutils.py | cctbx/scons | 9eb46f7e2a965e1041e5b1a6bc941c1e97bceb00 | [
"MIT"
] | 1 | 2018-07-23T10:34:27.000Z | 2018-07-23T10:34:27.000Z | """SCons.Tool.rpmutils.py
RPM specific helper routines for general usage in the test framework
and SCons core modules.
Since we check for the RPM package target name in several places,
we have to know which machine/system name RPM will use for the current
hardware setup. The following dictionaries and functions try to
mimic the exact naming rules of the RPM source code.
They were directly derived from the file "rpmrc.in" of the version
rpm-4.9.1.3. For updating to a more recent version of RPM, this Python
script can be used standalone. The usage() function below shows the
exact syntax.
"""
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/rpmutils.py rel_3.0.0:4395:8972f6a2f699 2017/09/18 12:59:24 bdbaddog"
import platform
import subprocess
import SCons.Util
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
    """ Return the canonicalized machine name.

    With use_rpm_default=True the name is asked from rpm itself; if that
    fails (rpm missing or erroring) it falls back to platform.machine().
    """
    rmachine = None
    if use_rpm_default:
        try:
            # Asking rpm directly is the most reliable way to get the arch.
            raw = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False)
            rmachine = SCons.Util.to_str(raw.rstrip())
        except Exception:
            # rpm unavailable or failed: fall back below.
            rmachine = None
    if rmachine is None:
        rmachine = platform.machine()
    # Map the raw name onto its canonical form when the table knows it.
    if rmachine in arch_canon:
        return arch_canon[rmachine][0]
    return rmachine
def defaultSystem():
    """ Return the canonicalized system name. """
    rsystem = platform.system()
    # Map onto the canonical OS name when the table knows it.
    if rsystem in os_canon:
        return os_canon[rsystem][0]
    return rsystem
def defaultNames():
    """ Return the canonicalized (machine, system) name pair. """
    machine = defaultMachine()
    system = defaultSystem()
    return machine, system
def updateRpmDicts(rpmrc, pyfile):
    """ Read the given rpmrc file with RPM definitions and update the
        info dictionaries in the file pyfile with it.

        The arguments will usually be 'rpmrc.in' from a recent RPM source
        tree, and 'rpmutils.py' referring to this script itself.
        See also usage() below.
    """
    try:
        # Read old rpmutils.py file
        with open(pyfile, "r") as fin:
            oldpy = fin.readlines()
        # Read current rpmrc.in file
        with open(rpmrc, "r") as fin:
            rpm = fin.readlines()
        # Parse for data
        data = {}
        # Allowed section names that get parsed
        sections = ['optflags',
                    'arch_canon',
                    'os_canon',
                    'buildarchtranslate',
                    'arch_compat',
                    'os_compat',
                    'buildarch_compat']
        for l in rpm:
            l = l.rstrip('\n').replace(':',' ')
            # Skip comments
            if l.lstrip().startswith('#'):
                continue
            tokens = l.strip().split()
            if tokens:
                key = tokens[0]
                if key in sections:
                    # First entry for this section creates its dict
                    data.setdefault(key, {})
                    # Insert data
                    data[key][tokens[1]] = tokens[2:]
        # Write new rpmutils.py file
        with open(pyfile, "w") as out:
            skipping = 0
            for l in oldpy:
                if skipping:
                    # Drop stale dictionary lines until the end marker.
                    if l.startswith('# End of rpmrc dictionaries'):
                        skipping = 0
                        out.write(l)
                else:
                    out.write(l)
                    if l.startswith('# Start of rpmrc dictionaries'):
                        skipping = 1
                        # Write the regenerated dictionaries *between* the
                        # markers. (Writing them after the copy loop, as the
                        # old code did, appended them at end-of-file and
                        # duplicated them on every subsequent run.)
                        for key, entries in data.items():
                            out.write("%s = {\n" % key)
                            for arch in sorted(entries.keys()):
                                out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
                            out.write("}\n\n")
    except Exception:
        # Best-effort maintenance helper: silently skip on any I/O or
        # parse error (matches the historical behavior of this script).
        pass
def usage():
    """ Print the command-line synopsis for standalone invocation. """
    msg = "rpmutils.py rpmrc.in rpmutils.py"
    print(msg)
def main():
    """ Standalone entry point: regenerate this module's dictionaries
        from the rpmrc file given on the command line.
    """
    import sys
    if len(sys.argv) >= 3:
        updateRpmDicts(sys.argv[1], sys.argv[2])
    else:
        # Wrong number of arguments: show the synopsis and quit cleanly.
        usage()
        sys.exit(0)
if __name__ == "__main__":
    main()
| 31.333942 | 107 | 0.524314 |
from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/rpmutils.py rel_3.0.0:4395:8972f6a2f699 2017/09/18 12:59:24 bdbaddog"
import platform
import subprocess
import SCons.Util
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
    """ Return the canonicalized machine name.

    With use_rpm_default=True, asks rpm itself for the default target
    CPU; on any failure retries with platform.machine() instead.
    """
    if use_rpm_default:
        try:
            # Asking rpm directly is the most reliable way to get the arch.
            rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
            rmachine = SCons.Util.to_str(rmachine)
        except Exception as e:
            # rpm missing or erroring: retry via platform.machine().
            return defaultMachine(False)
    else:
        rmachine = platform.machine()
    # Map the raw name onto its canonical form when the table knows it.
    if rmachine in arch_canon:
        rmachine = arch_canon[rmachine][0]
    return rmachine
def defaultSystem():
    """ Return the canonicalized system name. """
    rsystem = platform.system()
    # Map onto the canonical OS name when the table knows it.
    if rsystem in os_canon:
        rsystem = os_canon[rsystem][0]
    return rsystem
def defaultNames():
    """ Return the canonicalized (machine, system) name pair. """
    return defaultMachine(), defaultSystem()
def updateRpmDicts(rpmrc, pyfile):
    """ Read the given rpmrc file with RPM definitions and update the
        info dictionaries in the file pyfile with it.

        The arguments will usually be 'rpmrc.in' from a recent RPM source
        tree, and 'rpmutils.py' referring to this script itself.
        See also usage() below.
    """
    try:
        # Read old rpmutils.py file
        with open(pyfile, "r") as fin:
            oldpy = fin.readlines()
        # Read current rpmrc.in file
        with open(rpmrc, "r") as fin:
            rpm = fin.readlines()
        # Parse for data
        data = {}
        # Allowed section names that get parsed
        sections = ['optflags',
                    'arch_canon',
                    'os_canon',
                    'buildarchtranslate',
                    'arch_compat',
                    'os_compat',
                    'buildarch_compat']
        for l in rpm:
            l = l.rstrip('\n').replace(':',' ')
            # Skip comments
            if l.lstrip().startswith('#'):
                continue
            tokens = l.strip().split()
            if tokens:
                key = tokens[0]
                if key in sections:
                    # First entry for this section creates its dict
                    data.setdefault(key, {})
                    # Insert data
                    data[key][tokens[1]] = tokens[2:]
        # Write new rpmutils.py file
        with open(pyfile, "w") as out:
            skipping = 0
            for l in oldpy:
                if skipping:
                    # Drop stale dictionary lines until the end marker.
                    if l.startswith('# End of rpmrc dictionaries'):
                        skipping = 0
                        out.write(l)
                else:
                    out.write(l)
                    if l.startswith('# Start of rpmrc dictionaries'):
                        skipping = 1
                        # Write the regenerated dictionaries *between* the
                        # markers. (Writing them after the copy loop, as the
                        # old code did, appended them at end-of-file and
                        # duplicated them on every subsequent run.)
                        for key, entries in data.items():
                            out.write("%s = {\n" % key)
                            for arch in sorted(entries.keys()):
                                out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
                            out.write("}\n\n")
    except Exception:
        # Best-effort maintenance helper: silently skip on any I/O or
        # parse error (matches the historical behavior of this script).
        pass
def usage():
    """ Print the command-line synopsis for standalone invocation. """
    print("rpmutils.py rpmrc.in rpmutils.py")
def main():
    """ Standalone entry point: regenerate this module's dictionaries
        from the rpmrc file given on the command line.
    """
    import sys
    if len(sys.argv) < 3:
        # Wrong number of arguments: show the synopsis and quit cleanly.
        usage()
        sys.exit(0)
    updateRpmDicts(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
    main()
| true | true |
f71d672bcf533ac2ac8ebfb1e4f4f25cd9aa808f | 4,105 | py | Python | mesos_operations/mesos_operations.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | 36 | 2016-03-30T10:59:37.000Z | 2022-02-28T21:46:42.000Z | mesos_operations/mesos_operations.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | 21 | 2016-10-07T06:20:56.000Z | 2022-03-10T12:09:34.000Z | mesos_operations/mesos_operations.py | sasank1/plugins | f12dcb207dcd53819f2f23eeaab3b60a40885d4b | [
"BSD-2-Clause"
] | 65 | 2016-03-16T09:11:48.000Z | 2022-03-09T13:10:08.000Z | #!/usr/bin/python
import json
import urllib2
### Monitors the performance metrics of your Mesos agents using Site24x7 Server Monitoring Plugins.
### 1. Ensure the Site24x7 server monitoring agent is up and running.
### 2. Download the plugin from GitHub.
### 3. Create a folder named after the plugin under the agent's plugins directory (/opt/site24x7/monagent/plugins/).
### 4. Place the plugin file inside that folder.
### Language : Python
### Author: Anita, Zoho Corp
### Tested on Ubuntu
# Adjust the Mesos endpoint settings (HOST/PORT and credentials) below to match your setup.
HOST="localhost"
PORT="5050"
URL = "http://"+HOST+":"+PORT+"/metrics/snapshot"
USERNAME = None
PASSWORD = None
# If any changes done in the plugin, plugin_version must be incremented by 1. For. E.g 2,3,4..
PLUGIN_VERSION = "1"
# Setting this to true will alert you when there is a communication problem while posting plugin data to server
HEARTBEAT = "true"
class MesosAgents():
    """Collects Mesos master agent metrics from the /metrics/snapshot endpoint.

    The resulting dict is printed as JSON for the Site24x7 monitoring agent.
    """

    def __init__(self):
        # Base payload; metric values are filled in by getData().
        self.data = {}
        self.data['plugin_version'] = PLUGIN_VERSION
        self.data['heartbeat_required'] = HEARTBEAT

    def getData(self):
        """Fetch the metrics snapshot over HTTP and return the populated dict.

        On any HTTP or parsing failure the dict carries a 'msg' key with the
        error description and self.status is set to 0.
        """
        try:
            ### Create Authentication Handler for the HTTP Request
            pwdmgr = urllib2.HTTPPasswordMgr()
            pwdmgr.add_password(None, URL, USERNAME, PASSWORD)
            auth = urllib2.HTTPBasicAuthHandler(pwdmgr)
            ### Create Proxy Handler for the HTTP Request
            proxy = urllib2.ProxyHandler({})    # Uses NO Proxy
            ### Create a HTTP Request with the authentication and proxy handlers
            opener = urllib2.build_opener(proxy, auth)
            urllib2.install_opener(opener)
            ### Get HTTP Response
            response = urllib2.urlopen(URL, timeout=10)
            ### Parse the response data
            if response.getcode() == 200:
                # Decode the payload exactly once here; parseClusterData
                # accepts either text or bytes.
                output = response.read().decode()
                self.data = self.parseClusterData(output)
            else:
                self.data['msg'] = str(response.getcode())
                self.status = 0
        except Exception as e:
            self.data['msg'] = str(e)
            self.status = 0
        return self.data

    def parseClusterData(self, output):
        """Parse the JSON snapshot (str or bytes) and copy agent metrics into self.data."""
        try:
            # BUG FIX: the old code unconditionally called
            # output.decode('UTF-8') even though getData() had already
            # decoded the payload; that fails on str. Decode only if needed.
            if isinstance(output, bytes):
                output = output.decode('UTF-8')
            data = json.loads(output)
            self.data['master_slave_registrations'] = data['master/slave_registrations']    # agents that cleanly re-joined after a master disconnect
            self.data['master_master_slave_reregistrations'] = data['master/slave_reregistrations']    # agent re-registrations
            self.data['master_slave_removals'] = data['master/slave_removals']    # agents removed for various reasons, incl. maintenance
            self.data['master_slave_shutdowns_scheduled'] = data['master/slave_shutdowns_scheduled']    # failed health check, removal scheduled
            self.data['master_slave_shutdowns_canceled'] = data['master/slave_shutdowns_canceled']    # cancelled agent shutdowns
            self.data['master_slave_shutdowns_completed'] = data['master/slave_shutdowns_completed']    # agents that failed their health check
            self.data['master_slaves_active'] = data['master/slaves_active']    # active agents
            self.data['master_slaves_connected'] = data['master/slaves_connected']    # connected agents
            self.data['master_slaves_disconnected'] = data['master/slaves_disconnected']    # disconnected agents
            self.data['master_slaves_inactive'] = data['master/slaves_inactive']    # inactive agents
        except Exception as e:
            self.data['msg'] = str(e)
            self.status = 0
        return self.data
# Script entry point: fetch the metrics once and print them as pretty JSON
# for the monitoring agent to consume.
if __name__ == '__main__':
    mesosagt = MesosAgents()
    result = mesosagt.getData()
    print(json.dumps(result, indent=4, sort_keys=True))
| 42.760417 | 216 | 0.649939 |
import json
import urllib2
msg'] = str(e)
self.status = 0
return self.data
    def parseClusterData(self, output):
        """Parse the JSON metrics snapshot and copy agent metrics into self.data.

        Returns self.data; on failure it carries a 'msg' key with the error
        and self.status is set to 0.
        """
        try:
            # NOTE(review): 'output' is decoded again here although the caller
            # may already have decoded it — double decode; confirm input type.
            data = json.loads(output.decode('UTF-8'))
            # Copy the master's agent ("slave") counters under plugin key names.
            self.data['master_slave_registrations'] = data['master/slave_registrations']
            self.data['master_master_slave_reregistrations'] = data['master/slave_reregistrations']
            self.data['master_slave_removals'] = data['master/slave_removals']
            self.data['master_slave_shutdowns_scheduled'] = data['master/slave_shutdowns_scheduled']
            self.data['master_slave_shutdowns_canceled'] = data['master/slave_shutdowns_canceled']
            self.data['master_slave_shutdowns_completed'] = data['master/slave_shutdowns_completed']
            self.data['master_slaves_active'] = data['master/slaves_active']
            self.data['master_slaves_connected'] = data['master/slaves_connected']
            self.data['master_slaves_disconnected'] = data['master/slaves_disconnected']
            self.data['master_slaves_inactive'] = data['master/slaves_inactive']
        except Exception as e:
            self.data['msg'] = str(e)
            self.status = 0
        return self.data
# Script entry point: fetch the metrics once and print them as pretty JSON.
if __name__ == '__main__':
    mesosagt = MesosAgents()
    result = mesosagt.getData()
    print(json.dumps(result, indent=4, sort_keys=True))
| true | true |
f71d6913191f30ac3e407165484554354a24b22d | 1,734 | py | Python | muddery/server/upgrader/base_upgrader.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | muddery/server/upgrader/base_upgrader.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | muddery/server/upgrader/base_upgrader.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | """
Upgrade custom's game dir to the latest version.
"""
from utils import compare_version
class BaseUpgrader(object):
    """Base class for game-dir upgraders.

    A subclass handles games whose version lies in the half-open range
    [from_min_version, from_max_version) and upgrades them to
    target_version.
    """
    # Lowest game version this upgrader accepts (inclusive).
    from_min_version = (0, 0, 0)

    # Version bound above which this upgrader no longer applies (exclusive).
    from_max_version = (0, 0, 0)

    # Version produced by the upgrade; None means the latest version.
    target_version = None

    def upgrade_game(self, game_dir, game_template, muddery_lib):
        """Upgrade a game directory.

        Args:
            game_dir: (string) the game dir to be upgraded.
            game_template: (string) the game template used to upgrade the game dir.
            muddery_lib: (string) muddery's dir
        """
        pass

    def upgrade_data(self, data_path, game_template, muddery_lib):
        """Upgrade game data.

        Args:
            data_path: (string) the data path to be upgraded.
            game_template: (string) the game template used to upgrade the game dir.
            muddery_lib: (string) muddery's dir
        """
        pass

    def can_upgrade(self, game_ver):
        """Return True when game_ver (a sequence of version numbers) is in range."""
        # The version must be at least from_min_version...
        meets_minimum = compare_version(game_ver, self.from_min_version) != -1
        # ...and strictly below from_max_version (second check short-circuits).
        return meets_minimum and compare_version(game_ver, self.from_max_version) == -1
| 29.896552 | 84 | 0.619954 |
from utils import compare_version
class BaseUpgrader(object):
    """Base class for upgraders that migrate a game dir between versions.

    Applies to games whose version is in [from_min_version, from_max_version)
    and upgrades them to target_version.
    """
    # Minimum accepted version (inclusive).
    from_min_version = (0, 0, 0)
    # Maximum accepted version (exclusive).
    from_max_version = (0, 0, 0)
    # Version to upgrade to; None means the latest version.
    target_version = None
    def upgrade_game(self, game_dir, game_template, muddery_lib):
        """Upgrade the game directory at game_dir using game_template and muddery_lib."""
        pass
    def upgrade_data(self, data_path, game_template, muddery_lib):
        """Upgrade the game data at data_path using game_template and muddery_lib."""
        pass
    def can_upgrade(self, game_ver):
        """Return True when game_ver (version-number sequence) is within range."""
        # Reject versions below the inclusive lower bound.
        if compare_version(game_ver, self.from_min_version) == -1:
            return False
        # Reject versions at or above the exclusive upper bound.
        if compare_version(game_ver, self.from_max_version) != -1:
            return False
        return True
| true | true |
f71d69f1c9f19da80953622dfdb60cfd07448bed | 17,664 | py | Python | saticl/training.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | 6 | 2021-12-08T05:58:18.000Z | 2021-12-29T09:55:32.000Z | saticl/training.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | null | null | null | saticl/training.py | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 | [
"MIT"
] | null | null | null | from itertools import chain
from pathlib import Path
from typing import Tuple
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader
from saticl.config import Configuration, SSLConfiguration
from saticl.datasets.icl import ICLDataset
from saticl.datasets.transforms import invariance_transforms, inverse_transform, ssl_transforms
from saticl.datasets.wrappers import SSLDataset
from saticl.logging.tensorboard import TensorBoardLogger
from saticl.losses.regularization import AugmentationInvariance
from saticl.models.icl import ICLSegmenter
from saticl.prepare import prepare_dataset, prepare_metrics, prepare_metrics_ssl, prepare_model, prepare_model_ssl
from saticl.tasks import Task
from saticl.trainer.base import Trainer
from saticl.trainer.callbacks import Checkpoint, DisplaySamples, EarlyStopping, EarlyStoppingCriterion
from saticl.trainer.invariance import AugInvarianceTrainer
from saticl.trainer.ssl import SSLStage, SSLTrainer
from saticl.utils.common import flatten_config, get_logger, git_revision_hash, store_config
from saticl.utils.ml import checkpoint_path, init_experiment, seed_everything, seed_worker
LOG = get_logger(__name__)
def init_from_previous_step(config: Configuration, new_model: ICLSegmenter, old_model: ICLSegmenter,
                            model_folder: Path, task: Task) -> Tuple[ICLSegmenter, ICLSegmenter]:
    """Initialize the new and old models from the previous step's checkpoint.

    At step 0 both models are returned untouched (training from scratch).
    Otherwise the previous step's checkpoint is loaded into new_model
    (non-strict, since the ICL heads differ) and into old_model (strict),
    and old_model is frozen and put in eval mode.

    Args:
        config: experiment configuration (may carry an explicit step_checkpoint path).
        new_model: model being trained for the current step.
        old_model: frozen model from the previous step (distillation teacher).
        model_folder: folder where per-step checkpoints are stored.
        task: current incremental task (provides name and step index).

    Returns:
        The (new_model, old_model) pair after checkpoint loading.
    """
    if task.step == 0:
        LOG.info("Step 0: training from scratch without old model")
        return new_model, old_model

    LOG.info("Loading checkpoint from step: %d", task.step - 1)
    # An explicitly configured checkpoint path takes precedence over the
    # conventional per-step path inside model_folder.
    if config.task.step_checkpoint is not None:
        ckpt_path = Path(config.task.step_checkpoint)
    else:
        ckpt_path = checkpoint_path(model_folder, task_name=task.name, step=task.step - 1)
    assert ckpt_path.exists() and ckpt_path.is_file(), f"Checkpoint for step {task.step-1} not found at {str(ckpt_path)}"
    checkpoint = torch.load(str(ckpt_path), map_location="cpu")
    # load checkpoint into the new model, without strict matching because of ICL heads
    new_model.load_state_dict(checkpoint, strict=False)
    if config.model.init_balanced:
        new_model.init_classifier()
    # load the same checkpoint into the old model, this time strict since it's the very same
    old_model.load_state_dict(checkpoint, strict=True)
    old_model.freeze()
    old_model.eval()
    del checkpoint
    return new_model, old_model
def train(config: Configuration):
    """Run standard incremental (ICL) training for the configured task step.

    Sets up the experiment folders, seeding, ICL datasets/loaders, the new
    model (plus the frozen old model for steps > 0, initialized from the
    previous step's checkpoint), losses, metrics, TensorBoard logging and
    callbacks, then fits the trainer and stores the configuration.

    Args:
        config: full experiment configuration.
    """
    # assertions before starting
    assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
    assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
    # prepare accelerator ASAP
    accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
    # Create the directory tree:
    # outputs
    # |-- dataset
    #     |-- task_name
    #         |-- exp_name
    #             |-- models
    #             |-- logs
    accelerator.wait_for_everyone()
    log_name = f"output-{config.task.step}.log"
    exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
    config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
    LOG.info("Run started")
    LOG.info("Experiment ID: %s", exp_id)
    LOG.info("Output folder: %s", out_folder)
    LOG.info("Models folder: %s", model_folder)
    LOG.info("Logs folder:   %s", logs_folder)
    LOG.info("Configuration: %s", config_path)
    # seeding everything
    LOG.info("Using seed: %d", config.seed)
    seed_everything(config.seed)
    # prepare datasets
    LOG.info("Loading datasets...")
    train_set, valid_set = prepare_dataset(config=config, partial_transforms=False)
    LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
    add_background = not train_set.has_background()
    task = Task(dataset=config.dataset,
                name=config.task.name,
                step=config.task.step,
                add_background=add_background)
    # Out-of-task pixels are mapped to 0 during training, 255 (ignore) for validation.
    train_mask, valid_mask = 0, 255
    train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
    valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
    # construct data loaders
    train_loader = DataLoader(dataset=train_set,
                              batch_size=config.trainer.batch_size,
                              shuffle=True,
                              num_workers=config.trainer.num_workers,
                              worker_init_fn=seed_worker,
                              drop_last=True)
    valid_loader = DataLoader(dataset=valid_set,
                              batch_size=config.trainer.batch_size,
                              shuffle=False,
                              num_workers=config.trainer.num_workers,
                              worker_init_fn=seed_worker)
    LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
    # prepare models
    LOG.info("Preparing model...")
    new_model = prepare_model(config=config, task=task)
    new_model = new_model.to(accelerator.device)
    if task.step > 0:
        # Build the frozen teacher from the previous step's task definition.
        old_task = Task(dataset=config.dataset,
                        name=config.task.name,
                        step=task.step - 1,
                        add_background=add_background)
        old_model = prepare_model(config=config, task=old_task)
        old_model = old_model.to(accelerator.device)
    else:
        old_model = None
    new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
    LOG.info("Done preparing models")
    # prepare optimizer and scheduler
    optimizer = config.optimizer.instantiate(new_model.parameters())
    scheduler = config.scheduler.instantiate(optimizer)
    # prepare losses
    weights = None
    if config.class_weights:
        weights = train_set.load_class_weights(Path(config.class_weights),
                                               device=accelerator.device,
                                               normalize=config.ce.tversky)
        LOG.info("Using class weights: %s", str(weights))
    segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count(), weight=weights)
    distill_loss = config.kd.instantiate()
    # Warn (but do not fail) when an unbiased setup was requested yet the
    # instantiated losses are not the unbiased variants.
    if task.step > 0 and config.ce.unbiased:
        seg_loss_name = str(type(segment_loss))
        kdd_loss_name = str(type(distill_loss))
        if "Unbiased" not in seg_loss_name:
            LOG.warn(f"Non-ubiased segmentation loss '{seg_loss_name}' for step {task.step}!")
        if "Unbiased" not in kdd_loss_name:
            LOG.warn(f"Non-unbiased KD loss '{kdd_loss_name}' for step {task.step}")
    # prepare metrics and logger
    monitored = config.trainer.monitor.name
    train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
    logger = TensorBoardLogger(log_folder=logs_folder,
                               filename_suffix=f"step-{task.step}",
                               icl_step=task.step,
                               comment=config.comment)
    # logging configuration to tensorboard
    LOG.debug("Logging flattened config. to TensorBoard")
    logger.log_table("config", flatten_config(config.dict()))
    # prepare trainer
    LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
    num_samples = int(config.visualize) * config.num_samples
    # choose trainer class depending on task or regularization
    trainer_class = Trainer
    kwargs = dict()
    if config.aug.apply:
        # Augmentation-invariance regularization requires the dedicated trainer.
        inv_transforms = invariance_transforms(config.aug)
        LOG.info("Invariance transforms: ")
        LOG.info(str(inv_transforms))
        kwargs.update(aug_criterion=AugmentationInvariance(transform=inv_transforms),
                      aug_lambda=config.aug.factor,
                      aug_lambda_icl=config.aug.factor_icl,
                      temperature=config.trainer.temperature,
                      temp_epochs=config.trainer.temp_epochs)
        trainer_class = AugInvarianceTrainer
    trainer = trainer_class(accelerator=accelerator,
                            task=task,
                            new_model=new_model,
                            old_model=old_model,
                            optimizer=optimizer,
                            scheduler=scheduler,
                            train_metrics=train_metrics,
                            val_metrics=valid_metrics,
                            old_classes=train_set.old_categories(),
                            new_classes=train_set.new_categories(),
                            seg_criterion=segment_loss,
                            kdd_criterion=distill_loss,
                            kde_criterion=None,
                            kdd_lambda=config.kd.decoder_factor,
                            kde_lambda=config.kd.encoder_factor,
                            logger=logger,
                            samples=num_samples,
                            debug=config.debug,
                            **kwargs)
    trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
                                       criterion=EarlyStoppingCriterion.maximum,
                                       patience=config.trainer.patience)) \
           .add_callback(Checkpoint(call_every=1,
                                    model_folder=model_folder,
                                    name_format=f"task{task.name}_step-{task.step}",
                                    save_best=True)) \
           .add_callback(DisplaySamples(inverse_transform=inverse_transform(),
                                        color_palette=train_set.palette()))
    # storing config and starting training
    config.version = git_revision_hash()
    store_config(config, path=config_path)
    trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
    LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
             f"(best {monitored}: {trainer.best_score:.4f})")
    LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
def train_ssl(config: SSLConfiguration):
    """Run ICL training with an auxiliary self-supervised (SSL) pretext head.

    Mirrors train(): experiment setup, ICL datasets/loaders, model and loss
    preparation, TensorBoard logging, callbacks and fitting — but forces the
    input to 4 channels, wraps the training set in SSLDataset, builds an
    additional SSL model/loss/metrics and drives everything with SSLTrainer.

    Args:
        config: full SSL experiment configuration.
    """
    # assertions before starting
    assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
    assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
    # The SSL pipeline expects 4-channel input; override any other setting.
    if config.in_channels != 4:
        LOG.warn("Forcing input channels to 4 (previous value: %d)", config.in_channels)
        config.in_channels = 4
    # prepare accelerator ASAP
    accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
    # Create the directory tree:
    # outputs
    # |-- dataset
    #     |-- task_name
    #         |-- exp_name
    #             |-- models
    #             |-- logs
    accelerator.wait_for_everyone()
    log_name = f"output-{config.task.step}.log"
    exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
    config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
    store_config(config, path=config_path)
    LOG.info("Run started")
    LOG.info("Experiment ID: %s", exp_id)
    LOG.info("Output folder: %s", out_folder)
    LOG.info("Models folder: %s", model_folder)
    LOG.info("Logs folder:   %s", logs_folder)
    LOG.info("Configuration: %s", config_path)
    # seeding everything
    LOG.info("Using seed: %d", config.seed)
    seed_everything(config.seed)
    # prepare datasets
    LOG.info("Loading datasets...")
    train_set, valid_set = prepare_dataset(config=config)
    # Wrap the training set so each sample also yields the SSL pretext target.
    train_set = SSLDataset(train_set, transform=ssl_transforms())
    LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
    add_background = not train_set.has_background()
    task = Task(dataset=config.dataset,
                name=config.task.name,
                step=config.task.step,
                add_background=add_background)
    # Out-of-task pixels are mapped to 0 during training, 255 (ignore) for validation.
    train_mask, valid_mask = 0, 255
    train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
    valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=config.trainer.batch_size,
                              shuffle=True,
                              num_workers=config.trainer.num_workers,
                              worker_init_fn=seed_worker,
                              drop_last=True)
    valid_loader = DataLoader(dataset=valid_set,
                              batch_size=config.trainer.batch_size,
                              shuffle=False,
                              num_workers=config.trainer.num_workers,
                              worker_init_fn=seed_worker)
    LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
    # prepare models
    LOG.info("Preparing model...")
    new_model, ssl_model = prepare_model_ssl(config=config, task=task)
    new_model = new_model.to(accelerator.device)
    ssl_model = ssl_model.to(accelerator.device)
    if task.step > 0:
        # Build the frozen teacher from the previous step's task definition.
        old_task = Task(dataset=config.dataset,
                        name=config.task.name,
                        step=task.step - 1,
                        add_background=add_background)
        old_model = prepare_model(config=config, task=old_task)
        old_model = old_model.to(accelerator.device)
    else:
        old_model = None
    new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
    LOG.info("Done preparing models")
    # prepare optimizer and scheduler
    # The SSL head's parameters are optimized jointly with the main model.
    parameters = chain(new_model.parameters(), ssl_model.head.parameters())
    optimizer = config.optimizer.instantiate(parameters)
    scheduler = config.scheduler.instantiate(optimizer)
    # prepare losses, including SSL
    segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count())
    distill_loss = config.kd.instantiate()
    pretext_loss = config.ssl_loss()
    # asserts to verify their validity
    if task.step > 0 and config.ce.unbiased:
        seg_loss_name = str(type(segment_loss))
        kdd_loss_name = str(type(distill_loss))
        assert "Unbiased" in seg_loss_name, f"Wrong loss '{seg_loss_name}' for step {task.step}"
        assert "Unbiased" in kdd_loss_name, f"Wrong loss '{kdd_loss_name}' for step {task.step}"
    # prepare metrics and logger
    monitored = config.trainer.monitor.name
    train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
    ssl_metrics = prepare_metrics_ssl(num_classes=config.model.pretext_classes, device=accelerator.device)
    logger = TensorBoardLogger(log_folder=logs_folder,
                               filename_suffix=f"step-{task.step}",
                               icl_step=task.step,
                               comment=config.comment)
    # logging configuration to tensorboard
    LOG.debug("Logging flattened config. to TensorBoard")
    logger.log_table("config", flatten_config(config.dict()))
    # prepare trainer
    LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
    num_samples = int(config.visualize) * config.num_samples
    trainer = SSLTrainer(accelerator=accelerator,
                         task=task,
                         new_model=new_model,
                         old_model=old_model,
                         ssl_model=ssl_model,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         train_metrics=train_metrics,
                         val_metrics=valid_metrics,
                         old_classes=train_set.old_categories(),
                         new_classes=train_set.new_categories(),
                         seg_criterion=segment_loss,
                         ssl_criterion=pretext_loss,
                         kdd_criterion=distill_loss,
                         kde_criterion=None,
                         kdd_lambda=config.kd.decoder_factor,
                         kde_lambda=config.kd.encoder_factor,
                         logger=logger,
                         samples=num_samples,
                         debug=config.debug)
    trainer.add_metrics(SSLStage.ssl, metrics=ssl_metrics)
    trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
                                       criterion=EarlyStoppingCriterion.maximum,
                                       patience=config.trainer.patience)) \
           .add_callback(Checkpoint(call_every=1,
                                    model_folder=model_folder,
                                    name_format=f"task{task.name}_step-{task.step}",
                                    save_best=True)) \
           .add_callback(DisplaySamples(inverse_transform=inverse_transform(),
                                        color_palette=train_set.palette()))
    trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
    LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
             f"(best {monitored}: {trainer.best_score:.4f})")
    LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
| 50.613181 | 121 | 0.637908 | from itertools import chain
from pathlib import Path
from typing import Tuple
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader
from saticl.config import Configuration, SSLConfiguration
from saticl.datasets.icl import ICLDataset
from saticl.datasets.transforms import invariance_transforms, inverse_transform, ssl_transforms
from saticl.datasets.wrappers import SSLDataset
from saticl.logging.tensorboard import TensorBoardLogger
from saticl.losses.regularization import AugmentationInvariance
from saticl.models.icl import ICLSegmenter
from saticl.prepare import prepare_dataset, prepare_metrics, prepare_metrics_ssl, prepare_model, prepare_model_ssl
from saticl.tasks import Task
from saticl.trainer.base import Trainer
from saticl.trainer.callbacks import Checkpoint, DisplaySamples, EarlyStopping, EarlyStoppingCriterion
from saticl.trainer.invariance import AugInvarianceTrainer
from saticl.trainer.ssl import SSLStage, SSLTrainer
from saticl.utils.common import flatten_config, get_logger, git_revision_hash, store_config
from saticl.utils.ml import checkpoint_path, init_experiment, seed_everything, seed_worker
LOG = get_logger(__name__)
def init_from_previous_step(config: Configuration, new_model: ICLSegmenter, old_model: ICLSegmenter,
model_folder: Path, task: Task) -> Tuple[ICLSegmenter, ICLSegmenter]:
if task.step == 0:
LOG.info("Step 0: training from scratch without old model")
return new_model, old_model
LOG.info("Loading checkpoint from step: %d", task.step - 1)
if config.task.step_checkpoint is not None:
ckpt_path = Path(config.task.step_checkpoint)
else:
ckpt_path = checkpoint_path(model_folder, task_name=task.name, step=task.step - 1)
assert ckpt_path.exists() and ckpt_path.is_file(), f"Checkpoint for step {task.step-1} not found at {str(ckpt_path)}"
checkpoint = torch.load(str(ckpt_path), map_location="cpu")
new_model.load_state_dict(checkpoint, strict=False)
if config.model.init_balanced:
new_model.init_classifier()
old_model.load_state_dict(checkpoint, strict=True)
old_model.freeze()
old_model.eval()
del checkpoint
return new_model, old_model
def train(config: Configuration):
# assertions before starting
assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
# prepare accelerator ASAP
accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
# Create the directory tree:
# outputs
# |-- dataset
# |--task_name
# |-- exp_name
# |-- models
# |-- logs
accelerator.wait_for_everyone()
log_name = f"output-{config.task.step}.log"
exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
LOG.info("Run started")
LOG.info("Experiment ID: %s", exp_id)
LOG.info("Output folder: %s", out_folder)
LOG.info("Models folder: %s", model_folder)
LOG.info("Logs folder: %s", logs_folder)
LOG.info("Configuration: %s", config_path)
# seeding everything
LOG.info("Using seed: %d", config.seed)
seed_everything(config.seed)
# prepare datasets
LOG.info("Loading datasets...")
train_set, valid_set = prepare_dataset(config=config, partial_transforms=False)
LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
add_background = not train_set.has_background()
task = Task(dataset=config.dataset,
name=config.task.name,
step=config.task.step,
add_background=add_background)
train_mask, valid_mask = 0, 255
train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
# construct data loaders
train_loader = DataLoader(dataset=train_set,
batch_size=config.trainer.batch_size,
shuffle=True,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker,
drop_last=True)
valid_loader = DataLoader(dataset=valid_set,
batch_size=config.trainer.batch_size,
shuffle=False,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker)
LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
# prepare models
LOG.info("Preparing model...")
new_model = prepare_model(config=config, task=task)
new_model = new_model.to(accelerator.device)
if task.step > 0:
old_task = Task(dataset=config.dataset,
name=config.task.name,
step=task.step - 1,
add_background=add_background)
old_model = prepare_model(config=config, task=old_task)
old_model = old_model.to(accelerator.device)
else:
old_model = None
new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
LOG.info("Done preparing models")
# prepare optimizer and scheduler
optimizer = config.optimizer.instantiate(new_model.parameters())
scheduler = config.scheduler.instantiate(optimizer)
# prepare losses
weights = None
if config.class_weights:
weights = train_set.load_class_weights(Path(config.class_weights),
device=accelerator.device,
normalize=config.ce.tversky)
LOG.info("Using class weights: %s", str(weights))
segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count(), weight=weights)
distill_loss = config.kd.instantiate()
if task.step > 0 and config.ce.unbiased:
seg_loss_name = str(type(segment_loss))
kdd_loss_name = str(type(distill_loss))
if "Unbiased" not in seg_loss_name:
LOG.warn(f"Non-ubiased segmentation loss '{seg_loss_name}' for step {task.step}!")
if "Unbiased" not in kdd_loss_name:
LOG.warn(f"Non-unbiased KD loss '{kdd_loss_name}' for step {task.step}")
# prepare metrics and logger
monitored = config.trainer.monitor.name
train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
logger = TensorBoardLogger(log_folder=logs_folder,
filename_suffix=f"step-{task.step}",
icl_step=task.step,
comment=config.comment)
# logging configuration to tensorboard
LOG.debug("Logging flattened config. to TensorBoard")
logger.log_table("config", flatten_config(config.dict()))
# prepare trainer
LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
num_samples = int(config.visualize) * config.num_samples
# choose trainer class depending on task or regularization
trainer_class = Trainer
kwargs = dict()
if config.aug.apply:
inv_transforms = invariance_transforms(config.aug)
LOG.info("Invariance transforms: ")
LOG.info(str(inv_transforms))
kwargs.update(aug_criterion=AugmentationInvariance(transform=inv_transforms),
aug_lambda=config.aug.factor,
aug_lambda_icl=config.aug.factor_icl,
temperature=config.trainer.temperature,
temp_epochs=config.trainer.temp_epochs)
trainer_class = AugInvarianceTrainer
trainer = trainer_class(accelerator=accelerator,
task=task,
new_model=new_model,
old_model=old_model,
optimizer=optimizer,
scheduler=scheduler,
train_metrics=train_metrics,
val_metrics=valid_metrics,
old_classes=train_set.old_categories(),
new_classes=train_set.new_categories(),
seg_criterion=segment_loss,
kdd_criterion=distill_loss,
kde_criterion=None,
kdd_lambda=config.kd.decoder_factor,
kde_lambda=config.kd.encoder_factor,
logger=logger,
samples=num_samples,
debug=config.debug,
**kwargs)
trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
criterion=EarlyStoppingCriterion.maximum,
patience=config.trainer.patience)) \
.add_callback(Checkpoint(call_every=1,
model_folder=model_folder,
name_format=f"task{task.name}_step-{task.step}",
save_best=True)) \
.add_callback(DisplaySamples(inverse_transform=inverse_transform(),
color_palette=train_set.palette()))
# storing config and starting training
config.version = git_revision_hash()
store_config(config, path=config_path)
trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
f"(best {monitored}: {trainer.best_score:.4f})")
LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
def train_ssl(config: SSLConfiguration):
# assertions before starting
assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
if config.in_channels != 4:
LOG.warn("Forcing input channels to 4 (previous value: %d)", config.in_channels)
config.in_channels = 4
# prepare accelerator ASAP
accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
# Create the directory tree:
# outputs
# |-- dataset
# |--task_name
# |-- exp_name
# |-- models
# |-- logs
accelerator.wait_for_everyone()
log_name = f"output-{config.task.step}.log"
exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
store_config(config, path=config_path)
LOG.info("Run started")
LOG.info("Experiment ID: %s", exp_id)
LOG.info("Output folder: %s", out_folder)
LOG.info("Models folder: %s", model_folder)
LOG.info("Logs folder: %s", logs_folder)
LOG.info("Configuration: %s", config_path)
# seeding everything
LOG.info("Using seed: %d", config.seed)
seed_everything(config.seed)
# prepare datasets
LOG.info("Loading datasets...")
train_set, valid_set = prepare_dataset(config=config)
train_set = SSLDataset(train_set, transform=ssl_transforms())
LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
add_background = not train_set.has_background()
task = Task(dataset=config.dataset,
name=config.task.name,
step=config.task.step,
add_background=add_background)
train_mask, valid_mask = 0, 255
train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
train_loader = DataLoader(dataset=train_set,
batch_size=config.trainer.batch_size,
shuffle=True,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker,
drop_last=True)
valid_loader = DataLoader(dataset=valid_set,
batch_size=config.trainer.batch_size,
shuffle=False,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker)
LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
# prepare models
LOG.info("Preparing model...")
new_model, ssl_model = prepare_model_ssl(config=config, task=task)
new_model = new_model.to(accelerator.device)
ssl_model = ssl_model.to(accelerator.device)
if task.step > 0:
old_task = Task(dataset=config.dataset,
name=config.task.name,
step=task.step - 1,
add_background=add_background)
old_model = prepare_model(config=config, task=old_task)
old_model = old_model.to(accelerator.device)
else:
old_model = None
new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
LOG.info("Done preparing models")
# prepare optimizer and scheduler
parameters = chain(new_model.parameters(), ssl_model.head.parameters())
optimizer = config.optimizer.instantiate(parameters)
scheduler = config.scheduler.instantiate(optimizer)
# prepare losses, including SSL
segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count())
distill_loss = config.kd.instantiate()
pretext_loss = config.ssl_loss()
# asserts to verify their validity
if task.step > 0 and config.ce.unbiased:
seg_loss_name = str(type(segment_loss))
kdd_loss_name = str(type(distill_loss))
assert "Unbiased" in seg_loss_name, f"Wrong loss '{seg_loss_name}' for step {task.step}"
assert "Unbiased" in kdd_loss_name, f"Wrong loss '{kdd_loss_name}' for step {task.step}"
# prepare metrics and logger
monitored = config.trainer.monitor.name
train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
ssl_metrics = prepare_metrics_ssl(num_classes=config.model.pretext_classes, device=accelerator.device)
logger = TensorBoardLogger(log_folder=logs_folder,
filename_suffix=f"step-{task.step}",
icl_step=task.step,
comment=config.comment)
# logging configuration to tensorboard
LOG.debug("Logging flattened config. to TensorBoard")
logger.log_table("config", flatten_config(config.dict()))
# prepare trainer
LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
num_samples = int(config.visualize) * config.num_samples
trainer = SSLTrainer(accelerator=accelerator,
task=task,
new_model=new_model,
old_model=old_model,
ssl_model=ssl_model,
optimizer=optimizer,
scheduler=scheduler,
train_metrics=train_metrics,
val_metrics=valid_metrics,
old_classes=train_set.old_categories(),
new_classes=train_set.new_categories(),
seg_criterion=segment_loss,
ssl_criterion=pretext_loss,
kdd_criterion=distill_loss,
kde_criterion=None,
kdd_lambda=config.kd.decoder_factor,
kde_lambda=config.kd.encoder_factor,
logger=logger,
samples=num_samples,
debug=config.debug)
trainer.add_metrics(SSLStage.ssl, metrics=ssl_metrics)
trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
criterion=EarlyStoppingCriterion.maximum,
patience=config.trainer.patience)) \
.add_callback(Checkpoint(call_every=1,
model_folder=model_folder,
name_format=f"task{task.name}_step-{task.step}",
save_best=True)) \
.add_callback(DisplaySamples(inverse_transform=inverse_transform(),
color_palette=train_set.palette()))
trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
f"(best {monitored}: {trainer.best_score:.4f})")
LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
| true | true |
f71d6afb0bb38164f69476aebefe7d64806e5c99 | 484 | py | Python | src/scrapper/scrappers/webSiteScrappers/leadersScrapper.py | media-watch/mediascrapper | 43e35eb4ae8011ff83cabb2d6c95e26a673aba27 | [
"BSD-2-Clause"
] | null | null | null | src/scrapper/scrappers/webSiteScrappers/leadersScrapper.py | media-watch/mediascrapper | 43e35eb4ae8011ff83cabb2d6c95e26a673aba27 | [
"BSD-2-Clause"
] | 4 | 2021-07-14T22:25:00.000Z | 2021-07-14T22:40:00.000Z | src/scrapper/scrappers/webSiteScrappers/leadersScrapper.py | media-watch/mediascrapper | 43e35eb4ae8011ff83cabb2d6c95e26a673aba27 | [
"BSD-2-Clause"
] | null | null | null | from bs4 import BeautifulSoup
class LeadersScrapper:
def scrap(self, html):
soup = BeautifulSoup(html, features="html.parser")
title = soup.find("h1").text
date = soup.find("div",{"class":"infos"}).text
data = [ arti.text for arti in soup.find("div", {"class":"article_body"}).findChildren()]
idx = data.index("Lire aussi")
article = " ".join(data[:idx])
return {"title":title, "date":date, "article":article}
| 37.230769 | 97 | 0.592975 | from bs4 import BeautifulSoup
class LeadersScrapper:
def scrap(self, html):
soup = BeautifulSoup(html, features="html.parser")
title = soup.find("h1").text
date = soup.find("div",{"class":"infos"}).text
data = [ arti.text for arti in soup.find("div", {"class":"article_body"}).findChildren()]
idx = data.index("Lire aussi")
article = " ".join(data[:idx])
return {"title":title, "date":date, "article":article}
| true | true |
f71d6b8d8c08175c683fa96aa4f11e6b6b31103d | 705 | py | Python | contact/migrations/0002_auto_20200914_2108.py | SandyAbdullahi/francis_cms | 55b64d91fcbc1d6b4dd853aff7294dedd417e0ab | [
"MIT"
] | null | null | null | contact/migrations/0002_auto_20200914_2108.py | SandyAbdullahi/francis_cms | 55b64d91fcbc1d6b4dd853aff7294dedd417e0ab | [
"MIT"
] | null | null | null | contact/migrations/0002_auto_20200914_2108.py | SandyAbdullahi/francis_cms | 55b64d91fcbc1d6b4dd853aff7294dedd417e0ab | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-14 18:08
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('contact', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='contactpage',
name='body',
),
migrations.AddField(
model_name='contactpage',
name='headline_message',
field=wagtail.core.fields.RichTextField(blank=True),
),
migrations.AddField(
model_name='contactpage',
name='headline_title',
field=wagtail.core.fields.RichTextField(blank=True),
),
]
| 24.310345 | 64 | 0.585816 |
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('contact', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='contactpage',
name='body',
),
migrations.AddField(
model_name='contactpage',
name='headline_message',
field=wagtail.core.fields.RichTextField(blank=True),
),
migrations.AddField(
model_name='contactpage',
name='headline_title',
field=wagtail.core.fields.RichTextField(blank=True),
),
]
| true | true |
f71d6cbd63ff5c532cd6188976d551d9a5e087ea | 2,743 | py | Python | src/slider_experiment/slider_experiment/thruster_pwm_tsl.py | frank20a/collaborative-sats | 9d26d3c8f66cf43bbd514f02434851439e746797 | [
"MIT"
] | null | null | null | src/slider_experiment/slider_experiment/thruster_pwm_tsl.py | frank20a/collaborative-sats | 9d26d3c8f66cf43bbd514f02434851439e746797 | [
"MIT"
] | 6 | 2022-03-22T18:54:38.000Z | 2022-03-31T16:42:37.000Z | src/slider_experiment/slider_experiment/thruster_pwm_tsl.py | frank20a/collaborative-sats | 9d26d3c8f66cf43bbd514f02434851439e746797 | [
"MIT"
] | null | null | null | import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Vector3
from std_msgs.msg import Int16
from rclpy.qos import QoSPresetProfiles
from ament_index_python import get_package_share_directory
import numpy as np
import sys, os
from .parameters import force
from .flags import flags
def create_pwm(value, resolution):
if value < 0.0:
value = -value
if value > 1.0:
value = 1.0
return np.concatenate((np.ones(np.floor(resolution * value).astype(np.int32)), np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))))
class ThrustController(Node):
def __init__(self):
super().__init__('thrust_controller')
self.declare_parameter('verbose', 0)
self.declare_parameter('frequency', 10)
self.declare_parameter('resolution', 100)
self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value
self.frequency = self.get_parameter('frequency').get_parameter_value().integer_value
self.resolution = self.get_parameter('resolution').get_parameter_value().integer_value
sys.path.insert(1, os.path.join(get_package_share_directory('slider_experiment'), 'python_build/tsl_optimizer'))
import tsl_optimizer as optimizer
self.solver = optimizer.solver()
self.signals = [create_pwm(0, self.resolution) for i in range(8)]
self.i = 0
self.create_subscription(Vector3, 'thrust_cmd', self.callback, QoSPresetProfiles.get_from_short_key('system_default'))
self.pub = self.create_publisher(Int16, 'thruster_flags', QoSPresetProfiles.get_from_short_key('sensor_data'))
self.create_timer(1/(self.frequency * self.resolution), self.send_signals)
def callback(self, msg: Vector3):
T = self.solver.run(p = [msg.x, msg.y, msg.z]).solution
if self.verbose > 0:
self.get_logger().info(f'\n Fx = {msg.x: 2.2f}\n Fy = {msg.y: 2.2f}\ntau = {msg.z: 2.2f}')
self.get_logger().info(f'cmd: {T}')
self.signals = [create_pwm(T[i] / force, self.resolution) for i in range(8)]
def send_signals(self):
req = Int16()
tmp = 0
for i in range(8):
if self.signals[i][self.i] == 1:
tmp ^= flags[i]
try:
req.data = tmp
except AssertionError:
print(tmp)
self.i += 1
self.i %= self.resolution
self.pub.publish(req)
def main(args=None):
rclpy.init(args=args)
node = ThrustController()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | 31.895349 | 145 | 0.62778 | import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Vector3
from std_msgs.msg import Int16
from rclpy.qos import QoSPresetProfiles
from ament_index_python import get_package_share_directory
import numpy as np
import sys, os
from .parameters import force
from .flags import flags
def create_pwm(value, resolution):
if value < 0.0:
value = -value
if value > 1.0:
value = 1.0
return np.concatenate((np.ones(np.floor(resolution * value).astype(np.int32)), np.zeros(np.ceil(resolution * (1 - value)).astype(np.int32))))
class ThrustController(Node):
def __init__(self):
super().__init__('thrust_controller')
self.declare_parameter('verbose', 0)
self.declare_parameter('frequency', 10)
self.declare_parameter('resolution', 100)
self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value
self.frequency = self.get_parameter('frequency').get_parameter_value().integer_value
self.resolution = self.get_parameter('resolution').get_parameter_value().integer_value
sys.path.insert(1, os.path.join(get_package_share_directory('slider_experiment'), 'python_build/tsl_optimizer'))
import tsl_optimizer as optimizer
self.solver = optimizer.solver()
self.signals = [create_pwm(0, self.resolution) for i in range(8)]
self.i = 0
self.create_subscription(Vector3, 'thrust_cmd', self.callback, QoSPresetProfiles.get_from_short_key('system_default'))
self.pub = self.create_publisher(Int16, 'thruster_flags', QoSPresetProfiles.get_from_short_key('sensor_data'))
self.create_timer(1/(self.frequency * self.resolution), self.send_signals)
def callback(self, msg: Vector3):
T = self.solver.run(p = [msg.x, msg.y, msg.z]).solution
if self.verbose > 0:
self.get_logger().info(f'\n Fx = {msg.x: 2.2f}\n Fy = {msg.y: 2.2f}\ntau = {msg.z: 2.2f}')
self.get_logger().info(f'cmd: {T}')
self.signals = [create_pwm(T[i] / force, self.resolution) for i in range(8)]
def send_signals(self):
req = Int16()
tmp = 0
for i in range(8):
if self.signals[i][self.i] == 1:
tmp ^= flags[i]
try:
req.data = tmp
except AssertionError:
print(tmp)
self.i += 1
self.i %= self.resolution
self.pub.publish(req)
def main(args=None):
rclpy.init(args=args)
node = ThrustController()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | true | true |
f71d6cf373223c59a20c6cb59a39218776e382a0 | 2,697 | py | Python | flink-python/pyflink/table/tests/test_environment_completeness.py | sekikn/flink | 405cf8f429c6a5031c21597abe9193bedcb8e15b | [
"Apache-2.0"
] | 4 | 2020-05-28T03:36:05.000Z | 2022-02-22T13:46:05.000Z | flink-python/pyflink/table/tests/test_environment_completeness.py | sekikn/flink | 405cf8f429c6a5031c21597abe9193bedcb8e15b | [
"Apache-2.0"
] | 5 | 2021-03-30T04:48:08.000Z | 2021-12-24T08:22:11.000Z | flink-python/pyflink/table/tests/test_environment_completeness.py | sekikn/flink | 405cf8f429c6a5031c21597abe9193bedcb8e15b | [
"Apache-2.0"
] | null | null | null | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase
from pyflink.table import TableEnvironment
class EnvironmentAPICompletenessTests(PythonAPICompletenessTestCase, unittest.TestCase):
"""
Tests whether the Python :class:`TableEnvironment` is consistent with
Java `org.apache.flink.table.api.TableEnvironment`.
"""
@classmethod
def python_class(cls):
return TableEnvironment
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.TableEnvironment"
@classmethod
def excluded_methods(cls):
# getCompletionHints has been deprecated. It will be removed in the next release.
# TODO add TableEnvironment#create method with EnvironmentSettings as a parameter
return {
'getCompletionHints',
'create',
'loadModule',
'unloadModule',
'createTemporarySystemFunction',
'dropTemporarySystemFunction',
'createFunction',
'dropFunction',
'createTemporaryFunction',
'dropTemporaryFunction'}
@classmethod
def java_method_name(cls, python_method_name):
"""
Due to 'from' is python keyword, so we use 'from_path'
in Python API corresponding 'from' in Java API.
:param python_method_name:
:return:
"""
return {'from_path': 'from'}.get(python_method_name, python_method_name)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 35.96 | 89 | 0.656285 | true | true | |
f71d6e590e9719f5ddc95217fd64f92e9bd77eaa | 375 | py | Python | recover-commits.py | pendashteh/git-recover-index | 2dba70959a463879a34a072a30fba839e974d8e8 | [
"MIT"
] | 39 | 2016-03-17T18:36:08.000Z | 2021-06-18T18:15:30.000Z | recover-commits.py | pendashteh/git-recover-index | 2dba70959a463879a34a072a30fba839e974d8e8 | [
"MIT"
] | 1 | 2019-01-06T22:10:18.000Z | 2019-01-06T22:10:18.000Z | recover-commits.py | pendashteh/git-recover-index | 2dba70959a463879a34a072a30fba839e974d8e8 | [
"MIT"
] | 6 | 2016-05-06T14:00:20.000Z | 2019-09-04T16:04:26.000Z | import os
commits_file = os.environ['_git_recover_index_tmpfile']
commits_recover_path = os.environ['_git_recover_index_recover_path']
commits = [line.rstrip('\n') for line in open(commits_file)]
from subprocess import call
filename = commits_recover_path + "/file-"
i = 1
for c in commits:
f = open(filename + str(i),"wb")
call(["git", "show", c],stdout=f)
i+=1
| 31.25 | 68 | 0.717333 | import os
commits_file = os.environ['_git_recover_index_tmpfile']
commits_recover_path = os.environ['_git_recover_index_recover_path']
commits = [line.rstrip('\n') for line in open(commits_file)]
from subprocess import call
filename = commits_recover_path + "/file-"
i = 1
for c in commits:
f = open(filename + str(i),"wb")
call(["git", "show", c],stdout=f)
i+=1
| true | true |
f71d6e6997ab22122d04476adc125c2e2dca54b2 | 374 | py | Python | code/day09/demo04.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | 1 | 2019-12-31T16:44:06.000Z | 2019-12-31T16:44:06.000Z | code/day09/demo04.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | null | null | null | code/day09/demo04.py | picktsh/python | 0f758dcdf9eee3580d8f6e2241ef557b6320ef54 | [
"MIT"
] | 1 | 2022-01-13T10:32:22.000Z | 2022-01-13T10:32:22.000Z | ### 多个函数间的配合
## 变量的作用域
rent = 3000
variable_cost = 0
def cost():
global variable_cost # 使用全局的变量
utilities = int(input('请输入本月的水电费用'))
food_cost = int(input('请输入本月的食材费用'))
variable_cost = utilities + food_cost
print('本月的变动成本费用是' + str(variable_cost))
def sum_cost():
sum = rent + variable_cost
print('本月的总成本是' + str(sum))
cost()
sum_cost()
| 16.26087 | 44 | 0.657754 | = 0
def cost():
global variable_cost
utilities = int(input('请输入本月的水电费用'))
food_cost = int(input('请输入本月的食材费用'))
variable_cost = utilities + food_cost
print('本月的变动成本费用是' + str(variable_cost))
def sum_cost():
sum = rent + variable_cost
print('本月的总成本是' + str(sum))
cost()
sum_cost()
| true | true |
f71d6eddc110a6ecb2c6c457700d79c73a0ae05f | 404 | py | Python | python/kata/6-kyu/Corner Fill/solution.py | Carlososuna11/codewars-handbook | a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e | [
"MIT"
] | null | null | null | python/kata/6-kyu/Corner Fill/solution.py | Carlososuna11/codewars-handbook | a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e | [
"MIT"
] | null | null | null | python/kata/6-kyu/Corner Fill/solution.py | Carlososuna11/codewars-handbook | a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e | [
"MIT"
] | null | null | null | def corner_fill(square):
removeStarting = lambda x: [y[:-1] for y in x[1:]]
corner = lambda x: x[0]+[y[-1] for y in x[1:]]
result =[]
n = len(square)
if n == 0: return []
for i in range(n):
if i % 2 ==0:
result = result + corner(square)
else:
result = result + corner(square)[::-1]
square = removeStarting(square)
return result | 31.076923 | 55 | 0.519802 | def corner_fill(square):
removeStarting = lambda x: [y[:-1] for y in x[1:]]
corner = lambda x: x[0]+[y[-1] for y in x[1:]]
result =[]
n = len(square)
if n == 0: return []
for i in range(n):
if i % 2 ==0:
result = result + corner(square)
else:
result = result + corner(square)[::-1]
square = removeStarting(square)
return result | true | true |
f71d6f2d262c160937a5abfacbcec8080a0ea116 | 228 | py | Python | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 9 | 2019-07-16T15:28:00.000Z | 2022-02-08T00:53:39.000Z | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 2 | 2020-01-09T15:27:58.000Z | 2020-09-23T18:04:41.000Z | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 5 | 2019-08-07T16:51:38.000Z | 2021-05-06T04:01:32.000Z | import networkx
from algorithms.dfs import dfs
def tree_diameter(t: networkx.Graph):
if __debug__:
assert networkx.is_tree(t)
v, _ = dfs(t)
_, longest_path_length = dfs(t, v)
return longest_path_length
| 20.727273 | 38 | 0.697368 | import networkx
from algorithms.dfs import dfs
def tree_diameter(t: networkx.Graph):
if __debug__:
assert networkx.is_tree(t)
v, _ = dfs(t)
_, longest_path_length = dfs(t, v)
return longest_path_length
| true | true |
f71d6fc4eaee727eeda655eda3c7d6e9b60c45e6 | 4,713 | py | Python | app.py | lucaskolson/ddd | f273c61856bca27a40b9691b2a9842d8705a3503 | [
"MIT"
] | null | null | null | app.py | lucaskolson/ddd | f273c61856bca27a40b9691b2a9842d8705a3503 | [
"MIT"
] | null | null | null | app.py | lucaskolson/ddd | f273c61856bca27a40b9691b2a9842d8705a3503 | [
"MIT"
] | null | null | null | import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
from dash import callback_context
df = px.data.election()
geojson = px.data.election_geojson()
candidates = df.winner.unique()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title = "ICE Detention Data Dashboard"
fy = ['2015-10-01', '2016-10-01', '2017-10-01', '2018-10-01']
loc = ["East Coast", "West Coast", "Southwest", "Midwest", "All"]
app.layout = html.Div(
children=[
html.Div(
children=[
html.H1(
children="ICE Detention Analytics", className="header-title"
),
html.P(
children="A dashboard and data repository of"
" ICE detention trends and facilities across the US"
" between 2010 and 2020",
className="header-description",
),
],
className="header",
),
html.Div(
children=[
dcc.RadioItems(
id='candidate',
options=[{'value': x, 'label': x}
for x in candidates],
value=candidates[0],
labelStyle={'display': 'inline-block'}
),
html.Div(
children=[dcc.Graph(
id="choropleth", config={"displayModeBar": False},
),
html.Button("Download CSV", id="btn_csv"),
dcc.Download(id="download-dataframe-csv"),
html.Button("Download Image", id="btn_image"),
dcc.Download(id="download-image")],
className="card",
),
dcc.RadioItems(
id='us_loc',
options=[{'value': x, 'label': x}
for x in loc],
value=loc[0],
labelStyle={'display': 'inline-block'}
),
html.Div(
children=dcc.Graph(
id="fy_arrests", config={"displayModeBar": False},
),
className="card",
),
],
className="wrapper",
),
]
)
@app.callback(
Output("choropleth", "figure"),
[Input("candidate", "value")])
def display_choropleth(candidate):
fig = px.choropleth(
df, geojson=geojson, color=candidate,
locations="district", featureidkey="properties.district",
projection="mercator", range_color=[0, 6500])
fig.update_geos(fitbounds="locations", visible=False)
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
return fig
@app.callback(
Output("download-dataframe-csv", "data"),
Input("btn_csv", "n_clicks"),
prevent_initial_call=True,
)
def func(n_clicks):
return dcc.send_data_frame(df.to_csv, "mydf.csv")
@app.callback(
Output("download-image", "data"),
Input("btn_image", "n_clicks"),
prevent_initial_call=True,
)
def func(n_clicks):
return dcc.send_file(
"./plot_downloads/test.png"
)
@app.callback(
Output("fy_arrests", "figure"),
[Input("us_loc", "value")])
def display_arrest_fy(us_loc):
arrests_by_fy = pd.read_csv("./data/arrests_by_fy.csv")
if us_loc == "West Coast":
aor = ['LOS', 'SEA', 'SFR', 'SND']
elif us_loc == "East Coast":
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'DET', 'MIA', 'NEW', 'NOL', 'NYC', 'PHI', 'WAS', 'HQ']
elif us_loc == "Midwest":
aor = ['CHI', 'SPM']
elif us_loc == "Southwest":
aor = ['DAL', 'DEN', 'ELP', 'HOU', 'PHO', 'SLC', 'SNA']
elif us_loc == "All":
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']
else:
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']
fig = px.line(arrests_by_fy, x=fy,
y=aor,
title = "Arrests in AOR per FY",
labels=dict(x="Fiscal Year", y="Number of Arrests"))
fig.update_xaxes(title="Fiscal Year", nticks = 4)
fig.update_yaxes(title="Number of Arrests")
fig.update_layout(legend_title_text='AOR')
return fig
if __name__ == "__main__":
app.run_server(debug=True)
| 32.503448 | 187 | 0.51708 | import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
from dash import callback_context
df = px.data.election()
geojson = px.data.election_geojson()
candidates = df.winner.unique()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title = "ICE Detention Data Dashboard"
fy = ['2015-10-01', '2016-10-01', '2017-10-01', '2018-10-01']
loc = ["East Coast", "West Coast", "Southwest", "Midwest", "All"]
app.layout = html.Div(
children=[
html.Div(
children=[
html.H1(
children="ICE Detention Analytics", className="header-title"
),
html.P(
children="A dashboard and data repository of"
" ICE detention trends and facilities across the US"
" between 2010 and 2020",
className="header-description",
),
],
className="header",
),
html.Div(
children=[
dcc.RadioItems(
id='candidate',
options=[{'value': x, 'label': x}
for x in candidates],
value=candidates[0],
labelStyle={'display': 'inline-block'}
),
html.Div(
children=[dcc.Graph(
id="choropleth", config={"displayModeBar": False},
),
html.Button("Download CSV", id="btn_csv"),
dcc.Download(id="download-dataframe-csv"),
html.Button("Download Image", id="btn_image"),
dcc.Download(id="download-image")],
className="card",
),
dcc.RadioItems(
id='us_loc',
options=[{'value': x, 'label': x}
for x in loc],
value=loc[0],
labelStyle={'display': 'inline-block'}
),
html.Div(
children=dcc.Graph(
id="fy_arrests", config={"displayModeBar": False},
),
className="card",
),
],
className="wrapper",
),
]
)
@app.callback(
Output("choropleth", "figure"),
[Input("candidate", "value")])
def display_choropleth(candidate):
fig = px.choropleth(
df, geojson=geojson, color=candidate,
locations="district", featureidkey="properties.district",
projection="mercator", range_color=[0, 6500])
fig.update_geos(fitbounds="locations", visible=False)
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
return fig
@app.callback(
Output("download-dataframe-csv", "data"),
Input("btn_csv", "n_clicks"),
prevent_initial_call=True,
)
def func(n_clicks):
return dcc.send_data_frame(df.to_csv, "mydf.csv")
@app.callback(
Output("download-image", "data"),
Input("btn_image", "n_clicks"),
prevent_initial_call=True,
)
def func(n_clicks):
return dcc.send_file(
"./plot_downloads/test.png"
)
@app.callback(
Output("fy_arrests", "figure"),
[Input("us_loc", "value")])
def display_arrest_fy(us_loc):
arrests_by_fy = pd.read_csv("./data/arrests_by_fy.csv")
if us_loc == "West Coast":
aor = ['LOS', 'SEA', 'SFR', 'SND']
elif us_loc == "East Coast":
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'DET', 'MIA', 'NEW', 'NOL', 'NYC', 'PHI', 'WAS', 'HQ']
elif us_loc == "Midwest":
aor = ['CHI', 'SPM']
elif us_loc == "Southwest":
aor = ['DAL', 'DEN', 'ELP', 'HOU', 'PHO', 'SLC', 'SNA']
elif us_loc == "All":
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']
else:
aor = ['ATL', 'BAL', 'BOS', 'BUF', 'CHI', 'DAL', 'DEN', 'DET', 'ELP', 'HOU', 'HQ', 'LOS', 'MIA', 'NEW', 'NOL','NYC', 'PHI', 'PHO', 'SEA', 'SFR', 'SLC', 'SNA', 'SND', 'SPM', 'WAS']
fig = px.line(arrests_by_fy, x=fy,
y=aor,
title = "Arrests in AOR per FY",
labels=dict(x="Fiscal Year", y="Number of Arrests"))
fig.update_xaxes(title="Fiscal Year", nticks = 4)
fig.update_yaxes(title="Number of Arrests")
fig.update_layout(legend_title_text='AOR')
return fig
if __name__ == "__main__":
app.run_server(debug=True)
| true | true |
f71d6ffb1dd30874cf880bc17a48e08b5e247ed7 | 1,072 | py | Python | migrations/versions/3aa95a42561c_this_is_a_migration.py | rice0208/Flog | d56be0b814a0c6ca1fe4abd2c764552121698a94 | [
"MIT"
] | 14 | 2020-09-20T01:23:01.000Z | 2022-02-09T09:11:10.000Z | migrations/versions/3aa95a42561c_this_is_a_migration.py | rice0208/Flog | d56be0b814a0c6ca1fe4abd2c764552121698a94 | [
"MIT"
] | 15 | 2020-12-23T13:19:46.000Z | 2022-01-22T08:38:22.000Z | migrations/versions/3aa95a42561c_this_is_a_migration.py | rice0208/Flog | d56be0b814a0c6ca1fe4abd2c764552121698a94 | [
"MIT"
] | 4 | 2021-03-14T01:49:30.000Z | 2021-11-25T08:31:55.000Z | """this is a migration
Revision ID: 3aa95a42561c
Revises: 98fef64846fe
Create Date: 2021-10-04 10:49:46.832296
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "3aa95a42561c"
down_revision = "98fef64846fe"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("belong", schema=None) as batch_op:
batch_op.create_foreign_key("owner_id", "user", ["owner_id"], ["id"])
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(sa.Column("avatar_style", sa.String(length=1024)))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_column("avatar_style")
with op.batch_alter_table("belong", schema=None) as batch_op:
batch_op.drop_constraint("owner_id", type_="foreignkey")
# ### end Alembic commands ###
| 27.487179 | 78 | 0.698694 | from alembic import op
import sqlalchemy as sa
revision = "3aa95a42561c"
down_revision = "98fef64846fe"
branch_labels = None
depends_on = None
def upgrade():
r", schema=None) as batch_op:
batch_op.add_column(sa.Column("avatar_style", sa.String(length=1024)))
| true | true |
f71d703c6b01e0801154484a75bebd175b111b06 | 699 | py | Python | e3nn/__init__.py | claycurry34/e3nn | 3cfbf679d10781a01d9c83b04a2e7d79d4914c23 | [
"MIT"
] | null | null | null | e3nn/__init__.py | claycurry34/e3nn | 3cfbf679d10781a01d9c83b04a2e7d79d4914c23 | [
"MIT"
] | null | null | null | e3nn/__init__.py | claycurry34/e3nn | 3cfbf679d10781a01d9c83b04a2e7d79d4914c23 | [
"MIT"
] | null | null | null | __version__ = "0.4.4"
from typing import Dict
_OPT_DEFAULTS: Dict[str, bool] = dict(
specialized_code=True,
optimize_einsums=True,
jit_script_fx=True,
)
def set_optimization_defaults(**kwargs) -> None:
r"""Globally set the default optimization settings.
Parameters
----------
**kwargs
Keyword arguments to set the default optimization settings.
"""
for k, v in kwargs.items():
if k not in _OPT_DEFAULTS:
raise ValueError(f"Unknown optimization option: {k}")
_OPT_DEFAULTS[k] = v
def get_optimization_defaults() -> Dict[str, bool]:
r"""Get the global default optimization settings."""
return dict(_OPT_DEFAULTS)
| 23.3 | 67 | 0.663805 | __version__ = "0.4.4"
from typing import Dict
_OPT_DEFAULTS: Dict[str, bool] = dict(
specialized_code=True,
optimize_einsums=True,
jit_script_fx=True,
)
def set_optimization_defaults(**kwargs) -> None:
for k, v in kwargs.items():
if k not in _OPT_DEFAULTS:
raise ValueError(f"Unknown optimization option: {k}")
_OPT_DEFAULTS[k] = v
def get_optimization_defaults() -> Dict[str, bool]:
return dict(_OPT_DEFAULTS)
| true | true |
f71d71cddd85fa3d98cd38c34ab0b09669a18685 | 752 | py | Python | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 40/sol1.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 40/sol1.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 160 | 2021-04-26T19:04:15.000Z | 2022-03-26T20:18:37.000Z | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 40/sol1.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # -.- coding: latin-1 -.-
from __future__ import print_function
"""
Champernowne's constant
Problem 40
An irrational decimal fraction is created by concatenating the positive integers:
0.123456789101112131415161718192021...
It can be seen that the 12th digit of the fractional part is 1.
If dn represents the nth digit of the fractional part, find the value of the following expression.
d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
"""
constant = []
i = 1
while len(constant) < 1e6:
constant.append(str(i))
i += 1
constant = "".join(constant)
print(
int(constant[0])
* int(constant[9])
* int(constant[99])
* int(constant[999])
* int(constant[9999])
* int(constant[99999])
* int(constant[999999])
)
| 20.888889 | 98 | 0.6875 |
from __future__ import print_function
constant = []
i = 1
while len(constant) < 1e6:
constant.append(str(i))
i += 1
constant = "".join(constant)
print(
int(constant[0])
* int(constant[9])
* int(constant[99])
* int(constant[999])
* int(constant[9999])
* int(constant[99999])
* int(constant[999999])
)
| true | true |
f71d71fe37a1f2f44e56d006e3c304850b855a98 | 2,553 | py | Python | src/books.py | LucasAntognoni/book-bot | 625632b8115654921caa3961153412c129961164 | [
"MIT"
] | 3 | 2019-07-09T19:56:15.000Z | 2020-02-19T23:26:00.000Z | src/books.py | LucasAntognoni/book-bot | 625632b8115654921caa3961153412c129961164 | [
"MIT"
] | null | null | null | src/books.py | LucasAntognoni/book-bot | 625632b8115654921caa3961153412c129961164 | [
"MIT"
] | null | null | null | import requests
class Books(object):
BASE_URL = \
'https://www.googleapis.com/books/v1/volumes?' \
'q="{}"&projection={}&printType={}&langRestrict={}&maxResults={}'
MAX_RESULTS = 1
PRINT_TYPE = 'books'
PROJECTION = 'full'
LANGUAGE = 'en'
# SEARCH_FIELDS = {
# "title": "intitle",
# "author": "inauthor",
# "publisher": "inpublisher",
# "subject": "subject",
# "isbn": "isbn",
# }
BOOK_FIELDS = [
'title',
'authors',
'categories',
'description',
'imageLinks'
]
def __init__(self):
pass
@staticmethod
def get_attribute(data, attribute, default_value):
return data.get(attribute) or default_value
def process_search(self, data):
book = {}
for field in self.BOOK_FIELDS:
book[field] = self.get_attribute(data, field, '')
if (field == 'authors') or (field == 'categories') and book[field] != '':
if len(book[field]) > 1:
book[field] = ', '.join(book[field])
else:
book[field] = book[field][0]
if field == 'imageLinks' and book[field] != '':
book[field] = self.get_attribute(book[field], 'thumbnail', '')
return book
def search(self, field, query):
"""
Search book on Google Books API
Parameters
----------
field
Search field
query
Value to be searched
Returns
-------
JSON
Search results in JSON format if successful, None o/w
"""
if field == 'search':
url = self.BASE_URL.format(query.replace(' ', '+'),
self.PROJECTION,
self.PRINT_TYPE,
self.LANGUAGE,
self.MAX_RESULTS)
else:
return None
try:
response = requests.get(url)
if response.status_code == 200:
response_json = response.json()
if response_json['totalItems'] != 0:
return self.process_search(response_json['items'][0]['volumeInfo'])
else:
return None
except requests.exceptions.RequestException as e:
print(e)
return None
| 25.277228 | 87 | 0.461418 | import requests
class Books(object):
BASE_URL = \
'https://www.googleapis.com/books/v1/volumes?' \
'q="{}"&projection={}&printType={}&langRestrict={}&maxResults={}'
MAX_RESULTS = 1
PRINT_TYPE = 'books'
PROJECTION = 'full'
LANGUAGE = 'en'
BOOK_FIELDS = [
'title',
'authors',
'categories',
'description',
'imageLinks'
]
def __init__(self):
pass
@staticmethod
def get_attribute(data, attribute, default_value):
return data.get(attribute) or default_value
def process_search(self, data):
book = {}
for field in self.BOOK_FIELDS:
book[field] = self.get_attribute(data, field, '')
if (field == 'authors') or (field == 'categories') and book[field] != '':
if len(book[field]) > 1:
book[field] = ', '.join(book[field])
else:
book[field] = book[field][0]
if field == 'imageLinks' and book[field] != '':
book[field] = self.get_attribute(book[field], 'thumbnail', '')
return book
def search(self, field, query):
if field == 'search':
url = self.BASE_URL.format(query.replace(' ', '+'),
self.PROJECTION,
self.PRINT_TYPE,
self.LANGUAGE,
self.MAX_RESULTS)
else:
return None
try:
response = requests.get(url)
if response.status_code == 200:
response_json = response.json()
if response_json['totalItems'] != 0:
return self.process_search(response_json['items'][0]['volumeInfo'])
else:
return None
except requests.exceptions.RequestException as e:
print(e)
return None
| true | true |
f71d72c5ef13e3198b890b424d27e5e967a73e65 | 6,945 | py | Python | qiskit/visualization/timeline/plotters/matplotlib.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 1,456 | 2017-08-05T16:33:05.000Z | 2018-06-05T04:15:35.000Z | qiskit/visualization/timeline/plotters/matplotlib.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 365 | 2017-08-04T06:09:16.000Z | 2018-06-05T08:33:37.000Z | qiskit/visualization/timeline/plotters/matplotlib.py | Roshan-Thomas/qiskit-terra | 77219b5c7b7146b1545c5e5190739b36f4064b2f | [
"Apache-2.0"
] | 463 | 2017-08-05T04:10:01.000Z | 2018-06-05T06:43:21.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Matplotlib plotter API."""
from typing import Optional, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from qiskit.visualization.exceptions import VisualizationError
from qiskit.visualization.timeline import core, types, drawings
from qiskit.visualization.timeline.plotters.base_plotter import BasePlotter
from qiskit.visualization.utils import matplotlib_close_if_inline
class MplPlotter(BasePlotter):
    """Matplotlib API for pulse drawer.

    This plotter arranges bits along y axis of 2D canvas with vertical offset.
    """

    def __init__(self, canvas: core.DrawerCanvas, axis: Optional[plt.Axes] = None):
        """Create new plotter.

        Args:
            canvas: Configured drawer canvas object. Canvas object should be updated
                with `.update` method before initializing the plotter.
            axis: Matplotlib axis object. When `axis` is provided, the plotter updates
                given axis instead of creating and returning new matplotlib figure.
        """
        super().__init__(canvas=canvas)

        if axis is None:
            # Scale the figure height with the number of stacked bit timelines.
            fig_height = self.canvas.vmax - self.canvas.vmin
            fig_h = self.canvas.formatter["general.fig_unit_height"] * fig_height
            fig_w = self.canvas.formatter["general.fig_width"]

            self.figure = plt.figure(figsize=(fig_w, fig_h))
            self.ax = self.figure.add_subplot(1, 1, 1)
        else:
            self.figure = axis.figure
            self.ax = axis

        self.initialize_canvas()

    def initialize_canvas(self):
        """Format appearance of matplotlib canvas."""
        self.ax.set_facecolor(self.canvas.formatter["color.background"])

        # axis lines
        self.ax.spines["right"].set_color("none")
        self.ax.spines["left"].set_color("none")
        self.ax.spines["top"].set_color("none")

        # axis labels
        self.ax.set_yticks([])
        axis_config = self.canvas.layout["time_axis_map"](time_window=self.canvas.time_range)

        self.ax.set_xticks(list(axis_config.axis_map.keys()))
        self.ax.set_xticklabels(
            list(axis_config.axis_map.values()),
            fontsize=self.canvas.formatter["text_size.axis_label"],
        )
        self.ax.set_xlabel(
            axis_config.label, fontsize=self.canvas.formatter["text_size.axis_label"]
        )

        # boundary
        self.ax.set_xlim(*self.canvas.time_range)
        self.ax.set_ylim(self.canvas.vmin, self.canvas.vmax)

    def draw(self):
        """Output drawings stored in canvas object.

        Raises:
            VisualizationError: When an unsupported drawing object is found.
        """
        for _, data in self.canvas.collections:
            xvals = np.asarray(data.xvals, dtype=float)
            yvals = np.asarray(data.yvals, dtype=float)
            offsets = [self.canvas.assigned_coordinates[bit] for bit in data.bits]

            if isinstance(data, drawings.BoxData):
                # box data
                if data.data_type in [
                    str(types.BoxType.SCHED_GATE.value),
                    str(types.BoxType.DELAY.value),
                ]:
                    # draw a smoothly rounded rectangle
                    xs, ys1, ys2 = self._time_bucket_outline(xvals, yvals)
                    self.ax.fill_between(
                        x=xs, y1=ys1 + offsets[0], y2=ys2 + offsets[0], **data.styles
                    )
                else:
                    # draw a rectangle
                    x0, x1 = xvals
                    y0, y1 = yvals + offsets[0]

                    rect = Rectangle(xy=(x0, y0), width=x1 - x0, height=y1 - y0)
                    pc = PatchCollection([rect], **data.styles)
                    self.ax.add_collection(pc)
            elif isinstance(data, drawings.LineData):
                # line data
                self.ax.plot(xvals, yvals + offsets[0], **data.styles)
            elif isinstance(data, drawings.TextData):
                # text data
                if data.latex is not None:
                    s = rf"${data.latex}$"
                else:
                    s = data.text

                self.ax.text(x=xvals[0], y=yvals[0] + offsets[0], s=s, **data.styles)
            elif isinstance(data, drawings.GateLinkData):
                # gate link data
                self.ax.plot(xvals.repeat(len(offsets)), offsets, **data.styles)
            else:
                # Bug fix: the exception was previously constructed but never
                # raised, so unsupported drawings were silently dropped.
                raise VisualizationError(
                    "Data {name} is not supported by {plotter}"
                    "".format(name=data, plotter=self.__class__.__name__)
                )

    def _time_bucket_outline(
        self, xvals: np.ndarray, yvals: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Generate outline of time bucket. Edges are smoothly faded.

        Args:
            xvals: Left and right point coordinates.
            yvals: Bottom and top point coordinates.

        Returns:
            Coordinate vectors of time bucket fringe.
        """
        x0, x1 = xvals
        y0, y1 = yvals

        width = x1 - x0
        y_mid = 0.5 * (y0 + y1)

        # Fade-in/out width is capped so both edges fit inside the box.
        risefall = int(min(self.canvas.formatter["time_bucket.edge_dt"], max(width / 2 - 2, 0)))
        edge = np.sin(np.pi / 2 * np.arange(0, risefall) / risefall)

        xs = np.concatenate(
            [
                np.arange(x0, x0 + risefall),
                [x0 + risefall, x1 - risefall],
                np.arange(x1 - risefall + 1, x1 + 1),
            ]
        )
        l1 = (y1 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])
        l2 = (y0 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])

        return xs, l1, l2

    def save_file(self, filename: str):
        """Save image to file.

        Args:
            filename: File path to output image data.
        """
        plt.savefig(filename, bbox_inches="tight", dpi=self.canvas.formatter["general.dpi"])

    def get_image(self, interactive: bool = False) -> matplotlib.pyplot.Figure:
        """Get image data to return.

        Args:
            interactive: When set `True` show the circuit in a new window.
                This depends on the matplotlib backend being used supporting this.

        Returns:
            Matplotlib figure data.
        """
        matplotlib_close_if_inline(self.figure)
        if self.figure and interactive:
            self.figure.show()
        return self.figure
from typing import Optional, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
from qiskit.visualization.exceptions import VisualizationError
from qiskit.visualization.timeline import core, types, drawings
from qiskit.visualization.timeline.plotters.base_plotter import BasePlotter
from qiskit.visualization.utils import matplotlib_close_if_inline
class MplPlotter(BasePlotter):
    """Matplotlib API for pulse drawer.

    This plotter arranges bits along y axis of 2D canvas with vertical offset.
    """

    def __init__(self, canvas: core.DrawerCanvas, axis: Optional[plt.Axes] = None):
        """Create new plotter.

        Args:
            canvas: Configured drawer canvas object; it should be updated with
                its `.update` method before initializing the plotter.
            axis: Matplotlib axis object. When `axis` is provided, the plotter
                draws on that axis instead of creating a new figure.
        """
        super().__init__(canvas=canvas)

        if axis is None:
            # Scale the figure height with the number of stacked bit timelines.
            fig_height = self.canvas.vmax - self.canvas.vmin
            fig_h = self.canvas.formatter["general.fig_unit_height"] * fig_height
            fig_w = self.canvas.formatter["general.fig_width"]

            self.figure = plt.figure(figsize=(fig_w, fig_h))
            self.ax = self.figure.add_subplot(1, 1, 1)
        else:
            self.figure = axis.figure
            self.ax = axis

        self.initialize_canvas()

    def initialize_canvas(self):
        """Format appearance of matplotlib canvas."""
        self.ax.set_facecolor(self.canvas.formatter["color.background"])

        # Hide all spines except the bottom time axis.
        self.ax.spines["right"].set_color("none")
        self.ax.spines["left"].set_color("none")
        self.ax.spines["top"].set_color("none")

        # Time-axis ticks and label come from the layout's axis map.
        self.ax.set_yticks([])
        axis_config = self.canvas.layout["time_axis_map"](time_window=self.canvas.time_range)

        self.ax.set_xticks(list(axis_config.axis_map.keys()))
        self.ax.set_xticklabels(
            list(axis_config.axis_map.values()),
            fontsize=self.canvas.formatter["text_size.axis_label"],
        )
        self.ax.set_xlabel(
            axis_config.label, fontsize=self.canvas.formatter["text_size.axis_label"]
        )

        # Canvas boundary.
        self.ax.set_xlim(*self.canvas.time_range)
        self.ax.set_ylim(self.canvas.vmin, self.canvas.vmax)

    def draw(self):
        """Output drawings stored in canvas object.

        Raises:
            VisualizationError: When an unsupported drawing object is found.
        """
        for _, data in self.canvas.collections:
            xvals = np.asarray(data.xvals, dtype=float)
            yvals = np.asarray(data.yvals, dtype=float)
            offsets = [self.canvas.assigned_coordinates[bit] for bit in data.bits]

            if isinstance(data, drawings.BoxData):
                if data.data_type in [
                    str(types.BoxType.SCHED_GATE.value),
                    str(types.BoxType.DELAY.value),
                ]:
                    # Gate/delay boxes get smoothly faded edges.
                    xs, ys1, ys2 = self._time_bucket_outline(xvals, yvals)
                    self.ax.fill_between(
                        x=xs, y1=ys1 + offsets[0], y2=ys2 + offsets[0], **data.styles
                    )
                else:
                    # Plain rectangle for any other box type.
                    x0, x1 = xvals
                    y0, y1 = yvals + offsets[0]

                    rect = Rectangle(xy=(x0, y0), width=x1 - x0, height=y1 - y0)
                    pc = PatchCollection([rect], **data.styles)
                    self.ax.add_collection(pc)
            elif isinstance(data, drawings.LineData):
                self.ax.plot(xvals, yvals + offsets[0], **data.styles)
            elif isinstance(data, drawings.TextData):
                if data.latex is not None:
                    s = rf"${data.latex}$"
                else:
                    s = data.text

                self.ax.text(x=xvals[0], y=yvals[0] + offsets[0], s=s, **data.styles)
            elif isinstance(data, drawings.GateLinkData):
                # One vertical link across all bits involved in the gate.
                self.ax.plot(xvals.repeat(len(offsets)), offsets, **data.styles)
            else:
                # Bug fix: the exception was previously constructed but never
                # raised, so unsupported drawings were silently dropped.
                raise VisualizationError(
                    "Data {name} is not supported by {plotter}"
                    "".format(name=data, plotter=self.__class__.__name__)
                )

    def _time_bucket_outline(
        self, xvals: np.ndarray, yvals: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Generate outline of time bucket. Edges are smoothly faded.

        Args:
            xvals: Left and right point coordinates.
            yvals: Bottom and top point coordinates.

        Returns:
            Coordinate vectors of time bucket fringe.
        """
        x0, x1 = xvals
        y0, y1 = yvals

        width = x1 - x0
        y_mid = 0.5 * (y0 + y1)

        # Fade-in/out width is capped so both edges fit inside the box.
        risefall = int(min(self.canvas.formatter["time_bucket.edge_dt"], max(width / 2 - 2, 0)))
        edge = np.sin(np.pi / 2 * np.arange(0, risefall) / risefall)

        xs = np.concatenate(
            [
                np.arange(x0, x0 + risefall),
                [x0 + risefall, x1 - risefall],
                np.arange(x1 - risefall + 1, x1 + 1),
            ]
        )
        l1 = (y1 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])
        l2 = (y0 - y_mid) * np.concatenate([edge, [1, 1], edge[::-1]])

        return xs, l1, l2

    def save_file(self, filename: str):
        """Save image to file.

        Args:
            filename: File path to output image data.
        """
        plt.savefig(filename, bbox_inches="tight", dpi=self.canvas.formatter["general.dpi"])

    def get_image(self, interactive: bool = False) -> matplotlib.pyplot.Figure:
        """Get image data to return.

        Args:
            interactive: When `True`, also show the figure in a new window
                (backend permitting).

        Returns:
            Matplotlib figure data.
        """
        matplotlib_close_if_inline(self.figure)
        if self.figure and interactive:
            self.figure.show()
        return self.figure
| true | true |
f71d72cda2b0734621a761af5136ca30a617de91 | 13,762 | py | Python | tests/ut/python/dataset/test_compose.py | limberc/mindspore | e294acdffc9246cb6d77ea18ea00d08244d30c59 | [
"Apache-2.0"
] | 1 | 2021-01-25T07:39:46.000Z | 2021-01-25T07:39:46.000Z | tests/ut/python/dataset/test_compose.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/dataset/test_compose.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers
# When True, regenerate the golden (md5 reference) files instead of
# comparing against them.
GENERATE_GOLDEN = False
def test_compose():
    """
    Test C++ and Python Compose Op
    """
    ds.config.set_seed(0)

    def run_ops(values, ops):
        # Push `values` through a one-column dataset; return the mapped rows,
        # or the exception text when construction/mapping fails.
        try:
            dataset = ds.NumpySlicesDataset(values, column_names="col", shuffle=False)
            dataset = dataset.map(input_columns=["col"], operations=ops)
            return [row["col"].tolist() for row in dataset.create_dict_iterator(output_numpy=True)]
        except (TypeError, ValueError) as err:
            return str(err)

    # A single-op C++ compose (emits a warning but must still work)
    assert run_ops([[1, 0], [3, 4]], c_transforms.Compose([c_transforms.Fill(2)])) == [[2, 2], [2, 2]]

    # Column count bounces 1 -> 2 -> 1 -> 2 -> 1 through Duplicate/Concatenate
    dup_concat = c_transforms.Compose(
        [c_transforms.Duplicate(), c_transforms.Concatenate(), c_transforms.Duplicate(),
         c_transforms.Concatenate()])
    assert run_ops([[1, 0]], dup_concat) == [[1, 0] * 4]

    # Python transform followed by a C++ transform (OneHot output is float)
    mixed = c_transforms.Compose([py_transforms.OneHotOp(2), c_transforms.TypeCast(mstype.int32)])
    assert run_ops([1, 0], mixed) == [[[0, 1]], [[1, 0]]]

    # Non-callable entry inside a C++ compose
    with pytest.raises(TypeError) as error_info:
        c_transforms.Compose([1, c_transforms.TypeCast(mstype.int32)])
    assert "op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc." in str(error_info.value)

    # Empty C++ op list (raised while evaluating the argument)
    with pytest.raises(ValueError) as error_info:
        run_ops([1, 0], c_transforms.Compose([]))
    assert "op_list can not be empty." in str(error_info.value)

    # Pure Python compose ops
    assert run_ops([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2)])) == [[[0, 1]], [[1, 0]]]
    assert run_ops([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2), (lambda x: x + x)])) == \
           [[[0, 2]], [[2, 0]]]

    # Nested Python compose
    nested = py_transforms.Compose([py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)])
    assert run_ops([1, 0], nested) == [[[0, 2]], [[2, 0]]]

    # Plain lists of Python ops (no Compose wrapper)
    assert run_ops([1, 0], [py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)]) == \
           [[[0, 2]], [[2, 0]]]
    assert run_ops([1, 0], [py_transforms.OneHotOp(2), (lambda x: x + x)]) == [[[0, 2]], [[2, 0]]]

    # Non-callable entry inside a Python compose
    with pytest.raises(ValueError) as error_info:
        py_transforms.Compose([1])
    assert "transforms[0] is not callable." in str(error_info.value)

    # Empty Python op list
    with pytest.raises(ValueError) as error_info:
        run_ops([1, 0], py_transforms.Compose([]))
    assert "transforms list is empty." in str(error_info.value)

    # Invoking Compose([...])() without an image
    with pytest.raises(TypeError) as error_info:
        py_transforms.Compose([(lambda x: x + x)])()
    assert "Compose was called without an image. Fix invocation (avoid it being invoked as Compose([...])())." in str(
        error_info.value)
def test_lambdas():
    """
    Test Multi Column Python Compose Op
    """
    ds.config.set_seed(0)

    def run_pipeline(values, in_cols, out_cols, ops):
        # Map `ops` over a multi-column dataset, then flatten the requested
        # output columns row by row.
        dataset = ds.NumpySlicesDataset(values, column_names=in_cols, shuffle=False)
        dataset = dataset.map(operations=ops, input_columns=in_cols, output_columns=out_cols,
                              column_order=out_cols)
        collected = []
        for row in dataset.create_dict_iterator(output_numpy=True):
            collected.extend(row[name].tolist() for name in out_cols)
        return collected

    two_cols = ([[1]], [[3]])
    assert run_pipeline(two_cols, ["col0", "col1"], ["a"],
                        py_transforms.Compose([(lambda x, y: x)])) == [[1]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a"],
                        py_transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a", "b"],
                        py_transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \
           [[1], [2]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a", "b"],
                        [lambda x, y: (x, x + y), lambda x, y: (x, y * 2)]) == [[1], [8]]
def test_c_py_compose_transforms_module():
    """
    Test combining Python and C++ transforms
    """
    ds.config.set_seed(0)

    def run_pipeline(values, in_cols, out_cols, ops):
        # Map `ops` over a dataset and flatten the requested output columns.
        dataset = ds.NumpySlicesDataset(values, column_names=in_cols, shuffle=False)
        dataset = dataset.map(operations=ops, input_columns=in_cols, output_columns=out_cols,
                              column_order=out_cols)
        collected = []
        for row in dataset.create_dict_iterator(output_numpy=True):
            collected.extend(row[name].tolist() for name in out_cols)
        return collected

    binary = [1, 0]
    # Python OneHot followed by C++ Mask
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
           [[[False, True]],
            [[True, False]]]
    # Python op, pyfunc and C++ op interleaved
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) == \
           [[[1, 1]], [[1, 1]]]
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1),
                         (lambda x: x + x)]) == \
           [[[2, 2]], [[2, 2]]]
    # C++ op first, then pyfunc
    assert run_pipeline([[1, 3]], ["cols"], ["cols"],
                        [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) == \
           [[2, 6, -2]]
    # Two input columns collapsed into one output column
    assert run_pipeline(([[1]], [[3]]), ["col0", "col1"], ["a"],
                        [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
def test_c_py_compose_vision_module(plot=False, run_golden=True):
    """
    Test combining Python and C++ vision transforms
    """
    original_seed = config_get_set_seed(10)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    def run_case(plot, file_name, op_list):
        data_dir = "../data/dataset/testImageNetData/train/"
        transformed = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        transformed = transformed.map(operations=op_list, input_columns=["image"])
        decoded = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        decoded = decoded.map(operations=c_vision.Decode(), input_columns=["image"])
        # Consume the transformed pipeline first, then the decode-only one,
        # to keep the seeded RNG consumption order unchanged.
        transformed_images = [item["image"]
                              for item in transformed.create_dict_iterator(num_epochs=1, output_numpy=True)]
        original_images = [item["image"]
                           for item in decoded.create_dict_iterator(num_epochs=1, output_numpy=True)]
        if run_golden:
            # Compare against the expected md5 of the transformed images
            save_and_check_md5(transformed, file_name, generate_golden=GENERATE_GOLDEN)
        if plot:
            visualize_list(original_images, transformed_images)

    run_case(op_list=[c_vision.Decode(),
                      py_vision.ToPIL(),
                      py_vision.Resize((224, 224)),
                      np.array],
             plot=plot, file_name="compose_c_py_1.npz")

    run_case(op_list=[c_vision.Decode(),
                      c_vision.Resize((224, 244)),
                      py_vision.ToPIL(),
                      np.array,
                      c_vision.Resize((24, 24))],
             plot=plot, file_name="compose_c_py_2.npz")

    run_case(op_list=[py_vision.Decode(),
                      py_vision.Resize((224, 224)),
                      np.array,
                      c_vision.RandomColor()],
             plot=plot, file_name="compose_c_py_3.npz")

    # Restore the global configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_py_transforms_with_c_vision():
    """
    These examples will fail, as py_transforms.Random(Apply/Choice/Order) expect callable functions
    """
    ds.config.set_seed(0)

    def test_config(op_list):
        data_dir = "../data/dataset/testImageNetData/train/"
        data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        data = data.map(operations=op_list)
        res = []
        for i in data.create_dict_iterator(output_numpy=True):
            # Collect every column of the row. The previous version iterated
            # over an undefined name `output_cols` (copied from sibling
            # helpers), which would raise NameError if this loop were reached.
            for col_name in i:
                res.append(i[col_name].tolist())
        return res

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))
    assert "transforms[1] is not callable." in str(error_info.value)

    with pytest.raises(RuntimeError) as error_info:
        test_config([py_transforms.OneHotOp(20, 0.1)])
    assert "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()" in str(
        error_info.value)
def test_py_vision_with_c_transforms():
    """
    Test combining Python vision operations with C++ transforms operations
    """
    ds.config.set_seed(0)

    def run_pipeline(op_list):
        # Apply `op_list` to the image column and collect the results.
        data_dir = "../data/dataset/testImageNetData/train/"
        dataset = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        dataset = dataset.map(operations=op_list, input_columns=["image"])
        return [item["image"] for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True)]

    # Mask op after Python vision ops
    output_arr = run_pipeline([py_vision.Decode(),
                               py_vision.CenterCrop((2)), np.array,
                               c_transforms.Mask(c_transforms.Relational.GE, 100)])
    exp_arr = [np.array([[[True, False, False],
                          [True, False, False]],
                         [[True, False, False],
                          [True, False, False]]]),
               np.array([[[True, False, False],
                          [True, False, False]],
                         [[True, False, False],
                          [True, False, False]]])]
    for expected, actual in zip(exp_arr, output_arr):
        np.testing.assert_array_equal(expected, actual)

    # Fill op after Python vision ops
    output_arr = run_pipeline([py_vision.Decode(),
                               py_vision.CenterCrop((4)), np.array,
                               c_transforms.Fill(10)])
    exp_arr = [np.ones((4, 4, 3)) * 10] * 2
    for expected, actual in zip(exp_arr, output_arr):
        np.testing.assert_array_equal(expected, actual)

    # Concatenate only supports rank-1 tensors, so this must raise
    with pytest.raises(RuntimeError) as error_info:
        run_pipeline([py_vision.Decode(),
                      py_vision.CenterCrop((2)), np.array,
                      c_transforms.Concatenate(0)])
    assert "Only 1D tensors supported" in str(error_info.value)
def test_compose_with_custom_function():
    """
    Test Python Compose with custom function
    """

    def custom_function(x):
        # One column in, two columns out.
        return (x, x * x)

    # triple -> (x, x^2) -> stack the two columns back into a single tensor
    pipeline = [
        lambda x: x * 3,
        custom_function,
        lambda *images: np.stack(images)
    ]

    dataset = ds.NumpySlicesDataset([[1, 2]], column_names=["col0"], shuffle=False)
    dataset = dataset.map(input_columns=["col0"], operations=pipeline)

    result = [row["col0"].tolist() for row in dataset.create_dict_iterator(output_numpy=True)]
    assert result == [[[3, 6], [9, 36]]]
if __name__ == "__main__":
    # Allow the whole suite to be executed directly (outside of pytest).
    test_compose()
    test_lambdas()
    test_c_py_compose_transforms_module()
    test_c_py_compose_vision_module(plot=True)
    test_py_transforms_with_c_vision()
    test_py_vision_with_c_transforms()
    test_compose_with_custom_function()
| 40.476471 | 120 | 0.60994 |
import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers
# When True, regenerate the golden (md5 reference) files instead of
# comparing against them.
GENERATE_GOLDEN = False
def test_compose():
    """Exercise C++ and Python Compose ops, including error paths."""
    ds.config.set_seed(0)

    def run_ops(values, ops):
        # Push `values` through a one-column dataset; return the mapped rows,
        # or the exception text when construction/mapping fails.
        try:
            dataset = ds.NumpySlicesDataset(values, column_names="col", shuffle=False)
            dataset = dataset.map(input_columns=["col"], operations=ops)
            return [row["col"].tolist() for row in dataset.create_dict_iterator(output_numpy=True)]
        except (TypeError, ValueError) as err:
            return str(err)

    # A single-op C++ compose (emits a warning but must still work)
    assert run_ops([[1, 0], [3, 4]], c_transforms.Compose([c_transforms.Fill(2)])) == [[2, 2], [2, 2]]

    # Column count bounces 1 -> 2 -> 1 -> 2 -> 1 through Duplicate/Concatenate
    dup_concat = c_transforms.Compose(
        [c_transforms.Duplicate(), c_transforms.Concatenate(), c_transforms.Duplicate(),
         c_transforms.Concatenate()])
    assert run_ops([[1, 0]], dup_concat) == [[1, 0] * 4]

    # Python transform followed by a C++ transform (OneHot output is float)
    mixed = c_transforms.Compose([py_transforms.OneHotOp(2), c_transforms.TypeCast(mstype.int32)])
    assert run_ops([1, 0], mixed) == [[[0, 1]], [[1, 0]]]

    # Non-callable entry inside a C++ compose
    with pytest.raises(TypeError) as error_info:
        c_transforms.Compose([1, c_transforms.TypeCast(mstype.int32)])
    assert "op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc." in str(error_info.value)

    # Empty C++ op list (raised while evaluating the argument)
    with pytest.raises(ValueError) as error_info:
        run_ops([1, 0], c_transforms.Compose([]))
    assert "op_list can not be empty." in str(error_info.value)

    # Pure Python compose ops
    assert run_ops([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2)])) == [[[0, 1]], [[1, 0]]]
    assert run_ops([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2), (lambda x: x + x)])) == \
           [[[0, 2]], [[2, 0]]]

    # Nested Python compose
    nested = py_transforms.Compose([py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)])
    assert run_ops([1, 0], nested) == [[[0, 2]], [[2, 0]]]

    # Plain lists of Python ops (no Compose wrapper)
    assert run_ops([1, 0], [py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)]) == \
           [[[0, 2]], [[2, 0]]]
    assert run_ops([1, 0], [py_transforms.OneHotOp(2), (lambda x: x + x)]) == [[[0, 2]], [[2, 0]]]

    # Non-callable entry inside a Python compose
    with pytest.raises(ValueError) as error_info:
        py_transforms.Compose([1])
    assert "transforms[0] is not callable." in str(error_info.value)

    # Empty Python op list
    with pytest.raises(ValueError) as error_info:
        run_ops([1, 0], py_transforms.Compose([]))
    assert "transforms list is empty." in str(error_info.value)

    # Invoking Compose([...])() without an image
    with pytest.raises(TypeError) as error_info:
        py_transforms.Compose([(lambda x: x + x)])()
    assert "Compose was called without an image. Fix invocation (avoid it being invoked as Compose([...])())." in str(
        error_info.value)
def test_lambdas():
    """Exercise multi-column Python Compose ops and lambda lists."""
    ds.config.set_seed(0)

    def run_pipeline(values, in_cols, out_cols, ops):
        # Map `ops` over a multi-column dataset, then flatten the requested
        # output columns row by row.
        dataset = ds.NumpySlicesDataset(values, column_names=in_cols, shuffle=False)
        dataset = dataset.map(operations=ops, input_columns=in_cols, output_columns=out_cols,
                              column_order=out_cols)
        collected = []
        for row in dataset.create_dict_iterator(output_numpy=True):
            collected.extend(row[name].tolist() for name in out_cols)
        return collected

    two_cols = ([[1]], [[3]])
    assert run_pipeline(two_cols, ["col0", "col1"], ["a"],
                        py_transforms.Compose([(lambda x, y: x)])) == [[1]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a"],
                        py_transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a", "b"],
                        py_transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \
           [[1], [2]]
    assert run_pipeline(two_cols, ["col0", "col1"], ["a", "b"],
                        [lambda x, y: (x, x + y), lambda x, y: (x, y * 2)]) == [[1], [8]]
def test_c_py_compose_transforms_module():
    """Exercise mixed lists of Python and C++ transforms."""
    ds.config.set_seed(0)

    def run_pipeline(values, in_cols, out_cols, ops):
        # Map `ops` over a dataset and flatten the requested output columns.
        dataset = ds.NumpySlicesDataset(values, column_names=in_cols, shuffle=False)
        dataset = dataset.map(operations=ops, input_columns=in_cols, output_columns=out_cols,
                              column_order=out_cols)
        collected = []
        for row in dataset.create_dict_iterator(output_numpy=True):
            collected.extend(row[name].tolist() for name in out_cols)
        return collected

    binary = [1, 0]
    # Python OneHot followed by C++ Mask
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
           [[[False, True]],
            [[True, False]]]
    # Python op, pyfunc and C++ op interleaved
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) == \
           [[[1, 1]], [[1, 1]]]
    assert run_pipeline(binary, ["cols"], ["cols"],
                        [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1),
                         (lambda x: x + x)]) == \
           [[[2, 2]], [[2, 2]]]
    # C++ op first, then pyfunc
    assert run_pipeline([[1, 3]], ["cols"], ["cols"],
                        [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) == \
           [[2, 6, -2]]
    # Two input columns collapsed into one output column
    assert run_pipeline(([[1]], [[3]]), ["col0", "col1"], ["a"],
                        [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
def test_c_py_compose_vision_module(plot=False, run_golden=True):
    """Exercise mixed Python and C++ vision transform pipelines."""
    original_seed = config_get_set_seed(10)
    original_num_parallel_workers = config_get_set_num_parallel_workers(1)

    def run_case(plot, file_name, op_list):
        data_dir = "../data/dataset/testImageNetData/train/"
        transformed = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        transformed = transformed.map(operations=op_list, input_columns=["image"])
        decoded = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        decoded = decoded.map(operations=c_vision.Decode(), input_columns=["image"])
        # Consume the transformed pipeline first, then the decode-only one,
        # to keep the seeded RNG consumption order unchanged.
        transformed_images = [item["image"]
                              for item in transformed.create_dict_iterator(num_epochs=1, output_numpy=True)]
        original_images = [item["image"]
                           for item in decoded.create_dict_iterator(num_epochs=1, output_numpy=True)]
        if run_golden:
            # Compare against the expected md5 of the transformed images
            save_and_check_md5(transformed, file_name, generate_golden=GENERATE_GOLDEN)
        if plot:
            visualize_list(original_images, transformed_images)

    run_case(op_list=[c_vision.Decode(),
                      py_vision.ToPIL(),
                      py_vision.Resize((224, 224)),
                      np.array],
             plot=plot, file_name="compose_c_py_1.npz")

    run_case(op_list=[c_vision.Decode(),
                      c_vision.Resize((224, 244)),
                      py_vision.ToPIL(),
                      np.array,
                      c_vision.Resize((24, 24))],
             plot=plot, file_name="compose_c_py_2.npz")

    run_case(op_list=[py_vision.Decode(),
                      py_vision.Resize((224, 224)),
                      np.array,
                      c_vision.RandomColor()],
             plot=plot, file_name="compose_c_py_3.npz")

    # Restore the global configuration
    ds.config.set_seed(original_seed)
    ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_py_transforms_with_c_vision():
    """Verify that py_transforms.Random(Apply/Choice/Order) reject
    non-callable (C++ vision) entries with the expected errors."""
    ds.config.set_seed(0)

    def test_config(op_list):
        data_dir = "../data/dataset/testImageNetData/train/"
        data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        data = data.map(operations=op_list)
        res = []
        for i in data.create_dict_iterator(output_numpy=True):
            # Collect every column of the row. The previous version iterated
            # over an undefined name `output_cols` (copied from sibling
            # helpers), which would raise NameError if this loop were reached.
            for col_name in i:
                res.append(i[col_name].tolist())
        return res

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))
    assert "transforms[0] is not callable." in str(error_info.value)

    with pytest.raises(ValueError) as error_info:
        test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))
    assert "transforms[1] is not callable." in str(error_info.value)

    with pytest.raises(RuntimeError) as error_info:
        test_config([py_transforms.OneHotOp(20, 0.1)])
    assert "The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()" in str(
        error_info.value)
def test_py_vision_with_c_transforms():
    """Exercise Python vision ops followed by C++ data transforms."""
    ds.config.set_seed(0)

    def run_pipeline(op_list):
        # Apply `op_list` to the image column and collect the results.
        data_dir = "../data/dataset/testImageNetData/train/"
        dataset = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
        dataset = dataset.map(operations=op_list, input_columns=["image"])
        return [item["image"] for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True)]

    # Mask op after Python vision ops
    output_arr = run_pipeline([py_vision.Decode(),
                               py_vision.CenterCrop((2)), np.array,
                               c_transforms.Mask(c_transforms.Relational.GE, 100)])
    exp_arr = [np.array([[[True, False, False],
                          [True, False, False]],
                         [[True, False, False],
                          [True, False, False]]]),
               np.array([[[True, False, False],
                          [True, False, False]],
                         [[True, False, False],
                          [True, False, False]]])]
    for expected, actual in zip(exp_arr, output_arr):
        np.testing.assert_array_equal(expected, actual)

    # Fill op after Python vision ops
    output_arr = run_pipeline([py_vision.Decode(),
                               py_vision.CenterCrop((4)), np.array,
                               c_transforms.Fill(10)])
    exp_arr = [np.ones((4, 4, 3)) * 10] * 2
    for expected, actual in zip(exp_arr, output_arr):
        np.testing.assert_array_equal(expected, actual)

    # Concatenate only supports rank-1 tensors, so this must raise
    with pytest.raises(RuntimeError) as error_info:
        run_pipeline([py_vision.Decode(),
                      py_vision.CenterCrop((2)), np.array,
                      c_transforms.Concatenate(0)])
    assert "Only 1D tensors supported" in str(error_info.value)
def test_compose_with_custom_function():
    """Exercise a Python op list containing a user-defined function."""

    def custom_function(x):
        # One column in, two columns out.
        return (x, x * x)

    # triple -> (x, x^2) -> stack the two columns back into a single tensor
    pipeline = [
        lambda x: x * 3,
        custom_function,
        lambda *images: np.stack(images)
    ]

    dataset = ds.NumpySlicesDataset([[1, 2]], column_names=["col0"], shuffle=False)
    dataset = dataset.map(input_columns=["col0"], operations=pipeline)

    result = [row["col0"].tolist() for row in dataset.create_dict_iterator(output_numpy=True)]
    assert result == [[[3, 6], [9, 36]]]
if __name__ == "__main__":
    # Allow the whole suite to be executed directly (outside of pytest).
    test_compose()
    test_lambdas()
    test_c_py_compose_transforms_module()
    test_c_py_compose_vision_module(plot=True)
    test_py_transforms_with_c_vision()
    test_py_vision_with_c_transforms()
    test_compose_with_custom_function()
| true | true |
f71d732ef7336c113e6a500672479f9fa4c6e935 | 4,185 | py | Python | packnet_sfm/models/SemiSupModel_fisheye.py | vbelissen/packnet-sfm | dfba692596b08ccff17abb9423c1958cecc75b0f | [
"MIT"
] | null | null | null | packnet_sfm/models/SemiSupModel_fisheye.py | vbelissen/packnet-sfm | dfba692596b08ccff17abb9423c1958cecc75b0f | [
"MIT"
] | null | null | null | packnet_sfm/models/SemiSupModel_fisheye.py | vbelissen/packnet-sfm | dfba692596b08ccff17abb9423c1958cecc75b0f | [
"MIT"
] | null | null | null | # Copyright 2020 Toyota Research Institute. All rights reserved.
import torch
from packnet_sfm.models.SelfSupModel_fisheye import SfmModel, SelfSupModel_fisheye
from packnet_sfm.losses.supervised_loss_valeo import SupervisedLoss
from packnet_sfm.models.model_utils import merge_outputs
from packnet_sfm.utils.depth import depth2inv
class SemiSupModel_fisheye(SelfSupModel_fisheye):
    """
    Semi-supervised fisheye SfM model: inherits the depth and pose networks
    plus the self-supervised (photometric) loss from SelfSupModel_fisheye and
    mixes in a supervised loss on ground-truth depth.

    Parameters
    ----------
    supervised_loss_weight : float
        Weight of the supervised loss; the self-supervised loss receives the
        complementary weight (1 - supervised_loss_weight). Must be in (0, 1].
    kwargs : dict
        Extra parameters forwarded to the parent model and the supervised loss
    """
    def __init__(self, supervised_loss_weight=0.9, **kwargs):
        super().__init__(**kwargs)
        assert 0. < supervised_loss_weight <= 1., "Model requires (0, 1] supervision"
        self.supervised_loss_weight = supervised_loss_weight
        self._supervised_loss = SupervisedLoss(**kwargs)
        # A pose network is needed only while some self-supervision remains;
        # ground-truth depth is needed only while some supervision remains.
        self._network_requirements['pose_net'] = self.supervised_loss_weight < 1
        self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0

    @property
    def logs(self):
        """Return parent logs merged with the supervised-loss logs."""
        return {
            **super().logs,
            **self._supervised_loss.logs
        }

    def supervised_loss(self, inv_depths, gt_inv_depths,
                        path_to_ego_mask,
                        return_logs=False, progress=0.0):
        """
        Calculate the supervised loss.

        Parameters
        ----------
        inv_depths : torch.Tensor [B,1,H,W]
            Predicted inverse depth maps from the original image
        gt_inv_depths : torch.Tensor [B,1,H,W]
            Ground-truth inverse depth maps from the original image
        path_to_ego_mask :
            Paths to the ego masks applied by the loss
        return_logs : bool
            True if logs are stored
        progress :
            Training progress percentage

        Returns
        -------
        output : dict
            Dictionary containing a "loss" scalar and a "metrics" dictionary
        """
        return self._supervised_loss(
            inv_depths, gt_inv_depths,
            path_to_ego_mask,
            return_logs=return_logs, progress=progress)

    def forward(self, batch, return_logs=False, progress=0.0):
        """
        Process a batch.

        Parameters
        ----------
        batch : dict
            Input batch
        return_logs : bool
            True if logs are stored
        progress :
            Training progress percentage

        Returns
        -------
        output : dict
            Dictionary containing a "loss" scalar and different metrics and
            predictions for logging and downstream usage.
        """
        # Validation / inference: depth prediction only, no losses.
        if not self.training:
            return SfmModel.forward(self, batch)

        if self.supervised_loss_weight == 1.:
            # Fully supervised: skip the photometric loss entirely.
            outputs_self = SfmModel.forward(self, batch)
            total_loss = torch.tensor([0.]).type_as(batch['rgb'])
        else:
            # Otherwise compute and weight the self-supervised loss.
            outputs_self = SelfSupModel_fisheye.forward(self, batch)
            total_loss = (1.0 - self.supervised_loss_weight) * outputs_self['loss']

        # Add the weighted supervised term on ground-truth inverse depth.
        outputs_sup = self.supervised_loss(
            outputs_self['inv_depths'], depth2inv(batch['depth']),
            batch['path_to_ego_mask'],
            return_logs=return_logs, progress=progress)
        total_loss = total_loss + self.supervised_loss_weight * outputs_sup['loss']

        return {
            'loss': total_loss,
            **merge_outputs(outputs_self, outputs_sup),
        }
| 36.710526 | 87 | 0.615054 |
import torch
from packnet_sfm.models.SelfSupModel_fisheye import SfmModel, SelfSupModel_fisheye
from packnet_sfm.losses.supervised_loss_valeo import SupervisedLoss
from packnet_sfm.models.model_utils import merge_outputs
from packnet_sfm.utils.depth import depth2inv
class SemiSupModel_fisheye(SelfSupModel_fisheye):
    """Semi-supervised fisheye SfM model.

    Combines the inherited self-supervised loss with a supervised depth loss,
    weighted by ``supervised_loss_weight`` in (0, 1]; the self-supervised term
    receives the complementary weight.
    """
    def __init__(self, supervised_loss_weight=0.9, **kwargs):
        # Initialize the self-supervised parent model first.
        super().__init__(**kwargs)
        assert 0. < supervised_loss_weight <= 1., "Model requires (0, 1] supervision"
        self.supervised_loss_weight = supervised_loss_weight
        self._supervised_loss = SupervisedLoss(**kwargs)
        # The pose network is only needed while some self-supervision remains.
        self._network_requirements['pose_net'] = self.supervised_loss_weight < 1
        # Ground-truth depth is needed whenever supervision is active.
        self._train_requirements['gt_depth'] = self.supervised_loss_weight > 0
    @property
    def logs(self):
        # Merge parent-model logs with the supervised-loss logs.
        return {
            **super().logs,
            **self._supervised_loss.logs
        }
    def supervised_loss(self, inv_depths, gt_inv_depths,
                        path_to_ego_mask,
                        return_logs=False, progress=0.0):
        """Compute the supervised loss between predicted and GT inverse depths."""
        return self._supervised_loss(
            inv_depths, gt_inv_depths,
            path_to_ego_mask,
            return_logs=return_logs, progress=progress)
    def forward(self, batch, return_logs=False, progress=0.0):
        """Process a batch; in training mode returns a dict with a weighted
        'loss' plus the merged self-supervised/supervised outputs."""
        if not self.training:
            # Evaluation: delegate to the base forward; no loss computed here.
            return SfmModel.forward(self, batch)
        else:
            if self.supervised_loss_weight == 1.:
                # Fully supervised: skip the self-supervised loss entirely.
                self_sup_output = SfmModel.forward(self, batch)
                loss = torch.tensor([0.]).type_as(batch['rgb'])
            else:
                # Weighted self-supervised contribution.
                self_sup_output = SelfSupModel_fisheye.forward(self, batch)
                loss = (1.0 - self.supervised_loss_weight) * self_sup_output['loss']
            # Weighted supervised contribution against inverted GT depth.
            sup_output = self.supervised_loss(
                self_sup_output['inv_depths'], depth2inv(batch['depth']),
                batch['path_to_ego_mask'],
                return_logs=return_logs, progress=progress)
            loss += self.supervised_loss_weight * sup_output['loss']
            return {
                'loss': loss,
                **merge_outputs(self_sup_output, sup_output),
            }
| true | true |
f71d73d71ba339d6c7641179ef7f69755e76319e | 616 | py | Python | cythonTest/onlineSample/combination/jit_combination.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | cythonTest/onlineSample/combination/jit_combination.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | cythonTest/onlineSample/combination/jit_combination.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | import time
from numba import jit
import numpy as np
@jit()
def jit_sum_conbination(N):
    """Sum x + y over every pair (x, y) with x, y in range(N), numba-jitted.

    Mathematically equal to N**2 * (N - 1). The name keeps the original
    "conbination" typo so existing callers keep working.
    """
    # list(range(N)) is the idiomatic (and faster) spelling of
    # [i for i in range(N)].
    xs = list(range(N))
    ys = list(range(N))
    total = 0
    for x in xs:
        for y in ys:
            total += x + y
    return total
def py_sum_conbination(N):
    """Pure-Python/NumPy counterpart of the jitted pair-sum benchmark.

    Accumulates a + b for every pair (a, b) drawn from two arange(N) arrays;
    equals N**2 * (N - 1).
    """
    first = np.arange(N)
    second = np.arange(N)
    total = 0
    for a in first:
        for b in second:
            total += a + b
    return total
def main():
    """Benchmark jit_sum_conbination on N=10000 and print result + elapsed time."""
    size = 10000
    t0 = time.time()
    result = jit_sum_conbination(size)
    t1 = time.time()
    print(result)
    print('elapsed time=', t1 - t0)
if __name__ == '__main__':
    main()
| 18.117647 | 37 | 0.553571 | import time
from numba import jit
import numpy as np
@jit()
def jit_sum_conbination(N):
    # Numba-jitted benchmark: sum x+y over all N*N pairs drawn from range(N)
    # (equals N**2 * (N - 1)). Name keeps the original "conbination" typo.
    xs = [i for i in range(N)]
    ys = [i for i in range(N)]
    total = 0
    for x in xs:
        for y in ys:
            total += x+y
    return total
def py_sum_conbination(N):
    # Pure-Python/NumPy baseline of the same pair-sum, kept for timing
    # comparison against the jitted version.
    xs = np.arange(N)
    ys = np.arange(N)
    total = 0
    for x in xs:
        for y in ys:
            total += x+y
    return total
def main():
    # Time the jitted pair-sum at N=10000; print the sum and wall-clock time.
    N = 10000
    start = time.time()
    total = jit_sum_conbination(N)
    end = time.time()
    print(total)
    print('elapsed time=', end-start)
if __name__ == '__main__':
    main()
| true | true |
f71d73df5330b5ef66e0e587f04a759ffd88b242 | 4,409 | py | Python | test/functional/p2p_zpos_fakestake_accepted.py | salo50/coin | 2c7cc3a4b4cb8931e14845a60eb6b051eae18a72 | [
"MIT"
] | null | null | null | test/functional/p2p_zpos_fakestake_accepted.py | salo50/coin | 2c7cc3a4b4cb8931e14845a60eb6b051eae18a72 | [
"MIT"
] | null | null | null | test/functional/p2p_zpos_fakestake_accepted.py | salo50/coin | 2c7cc3a4b4cb8931e14845a60eb6b051eae18a72 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The redspace Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted.
'''
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import redspace_FakeStakeTest
from time import sleep
class zPoSFakeStakeAccepted(redspace_FakeStakeTest):
    def set_test_params(self):
        '''Configure a clean single-node regtest chain with staking,
        net debugging and zrsc staking enabled.
        '''
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-staking=1', '-debug=net', '-zrscstake']] * self.num_nodes
    def run_test(self):
        self.description = "Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted."
        self.init_test()
        DENOM_TO_USE = 1000  # zerocoin denomination minted and later staked
        INITAL_MINED_BLOCKS = 321  # enough blocks to reach zPoS activation
        MORE_MINED_BLOCKS = 301
        FORK_DEPTH = 75  # random-height range for the forked blocks (see test_spam)
        self.NUM_BLOCKS = 2
        # 1) Starting mining blocks
        self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)
        sleep(2)
        # 2) Collect the possible prevouts and mint zerocoins with those
        self.log.info("Collecting all unspent coins which we generated from mining...")
        balance = self.node.getbalance("*", 100)
        self.log.info("Minting zerocoins...")
        initial_mints = 0
        while balance > DENOM_TO_USE:
            try:
                self.node.mintzerocoin(DENOM_TO_USE)
            except JSONRPCException:
                # Best-effort: stop minting once the node refuses.
                break
            sleep(1)
            initial_mints += 1
            self.node.generate(1)
            sleep(1)
            if initial_mints % 5 == 0:
                self.log.info("Minted %d coins" % initial_mints)
            if initial_mints >= 20:
                # Cap the number of mints to keep the test fast.
                break
            balance = self.node.getbalance("*", 100)
        self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
        sleep(2)
        # 3) Mine more blocks so the minted zerocoins mature.
        self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)
        mints = self.node.listmintedzerocoins(True, True)
        sleep(1)
        mints_hashes = [x["serial hash"] for x in mints]
        # These mints are not all ready to spend yet -- only a few of them.
        self.log.info("Got %d confirmed mints" % len(mints_hashes))
        # 4) Start mining again so that spends get confirmed in a block.
        self.log.info("Mining 200 more blocks...")
        self.node.generate(200)
        sleep(2)
        # 5) Spend each mint on the main chain.
        self.log.info("Spending mints in block %d..." % self.node.getblockcount())
        spends = 0
        for mint in mints_hashes:
            # create a single element list to pass to RPC spendzerocoinmints
            mint_arg = []
            mint_arg.append(mint)
            try:
                self.node.spendzerocoinmints(mint_arg)
                sleep(1)
                spends += 1
            except JSONRPCException as e:
                # Some mints may not be spendable; log and move on.
                self.log.warning(str(e))
                continue
        sleep(1)
        self.log.info("Successfully spent %d mints" % spends)
        self.log.info("Mining 6 more blocks...")
        self.node.generate(6)
        sleep(2)
        # 6) Collect some prevouts for random txes
        self.log.info("Collecting inputs for txes...")
        utxo_list = self.node.listunspent()
        sleep(1)
        # 7) Create valid forked zPoS blocks (staking the already-spent
        #    zerocoins at a lower height) and expect them to be ACCEPTED.
        self.log.info("Creating stake zPoS blocks...")
        err_msgs = self.test_spam("Fork", mints, spending_utxo_list=utxo_list, fZPoS=True, fRandomHeight=True, randomRange=FORK_DEPTH, randomRange2=50, fMustPass=True)
        if not len(err_msgs) == 0:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")
        self.log.info("%s PASSED" % self.__class__.__name__)
# Functional-test entry point: instantiate and run via the framework's main().
if __name__ == '__main__':
    zPoSFakeStakeAccepted().main()
| 37.364407 | 189 | 0.621456 |
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import redspace_FakeStakeTest
from time import sleep
class zPoSFakeStakeAccepted(redspace_FakeStakeTest):
    """Fake-stake check: zPoS forked blocks staking a zerocoin that was spent
    on mainchain at a higher block must still be accepted (same as Test_02)."""
    def set_test_params(self):
        # Clean single-node regtest chain with staking and zrsc staking on.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-staking=1', '-debug=net', '-zrscstake']] * self.num_nodes
    def run_test(self):
        self.description = "Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted."
        self.init_test()
        DENOM_TO_USE = 1000  # zerocoin denomination minted and later staked
        INITAL_MINED_BLOCKS = 321  # blocks mined to reach zPoS activation
        MORE_MINED_BLOCKS = 301
        FORK_DEPTH = 75  # random-height range passed to test_spam below
        self.NUM_BLOCKS = 2
        # 1) Mine until zPoS is active.
        self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)
        sleep(2)
        # 2) Mint zerocoins from the mined balance (capped at 20 mints).
        self.log.info("Collecting all unspent coins which we generated from mining...")
        balance = self.node.getbalance("*", 100)
        self.log.info("Minting zerocoins...")
        initial_mints = 0
        while balance > DENOM_TO_USE:
            try:
                self.node.mintzerocoin(DENOM_TO_USE)
            except JSONRPCException:
                # Stop minting once the node refuses.
                break
            sleep(1)
            initial_mints += 1
            self.node.generate(1)
            sleep(1)
            if initial_mints % 5 == 0:
                self.log.info("Minted %d coins" % initial_mints)
            if initial_mints >= 20:
                break
            balance = self.node.getbalance("*", 100)
        self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
        sleep(2)
        # 3) Mine more blocks so the mints mature, then list them.
        self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)
        mints = self.node.listmintedzerocoins(True, True)
        sleep(1)
        mints_hashes = [x["serial hash"] for x in mints]
        self.log.info("Got %d confirmed mints" % len(mints_hashes))
        # 4) Mine again so the upcoming spends get confirmed in blocks.
        self.log.info("Mining 200 more blocks...")
        self.node.generate(200)
        sleep(2)
        # 5) Spend each mint on the main chain (single-element list per RPC call).
        self.log.info("Spending mints in block %d..." % self.node.getblockcount())
        spends = 0
        for mint in mints_hashes:
            mint_arg = []
            mint_arg.append(mint)
            try:
                self.node.spendzerocoinmints(mint_arg)
                sleep(1)
                spends += 1
            except JSONRPCException as e:
                # Some mints may not be spendable yet; log and continue.
                self.log.warning(str(e))
                continue
        sleep(1)
        self.log.info("Successfully spent %d mints" % spends)
        self.log.info("Mining 6 more blocks...")
        self.node.generate(6)
        sleep(2)
        # 6) Collect prevouts for the random transactions used while spamming.
        self.log.info("Collecting inputs for txes...")
        utxo_list = self.node.listunspent()
        sleep(1)
        # 7) Create forked zPoS blocks staking the spent zerocoins and
        #    require the node to ACCEPT them (fMustPass=True).
        self.log.info("Creating stake zPoS blocks...")
        err_msgs = self.test_spam("Fork", mints, spending_utxo_list=utxo_list, fZPoS=True, fRandomHeight=True, randomRange=FORK_DEPTH, randomRange2=50, fMustPass=True)
        if not len(err_msgs) == 0:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")
        self.log.info("%s PASSED" % self.__class__.__name__)
# Functional-test entry point: instantiate and run via the framework's main().
if __name__ == '__main__':
    zPoSFakeStakeAccepted().main()
| true | true |
f71d73fee6e604915153c8e6e7d293a6a713e8a7 | 11,201 | py | Python | docs/source/conf.py | colindonfack/statsmodels | a251e4cd4b689781916e1001d7e8ca2c0395ef1f | [
"BSD-3-Clause"
] | 15 | 2015-03-03T09:47:42.000Z | 2022-01-05T18:28:31.000Z | docs/source/conf.py | colindonfack/statsmodels | a251e4cd4b689781916e1001d7e8ca2c0395ef1f | [
"BSD-3-Clause"
] | 7 | 2015-11-20T08:33:04.000Z | 2020-07-24T19:34:39.000Z | docs/source/conf.py | colindonfack/statsmodels | a251e4cd4b689781916e1001d7e8ca2c0395ef1f | [
"BSD-3-Clause"
] | 14 | 2015-01-06T22:08:34.000Z | 2021-01-01T16:33:23.000Z | # -*- coding: utf-8 -*-
#
# statsmodels documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 22 11:17:58 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax', # One of mathjax or imgmath
# 'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'github', # for GitHub links,
# 'numpydoc', # numpydoc or napoleon, but not both
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'statsmodels'
copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels import __version__
release = __version__
# The full version, including dev tag.
version = __version__
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/statsmodels_hybi_banner.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/statsmodels_hybi_favico.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'statsmodelsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# imgmath options
imgmath_image_format = 'png'
imgmath_latex_preamble = r'\usepackage[active]{preview}'
imgmath_use_preview = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/3/', None),
'pydagogue': ('https://matthew-brett.github.io/pydagogue/', None),
'patsy': ('https://patsy.readthedocs.io/en/latest/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
# ghissue config
github_project_url = "https://github.com/statsmodels/statsmodels"
# for the examples landing page
import json
# Load the examples-gallery context for the landing page template. Use a
# context manager so the file handle is closed deterministically (the previous
# bare open() passed to json.load leaked it until garbage collection).
with open('examples/landing.json') as landing_file:
    example_context = json.load(landing_file)
html_context = {'examples': example_context}
# --------------- DOCTEST -------------------
# Setup code prepended by sphinx.ext.doctest to every doctest run, so example
# snippets can assume these aliases are already in scope.
doctest_global_setup = """
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import pandas as pd
"""
| 33.942424 | 103 | 0.70949 |
import sys, os
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'github',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'statsmodels'
copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
autosummary_generate = True
autoclass_content = 'class'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from statsmodels import __version__
release = __version__
# The full version, including dev tag.
version = __version__
# set inheritance_graph_attrs
# you need graphviz installed to use this
# see: http://sphinx.pocoo.org/ext/inheritance.html
# and graphviz dot documentation http://www.graphviz.org/content/attrs
#NOTE: giving the empty string to size allows graphviz to figure out
# the size
inheritance_graph_attrs = dict(size='""', ratio="compress", fontsize=14,
rankdir="LR")
#inheritance_node_attrs = dict(shape='ellipse', fontsize=14, height=0.75,
# color='dodgerblue1', style='filled')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*/autosummary/class.rst', '*/autosummary/glmfamilies.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
if 'htmlhelp' in sys.argv:
#html_theme = 'statsmodels_htmlhelp' #doesn't look nice yet
html_theme = 'default'
print('################# using statsmodels_htmlhelp ############')
else:
html_theme = 'statsmodels'
html_theme_path = ['../themes']
html_logo = 'images/statsmodels_hybi_banner.png'
html_favicon = 'images/statsmodels_hybi_favico.ico'
html_static_path = ['_static']
html_sidebars = {'index' : ['indexsidebar.html','searchbox.html','sidelinks.html']}
html_domain_indices = True
htmlhelp_basename = 'statsmodelsdoc'
latex_documents = [
('index', 'statsmodels.tex', u'statsmodels Documentation',
u'Josef Perktold, Skipper Seabold', 'manual'),
]
imgmath_image_format = 'png'
imgmath_latex_preamble = r'\usepackage[active]{preview}'
imgmath_use_preview = True
man_pages = [
('index', 'statsmodels', u'statsmodels Documentation',
[u'Josef Perktold, Skipper Seabold, Jonathan Taylor'], 1)
]
epub_title = u'statsmodels'
epub_author = u'Josef Perktold, Skipper Seabold'
epub_publisher = u'Josef Perktold, Skipper Seabold'
epub_copyright = u'2009-2017, Josef Perktold, Skipper Seabold, Jonathan Taylor, statsmodels-developers'
intersphinx_mapping = {
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/3/', None),
'pydagogue': ('https://matthew-brett.github.io/pydagogue/', None),
'patsy': ('https://patsy.readthedocs.io/en/latest/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
}
from os.path import dirname, abspath, join
plot_basedir = join(dirname(dirname(os.path.abspath(__file__))), 'source')
github_project_url = "https://github.com/statsmodels/statsmodels"
import json
# Read the examples-gallery context with a context manager so the file handle
# is closed deterministically (a bare open() here previously leaked it).
with open('examples/landing.json') as landing_file:
    example_context = json.load(landing_file)
html_context = {'examples': example_context}
# Code injected before every Sphinx doctest run (sphinx.ext.doctest's
# doctest_global_setup option): common aliases available to all examples.
doctest_global_setup = """
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import pandas as pd
"""
| true | true |
f71d74209b671ba4f44feb645fe56447c58e62e9 | 20,551 | py | Python | venv/lib/python3.9/site-packages/jupyter_client/tests/test_kernelmanager.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python3.9/site-packages/jupyter_client/tests/test_kernelmanager.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python3.9/site-packages/jupyter_client/tests/test_kernelmanager.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | """Tests for the KernelManager"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import asyncio
import concurrent.futures
import json
import os
import signal
import sys
import time
from subprocess import PIPE
import pytest
from jupyter_core import paths
from traitlets.config.loader import Config
from ..manager import _ShutdownStatus
from ..manager import start_new_async_kernel
from ..manager import start_new_kernel
from .utils import AsyncKMSubclass
from .utils import SyncKMSubclass
from jupyter_client import AsyncKernelManager
from jupyter_client import KernelManager
# Shorthand used for building kernelspec paths below.
pjoin = os.path.join
# Seconds; ceiling for kernel waits (usage not visible in this chunk -- confirm).
TIMEOUT = 30
@pytest.fixture(params=["tcp", "ipc"])
def transport(request):
    """Parametrized ZMQ transport name; 'ipc' is skipped on Windows."""
    chosen = request.param
    if chosen == "ipc" and sys.platform == "win32":
        pytest.skip("Transport 'ipc' not supported on Windows.")
    return chosen
@pytest.fixture
def config(transport):
    """Traitlets Config selecting the given transport for KernelManager."""
    cfg = Config()
    cfg.KernelManager.transport = transport
    if transport == "ipc":
        # For ipc, "test" is presumably the socket path prefix rather than a
        # network address -- confirm against KernelManager's ip handling.
        cfg.KernelManager.ip = "test"
    return cfg
def _install_kernel(name="signaltest", extra_env=None):
    """Install a signal-test kernelspec under the Jupyter data directory.

    Parameters
    ----------
    name : str
        Kernelspec directory name (also the kernel name used by tests).
    extra_env : dict or None
        Extra environment variables merged into the kernelspec "env" section.
    """
    if extra_env is None:
        extra_env = {}
    kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name)
    # Intentionally no exist_ok: a duplicate install within one test session
    # should fail loudly rather than silently overwrite the spec.
    os.makedirs(kernel_dir)
    spec = {
        "argv": [
            sys.executable,
            "-m",
            "jupyter_client.tests.signalkernel",
            "-f",
            "{connection_file}",
        ],
        "display_name": "Signal Test Kernel",
        # ${TEST_VARS} is presumably expanded by the kernel launcher so tests
        # can check env vars are extended, not replaced -- confirm.
        "env": {"TEST_VARS": "${TEST_VARS}:test_var_2", **extra_env},
    }
    # kernel.json is UTF-8 JSON; write with an explicit encoding instead of
    # relying on the platform default.
    with open(pjoin(kernel_dir, "kernel.json"), "w", encoding="utf-8") as f:
        json.dump(spec, f)
@pytest.fixture
def install_kernel():
    # Install the default "signaltest" kernelspec (side effect; returns None).
    return _install_kernel()
def install_kernel_dont_shutdown():
    # Kernel variant that ignores shutdown_request, forcing the SIGTERM path
    # (see TestKernelManagerShutDownGracefully.parameters).
    _install_kernel("signaltest-no-shutdown", {"NO_SHUTDOWN_REPLY": "1"})
def install_kernel_dont_terminate():
    # Kernel variant ignoring both shutdown_request and SIGTERM, forcing the
    # SIGKILL path (see TestKernelManagerShutDownGracefully.parameters).
    return _install_kernel(
        "signaltest-no-terminate", {"NO_SHUTDOWN_REPLY": "1", "NO_SIGTERM_REPLY": "1"}
    )
@pytest.fixture
def start_kernel():
    # Start a blocking "signaltest" kernel; hands (manager, client) to the test.
    km, kc = start_new_kernel(kernel_name="signaltest")
    yield km, kc
    # Teardown: stop channels, shut the kernel down, and verify the manager
    # actually closed its ZMQ context.
    kc.stop_channels()
    km.shutdown_kernel()
    assert km.context.closed
@pytest.fixture
def km(config):
    """A blocking KernelManager built from the transport-specific config."""
    return KernelManager(config=config)
@pytest.fixture
def km_subclass(config):
    """Test subclass of KernelManager built from the transport-specific config."""
    return SyncKMSubclass(config=config)
@pytest.fixture
def zmq_context():
    # Local import: zmq is only needed by tests that request this fixture.
    import zmq
    ctx = zmq.Context()
    yield ctx
    # Terminate on teardown so the context's sockets don't leak across tests.
    ctx.term()
@pytest.fixture(params=[AsyncKernelManager, AsyncKMSubclass])
def async_km(request, config):
    """Async kernel manager, parametrized over the base class and test subclass."""
    return request.param(config=config)
@pytest.fixture
def async_km_subclass(config):
    """AsyncKMSubclass built from the transport-specific config."""
    return AsyncKMSubclass(config=config)
@pytest.fixture
async def start_async_kernel():
    # Async counterpart of start_kernel: same (manager, client) pair, awaited APIs.
    km, kc = await start_new_async_kernel(kernel_name="signaltest")
    yield km, kc
    kc.stop_channels()
    await km.shutdown_kernel()
    # The manager must have closed its ZMQ context during shutdown.
    assert km.context.closed
class TestKernelManagerShutDownGracefully:
    """Verify shutdown escalation: request -> SIGTERM -> SIGKILL, depending on
    how cooperative the kernel is."""
    # Shared @pytest.mark.parametrize arguments: kernelspec name, installer,
    # and the escalation level the manager is expected to record.
    parameters = (
        "name, install, expected",
        [
            # Cooperative kernel: a plain shutdown_request is enough.
            ("signaltest", _install_kernel, _ShutdownStatus.ShutdownRequest),
            # Ignores shutdown_request -> manager escalates to SIGTERM.
            (
                "signaltest-no-shutdown",
                install_kernel_dont_shutdown,
                _ShutdownStatus.SigtermRequest,
            ),
            # Ignores shutdown_request and SIGTERM -> manager must SIGKILL.
            (
                "signaltest-no-terminate",
                install_kernel_dont_terminate,
                _ShutdownStatus.SigkillRequest,
            ),
        ],
    )
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    @pytest.mark.parametrize(*parameters)
    def test_signal_kernel_subprocesses(self, name, install, expected):
        # ipykernel doesn't support 3.6 and this test uses async shutdown_request
        if expected == _ShutdownStatus.ShutdownRequest and sys.version_info < (3, 7):
            pytest.skip()
        install()
        km, kc = start_new_kernel(kernel_name=name)
        assert km._shutdown_status == _ShutdownStatus.Unset
        assert km.is_alive()
        # kc.execute("1")
        kc.stop_channels()
        km.shutdown_kernel()
        # A cooperative shutdown may still escalate to SIGTERM (presumably a
        # timing race in how fast the kernel replies) -- accept either status.
        if expected == _ShutdownStatus.ShutdownRequest:
            expected = [expected, _ShutdownStatus.SigtermRequest]
        else:
            expected = [expected]
        assert km._shutdown_status in expected
    @pytest.mark.asyncio
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    @pytest.mark.parametrize(*parameters)
    async def test_async_signal_kernel_subprocesses(self, name, install, expected):
        # Async variant of the test above, driving the awaited manager APIs.
        install()
        km, kc = await start_new_async_kernel(kernel_name=name)
        assert km._shutdown_status == _ShutdownStatus.Unset
        assert await km.is_alive()
        # kc.execute("1")
        kc.stop_channels()
        await km.shutdown_kernel()
        # Same tolerance as the sync test: ShutdownRequest may escalate.
        if expected == _ShutdownStatus.ShutdownRequest:
            expected = [expected, _ShutdownStatus.SigtermRequest]
        else:
            expected = [expected]
        assert km._shutdown_status in expected
class TestKernelManager:
    """Integration tests for the synchronous KernelManager lifecycle."""

    def test_lifecycle(self, km):
        """start -> restart -> interrupt -> shutdown, checking liveness throughout."""
        km.start_kernel(stdout=PIPE, stderr=PIPE)
        assert km.is_alive()
        is_done = km.ready.done()
        assert is_done
        km.restart_kernel(now=True)
        assert km.is_alive()
        km.interrupt_kernel()
        assert isinstance(km, KernelManager)
        km.shutdown_kernel(now=True)
        assert km.context.closed

    def test_get_connect_info(self, km):
        """Connection info exposes exactly the expected set of keys."""
        cinfo = km.get_connection_info()
        keys = sorted(cinfo.keys())
        expected = sorted(
            [
                "ip",
                "transport",
                "hb_port",
                "shell_port",
                "stdin_port",
                "iopub_port",
                "control_port",
                "key",
                "signature_scheme",
            ]
        )
        assert keys == expected

    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    def test_signal_kernel_subprocesses(self, install_kernel, start_kernel):
        """interrupt_kernel() forwards SIGINT to the kernel's own subprocesses."""
        km, kc = start_kernel

        # Helper: run a command on the kernel and return the matching reply
        # content, skipping unrelated shell messages.
        def execute(cmd):
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content

        N = 5
        for i in range(N):
            execute("start")
        time.sleep(1)  # make sure subprocs stay up
        reply = execute("check")
        assert reply["user_expressions"]["poll"] == [None] * N
        # start a job on the kernel to be interrupted
        kc.execute("sleep")
        time.sleep(1)  # ensure sleep message has been handled before we interrupt
        km.interrupt_kernel()
        reply = kc.get_shell_msg(TIMEOUT)
        content = reply["content"]
        assert content["status"] == "ok"
        assert content["user_expressions"]["interrupted"]
        # wait up to 10s for subprocesses to handle signal
        for i in range(100):
            reply = execute("check")
            if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
                time.sleep(0.1)
            else:
                break
        # verify that subprocesses were interrupted
        assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N

    def test_start_new_kernel(self, install_kernel, start_kernel):
        """start_new_kernel returns a live manager/client pair with an open context."""
        km, kc = start_kernel
        assert km.is_alive()
        assert kc.is_alive()
        assert km.context.closed is False

    def _env_test_body(self, kc):
        """Shared body: assert the kernelspec env template was expanded."""
        def execute(cmd):
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content

        reply = execute("env")
        assert reply is not None
        # "${TEST_VARS}:test_var_2" from the kernelspec should have been
        # substituted with the outer TEST_VARS value.
        assert reply["user_expressions"]["env"] == "test_var_1:test_var_2"

    def test_templated_kspec_env(self, install_kernel, start_kernel):
        """Templated env vars in the kernelspec are expanded at launch."""
        km, kc = start_kernel
        assert km.is_alive()
        assert kc.is_alive()
        assert km.context.closed is False
        self._env_test_body(kc)

    def test_cleanup_context(self, km):
        """cleanup_resources closes the manager-owned ZMQ context."""
        assert km.context is not None
        km.cleanup_resources(restart=False)
        assert km.context.closed

    def test_no_cleanup_shared_context(self, zmq_context):
        """kernel manager does not terminate shared context"""
        km = KernelManager(context=zmq_context)
        assert km.context == zmq_context
        assert km.context is not None
        km.cleanup_resources(restart=False)
        assert km.context.closed is False
        assert zmq_context.closed is False

    def test_subclass_callables(self, km_subclass):
        """Every overridable hook is invoked the expected number of times."""
        km_subclass.reset_counts()
        km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
        assert km_subclass.call_count("start_kernel") == 1
        assert km_subclass.call_count("_launch_kernel") == 1
        is_alive = km_subclass.is_alive()
        assert is_alive
        km_subclass.reset_counts()
        km_subclass.restart_kernel(now=True)
        # restart (now=True) goes through shutdown/kill/cleanup then start.
        assert km_subclass.call_count("restart_kernel") == 1
        assert km_subclass.call_count("shutdown_kernel") == 1
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("_kill_kernel") == 1
        assert km_subclass.call_count("cleanup_resources") == 1
        assert km_subclass.call_count("start_kernel") == 1
        assert km_subclass.call_count("_launch_kernel") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        is_alive = km_subclass.is_alive()
        assert is_alive
        assert km_subclass.call_count("is_alive") >= 1
        km_subclass.reset_counts()
        km_subclass.interrupt_kernel()
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        assert isinstance(km_subclass, KernelManager)
        km_subclass.reset_counts()
        km_subclass.shutdown_kernel(now=False)
        # Graceful shutdown (now=False) requests shutdown and waits for it.
        assert km_subclass.call_count("shutdown_kernel") == 1
        assert km_subclass.call_count("interrupt_kernel") == 1
        assert km_subclass.call_count("request_shutdown") == 1
        assert km_subclass.call_count("finish_shutdown") == 1
        assert km_subclass.call_count("cleanup_resources") == 1
        assert km_subclass.call_count("signal_kernel") == 1
        assert km_subclass.call_count("is_alive") >= 1
        is_alive = km_subclass.is_alive()
        assert is_alive is False
        assert km_subclass.call_count("is_alive") >= 1
        assert km_subclass.context.closed
class TestParallel:
    """Run full kernel lifecycles sequentially and in thread/process pools."""

    @pytest.mark.timeout(TIMEOUT)
    def test_start_sequence_kernels(self, config, install_kernel):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_signaltest_lifecycle(config)
        self._run_signaltest_lifecycle(config)
        self._run_signaltest_lifecycle(config)

    @pytest.mark.timeout(TIMEOUT + 10)
    def test_start_parallel_thread_kernels(self, config, install_kernel):
        """Two lifecycles running concurrently in threads do not interfere."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor:
            future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            future2 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            future1.result()
            future2.result()

    @pytest.mark.timeout(TIMEOUT)
    @pytest.mark.skipif(
        (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
        reason='"Bad file descriptor" error',
    )
    def test_start_parallel_process_kernels(self, config, install_kernel):
        """A thread-pool lifecycle and a process-pool lifecycle can overlap."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor:
            future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
            with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor:
                future2 = process_executor.submit(self._run_signaltest_lifecycle, config)
                future2.result()
            future1.result()

    @pytest.mark.timeout(TIMEOUT)
    @pytest.mark.skipif(
        (sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
        reason='"Bad file descriptor" error',
    )
    def test_start_sequence_process_kernels(self, config, install_kernel):
        """A lifecycle run in a separate process succeeds after an in-process one."""
        if config.KernelManager.transport == "ipc":  # FIXME
            pytest.skip("IPC transport is currently not working for this test!")
        self._run_signaltest_lifecycle(config)
        with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool_executor:
            future = pool_executor.submit(self._run_signaltest_lifecycle, config)
            future.result()

    def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs):
        """Start *km*'s kernel and return a ready client; clean up on failure."""
        km.start_kernel(**kwargs)
        kc = km.client()
        kc.start_channels()
        try:
            kc.wait_for_ready(timeout=startup_timeout)
        except RuntimeError:
            # Kernel never became ready: tear everything down before re-raising.
            kc.stop_channels()
            km.shutdown_kernel()
            raise
        return kc

    def _run_signaltest_lifecycle(self, config=None):
        """One full start/execute/restart/shutdown cycle of a signaltest kernel."""
        km = KernelManager(config=config, kernel_name="signaltest")
        kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE)

        # Helper: run a command and return the matching reply content.
        def execute(cmd):
            request_id = kc.execute(cmd)
            while True:
                reply = kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content

        execute("start")
        assert km.is_alive()
        execute("check")
        assert km.is_alive()
        km.restart_kernel(now=True)
        assert km.is_alive()
        execute("check")
        km.shutdown_kernel()
        assert km.context.closed
        kc.stop_channels()
@pytest.mark.asyncio
class TestAsyncKernelManager:
    """Async counterparts of the TestKernelManager lifecycle tests."""

    async def test_lifecycle(self, async_km):
        """start -> restart -> interrupt -> shutdown, checking liveness throughout."""
        await async_km.start_kernel(stdout=PIPE, stderr=PIPE)
        is_alive = await async_km.is_alive()
        assert is_alive
        is_ready = async_km.ready.done()
        assert is_ready
        await async_km.restart_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive
        await async_km.interrupt_kernel()
        assert isinstance(async_km, AsyncKernelManager)
        await async_km.shutdown_kernel(now=True)
        is_alive = await async_km.is_alive()
        assert is_alive is False
        assert async_km.context.closed

    async def test_get_connect_info(self, async_km):
        """Connection info exposes exactly the expected set of keys."""
        cinfo = async_km.get_connection_info()
        keys = sorted(cinfo.keys())
        expected = sorted(
            [
                "ip",
                "transport",
                "hb_port",
                "shell_port",
                "stdin_port",
                "iopub_port",
                "control_port",
                "key",
                "signature_scheme",
            ]
        )
        assert keys == expected

    @pytest.mark.timeout(10)
    @pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
    async def test_signal_kernel_subprocesses(self, install_kernel, start_async_kernel):
        """interrupt_kernel() forwards SIGINT to the kernel's own subprocesses."""
        km, kc = start_async_kernel

        # Helper: run a command and return the matching reply content,
        # skipping unrelated shell messages.
        async def execute(cmd):
            request_id = kc.execute(cmd)
            while True:
                reply = await kc.get_shell_msg(TIMEOUT)
                if reply["parent_header"]["msg_id"] == request_id:
                    break
            content = reply["content"]
            assert content["status"] == "ok"
            return content

        # Ensure that shutdown_kernel and stop_channels are called at the end of the test.
        # Note: we cannot use addCleanup(<func>) for these since it doesn't prpperly handle
        # coroutines - which km.shutdown_kernel now is.
        N = 5
        for i in range(N):
            await execute("start")
        await asyncio.sleep(1)  # make sure subprocs stay up
        reply = await execute("check")
        assert reply["user_expressions"]["poll"] == [None] * N
        # start a job on the kernel to be interrupted
        request_id = kc.execute("sleep")
        await asyncio.sleep(1)  # ensure sleep message has been handled before we interrupt
        await km.interrupt_kernel()
        while True:
            reply = await kc.get_shell_msg(TIMEOUT)
            if reply["parent_header"]["msg_id"] == request_id:
                break
        content = reply["content"]
        assert content["status"] == "ok"
        assert content["user_expressions"]["interrupted"] is True
        # wait up to 5s for subprocesses to handle signal
        for i in range(50):
            reply = await execute("check")
            if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
                await asyncio.sleep(0.1)
            else:
                break
        # verify that subprocesses were interrupted
        assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N

    @pytest.mark.timeout(10)
    async def test_start_new_async_kernel(self, install_kernel, start_async_kernel):
        """start_new_async_kernel returns a live manager/client pair."""
        km, kc = start_async_kernel
        is_alive = await km.is_alive()
        assert is_alive
        is_alive = await kc.is_alive()
        assert is_alive

    async def test_subclass_callables(self, async_km_subclass):
        """Every overridable async hook is invoked the expected number of times."""
        async_km_subclass.reset_counts()
        await async_km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
        assert async_km_subclass.call_count("start_kernel") == 1
        assert async_km_subclass.call_count("_launch_kernel") == 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive
        assert async_km_subclass.call_count("is_alive") >= 1
        async_km_subclass.reset_counts()
        await async_km_subclass.restart_kernel(now=True)
        # restart (now=True) goes through shutdown/kill/cleanup then start.
        assert async_km_subclass.call_count("restart_kernel") == 1
        assert async_km_subclass.call_count("shutdown_kernel") == 1
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("_kill_kernel") == 1
        assert async_km_subclass.call_count("cleanup_resources") == 1
        assert async_km_subclass.call_count("start_kernel") == 1
        assert async_km_subclass.call_count("_launch_kernel") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive
        assert async_km_subclass.call_count("is_alive") >= 1
        async_km_subclass.reset_counts()
        await async_km_subclass.interrupt_kernel()
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        assert isinstance(async_km_subclass, AsyncKernelManager)
        async_km_subclass.reset_counts()
        await async_km_subclass.shutdown_kernel(now=False)
        # Graceful shutdown (now=False) requests shutdown and waits for it.
        assert async_km_subclass.call_count("shutdown_kernel") == 1
        assert async_km_subclass.call_count("interrupt_kernel") == 1
        assert async_km_subclass.call_count("request_shutdown") == 1
        assert async_km_subclass.call_count("finish_shutdown") == 1
        assert async_km_subclass.call_count("cleanup_resources") == 1
        assert async_km_subclass.call_count("signal_kernel") == 1
        assert async_km_subclass.call_count("is_alive") >= 1
        is_alive = await async_km_subclass.is_alive()
        assert is_alive is False
        assert async_km_subclass.call_count("is_alive") >= 1
        assert async_km_subclass.context.closed
| 35.190068 | 100 | 0.634373 |
import asyncio
import concurrent.futures
import json
import os
import signal
import sys
import time
from subprocess import PIPE
import pytest
from jupyter_core import paths
from traitlets.config.loader import Config
from ..manager import _ShutdownStatus
from ..manager import start_new_async_kernel
from ..manager import start_new_kernel
from .utils import AsyncKMSubclass
from .utils import SyncKMSubclass
from jupyter_client import AsyncKernelManager
from jupyter_client import KernelManager
pjoin = os.path.join
TIMEOUT = 30
@pytest.fixture(params=["tcp", "ipc"])
def transport(request):
if sys.platform == "win32" and request.param == "ipc":
pytest.skip("Transport 'ipc' not supported on Windows.")
return request.param
@pytest.fixture
def config(transport):
c = Config()
c.KernelManager.transport = transport
if transport == "ipc":
c.KernelManager.ip = "test"
return c
def _install_kernel(name="signaltest", extra_env=None):
if extra_env is None:
extra_env = {}
kernel_dir = pjoin(paths.jupyter_data_dir(), "kernels", name)
os.makedirs(kernel_dir)
with open(pjoin(kernel_dir, "kernel.json"), "w") as f:
f.write(
json.dumps(
{
"argv": [
sys.executable,
"-m",
"jupyter_client.tests.signalkernel",
"-f",
"{connection_file}",
],
"display_name": "Signal Test Kernel",
"env": {"TEST_VARS": "${TEST_VARS}:test_var_2", **extra_env},
}
)
)
@pytest.fixture
def install_kernel():
return _install_kernel()
def install_kernel_dont_shutdown():
_install_kernel("signaltest-no-shutdown", {"NO_SHUTDOWN_REPLY": "1"})
def install_kernel_dont_terminate():
return _install_kernel(
"signaltest-no-terminate", {"NO_SHUTDOWN_REPLY": "1", "NO_SIGTERM_REPLY": "1"}
)
@pytest.fixture
def start_kernel():
km, kc = start_new_kernel(kernel_name="signaltest")
yield km, kc
kc.stop_channels()
km.shutdown_kernel()
assert km.context.closed
@pytest.fixture
def km(config):
km = KernelManager(config=config)
return km
@pytest.fixture
def km_subclass(config):
km = SyncKMSubclass(config=config)
return km
@pytest.fixture
def zmq_context():
import zmq
ctx = zmq.Context()
yield ctx
ctx.term()
@pytest.fixture(params=[AsyncKernelManager, AsyncKMSubclass])
def async_km(request, config):
km = request.param(config=config)
return km
@pytest.fixture
def async_km_subclass(config):
km = AsyncKMSubclass(config=config)
return km
@pytest.fixture
async def start_async_kernel():
km, kc = await start_new_async_kernel(kernel_name="signaltest")
yield km, kc
kc.stop_channels()
await km.shutdown_kernel()
assert km.context.closed
class TestKernelManagerShutDownGracefully:
parameters = (
"name, install, expected",
[
("signaltest", _install_kernel, _ShutdownStatus.ShutdownRequest),
(
"signaltest-no-shutdown",
install_kernel_dont_shutdown,
_ShutdownStatus.SigtermRequest,
),
(
"signaltest-no-terminate",
install_kernel_dont_terminate,
_ShutdownStatus.SigkillRequest,
),
],
)
@pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
@pytest.mark.parametrize(*parameters)
def test_signal_kernel_subprocesses(self, name, install, expected):
# ipykernel doesn't support 3.6 and this test uses async shutdown_request
if expected == _ShutdownStatus.ShutdownRequest and sys.version_info < (3, 7):
pytest.skip()
install()
km, kc = start_new_kernel(kernel_name=name)
assert km._shutdown_status == _ShutdownStatus.Unset
assert km.is_alive()
kc.stop_channels()
km.shutdown_kernel()
if expected == _ShutdownStatus.ShutdownRequest:
expected = [expected, _ShutdownStatus.SigtermRequest]
else:
expected = [expected]
assert km._shutdown_status in expected
@pytest.mark.asyncio
@pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
@pytest.mark.parametrize(*parameters)
async def test_async_signal_kernel_subprocesses(self, name, install, expected):
install()
km, kc = await start_new_async_kernel(kernel_name=name)
assert km._shutdown_status == _ShutdownStatus.Unset
assert await km.is_alive()
# kc.execute("1")
kc.stop_channels()
await km.shutdown_kernel()
if expected == _ShutdownStatus.ShutdownRequest:
expected = [expected, _ShutdownStatus.SigtermRequest]
else:
expected = [expected]
assert km._shutdown_status in expected
class TestKernelManager:
def test_lifecycle(self, km):
km.start_kernel(stdout=PIPE, stderr=PIPE)
assert km.is_alive()
is_done = km.ready.done()
assert is_done
km.restart_kernel(now=True)
assert km.is_alive()
km.interrupt_kernel()
assert isinstance(km, KernelManager)
km.shutdown_kernel(now=True)
assert km.context.closed
def test_get_connect_info(self, km):
cinfo = km.get_connection_info()
keys = sorted(cinfo.keys())
expected = sorted(
[
"ip",
"transport",
"hb_port",
"shell_port",
"stdin_port",
"iopub_port",
"control_port",
"key",
"signature_scheme",
]
)
assert keys == expected
@pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
def test_signal_kernel_subprocesses(self, install_kernel, start_kernel):
km, kc = start_kernel
def execute(cmd):
request_id = kc.execute(cmd)
while True:
reply = kc.get_shell_msg(TIMEOUT)
if reply["parent_header"]["msg_id"] == request_id:
break
content = reply["content"]
assert content["status"] == "ok"
return content
N = 5
for i in range(N):
execute("start")
time.sleep(1)
reply = execute("check")
assert reply["user_expressions"]["poll"] == [None] * N
kc.execute("sleep")
time.sleep(1)
km.interrupt_kernel()
reply = kc.get_shell_msg(TIMEOUT)
content = reply["content"]
assert content["status"] == "ok"
assert content["user_expressions"]["interrupted"]
for i in range(100):
reply = execute("check")
if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
time.sleep(0.1)
else:
break
assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N
def test_start_new_kernel(self, install_kernel, start_kernel):
km, kc = start_kernel
assert km.is_alive()
assert kc.is_alive()
assert km.context.closed is False
def _env_test_body(self, kc):
def execute(cmd):
request_id = kc.execute(cmd)
while True:
reply = kc.get_shell_msg(TIMEOUT)
if reply["parent_header"]["msg_id"] == request_id:
break
content = reply["content"]
assert content["status"] == "ok"
return content
reply = execute("env")
assert reply is not None
assert reply["user_expressions"]["env"] == "test_var_1:test_var_2"
def test_templated_kspec_env(self, install_kernel, start_kernel):
km, kc = start_kernel
assert km.is_alive()
assert kc.is_alive()
assert km.context.closed is False
self._env_test_body(kc)
def test_cleanup_context(self, km):
assert km.context is not None
km.cleanup_resources(restart=False)
assert km.context.closed
def test_no_cleanup_shared_context(self, zmq_context):
km = KernelManager(context=zmq_context)
assert km.context == zmq_context
assert km.context is not None
km.cleanup_resources(restart=False)
assert km.context.closed is False
assert zmq_context.closed is False
def test_subclass_callables(self, km_subclass):
km_subclass.reset_counts()
km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
assert km_subclass.call_count("start_kernel") == 1
assert km_subclass.call_count("_launch_kernel") == 1
is_alive = km_subclass.is_alive()
assert is_alive
km_subclass.reset_counts()
km_subclass.restart_kernel(now=True)
assert km_subclass.call_count("restart_kernel") == 1
assert km_subclass.call_count("shutdown_kernel") == 1
assert km_subclass.call_count("interrupt_kernel") == 1
assert km_subclass.call_count("_kill_kernel") == 1
assert km_subclass.call_count("cleanup_resources") == 1
assert km_subclass.call_count("start_kernel") == 1
assert km_subclass.call_count("_launch_kernel") == 1
assert km_subclass.call_count("signal_kernel") == 1
is_alive = km_subclass.is_alive()
assert is_alive
assert km_subclass.call_count("is_alive") >= 1
km_subclass.reset_counts()
km_subclass.interrupt_kernel()
assert km_subclass.call_count("interrupt_kernel") == 1
assert km_subclass.call_count("signal_kernel") == 1
assert isinstance(km_subclass, KernelManager)
km_subclass.reset_counts()
km_subclass.shutdown_kernel(now=False)
assert km_subclass.call_count("shutdown_kernel") == 1
assert km_subclass.call_count("interrupt_kernel") == 1
assert km_subclass.call_count("request_shutdown") == 1
assert km_subclass.call_count("finish_shutdown") == 1
assert km_subclass.call_count("cleanup_resources") == 1
assert km_subclass.call_count("signal_kernel") == 1
assert km_subclass.call_count("is_alive") >= 1
is_alive = km_subclass.is_alive()
assert is_alive is False
assert km_subclass.call_count("is_alive") >= 1
assert km_subclass.context.closed
class TestParallel:
@pytest.mark.timeout(TIMEOUT)
def test_start_sequence_kernels(self, config, install_kernel):
self._run_signaltest_lifecycle(config)
self._run_signaltest_lifecycle(config)
self._run_signaltest_lifecycle(config)
@pytest.mark.timeout(TIMEOUT + 10)
def test_start_parallel_thread_kernels(self, config, install_kernel):
if config.KernelManager.transport == "ipc":
pytest.skip("IPC transport is currently not working for this test!")
self._run_signaltest_lifecycle(config)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as thread_executor:
future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
future2 = thread_executor.submit(self._run_signaltest_lifecycle, config)
future1.result()
future2.result()
@pytest.mark.timeout(TIMEOUT)
@pytest.mark.skipif(
(sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
reason='"Bad file descriptor" error',
)
def test_start_parallel_process_kernels(self, config, install_kernel):
if config.KernelManager.transport == "ipc":
pytest.skip("IPC transport is currently not working for this test!")
self._run_signaltest_lifecycle(config)
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as thread_executor:
future1 = thread_executor.submit(self._run_signaltest_lifecycle, config)
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as process_executor:
future2 = process_executor.submit(self._run_signaltest_lifecycle, config)
future2.result()
future1.result()
@pytest.mark.timeout(TIMEOUT)
@pytest.mark.skipif(
(sys.platform == "darwin") and (sys.version_info >= (3, 6)) and (sys.version_info < (3, 8)),
reason='"Bad file descriptor" error',
)
def test_start_sequence_process_kernels(self, config, install_kernel):
if config.KernelManager.transport == "ipc":
pytest.skip("IPC transport is currently not working for this test!")
self._run_signaltest_lifecycle(config)
with concurrent.futures.ProcessPoolExecutor(max_workers=1) as pool_executor:
future = pool_executor.submit(self._run_signaltest_lifecycle, config)
future.result()
def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs):
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return kc
def _run_signaltest_lifecycle(self, config=None):
km = KernelManager(config=config, kernel_name="signaltest")
kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE)
def execute(cmd):
request_id = kc.execute(cmd)
while True:
reply = kc.get_shell_msg(TIMEOUT)
if reply["parent_header"]["msg_id"] == request_id:
break
content = reply["content"]
assert content["status"] == "ok"
return content
execute("start")
assert km.is_alive()
execute("check")
assert km.is_alive()
km.restart_kernel(now=True)
assert km.is_alive()
execute("check")
km.shutdown_kernel()
assert km.context.closed
kc.stop_channels()
@pytest.mark.asyncio
class TestAsyncKernelManager:
async def test_lifecycle(self, async_km):
await async_km.start_kernel(stdout=PIPE, stderr=PIPE)
is_alive = await async_km.is_alive()
assert is_alive
is_ready = async_km.ready.done()
assert is_ready
await async_km.restart_kernel(now=True)
is_alive = await async_km.is_alive()
assert is_alive
await async_km.interrupt_kernel()
assert isinstance(async_km, AsyncKernelManager)
await async_km.shutdown_kernel(now=True)
is_alive = await async_km.is_alive()
assert is_alive is False
assert async_km.context.closed
async def test_get_connect_info(self, async_km):
cinfo = async_km.get_connection_info()
keys = sorted(cinfo.keys())
expected = sorted(
[
"ip",
"transport",
"hb_port",
"shell_port",
"stdin_port",
"iopub_port",
"control_port",
"key",
"signature_scheme",
]
)
assert keys == expected
@pytest.mark.timeout(10)
@pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support signals")
async def test_signal_kernel_subprocesses(self, install_kernel, start_async_kernel):
km, kc = start_async_kernel
async def execute(cmd):
request_id = kc.execute(cmd)
while True:
reply = await kc.get_shell_msg(TIMEOUT)
if reply["parent_header"]["msg_id"] == request_id:
break
content = reply["content"]
assert content["status"] == "ok"
return content
# Ensure that shutdown_kernel and stop_channels are called at the end of the test.
# Note: we cannot use addCleanup(<func>) for these since it doesn't prpperly handle
N = 5
for i in range(N):
await execute("start")
await asyncio.sleep(1)
reply = await execute("check")
assert reply["user_expressions"]["poll"] == [None] * N
request_id = kc.execute("sleep")
await asyncio.sleep(1)
await km.interrupt_kernel()
while True:
reply = await kc.get_shell_msg(TIMEOUT)
if reply["parent_header"]["msg_id"] == request_id:
break
content = reply["content"]
assert content["status"] == "ok"
assert content["user_expressions"]["interrupted"] is True
for i in range(50):
reply = await execute("check")
if reply["user_expressions"]["poll"] != [-signal.SIGINT] * N:
await asyncio.sleep(0.1)
else:
break
assert reply["user_expressions"]["poll"] == [-signal.SIGINT] * N
@pytest.mark.timeout(10)
async def test_start_new_async_kernel(self, install_kernel, start_async_kernel):
km, kc = start_async_kernel
is_alive = await km.is_alive()
assert is_alive
is_alive = await kc.is_alive()
assert is_alive
async def test_subclass_callables(self, async_km_subclass):
async_km_subclass.reset_counts()
await async_km_subclass.start_kernel(stdout=PIPE, stderr=PIPE)
assert async_km_subclass.call_count("start_kernel") == 1
assert async_km_subclass.call_count("_launch_kernel") == 1
is_alive = await async_km_subclass.is_alive()
assert is_alive
assert async_km_subclass.call_count("is_alive") >= 1
async_km_subclass.reset_counts()
await async_km_subclass.restart_kernel(now=True)
assert async_km_subclass.call_count("restart_kernel") == 1
assert async_km_subclass.call_count("shutdown_kernel") == 1
assert async_km_subclass.call_count("interrupt_kernel") == 1
assert async_km_subclass.call_count("_kill_kernel") == 1
assert async_km_subclass.call_count("cleanup_resources") == 1
assert async_km_subclass.call_count("start_kernel") == 1
assert async_km_subclass.call_count("_launch_kernel") == 1
assert async_km_subclass.call_count("signal_kernel") == 1
is_alive = await async_km_subclass.is_alive()
assert is_alive
assert async_km_subclass.call_count("is_alive") >= 1
async_km_subclass.reset_counts()
await async_km_subclass.interrupt_kernel()
assert async_km_subclass.call_count("interrupt_kernel") == 1
assert async_km_subclass.call_count("signal_kernel") == 1
assert isinstance(async_km_subclass, AsyncKernelManager)
async_km_subclass.reset_counts()
await async_km_subclass.shutdown_kernel(now=False)
assert async_km_subclass.call_count("shutdown_kernel") == 1
assert async_km_subclass.call_count("interrupt_kernel") == 1
assert async_km_subclass.call_count("request_shutdown") == 1
assert async_km_subclass.call_count("finish_shutdown") == 1
assert async_km_subclass.call_count("cleanup_resources") == 1
assert async_km_subclass.call_count("signal_kernel") == 1
assert async_km_subclass.call_count("is_alive") >= 1
is_alive = await async_km_subclass.is_alive()
assert is_alive is False
assert async_km_subclass.call_count("is_alive") >= 1
assert async_km_subclass.context.closed
| true | true |
f71d74fb9dde8bde86b6b8d92cb15d0c1a7d332a | 3,476 | py | Python | image_subsets/image_subsets.py | Comp4710AprioriTextIllustrator/TextIllustrator | fd46b2f80530f622e7ae4302693fbfaad45a8de8 | [
"MIT"
] | 1 | 2015-11-04T15:21:11.000Z | 2015-11-04T15:21:11.000Z | image_subsets/image_subsets.py | Comp4710AprioriTextIllustrator/TextIllustrator | fd46b2f80530f622e7ae4302693fbfaad45a8de8 | [
"MIT"
] | null | null | null | image_subsets/image_subsets.py | Comp4710AprioriTextIllustrator/TextIllustrator | fd46b2f80530f622e7ae4302693fbfaad45a8de8 | [
"MIT"
] | null | null | null | #!/bin/python
# -*- coding: utf-8 -*-
import comparators.pixel_comparator
import comparators.chained_image_comparator
import comparators.avg_pixel_comparator
import image_iterator
import sys
# Not really sets, since they may contain "duplicate" images(ie ones that when compared return True)
def get_common_subset_of_image_sets(image_sets, comparator):
    '''
    Input:
        image_sets: A list of sets where each set contains a list of file paths for the images to be compared
        comparator: A comparator that will be used to compare the images
    Output:
        The common subset of all the image_sets, or None when image_sets is empty
    '''
    # BUG FIX: the original checked the unrelated global name 'image_subsets'
    # (a NameError when called outside __main__) instead of the parameter.
    if len(image_sets) == 0:
        return None
    current_image_subset = image_sets[0]
    for image_set in image_sets[1:]:
        # Refine the subset against each remaining set.
        current_image_subset = intersection_of_sets(current_image_subset, image_set, comparator)
    # BUG FIX: the original returned the undefined name 'current_image_set'.
    return current_image_subset
def intersection_of_sets(image_set1, image_set2, comparator):
    '''
    Performs a block based intersection of the two image sets image_set1 and image_set2 using the comparator specified by the comparator parameter.
    Returns the list of paths from image_set1 whose image matches some image of image_set2 (each path at most once).
    '''
    subset = []
    # Done this way so we don't keep more than 2*image_iterator.max_images_per_block
    # images in memory at once.
    for images in image_iterator.load_images(image_set1):
        # BUG FIX: in the original, 'break' only exited the innermost loop, so
        # an image matching in several blocks of image_set2 was appended once
        # per matching block. Track images still unmatched across blocks.
        unmatched = list(images)
        for other_images in image_iterator.load_images(image_set2):
            still_unmatched = []
            for image in unmatched:
                for other_image in other_images:
                    if comparator.compare(image[1], other_image[1]):
                        subset.append(image[0])
                        break
                else:
                    # No match in this block; try the next block of image_set2.
                    still_unmatched.append(image)
            unmatched = still_unmatched
            if not unmatched:
                break
    return subset
def parse_subsets(subset_file):
    '''
    Read *subset_file* and return a list of groups of stripped non-blank
    lines; groups are separated by one or more blank lines.
    '''
    subsets = []
    group = []
    for raw_line in subset_file:
        stripped = raw_line.strip()
        if stripped:
            group.append(stripped)
        elif group:
            # Blank separator: flush the current (non-empty) group.
            subsets.append(group)
            group = []
    if group:
        subsets.append(group)
    return subsets
def usage():
    # Print command-line usage (Python 2 print statement).
    print "python image_subsets.py <image_sets_file>"
'''
Will parse a file were grouped lines of file paths are considered sets of images. Then find the common subset of these sets.
Example:
test_file.txt
-------------
C:\some_file.bmp
C:\some_other_file.bmp
C:\another_file.jpg
C:\final_file.gif
----------------
This will produce 3 sets
1: ['C:\some_file.bmp', 'C:\some_other_file.bmp']
2: ['C:\another_file.jpg']
3: ['C:\final_file.gif']
'''
if __name__=="__main__":
    # CLI entry point: expects exactly one argument, the path to a file whose
    # blank-line-separated groups of file paths define the image sets.
    if len(sys.argv) == 2:
        # Process one image per block to keep memory usage minimal.
        image_iterator.max_images_per_block = 1
        try:
            image_subsets = None
            with open(sys.argv[1], 'r') as subset_file:
                image_subsets = parse_subsets(subset_file)
            print image_subsets
            if image_subsets:
                # Two images "match" only when both the exact-pixel and the
                # average-pixel comparators agree (chained comparison).
                chained_comparators = [comparators.pixel_comparator.PixelComparator(), comparators.avg_pixel_comparator.AvgPixelComparator()]
                comparator = comparators.chained_image_comparator.ChainedImageComparator(image_comparators=chained_comparators)
                print get_common_subset_of_image_sets(image_subsets, comparator)
        except Exception as e:
            # Best-effort error report (bad path, unreadable image, ...),
            # then remind the user of the expected invocation.
            print e
            usage()
    else:
usage() | 33.423077 | 142 | 0.671174 |
import comparators.pixel_comparator
import comparators.chained_image_comparator
import comparators.avg_pixel_comparator
import image_iterator
import sys
'''
Input:
image_sets: A list of sets where each set contains a list of file paths for the images to be compared
comparator: A comparator that will be used to compare the images
Output:
The common subset of all the image_sets
'''
def get_common_subset_of_image_sets(image_sets, comparator):
if len(image_subsets) == 0:
return None
current_image_subset = image_sets[0]
for image_set in image_sets[1:]:
current_image_subset = intersection_of_sets(current_image_subset, image_set, comparator)
return current_image_set
'''
Performs a block based intersection of the two image sets image_set1 and image_set2 using the comparator specified by the comparator parameter
'''
def intersection_of_sets(image_set1, image_set2, comparator):
subset = []
for images in image_iterator.load_images(image_set1):
for other_images in image_iterator.load_images(image_set2):
# Load 2 blocks of images into memory, run a comparison of every image in the first block against all images in the second block.
for image in images:
for other_image in other_images:
if comparator.compare(image[1], other_image[1]):
subset.append(image[0])
break
return subset
def parse_subsets(subset_file):
subsets = []
curr_subset = []
curr_line = subset_file.readline()
while curr_line != '':
if curr_line.strip() == '' and len(curr_subset) > 0:
subsets.append(curr_subset)
curr_subset = []
elif curr_line.strip() != '':
curr_subset.append(curr_line.strip())
curr_line = subset_file.readline()
if len(curr_subset) > 0:
subsets.append(curr_subset)
return subsets
def usage():
print "python image_subsets.py <image_sets_file>"
'''
Will parse a file were grouped lines of file paths are considered sets of images. Then find the common subset of these sets.
Example:
test_file.txt
-------------
C:\some_file.bmp
C:\some_other_file.bmp
C:\another_file.jpg
C:\final_file.gif
----------------
This will produce 3 sets
1: ['C:\some_file.bmp', 'C:\some_other_file.bmp']
2: ['C:\another_file.jpg']
3: ['C:\final_file.gif']
'''
if __name__=="__main__":
if len(sys.argv) == 2:
image_iterator.max_images_per_block = 1
try:
image_subsets = None
with open(sys.argv[1], 'r') as subset_file:
image_subsets = parse_subsets(subset_file)
print image_subsets
if image_subsets:
chained_comparators = [comparators.pixel_comparator.PixelComparator(), comparators.avg_pixel_comparator.AvgPixelComparator()]
comparator = comparators.chained_image_comparator.ChainedImageComparator(image_comparators=chained_comparators)
print get_common_subset_of_image_sets(image_subsets, comparator)
except Exception as e:
print e
usage()
else:
usage() | false | true |
f71d75126854c13366d9024369f6b81a57870f3f | 4,707 | py | Python | modnotes/modnotes.py | chasehult/misc-cogs | 171bfacccb534d5e73bf53ce71e0a312c35e0ec7 | [
"MIT"
] | null | null | null | modnotes/modnotes.py | chasehult/misc-cogs | 171bfacccb534d5e73bf53ce71e0a312c35e0ec7 | [
"MIT"
] | null | null | null | modnotes/modnotes.py | chasehult/misc-cogs | 171bfacccb534d5e73bf53ce71e0a312c35e0ec7 | [
"MIT"
] | null | null | null | """
Utilities for managing moderator notes about users.
"""
import re
import discord
from redbot.core import checks
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import inline, box, pagify
from tsutils import CogSettings
class ModNotes(commands.Cog):
    """Cog for keeping shared moderator notes about users, scoped per guild."""

    def __init__(self, bot: Red, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bot = bot
        # Persistent per-guild note storage (see ModNotesSettings below).
        self.settings = ModNotesSettings("modnotes")

    @commands.group(aliases=["usernote"])
    @commands.guild_only()
    @checks.mod_or_permissions(manage_guild=True)
    async def usernotes(self, ctx):
        """Moderator notes for users.

        This module allows you to create notes to share between moderators.
        """

    async def red_get_data_for_user(self, *, user_id):
        """Get a user's personal data."""
        # BUG FIX: BytesIO was never imported anywhere in this module, so this
        # method raised NameError whenever Red requested a user data export.
        from io import BytesIO
        data = "No data is stored for user with ID {}.\n".format(user_id)
        return {"user_data.txt": BytesIO(data.encode())}

    async def red_delete_data_for_user(self, *, requester, user_id):
        """Delete a user's personal data.

        No personal data is stored in this cog.
        """
        return

    @usernotes.command()
    @checks.mod_or_permissions(manage_guild=True)
    async def get(self, ctx, user: discord.User):
        """Sends the notes for a user."""
        notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
        if not notes:
            await ctx.send(box('No notes for {}'.format(user.name)))
            return
        # One pair of messages per note so long notes stay readable.
        for idx, note in enumerate(notes):
            await ctx.send(inline('Note {} of {}:'.format(idx + 1, len(notes))))
            await ctx.send(box(note))

    @usernotes.command()
    @checks.mod_or_permissions(manage_guild=True)
    async def add(self, ctx, user: discord.User, *, note_text: str):
        """Add a note to a user."""
        # Trim microseconds off the message timestamp for readability.
        timestamp = str(ctx.message.created_at)[:-7]
        msg = 'Added by {} ({}): {}'.format(ctx.author.name, timestamp, note_text)
        server_id = ctx.guild.id
        notes = self.settings.addNoteForUser(server_id, user.id, msg)
        await ctx.send(inline('Done. User {} now has {} notes'.format(user.name, len(notes))))

    @usernotes.command()
    @checks.mod_or_permissions(manage_guild=True)
    async def delete(self, ctx, user: discord.User, note_num: int):
        """Delete a specific note for a user."""
        # note_num is 1-based, matching the numbering shown by `get`.
        notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
        if len(notes) < note_num:
            await ctx.send(box('Note not found for {}'.format(user.name)))
            return
        note = notes[note_num - 1]
        notes.remove(note)
        self.settings.setNotesForUser(ctx.guild.id, user.id, notes)
        await ctx.send(inline('Removed note {}. User has {} remaining.'.format(note_num, len(notes))))
        await ctx.send(box(note))

    @usernotes.command()
    @checks.mod_or_permissions(manage_guild=True)
    async def list(self, ctx):
        """Lists all users and note counts for the server."""
        user_notes = self.settings.getUserNotes(ctx.guild.id)
        msg = 'Notes for {} users'.format(len(user_notes))
        for user_id, notes in user_notes.items():
            # Fall back to the raw id when the member has left the guild.
            user = ctx.guild.get_member(user_id)
            user_text = '{} ({})'.format(user.name, user.id) if user else user_id
            msg += '\n\t{} : {}'.format(len(notes), user_text)
        for page in pagify(msg):
            await ctx.send(box(page))
class ModNotesSettings(CogSettings):
    """Settings layout: {'servers': {server_id: {'user_notes': {user_id: [note, ...]}}}}."""

    def make_default_settings(self):
        """Initial settings structure for a fresh install."""
        return {
            'servers': {}
        }

    def servers(self):
        """All per-server settings."""
        return self.bot_settings['servers']

    def getServer(self, server_id):
        """Settings dict for one server, created on first access."""
        return self.servers().setdefault(server_id, {})

    def getUserNotes(self, server_id):
        """Mapping of user id -> list of notes for a server."""
        return self.getServer(server_id).setdefault('user_notes', {})

    def getNotesForUser(self, server_id, user_id):
        """List of notes for a user (empty list when none exist)."""
        return self.getUserNotes(server_id).get(user_id, [])

    def setNotesForUser(self, server_id, user_id, notes):
        """Persist `notes` for a user; an empty list removes the entry."""
        user_notes = self.getUserNotes(server_id)
        if not notes:
            user_notes.pop(user_id, None)
        else:
            user_notes[user_id] = notes
        self.save_settings()
        return notes

    def addNoteForUser(self, server_id, user_id, note: str):
        """Append `note` to a user's notes and persist the result."""
        notes = self.getNotesForUser(server_id, user_id)
        notes.append(note)
        self.setNotesForUser(server_id, user_id, notes)
        return notes
| 34.357664 | 102 | 0.626089 | import re
import discord
from redbot.core import checks
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import inline, box, pagify
from tsutils import CogSettings
class ModNotes(commands.Cog):
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.settings = ModNotesSettings("modnotes")
@commands.group(aliases=["usernote"])
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def usernotes(self, ctx):
async def red_get_data_for_user(self, *, user_id):
data = "No data is stored for user with ID {}.\n".format(user_id)
return {"user_data.txt": BytesIO(data.encode())}
async def red_delete_data_for_user(self, *, requester, user_id):
return
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def get(self, ctx, user: discord.User):
notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
if not notes:
await ctx.send(box('No notes for {}'.format(user.name)))
return
for idx, note in enumerate(notes):
await ctx.send(inline('Note {} of {}:'.format(idx + 1, len(notes))))
await ctx.send(box(note))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def add(self, ctx, user: discord.User, *, note_text: str):
timestamp = str(ctx.message.created_at)[:-7]
msg = 'Added by {} ({}): {}'.format(ctx.author.name, timestamp, note_text)
server_id = ctx.guild.id
notes = self.settings.addNoteForUser(server_id, user.id, msg)
await ctx.send(inline('Done. User {} now has {} notes'.format(user.name, len(notes))))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def delete(self, ctx, user: discord.User, note_num: int):
notes = self.settings.getNotesForUser(ctx.guild.id, user.id)
if len(notes) < note_num:
await ctx.send(box('Note not found for {}'.format(user.name)))
return
note = notes[note_num - 1]
notes.remove(note)
self.settings.setNotesForUser(ctx.guild.id, user.id, notes)
await ctx.send(inline('Removed note {}. User has {} remaining.'.format(note_num, len(notes))))
await ctx.send(box(note))
@usernotes.command()
@checks.mod_or_permissions(manage_guild=True)
async def list(self, ctx):
user_notes = self.settings.getUserNotes(ctx.guild.id)
msg = 'Notes for {} users'.format(len(user_notes))
for user_id, notes in user_notes.items():
user = ctx.guild.get_member(user_id)
user_text = '{} ({})'.format(user.name, user.id) if user else user_id
msg += '\n\t{} : {}'.format(len(notes), user_text)
for page in pagify(msg):
await ctx.send(box(page))
class ModNotesSettings(CogSettings):
def make_default_settings(self):
config = {
'servers': {}
}
return config
def servers(self):
return self.bot_settings['servers']
def getServer(self, server_id):
servers = self.servers()
if server_id not in servers:
servers[server_id] = {}
return servers[server_id]
def getUserNotes(self, server_id):
server = self.getServer(server_id)
key = 'user_notes'
if key not in server:
server[key] = {}
return server[key]
def getNotesForUser(self, server_id, user_id):
user_notes = self.getUserNotes(server_id)
return user_notes.get(user_id, [])
def setNotesForUser(self, server_id, user_id, notes):
user_notes = self.getUserNotes(server_id)
if notes:
user_notes[user_id] = notes
else:
user_notes.pop(user_id, None)
self.save_settings()
return notes
def addNoteForUser(self, server_id, user_id, note: str):
notes = self.getNotesForUser(server_id, user_id)
notes.append(note)
self.setNotesForUser(server_id, user_id, notes)
return notes
| true | true |
f71d75e458e3cb8f6494897fe66d3dc4ecbfc02d | 209 | py | Python | algorithms/1047. Remove All Adjacent Duplicates In String.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | 1 | 2020-12-02T13:54:30.000Z | 2020-12-02T13:54:30.000Z | algorithms/1047. Remove All Adjacent Duplicates In String.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | algorithms/1047. Remove All Adjacent Duplicates In String.py | woozway/py3-leetcode | e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf | [
"MIT"
] | null | null | null | class Solution:
def removeDuplicates(self, S: str) -> str:
stack = []
for c in S:
if stack and stack[-1] == c:
stack.pop()
else:
stack.append(c)
return ''.join(stack)
| 20.9 | 44 | 0.5311 | class Solution:
def removeDuplicates(self, S: str) -> str:
stack = []
for c in S:
if stack and stack[-1] == c:
stack.pop()
else:
stack.append(c)
return ''.join(stack)
| true | true |
f71d7736dd803f04fc96aeb047ba3df44f0a3091 | 3,802 | py | Python | orttraining/orttraining/python/training/_ortmodule_utils.py | alexshuang/onnxruntime | 771a6d235b8495d05bcf6a906107df1bd6e81744 | [
"MIT"
] | null | null | null | orttraining/orttraining/python/training/_ortmodule_utils.py | alexshuang/onnxruntime | 771a6d235b8495d05bcf6a906107df1bd6e81744 | [
"MIT"
] | null | null | null | orttraining/orttraining/python/training/_ortmodule_utils.py | alexshuang/onnxruntime | 771a6d235b8495d05bcf6a906107df1bd6e81744 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _utils
from onnxruntime.capi.onnxruntime_inference_collection import OrtValue
from onnxruntime.capi import _pybind_state as C
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch.utils.cpp_extension import load_inline
def _ortvalue_to_torch_tensor(ortvalue):
    """Convert an OrtValue to a torch tensor via DLPack (zero-copy).

    PyTorch's to_dlpack() uses the same config for torch.bool and torch.uint8,
    and from_dlpack() always yields torch.uint8 for that config, so boolean
    OrtValues must be cast back to torch.bool after the round trip.
    """
    tensor = from_dlpack(ortvalue._ortvalue.to_dlpack())
    if ortvalue.data_type() == 'tensor(bool)':
        return tensor.to(torch.bool)
    return tensor
def _ortvalue_from_torch_tensor(torch_tensor):
    # Wrap a torch tensor as an OrtValue via DLPack; the extra flag tells ORT
    # the buffer holds booleans (DLPack cannot express torch.bool itself).
    return OrtValue(C.OrtValue.from_dlpack(to_dlpack(torch_tensor), torch_tensor.dtype == torch.bool))
def _load_torch_gpu_allocator_cpp_extension(verbosity, is_rocm_pytorch):
    # JIT-compile a tiny inline C++ extension exposing the raw addresses of
    # PyTorch's GPU caching-allocator alloc/free entry points, so ORT can
    # allocate through the same pool as PyTorch. Header and namespace names
    # differ between the CUDA and ROCm builds of PyTorch.
    gpu_identifier = "hip" if is_rocm_pytorch else "cuda"
    gpu_allocator_header = "HIPCachingAllocator" if is_rocm_pytorch else "CUDACachingAllocator"
    torch_gpu_allocator_addresses_cpp_source = f'''
        #include <torch/extension.h>
        #include <c10/{gpu_identifier}/{gpu_allocator_header}.h>
        size_t gpu_caching_allocator_raw_alloc_address() {{
            return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_alloc);
        }}
        size_t gpu_caching_allocator_raw_delete_address() {{
            return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_delete);
        }}
    '''
    # The functions return the addresses as size_t so they can be passed to
    # ORT through Python as plain integers.
    return load_inline(name='inline_extension',
                       cpp_sources=[torch_gpu_allocator_addresses_cpp_source],
                       extra_cflags=['-D__HIP_PLATFORM_HCC__=1' if is_rocm_pytorch else ''],
                       functions=['gpu_caching_allocator_raw_alloc_address',
                                  'gpu_caching_allocator_raw_delete_address'],
                       verbose=verbosity,
                       with_cuda=True)
def _check_same_device(device, argument_str, *args):
'''Check that all tensor arguments in *args reside on the same device as the input device'''
assert isinstance(device, torch.device), '`device` must be a valid `torch.device` object'
for arg in args:
if arg is not None and isinstance(arg, torch.Tensor):
arg_device = torch.device(arg.device)
if arg_device != device:
raise RuntimeError(
f"{argument_str} found on device {arg_device}, but expected it to be on module device {device}.")
def get_device_from_module(module):
    """Return the device shared by all of `module`'s parameters, or None.

    Raises:
        RuntimeError: when the parameters span more than one device.
    """
    params = iter(module.parameters())
    try:
        device = next(params).device
    except StopIteration:
        # Parameter-less model: there is no device to report.
        return None
    for param in module.parameters():
        if param.device != device:
            raise RuntimeError('ORTModule supports a single device per model for now')
    return device
def _create_iobinding(io_binding, inputs, model, device):
    '''Creates IO binding for a `model` inputs and output'''
    # Bind each graph input positionally to the matching torch tensor
    # (zero-copy via DLPack); assumes len(inputs) >= number of graph inputs.
    for idx, value_info in enumerate(model.graph.input):
        io_binding.bind_ortvalue_input(value_info.name, _ortvalue_from_torch_tensor(inputs[idx]))
    # Outputs are bound by device only; ORT allocates the output buffers.
    for value_info in model.graph.output:
        io_binding.bind_output(value_info.name, device.type, device_id=_utils.get_device_index(device))
| 44.209302 | 117 | 0.675697 |
from . import _utils
from onnxruntime.capi.onnxruntime_inference_collection import OrtValue
from onnxruntime.capi import _pybind_state as C
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
from torch.utils.cpp_extension import load_inline
def _ortvalue_to_torch_tensor(ortvalue):
# and convert the config to torch.uint8 tensor duing from_dlpack().
# So we need to convert the torch tensor to torch.bool type if OrtValue is bool tensor.
torch_tensor = from_dlpack(ortvalue._ortvalue.to_dlpack())
return torch_tensor.to(torch.bool) if ortvalue.data_type() == 'tensor(bool)' else torch_tensor
def _ortvalue_from_torch_tensor(torch_tensor):
return OrtValue(C.OrtValue.from_dlpack(to_dlpack(torch_tensor), torch_tensor.dtype == torch.bool))
def _load_torch_gpu_allocator_cpp_extension(verbosity, is_rocm_pytorch):
gpu_identifier = "hip" if is_rocm_pytorch else "cuda"
gpu_allocator_header = "HIPCachingAllocator" if is_rocm_pytorch else "CUDACachingAllocator"
torch_gpu_allocator_addresses_cpp_source = f'''
#include <torch/extension.h>
#include <c10/{gpu_identifier}/{gpu_allocator_header}.h>
size_t gpu_caching_allocator_raw_alloc_address() {{
return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_alloc);
}}
size_t gpu_caching_allocator_raw_delete_address() {{
return reinterpret_cast<size_t>(&c10::{gpu_identifier}::{gpu_allocator_header}::raw_delete);
}}
'''
return load_inline(name='inline_extension',
cpp_sources=[torch_gpu_allocator_addresses_cpp_source],
extra_cflags=['-D__HIP_PLATFORM_HCC__=1' if is_rocm_pytorch else ''],
functions=['gpu_caching_allocator_raw_alloc_address',
'gpu_caching_allocator_raw_delete_address'],
verbose=verbosity,
with_cuda=True)
def _check_same_device(device, argument_str, *args):
assert isinstance(device, torch.device), '`device` must be a valid `torch.device` object'
for arg in args:
if arg is not None and isinstance(arg, torch.Tensor):
arg_device = torch.device(arg.device)
if arg_device != device:
raise RuntimeError(
f"{argument_str} found on device {arg_device}, but expected it to be on module device {device}.")
def get_device_from_module(module):
device = None
try:
device = next(module.parameters()).device
for param in module.parameters():
if param.device != device:
raise RuntimeError('ORTModule supports a single device per model for now')
except StopIteration:
# Model doesn't have a device set to any of the model parameters
pass
return device
def _create_iobinding(io_binding, inputs, model, device):
for idx, value_info in enumerate(model.graph.input):
io_binding.bind_ortvalue_input(value_info.name, _ortvalue_from_torch_tensor(inputs[idx]))
for value_info in model.graph.output:
io_binding.bind_output(value_info.name, device.type, device_id=_utils.get_device_index(device))
| true | true |
f71d7757a47a3d5539caee900fabbeca9eb0d27c | 2,709 | py | Python | share/qt/extract_strings_qt.py | CoinStaging/Apollon | 0c78aa921244d67517ecb699b48767093e95de69 | [
"MIT"
] | 17 | 2020-03-21T21:03:31.000Z | 2021-04-07T18:45:38.000Z | share/qt/extract_strings_qt.py | CoinStaging/Apollon | 0c78aa921244d67517ecb699b48767093e95de69 | [
"MIT"
] | 8 | 2020-04-05T19:14:44.000Z | 2020-05-22T04:15:31.000Z | share/qt/extract_strings_qt.py | CoinStaging/Apollon | 0c78aa921244d67517ecb699b48767093e95de69 | [
"MIT"
] | 19 | 2020-03-04T19:32:03.000Z | 2021-11-16T04:27:14.000Z | #!/usr/bin/python
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """Parse the 'po' format produced by xgettext.

    Returns a list of (msgid, msgstr) tuples, where each element is the list
    of quoted source lines making up the entry (continuation lines included).
    """
    entries = []
    msgid = []
    msgstr = []
    mode = None  # None | 'id' | 'str': which entry continuation lines extend
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            # A new msgid closes the previous entry, if one was complete.
            if mode == 'str':
                entries.append((msgid, msgstr))
            msgid = [line[6:]]
            mode = 'id'
        elif line.startswith('msgstr '):
            msgstr = [line[7:]]
            mode = 'str'
        elif line.startswith('"'):
            if mode == 'id':
                msgid.append(line)
            elif mode == 'str':
                msgstr.append(line)
    if mode == 'str':
        entries.append((msgid, msgstr))
    return entries
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
# Honor an XGETTEXT override from the environment (set by ./configure).
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
    print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
    print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
    exit(1)
# Run xgettext over every source file, collecting _("...") strings on stdout.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
# Emit the collected strings as QT_TRANSLATE_NOOP entries so Qt linguist
# picks them up.
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
# The package name and copyright holders are always translatable.
f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
    f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
# Sort by msgid so regeneration produces a stable diff; skip the empty msgid
# (the PO header entry).
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("index-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 29.769231 | 105 | 0.641565 |
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("index-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("index-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| true | true |
f71d7805e1d138fa87506d9d152885ae11b2c0b8 | 8,756 | py | Python | cottontail/rabbitmq_management.py | 3lpsy/cottontail | fb7f364ea00bf6fe4b575818d562bd1df26407b9 | [
"BSD-3-Clause"
] | 21 | 2018-02-02T20:20:08.000Z | 2021-04-12T13:12:45.000Z | cottontail/rabbitmq_management.py | 3lpsy/cottontail | fb7f364ea00bf6fe4b575818d562bd1df26407b9 | [
"BSD-3-Clause"
] | 15 | 2017-08-21T08:41:03.000Z | 2020-11-25T22:05:43.000Z | cottontail/rabbitmq_management.py | 3lpsy/cottontail | fb7f364ea00bf6fe4b575818d562bd1df26407b9 | [
"BSD-3-Clause"
] | 6 | 2019-04-23T06:19:04.000Z | 2021-02-14T01:48:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Quentin Kaiser <kaiserquentin@gmail.com>
#
# let's disable 'too many public methods'
# pylint: disable=R0904
"""
rabbitmq-management HTTP API client.
Example:
rbmq = RabbitMQManagementClient('localhost')
rbmq.whoami()
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class UnauthorizedAccessException(Exception):
    """Raised when the management API answers HTTP 401 or 403."""
    pass
class RabbitMQManagementClient(object):
    """rabbitmq-management HTTP API client.

    Attributes:
        host (str): server host
        port (int, optional): server port
        username (str, optional): account's username
        password (str, optional): account's password
    """

    def __init__(self, host, port=15672, username="guest", password="guest",\
            ssl=False):
        """Constructor

        Args:
            host (str): server host
            port (int, optional): server port
            username (str, optional): account's username
            password (str, optional): account's password
            ssl (bool, optional): use HTTPS instead of HTTP
        """
        self._host = host
        self._port = port
        self._username = username
        self._password = password
        self._scheme = "https" if ssl else "http"

    def get_request(self, path):
        """Wrapper for GET requests to the API.

        Args:
            path (str): REST path appended to /api

        Returns:
            Decoded JSON response, or None on HTTP 404.

        Raises:
            UnauthorizedAccessException: on HTTP 401/403.
            Exception: on any other non-200 status.
        """
        response = requests.get(
            "{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
            auth=(self._username, self._password),
            # Management interfaces commonly run with self-signed certs.
            verify=False,
            timeout=5
        )
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 401 or response.status_code == 403:
            raise UnauthorizedAccessException(
                "Authorization error: can't access /api/{}".format(path))
        elif response.status_code == 404:
            return None
        else:
            raise Exception("An error occured")

    def post_request(self, path, data):
        """Wrapper for POST requests to the API.

        Args:
            path (str): REST path appended to /api
            data (object): JSON-serializable POST body

        Returns:
            Decoded JSON response.

        Raises:
            UnauthorizedAccessException: on HTTP 401/403.
            Exception: on any other non-200 status.
        """
        response = requests.post(
            "{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
            auth=(self._username, self._password),
            json=data,
            verify=False
        )
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 401 or response.status_code == 403:
            raise UnauthorizedAccessException(
                "Authorization error: can't access /api/{}".format(path))
        else:
            raise Exception("An error occured")

    def get_amqp_listeners(self):
        """
        Request the API for AMQP listeners.
        """
        overview = self.get_overview()
        return [l for l in overview["listeners"] if "amqp" in l["protocol"]]

    def get_overview(self):
        """
        Various random bits of information that describe the whole system.
        """
        return self.get_request("overview")

    def get_cluster_name(self):
        """
        Name identifying this RabbitMQ cluster.
        """
        return self.get_request("cluster-name")

    def get_nodes(self):
        """
        A list of nodes in the RabbitMQ cluster.
        """
        return self.get_request("nodes")

    def get_node(self, name, memory=False, binary=False):
        """
        An individual node in the RabbitMQ cluster.
        """
        return self.get_request("nodes/{}?memory={}&binary={}".format(
            name, str(memory).lower(), str(binary).lower()))

    def get_definitions(self, vhost=None):
        """
        The server definitions - exchanges, queues, bindings, users,
        virtual hosts, permissions and parameters.
        Everything apart from messages.
        """
        if vhost is not None:
            return self.get_request("definitions/{}".format(
                quote(vhost, safe='')))
        return self.get_request("definitions")

    def get_connections(self, vhost=None):
        """
        A list of all open connections.
        """
        if vhost is not None:
            return self.get_request("vhosts/{}/connections".format(
                quote(vhost, safe='')))
        return self.get_request("connections")

    def get_connection(self, name):
        """
        An individual connection.
        """
        return self.get_request("connections/{}".format(name))

    def get_channels(self, vhost=None):
        """
        A list of all open channels.
        """
        if vhost is not None:
            return self.get_request("vhosts/{}/channels".format(
                quote(vhost, safe='')))
        return self.get_request("channels")

    def get_channel(self, name):
        """
        Details about an individual channel.
        """
        # Channel names contain spaces (e.g. "127.0.0.1:5672 -> ... (1)").
        return self.get_request("channels/{}".format(name.replace(" ", "%20")))

    def get_consumers(self, vhost=None):
        """
        A list of all consumers (in a given vhosts).
        """
        if vhost is not None:
            return self.get_request("consumers/{}".format(
                quote(vhost, safe='')))
        return self.get_request("consumers")

    def get_exchanges(self, vhost=None):
        """
        A list of all exchanges (in a given vhost).
        """
        if vhost is not None:
            return self.get_request("exchanges/{}".format(
                quote(vhost, safe='')))
        return self.get_request("exchanges")

    def get_exchange(self, vhost, name):
        """
        An individual exchange.
        """
        return self.get_request("exchanges/{}/{}".format(
            quote(vhost, safe=''), name))

    def get_queues(self, vhost=None):
        """
        A list of all queues.
        """
        if vhost is not None:
            return self.get_request("queues/{}".format(quote(vhost, safe='')))
        return self.get_request("queues")

    def get_queue(self, vhost, name):
        """
        An individual queue.
        """
        # BUG FIX: the endpoint is /api/queues/<vhost>/<name> (plural), and
        # the vhost must be percent-encoded ('/' -> %2F) as in every other
        # method; the old "queue/{}/{}" path always returned 404.
        return self.get_request("queues/{}/{}".format(
            quote(vhost, safe=''), name))

    def get_messages(self, vhost, queue, count=10, requeue=True):
        """
        Get messages currently stored in queue.
        """
        return self.post_request(
            "queues/{}/{}/get".format(quote(vhost, safe=''), queue),
            {
                "count": count,
                "encoding": "auto",
                "name": queue,
                "requeue": str(requeue).lower(),
                "vhost": vhost
            }
        )

    def get_bindings(self, vhost=None):
        """
        A list of all bindings (in a given virtual host).
        """
        if vhost is not None:
            return self.get_request("bindings/{}".format(
                quote(vhost, safe='')))
        return self.get_request("bindings")

    def get_vhosts(self):
        """
        A list of all vhosts.
        """
        return self.get_request("vhosts")

    def get_vhost(self, name):
        """
        An individual virtual host.
        """
        return self.get_request("vhosts/{}".format(quote(name, safe='')))

    def get_permissions(self, name=None, username=None):
        """
        A list of all permissions (optionally for one vhost, or one
        vhost/user pair).
        """
        if name is None:
            return self.get_request("permissions")
        else:
            if username is None:
                return self.get_request("permissions/{}".format(quote(name, safe='')))
            else:
                return self.get_request("permissions/{}/{}".format(
                    quote(name, safe=''), quote(username, safe='')))

    def get_users(self):
        """
        A list of all users.
        """
        return self.get_request("users")

    def get_user(self, name):
        """
        An individual user.
        """
        return self.get_request("users/{}".format(name))

    def whoami(self):
        """
        Details of the currently authenticated user.
        """
        return self.get_request("whoami")
| 29.681356 | 86 | 0.559502 |
# pylint: disable=R0904
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class UnauthorizedAccessException(Exception):
pass
class RabbitMQManagementClient(object):
def __init__(self, host, port=15672, username="guest", password="guest",\
ssl=False):
self._host = host
self._port = port
self._username = username
self._password = password
self._scheme = "https" if ssl else "http"
def get_request(self, path):
response = requests.get(
"{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
auth=(self._username, self._password),
verify=False,
timeout=5
)
if response.status_code == 200:
return response.json()
elif response.status_code == 401 or response.status_code == 403:
raise UnauthorizedAccessException(
"Authorization error: can't access /api/{}".format(path))
elif response.status_code == 404:
return None
else:
raise Exception("An error occured")
def post_request(self, path, data):
response = requests.post(
"{}://{}:{}/api/{}".format(self._scheme, self._host, self._port, path),
auth=(self._username, self._password),
json=data,
verify=False
)
if response.status_code == 200:
return response.json()
elif response.status_code == 401 or response.status_code == 403:
raise UnauthorizedAccessException(
"Authorization error: can't access /api/{}".format(path))
else:
raise Exception("An error occured")
def get_amqp_listeners(self):
overview = self.get_overview()
return [l for l in overview["listeners"] if "amqp" in l["protocol"]]
def get_overview(self):
return self.get_request("overview")
def get_cluster_name(self):
return self.get_request("cluster-name")
def get_nodes(self):
return self.get_request("nodes")
def get_node(self, name, memory=False, binary=False):
return self.get_request("nodes/{}?memory={}&binary={}".format(
name, str(memory).lower(), str(binary).lower()))
def get_definitions(self, vhost=None):
if vhost is not None:
return self.get_request("definitions/{}".format(
quote(vhost, safe='')))
return self.get_request("definitions")
def get_connections(self, vhost=None):
if vhost is not None:
return self.get_request("vhosts/{}/connections".format(
quote(vhost, safe='')))
return self.get_request("connections")
def get_connection(self, name):
return self.get_request("connections/{}".format(name))
def get_channels(self, vhost=None):
if vhost is not None:
return self.get_request("vhosts/{}/channels".format(
quote(vhost, safe='')))
return self.get_request("channels")
def get_channel(self, name):
return self.get_request("channels/{}".format(name.replace(" ", "%20")))
def get_consumers(self, vhost=None):
if vhost is not None:
return self.get_request("consumers/{}".format(
quote(vhost, safe='')))
return self.get_request("consumers")
def get_exchanges(self, vhost=None):
if vhost is not None:
return self.get_request("exchanges/{}".format(
quote(vhost, safe='')))
return self.get_request("exchanges")
def get_exchange(self, vhost, name):
return self.get_request("exchanges/{}/{}".format(
quote(vhost, safe=''), name))
def get_queues(self, vhost=None):
if vhost is not None:
return self.get_request("queues/{}".format(quote(vhost, safe='')))
return self.get_request("queues")
def get_queue(self, vhost, name):
return self.get_request("queue/{}/{}".format(vhost, name))
def get_messages(self, vhost, queue, count=10, requeue=True):
return self.post_request(
"queues/{}/{}/get".format(quote(vhost, safe=''), queue),
{
"count": count,
"encoding": "auto",
"name": queue,
"requeue": str(requeue).lower(),
"vhost": vhost
}
)
def get_bindings(self, vhost=None):
if vhost is not None:
return self.get_request("bindings/{}".format(
quote(vhost, safe='')))
return self.get_request("bindings")
def get_vhosts(self):
return self.get_request("vhosts")
def get_vhost(self, name):
return self.get_request("vhosts/{}".format(quote(name, safe='')))
def get_permissions(self, name=None, username=None):
if name is None:
return self.get_request("permissions")
else:
if username is None:
return self.get_request("permissions/{}".format(quote(name, safe='')))
else:
return self.get_request("permissions/{}/{}".format(
quote(name, safe=''), quote(username, safe='')))
def get_users(self):
return self.get_request("users")
def get_user(self, name):
return self.get_request("users/{}".format(name))
def whoami(self):
return self.get_request("whoami")
| true | true |
f71d78df585dd1d4cbf186e5203991da862bdf93 | 1,237 | py | Python | esp8266_devicecloud.py | faludi/ESP8266_DeviceCloud | 3855e745d06050f4e988afe08208389ece7fbd15 | [
"MIT"
] | null | null | null | esp8266_devicecloud.py | faludi/ESP8266_DeviceCloud | 3855e745d06050f4e988afe08208389ece7fbd15 | [
"MIT"
] | null | null | null | esp8266_devicecloud.py | faludi/ESP8266_DeviceCloud | 3855e745d06050f4e988afe08208389ece7fbd15 | [
"MIT"
] | null | null | null | """
Example for posting data to Device Cloud for data graphing, storage and analysis.
(digi.com/products/cloud/digi-device-cloud)
by Rob Faludi, faludi.com
"""
import time
import httpclient
import ubinascii
version = '1.0.0'
username = 'your username here' #enter your username!
password = 'your password here' #enter your password!
# Device Cloud connection info
url = 'http://devicecloud.digi.com/ws/v1/streams/history'
headers= {'authorization': 'Basic ' + auth}
stream_id = 'myStream'
stream_type = 'DOUBLE'
auth = ubinascii.b2a_base64(username + ':' + password).decode().strip() #base64 encoding
# posts data to Digi Device Cloud
def dc_post(data):
json={"stream_id": stream_id, "stream_type": stream_type, "value": str(data)}
r = httpclient.post(url, headers=headers, json=json)
r.close()
return r.status_code
# example program: posts 0 to 499 to a Device Cloud data stream
if __name__ == "__main__":
for data in range(500):
try:
status = dc_post(data) # post data to Device Cloud
except Exception as e:
status = type(e).__name__ + ': ' + str(e)
print('exception:', e)
print('post', data, 'status:', status)
time.sleep(1)
| 27.488889 | 88 | 0.667745 |
import time
import httpclient
import ubinascii
version = '1.0.0'
username = 'your username here'
password = 'your password here'
url = 'http://devicecloud.digi.com/ws/v1/streams/history'
headers= {'authorization': 'Basic ' + auth}
stream_id = 'myStream'
stream_type = 'DOUBLE'
auth = ubinascii.b2a_base64(username + ':' + password).decode().strip()
def dc_post(data):
json={"stream_id": stream_id, "stream_type": stream_type, "value": str(data)}
r = httpclient.post(url, headers=headers, json=json)
r.close()
return r.status_code
if __name__ == "__main__":
for data in range(500):
try:
status = dc_post(data)
except Exception as e:
status = type(e).__name__ + ': ' + str(e)
print('exception:', e)
print('post', data, 'status:', status)
time.sleep(1)
| true | true |
f71d79ae1cce67c14af83bbfe117b78d09e46318 | 27,697 | py | Python | fmsynth.py | Paul-31415/soundplay | 0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e | [
"MIT"
] | null | null | null | fmsynth.py | Paul-31415/soundplay | 0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e | [
"MIT"
] | null | null | null | fmsynth.py | Paul-31415/soundplay | 0e7ea27c6d4bdf5f94e5034c7775a10c62d1583e | [
"MIT"
] | null | null | null |
from itools import lmbdWr,lm
import itertools
from bisect import bisect_right
import brailleG as gr
def abs2(n):
    """Squared magnitude of n (works for real and complex values)."""
    conj_prod = n * n.conjugate()
    return conj_prod.real
def fsample(buf,m=1,b=0):
    """Feedback wavetable oscillator coroutine.

    Walks `buf` with linear interpolation between adjacent entries, wrapping
    at the end.  Each step the read index advances by ``b + m*y`` where `y`
    is the value sent in by the caller, so it must be driven with
    ``gen.send(value)`` after one priming ``next()`` -- plain iteration sends
    None and would fail on ``m*y``.
    """
    index = 0
    y = 0
    while 1:
        # advance the read head; m scales the external (FM) drive, b is the
        # constant per-step rate
        index = (index+b+m*y)%len(buf)
        # linear interpolation between buf[floor] and buf[floor+1] (wrapped)
        y = yield buf[(int(index)+1)%len(buf)]*(index%1)+buf[int(index)]*(1-(index%1))
def fsine(a=1,m=1/48000,b=0):
    """Frequency-modulated sine oscillator coroutine (rotating phasor).

    (s, c) behaves like an amplitude-a sine/cosine pair advanced by angle
    ``b + m*y`` per step, where `y` is the value sent in by the caller
    (drive with ``gen.send(value)`` after a priming ``next()``).
    The shear update (s then c) keeps the rotation numerically stable for
    small angles.
    """
    s = 0
    c = a
    y = 0
    while 1:
        amt = b+m*y
        s += c*amt
        c -= s*amt
        y = yield s
import math
pi = math.pi
eone = math.exp(2*pi)
buffer_size = 8192
sinBuffer = [math.sin(i*2*math.pi/4/buffer_size) for i in range(buffer_size+1)]
def nsin(a):
    """Table-lookup sine with period 1 (a is a phase in cycles).

    Uses the module-level quarter-wave table `sinBuffer` (buffer_size+1
    samples of the first quarter cycle); the four quarter-cycle cases are
    handled by reflecting/negating the table index.
    NOTE(review): the second and fourth branches produce negative indices
    (e.g. floor(buffer_size-a)-1) and rely on Python's negative-index
    wrap-around into the same table -- looks intended, but confirm the
    boundary samples are exact.
    """
    a = 4*buffer_size*(a%1)
    if a<=buffer_size:
        return sinBuffer[math.floor(a)]
    elif a<=buffer_size*2:
        return sinBuffer[math.floor(buffer_size-a)-1]
    elif a<=buffer_size*3:
        return -sinBuffer[math.floor(a-buffer_size*2)]
    else:
        return -sinBuffer[math.floor(buffer_size*3-a)-1]
def nsaw(a):
    """Naive sawtooth with period 1, rising from -1 to 1."""
    frac = a % 1
    return 2 * frac - 1
def ntri(a):
    """Naive triangle wave with period 1: 1 at phase 0, -1 at phase 0.5."""
    return 4 * abs((a % 1) - .5) - 1
def nsquare(a,p=.5):
    """Naive pulse wave with period 1 and duty cycle p (+1 while phase < p)."""
    return 1 if (a % 1) < p else -1
lsin = lm(nsin)
lsaw = lm(nsaw)
ltri = lm(ntri)
lsqr = lm(nsquare)
def pulse(w=.5):
    """Return a pulse-wave function of phase with duty cycle w."""
    def wave(v):
        return 2 * ((v % 1) < w) - 1
    return wave
def tri(w=.5):
    """Return a triangle-wave function of phase whose peak sits at phase w."""
    def wave(v):
        v %= 1
        if v < w:
            # rising edge: -1 at 0 up to +1 at w
            return 2 * v / w - 1
        # falling edge: +1 at w down to -1 at 1
        return 2 * (w - v) / (1 - w) + 1
    return wave
_reserved = []
def polyvars(varstr):
    """Make one Polynomial variable per character of varstr, e.g. x, t = polyvars("xt")."""
    return list(map(Polynomial, varstr))
class Polynomial:
    """A univariate polynomial with a named variable.

    Coefficients are stored lowest degree first in `self.a`; coefficients may
    themselves be Polynomials in a *different* variable, giving a poor-man's
    multivariate polynomial (see simplified()).  Trailing zero coefficients
    are trimmed on construction.
    """
    def __init__(self,coef,var="x"):
        # Polynomial("x") is shorthand for the variable x itself ([0,1])
        if type(coef) == str:
            var = coef
            coef = [0,1]
        self.a = coef
        self.var = var
        self.trim()
    def trim(self):
        """Drop trailing zero coefficients in place."""
        while len(self.a):
            if self.a[-1] == 0:
                self.a = self.a[:-1]
            else:
                break
    def simplified(self,tv=None):
        """Flatten nested Polynomial coefficients into one polynomial in variable tv."""
        if tv == None:
            tv = self.var
        if tv == self.var:
            r = Polynomial([],tv)
            x = Polynomial(tv)
            xa = 1
            for t in self.a:
                if type(t) == Polynomial:
                    r += t.simplified(tv)*xa
                else:
                    r += t*xa
                xa *= x
        else:
            # keep powers of self.var as coefficients of the tv-polynomial;
            # __mul__ is called explicitly to force this operand order
            r = Polynomial([],tv)
            x = Polynomial(self.var)
            xa = 1
            for t in self.a:
                if type(t) == Polynomial:
                    r += t.simplified(tv).__mul__(xa)
                else:
                    r += t*xa
                xa *= x
        return r
    def __call__(self,vrs):
        """Evaluate/substitute.  `vrs` is a value for this variable or a dict
        {varname: value}; unknown variables are left symbolic."""
        if type(vrs) != dict:
            vrs = {self.var:vrs}
        if self.var in vrs:
            x = vrs[self.var]
            v = 0
            xa = 1
            for t in self.a:
                if type(t) == Polynomial:
                    t = t(vrs)
                v += xa*t
                xa *= x
            return v
        # our own variable is not substituted: recurse into the coefficients
        return Polynomial([t(vrs) if type(t) == Polynomial else t for t in self.a],self.var)
    def __getitem__(self,i):
        # out-of-range degrees read as zero coefficients
        if i>=len(self):
            return 0
        return self.a[i]
    def __setitem__(self,i,v):
        if i>=len(self):
            self.a += [0]*(i-len(self))+[v]
        else:
            self.a[i] = v
            self.trim()
    def __neg__(self):
        return Polynomial([-i for i in self.a],self.var)
    def __radd__(self,o):
        return self.__add__(o)
    def __add__(self,o):
        # same-variable polynomials add coefficient-wise; anything else
        # (scalars or other-variable polynomials) is added to the constant term
        if type(o) == Polynomial and o.var == self.var:
            return self.padd(o)
        return self.npadd(o)
    def padd(self,o):
        """Add a same-variable Polynomial."""
        return Polynomial(sumPolyn(self.a,o.a),self.var)
    def npadd(self,o):
        """Add a non-polynomial (or other-variable) term to the constant coefficient."""
        if len(self.a):
            return Polynomial([self.a[0]+o]+self.a[1:],self.var)
        return Polynomial([o],self.var)
    def __rsub__(self,o):
        return -self.__sub__(o)
    def __sub__(self,o):
        if type(o) == Polynomial and o.var == self.var:
            return self.psub(o)
        return self.npsub(o)
    def psub(self,o):
        return self.padd(-o)
    def npsub(self,o):
        if len(self.a):
            return Polynomial([self.a[0]-o]+self.a[1:],self.var)
        return Polynomial([-o],self.var)
    def __rmul__(self,o):
        return self.__mul__(o)
    def __mul__(self,o):
        # same-variable polynomials convolve; anything else scales every coefficient
        if type(o) == Polynomial and o.var == self.var:
            return self.pmul(o)
        return self.npmul(o)
    def pmul(self,o):
        """Multiply by a same-variable Polynomial (coefficient convolution)."""
        return Polynomial(prodPolyn(self.a,o.a),self.var)
    def npmul(self,o):
        """Scale every coefficient by a non-polynomial (or other-variable) factor."""
        if len(self.a):
            return Polynomial([e*o for e in self.a],self.var)
        return Polynomial([],self.var)
    #def __divmod__(self,o):
    #def __repr__(self,var=None):
    #    if var == None:
    #        var = self.var
    #    return f"polyn({var}) = "+" + ".join((f"({self.a[i]})"+["",f"{var}"][i>0]+["",f"**{i}"][i>1] for i in range(len(self.a))))
    def __repr__(self,var=None):
        # renders e.g. "(2) + (3)x + x**2"; zero terms are skipped
        if var == None:
            var = self.var
        return f"p({var})="*0+" + ".join(((f"({self.a[i]})" if self.a[i] != 1 else ["1",""][i>0])+["",f"{var}"][i>0]+["",f"**{i}"][i>1] for i in range(len(self.a)) if self.a[i] != 0))
    def deriv(self):
        """Derivative with respect to this variable."""
        return Polynomial([self.a[i+1]*(i+1) for i in range(len(self.a)-1)],self.var)
    def integ(self,k=0):
        """Antiderivative with constant term k."""
        return Polynomial([k]+[self.a[i]*(1/(i+1)) for i in range(len(self.a))],self.var)
    def convolve(self,o):
        #integ of self(t-x)o(t) dt
        #want first arg to be x, second to be bounds
        #so,
        x = Polynomial('x')
        t = Polynomial('t')
        integrand = Polynomial([self(t-x)*o(t)],'t')
        return integrand.simplified('t').integ()
    def __len__(self):
        return len(self.a)
    def __eq__(self,o):
        # NOTE(review): falls through (returns None) for unsupported types,
        # and no __hash__ is defined -- confirm instances are never hashed.
        if type(o) == Polynomial:
            if len(o) != len(self) or o.var != self.var:
                return False
            for i in range(len(self)):
                if self.a[i] != o.a[i]:
                    return False
            return True
        if type(o) == float or type(o) == int or type(o) == complex:
            return len(self) <= 1 and (self.a+[0])[0] == o
    def __matmul__(self,o):
        return self.convolve(o)
    def plot(self,*args):
        """Plot the coefficient list via the module-level plotPoly (matplotlib)."""
        plotPoly(self.a,*args)
def evalPolyn(polyn,x):
    """Evaluate a coefficient list (lowest degree first) at x."""
    acc = 0
    power = 1
    for coef in polyn:
        acc += power * coef
        power *= x
    return acc
def sumPolyn(p1,p2):
    """Element-wise sum of two coefficient lists; the longer list's tail passes through."""
    out = [a + b for a, b in zip(p1, p2)]
    tail = p1 if len(p1) > len(p2) else p2
    out += tail[len(out):]
    return out
def prodPolyn(p1,p2):
    """Product of two coefficient lists (convolution); [] is the zero polynomial."""
    if not p1 or not p2:
        return []
    out = [0] * (len(p1) + len(p2) - 1)
    for i, a in enumerate(p1):
        for j, b in enumerate(p2):
            out[i + j] += a * b
    return out
def composePolyn(p1,p2): #returns p1(p2(x))
    """Compose coefficient lists: the result represents p1(p2(x)) (Horner form)."""
    result = []
    for coef in reversed(p1):
        result = sumPolyn(prodPolyn(result, p2), [coef])
    return result
def fourierPolyn(p,freq):
    """Polynomial part q of the antiderivative of p(x)*e^(c*x), c = 2j*pi*freq.

    Repeated integration by parts gives integral(p(x)e^(cx)dx) = q(x)e^(cx);
    evalFourierPolyn() below evaluates that product at the segment bounds.
    (Removed the unused local `mask` left over from an earlier draft.)
    """
    factor = 1/(2j*math.pi*freq)
    result = [0 for i in p]
    for i in range(len(p)):
        # integrate x**i * e^(cx) by parts, walking the degree down to 0
        facacc = factor
        for j in range(i,-1,-1):
            result[j] += facacc*p[i]
            facacc *= -factor*j
    return result
def evalFourierPolyn(p,freq,phase,low,high):
    """Evaluate q(x)*e^(2j*pi*(freq*x+phase)) (q from fourierPolyn) at the
    bounds and return the high-minus-low difference."""
    hi_val = evalPolyn(p,high) * (eone**(1j*(freq*high+phase)))
    lo_val = evalPolyn(p,low) * (eone**(1j*(freq*low+phase)))
    return hi_val - lo_val
def convolvePolyn(p1,p2):
    """Unimplemented stub (superseded by convPolyFrags / PiecewizePoly.convolve)."""
    pass
def softGCD(a,b,f=.01):
    """Euclidean GCD with tolerance f: remainders smaller than f count as zero.

    Used to find an approximate common period of two float periods.
    Rewritten iteratively -- the original recursed once per Euclid step,
    which can exhaust the stack for slowly-converging float inputs.
    """
    while abs(b) > f:
        a, b = b, a % b
    return a
def convPolyFrags(p0,p1,l0,l1):
    """Convolve two finite polynomial fragments of lengths l0 and l1.

    Each fragment is a coefficient list supported on [0, length).  The result
    is an aperiodic (mod == 0) PiecewizePoly: zero before -l0, then the
    three overlap phases (growing / full / shrinking overlap), then zero
    after l1.  Computed symbolically via Polynomial.convolve.
    """
    #convolves 2 polynomial fragments
    if l0 > l1:
        return convPolyFrags(p1,p0,l1,l0)
    #l0 <= l1 from here on
    times = [-l0,0,l1-l0]
    #moving = composePolyn(p0,[t-x])
    def xify(v):
        # collapse a nested-Polynomial value into a flat polynomial in x
        return Polynomial([v],'x').simplified('x')
    p_0 = Polynomial(p0,'x')
    p_1 = Polynomial(p1,'x')
    x = Polynomial('x')
    conv = p_0@p_1
    # evaluate the convolution integral over the three overlap windows
    a,b,c = conv(l0+x)-conv(0),conv(l0+x)-conv(x),conv(l1)-conv(x)
    # re-base each piece so its local origin is its own start time
    a,b,c = [xify(xify((a,b,c)[i])(x+times[i])) for i in range(3)]
    if l1 != l0:
        return PiecewizePoly([[],a.a,b.a,c.a,[]],[-math.inf]+times+[l1],0)
    # equal lengths: the constant middle phase vanishes
    return PiecewizePoly([[],a.a,c.a,[]],[-math.inf]+times[:2]+[l1],0)
def plotPoly(p,t0=0,t1=1,res=50):
    """Plot a coefficient list over [t0, t1] with matplotlib (non-blocking).

    The polynomial is evaluated with its origin at t0; real part is drawn in
    blue, imaginary part in red.  `res` interior samples are used only when
    the polynomial is curved (degree > 1); lines need just the endpoints.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(nrows=1, ncols=1)
    st = p[0]
    end = evalPolyn(p,t1-t0)
    mid = []
    ts = []
    if len(p) > 2:
        for j in range(1,res):
            t = (t1-t0)*j/res
            ts += [t+t0]
            mid += [evalPolyn(p,t)]
    ts = [t0]+ts+[t1]
    ys = [st]+mid+[end]
    plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3,.3,1), linewidth=2)
    plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3,.3), linewidth=2)
    plt.show(block=0)
#todo: closed form convolution
# perhaps use @ (__matmul__)
#todo: closed form composition? possible? not always: requires root finding
class PiecewizePoly:
    """A piecewise polynomial, periodic with period `mod` (aperiodic if mod == 0).

    `polys[i]` is a coefficient list (lowest degree first) valid on
    [times[i], times[i+1]), evaluated with its local origin at times[i].
    NOTE(review): the default polys/times arguments are shared mutable
    defaults -- callers in this file always pass fresh lists, but verify.
    """
    def __init__(self,polys = [[]],times=[0],mod=1):
        self.times = times
        self.polys = polys
        self.mod = mod
    def __call__(self,x):
        """Evaluate at x (wrapped into [0, mod) when periodic)."""
        if self.mod != 0:
            x %= self.mod
        #binary search for correct polyn
        l = bisect_right(self.times,x)-1
        #eval polyn
        return evalPolyn(self.polys[l],x-self.times[l])
    def deriv(self):
        """Piecewise derivative (discontinuities at joins are ignored)."""
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [[]]
            for i in range(len(self.polys[p])-1):
                res_p[-1] += [self.polys[p][i+1]*(i+1)]
        return PiecewizePoly(res_p,res_t,self.mod)
    def integ(self,start=0,scale=1):
        """Piecewise antiderivative made continuous at the joins; the first
        segment starts at `start` and every coefficient is scaled by `scale`."""
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [[start]]
            for i in range(len(self.polys[p])):
                res_p[-1] += [self.polys[p][i]/(i+1)*scale]
        #continuize segments after first: each constant term becomes the
        #previous segment's end value
        for i in range(1,len(res_t)):
            val = evalPolyn(res_p[i-1],res_t[i]-res_t[i-1])
            res_p[i][0] = val#-evalPolyn(res[i][1],res[i][0]) #not needed with new def
        return PiecewizePoly(res_p,res_t,self.mod)
    def timeshift(self,s):
        """Shift right by s IN PLACE (aperiodic only); returns self."""
        assert self.mod==0
        for i in range(len(self.times)):
            self.times[i] -= s
        return self
    def timescale(self,s):
        """Stretch time by factor s IN PLACE (period becomes mod*s); returns self."""
        self.mod *= s
        for i in range(len(self.times)):
            self.times[i] *= s
            self.polys[i] = composePolyn(self.polys[i],[0,1/s])
        return self
    def convolve(self,o,fudgefactor = .001):
        """Convolution with another PiecewizePoly, one segment pair at a time.
        NOTE(review): periodic wrap-around is unfinished (see comment below)
        and `fudgefactor` is unused here."""
        ts = self.times + [self.mod if self.mod else math.inf]
        to = o.times + [o.mod if o.mod else math.inf]
        result = PiecewizePoly([[]],[-math.inf],0)
        for i in range(len(self.polys)):
            for j in range(len(o.polys)):
                pc = convPolyFrags(self.polys[i],o.polys[j],ts[i+1]-ts[i],to[j+1]-to[j])
                result += pc.timeshift(ts[i]-to[j])
        #now do moddy stuff
        return result
    def __matmul__(self,o,fudgefactor = .001):
        return self.convolve(o,fudgefactor)
    def __lmbdWr__(self):
        return lmbdWr(self)
    def __iterWr__(self):
        # NOTE(review): iterWr is not imported in this file -- calling this
        # would raise NameError; confirm the intended source of iterWr.
        return iterWr(iter(lmbdWr(self)))
    def bias(self):
        """Mean (DC) value over one period."""
        intg = self.integ()
        return (intg.end()-intg(0))/self.mod
    def unbiased(self):
        """Copy of self with the DC bias subtracted from every segment."""
        bias = self.bias()
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [sumPolyn([-bias],self.polys[p])]
        return PiecewizePoly(res_p,res_t,self.mod)
    def graph(self,w=40,h=20,lo=-2,hi=2):
        """Braille-art terminal plot of one period."""
        gr.graph(self,0,self.mod,lo,hi,w,h)
    def plot(self,res=50):
        """Matplotlib plot: real part blue, imaginary part red, alternating
        shade per segment; `res` interior samples per curved segment."""
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(nrows=1, ncols=1)
        dash = 0
        for i in range(len(self.polys)):
            dash = 1-dash
            t0 = self.times[i]
            if t0 == -math.inf:
                # give the leading infinite piece a finite 1-unit run-in
                t0 = self.times[i+1]-1
            t1 = (self.times+[self.mod if self.mod != 0 else self.times[-2]+1])[i+1]
            p = self.polys[i]
            st = (p+[0])[0]
            end = evalPolyn(p,t1-t0)
            mid = []
            ts = []
            if len(p) > 2:
                for j in range(1,res):
                    t = (t1-t0)*j/res
                    ts += [t+t0]
                    mid += [evalPolyn(p,t)]
            ts = [t0]+ts+[t1]
            ys = [st]+mid+[end]
            plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3*dash,.3*dash,1), linewidth=2)
            plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3*dash,.3*dash), linewidth=2)
        plt.show(block=0)
    def mag2(self):
        """Mean of self^2 over one period.  The throwaway mod+1 keeps the
        squared curve from wrapping while being integrated.
        NOTE(review): uses p*p rather than p*conj(p), so for complex waves
        this is the mean square, not the mean squared magnitude."""
        sqd = PiecewizePoly([prodPolyn(p,p) for p in self.polys],[t for t in self.times],self.mod+1).integ()
        return (sqd(self.mod)-sqd(0))/self.mod
    def norm(self,v=.5):
        #normalizes it so that integ(0,mod, of self^2) = v*mod
        target = v
        factor = target/self.mag2()**.5
        return PiecewizePoly([[i*factor for i in p] for p in self.polys],[t for t in self.times],self.mod)
    def __add__(self,o,fudgefactor = .001):
        """Pointwise sum.  Two aperiodic waves are merged by walking their
        breakpoints; two periodic waves are summed over the approximate LCM
        of their periods (softGCD with `fudgefactor` tolerance); a scalar is
        added to every segment's constant term."""
        if type(o) == PiecewizePoly:
            if self.mod == 0:
                assert o.mod == 0
                res_t = [-math.inf]
                res_p = [sumPolyn(self.polys[0],o.polys[0])]
                si = 0
                oi = 0
                sts = self.times + [math.inf]
                ots = o.times + [math.inf]
                sp = self.polys + [[]]
                op = o.polys + [[]]
                # merge the two breakpoint lists, re-basing the polynomial
                # that does NOT start at the new breakpoint
                while si < len(self.times) and oi < len(o.times):
                    st,ot = sts[si+1],ots[oi+1]
                    if st < ot:
                        si += 1
                        res_t += [st]
                        res_p += [sumPolyn(sp[si],
                                           composePolyn(op[oi],[st-ot,1]))]
                    elif st > ot:
                        oi += 1
                        res_t += [ot]
                        res_p += [sumPolyn(composePolyn(sp[si],[ot-st,1]),
                                           op[oi])]
                    else:
                        si += 1
                        oi += 1
                        res_t += [st]
                        res_p += [sumPolyn(sp[si],op[oi])]
                return PiecewizePoly(res_p,res_t,0)
            else:
                assert o.mod != 0
                gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5)
                lcm = self.mod*o.mod/gcd
                t = 0
                res_t = []
                res_p = []
                sto = 0
                oto = 0
                si = 0
                oi = 0
                # walk both periodic waves across one whole common period
                while t < lcm:
                    res_t += [t]
                    res_p += [sumPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),
                                       composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]
                    st = sto+(self.times+[self.times[0]+self.mod])[si+1]
                    ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]
                    t = min(st,ot)
                    if st <= t:
                        si += 1
                        if si >= len(self.polys):
                            si = 0
                            sto += self.mod
                    if ot <= t:
                        oi += 1
                        if oi >= len(o.polys):
                            oi = 0
                            oto += o.mod
                return PiecewizePoly(res_p,res_t,lcm)
        else:
            return PiecewizePoly([sumPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)
    def __mul__(self,o,fudgefactor = .001):
        """Pointwise product; same LCM-of-periods walk as __add__ for two
        periodic waves, or a scalar scale of every coefficient."""
        if type(o) == PiecewizePoly:
            gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5)
            lcm = self.mod*o.mod/gcd
            t = 0
            res_t = []
            res_p = []
            sto = 0
            oto = 0
            si = 0
            oi = 0
            while t < lcm:
                res_t += [t]
                res_p += [prodPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),
                                    composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]
                st = sto+(self.times+[self.times[0]+self.mod])[si+1]
                ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]
                t = min(st,ot)
                if st <= t:
                    si += 1
                    if si >= len(self.polys):
                        si = 0
                        sto += self.mod
                if ot <= t:
                    oi += 1
                    if oi >= len(o.polys):
                        oi = 0
                        oto += o.mod
            return PiecewizePoly(res_p,res_t,lcm)
        else:
            return PiecewizePoly([prodPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)
    def __radd__(self,o):
        return self.__add__(o)
    def __rmul__(self,o):
        return self.__mul__(o)
    def __sub__(self,o):
        return self.__add__(o.__mul__(-1))
    def __rsub__(self,o):
        return self.__mul__(-1).__add__(o)
    def t(self,v=1):
        """Time-scaled COPY (period becomes mod*v) -- unlike timescale(), not in place."""
        return PiecewizePoly([[p[i]/(v**i) for i in range(len(p))] for p in self.polys],[t*v for t in self.times],self.mod*v)
    def isZero(self):
        """True if every coefficient of every segment is zero."""
        for i in self.polys:
            for j in i:
                if j != 0:
                    return False
        return True
    def end(self):
        """Value at x == mod evaluated on the LAST segment (no wrap-around)."""
        x = self.mod
        l = -1
        #eval polyn
        return evalPolyn(self.polys[l],x-self.times[l])
    def freqComponent(self,f):
        """Complex Fourier-series coefficient of integer harmonic f (f == 0 is the bias)."""
        if f == 0:
            return self.bias()
        result = 0
        f /= self.mod
        for i in range(len(self.polys)):
            p = fourierPolyn(self.polys[i],f)
            result += evalFourierPolyn(p,f,f*self.times[i],0,(self.times+[self.mod])[i+1]-self.times[i])
        return result
    def graphSpectrum(self,w=20,h=10,both=True):
        """Braille-art plot of harmonic magnitudes (negative harmonics too when both)."""
        gr.graphLT(lambda x:abs(self.freqComponent(x)),both-h*2*both,h*(4-2*both)+both,0,1,w,h)
    def graphSpectrumLog(self,w=20,h=10,both = True,low=-10,hi=1):
        """Braille-art plot of log harmonic magnitudes between `low` and `hi`."""
        gr.graphLT(lambda x: (lambda v: (math.log(v) if v!=0 else -1e300))(abs(self.freqComponent(x))),both-h*2*both,h*(4-2*both)+both,low,hi,w,h)
    def bandLimit(self,t,bl=5,neg=False):
        """Evaluate at phase t using only harmonics 0..bl-1 (and 1-bl..-1 when neg)."""
        tot = 0
        for i in range(neg*(1-bl),bl):
            tot += eone**(1j*i*t)*self.freqComponent(i)
        return tot
    def getBandlimitedBuffer(self,denominator,numerator = 1,ff=0,fnd=2,neg=False):
        """Band-limited sample buffer for playback at rate numerator/denominator
        cycles per sample; the harmonic count is chosen to stay under Nyquist."""
        #f_nyquist = .5
        # f_n = n*(num/den) < f_nyquist
        # n < .5*den/num
        d = softGCD(numerator,denominator,ff)
        numerator=int(round(numerator/d))
        denominator=int(round(denominator/d))
        return [self.bandLimit(numerator*i*self.mod/denominator,int(denominator/numerator/fnd),neg) for i in range(numerator*denominator)]
    def bandConj(self,t,bl=5):
        """Band-limited quadrature companion built from the imaginary parts of
        the rotated real/imag spectra (Hilbert-transform-like reconstruction)."""
        tot = 0
        re = self.real()
        im = self.imag()
        for i in range(0,bl):
            f = eone**(1j*i*t)
            tot += (f*re.freqComponent(i)).imag+(f*im.freqComponent(i)).imag*1j
        return tot
    def real(self):
        """Copy keeping only the real part of every coefficient."""
        return PiecewizePoly([[i.real for i in j]for j in self.polys],[t for t in self.times],self.mod)
    def imag(self):
        """Copy keeping only the imaginary part of every coefficient."""
        return PiecewizePoly([[i.imag for i in j]for j in self.polys],[t for t in self.times],self.mod)
    def oscope(self,w=40,h=20,s=.5+.5j,m=.5,n=256):
        """Braille-art X/Y (oscilloscope) view: n samples of one period,
        scaled by m and centered at s; off-screen points are dropped."""
        scrn = gr.brailleScreen(w*2,h*4)
        for i in range(n):
            t = i*self.mod/n
            v = self(t).conjugate()*m+s
            if 0<=int(v.real*w*2)<w*2 and 0<=int(v.imag*h*4) < h*4:
                gr.brailleScreenSet(scrn,int(v.real*w*2),int(v.imag*h*4))
        gr.brailleScreenPrint(scrn)
def forever(v):
    """Generator that yields v endlessly."""
    while True:
        yield v
#NEW BWLSYNTH IDEA:
# sample the nth integral then derivitate the signal n times
# the high harmonics are suppressed in the integrals which means
# when they alias they are tiny
# but the reconstruction filter doesn't amplify them a ton because they were aliased
# thus cheap and easy bwl synthesis
def idbwlPoly(p,rate=440/48000,q=1,d=1):
try:
rate.__next__
except:
rate = forever(rate)
ds = [[0]*d for i in range(q)]
rates = [0]*d
trate = 0
for i in range(q):
p = p.unbiased().integ()
t = 0
di = 0
for i in range(q*d):
di = (di+1)%d
t += rate
t %= 1
r = p(t)
trate -= rates[di]
rates[di] = next(rate)
trate += rates[di]
for i in range(q):
r,ds[i][di] = (r-ds[i][di]) / trate,r
while 1:
di = (di+1)%d
t += rate
t %= 1
r = p(t)
trate -= rates[di]
rates[di] = next(rate)
trate += rates[di]
for i in range(q):
r,ds[i][di] = (r-ds[i][di]) / trate,r
yield r
def ditherPoly(p,rate=440/48000,dd=1):
    """Sample wave `p` at `rate` cycles/sample with dd*rate of random phase
    jitter per sample (dithered sampling)."""
    from random import random
    phase = 0
    while True:
        phase += rate
        yield p(phase + dd*rate*random())
def gaussApprox(mean=0,spread=1,iters=3):
    """Approximate a Gaussian-ish bump by convolving a unit-area rectangular
    blip with itself `iters` times (central limit theorem), shifted to `mean`.

    Uses square-and-multiply over the bits of `iters` so only O(log iters)
    convolutions are needed.
    Fix: removed the stray debug ``acc.plot()`` call, which opened a
    matplotlib window on every loop iteration.
    """
    s = spread/iters
    blip = PiecewizePoly([[],[1/s],[]],[-math.inf,0,s],0)
    acc = blip
    for b in bin(iters)[3:]:
        acc @= acc
        if b == '1':
            acc @= blip
    return acc.timeshift(mean)
def plinConnectDots(dat,speed=1):
    """Closed piecewise-linear path through the complex points `dat`,
    parameterized at unit speed (each segment lasts its own length).
    NOTE(review): `speed` is accepted but unused -- kept for interface compat.
    """
    prev = dat[-1]
    polys, times, total = [], [], 0
    for cur in dat:
        seg = abs(prev - cur)
        polys.append([prev, (cur - prev)/seg])
        times.append(total)
        total += seg
        prev = cur
    return PiecewizePoly(polys, times, total)
def pnlinConnectDots(dat,speed=1):
    """Like plinConnectDots, but time-normalized so the period is exactly 1."""
    path = plinConnectDots(dat, speed)
    return path.t(1/path.mod)
def papprox(dat,integ=2):
    """Smooth piecewise-polynomial approximation through the samples `dat`:
    take `integ` rounds of (scaled) first differences, build a staircase
    from them, then integrate `integ` times with the saved constants so the
    result passes smoothly near the original samples (period len(dat))."""
    #difference (discrete-derivative) the samples integ times
    dcs = []
    for intg in range(integ):
        # remember the integration constant to restore at this level
        dcs += [dat[-1]/(intg+1)]
        ddat = [(-dat[i-1]+dat[i])/(intg+1) for i in range(len(dat))]
        dat = ddat
    res = PiecewizePoly([[i] for i in dat],[i for i in range(len(dat))],len(dat))
    for i in range(integ):
        res = res.integ(dcs[-i-1])
    return res
"""bl = len(dat)//2
guess1 = PiecewizePoly([[i] for i in dat],[i/len(dat) for i in range(len(dat))],1)
freqs = [guess1.freqComponent(i) for i in range(1-bl,bl)]
dc = guess1.bias()
#derivitive the freqs integ times
for i in range(integ):
for f in range(len(freqs)):
freqs[f] *= (f+1-bl//2)*1j
#come up with new samples to integrate repeatedly
samps = []
for t in range(len(dat)):
tot = 0
for i in range(1-bl,bl):
tot += eone**(1j*i*t/len(dat))*freqs[i]
samps += [tot]
res = PiecewizePoly([[i] for i in samps],[i/len(samps) for i in range(len(samps))],1)
for i in range(integ):
res = res.unbiased().integ(0,1).unbiased()
return res + dc
"""
def ppulse(width=.5,amplitude=1):
    """DC-free rectangular pulse with period 1: -amplitude on [0, width),
    +amplitude after, then shifted to zero mean.

    Bug fix: the original passed [time, poly] pairs as the `polys` argument
    (a stale calling convention), producing a malformed PiecewizePoly with
    mismatched polys/times lists, and silently ignored `amplitude`.  Built
    now with separate polys/times like `psqr` below.
    """
    return (PiecewizePoly([[-1],[1]],[0,width])*amplitude).unbiased()
# canonical period-1 waveforms built from PiecewizePoly
psqr = PiecewizePoly([[-1],[1]],[0,.5])
#.5 -> 2  (scale so the triangle peaks at +/-1)
ptri = psqr.integ(0,4).unbiased()
#.25*.5=1/8  (scale for the next integration level)
ppar = ptri.integ(0,8)
psaw = PiecewizePoly([[1,-2]],[0])
# complex closed path traced through hand-picked vertices, scaled to radius .5
# (presumably a picture/logo path -- TODO confirm intent)
cf = pnlinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])*.5
cfi = plinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])
# append two extra straight strokes to the path, extend the period to match,
# then normalize the period to 1 and scale down
cfi.polys += [[-1/3+.5j,-1j],[1/3+.5j,-1j]]
cfi.times += [cfi.mod,cfi.mod+.75]
cfi.mod += 1.5
cfi = cfi.t(1/cfi.mod)*.5
def reorderTimes(times,order,top):
    """Recompute segment start times after permuting segments.

    times: original start times; order: permutation of segment indices;
    top: the period (end time of the final segment).
    """
    ends = times[1:] + [top]
    out = []
    t = 0
    for idx in order:
        out.append(t)
        t += ends[idx] - times[idx]
    return out
def reorder(wave,goal,fs=20,wfd = lambda f,a,b: abs(abs2(a)-abs2(b))):
    """Brute-force search over all permutations of wave's segments for the
    ordering whose spectrum (harmonics 1-fs .. fs-1) is closest to goal's
    under the per-harmonic metric wfd(freq, goal_coef, guess_coef)."""
    indices = range(len(wave.polys))
    goal_spec = [goal.freqComponent(k) for k in range(1-fs,fs)]
    best, best_dist = wave, 1e300
    for perm in itertools.permutations(indices):
        cand = PiecewizePoly([wave.polys[k] for k in perm],
                             reorderTimes(wave.times,perm,wave.mod),wave.mod)
        cand_spec = [cand.freqComponent(k) for k in range(1-fs,fs)]
        dist = 0
        for k in range(len(goal_spec)):
            dist += wfd(1-fs+k, goal_spec[k], cand_spec[k])
        if dist < best_dist:
            best, best_dist = cand, dist
    return best
def quickStar(n,s=2):
    """Star polygon {n/s}: connect every s-th of n unit-circle points, radius .5."""
    verts = [eone**(1j*k*s/n) for k in range(n)]
    return pnlinConnectDots(verts)*.5
def prettyStar(n,rl=.5):
    """Star with n outer points (radius 1) alternating with inner points
    (radius rl) at the half-way angles, scaled by .5."""
    verts = []
    for k in range(n):
        verts.append(eone**(1j*k/n))
        verts.append(eone**(1j*(k+.5)/n)*rl)
    return pnlinConnectDots(verts)*.5
def getPSineApprox(sects=2,integs=12):
    """Polynomial sine approximation: start from a `sects`-step staircase of
    sine samples (phase-offset so `integs` integrations land back in phase),
    then repeatedly integrate, de-bias, and renormalize."""
    offs = integs % 4
    samples = [[math.sin(((k+.5)/sects+offs/4)*2*math.pi)] for k in range(sects)]
    guess = PiecewizePoly(samples,[k/sects for k in range(sects)]).unbiased()
    for _ in range(integs):
        guess = guess.integ(0,1).unbiased().norm()
    return guess
def c(f,g):
    """Lazily map f over iterable g."""
    yield from map(f, g)
def x(n,g):
    """Scale every element of iterable g by n (lazy)."""
    yield from (n*v for v in g)
def p(n,g):
    """Offset every element of iterable g by n (lazy)."""
    yield from (n+v for v in g)
def const(n):
    """Yield n forever (constant signal)."""
    while True:
        yield n
def integ(g,a=0):
    """Running sum of iterable g starting from accumulator a (discrete integral)."""
    total = a
    for v in g:
        total += v
        yield total
def deriv(g):
    """First difference of iterator g (consumes the first element as baseline)."""
    prev = next(g)
    for cur in g:
        yield cur - prev
        prev = cur
def clamp(n,v=1):
    """Clamp n into the interval [-v, v]."""
    if n > v:
        return v
    if n < -v:
        return -v
    return n
def bderiv(g,b=1):
    """First difference of g clamped to +/-b, with the clipped-off residue
    carried forward into later samples so no total change is lost."""
    prev = next(g)
    pending = 0
    for cur in g:
        pending += cur - prev
        prev = cur
        out = clamp(pending, b)
        pending -= out
        yield out
def send(g1,g2):
    """Drive coroutine g1 with values drawn from iterator g2, yielding g1's
    outputs.  g1 is primed with one next() first, as .send() requires."""
    next(g1)
    while 1:
        yield g1.send(next(g2))
class passFilter:
    """Identity filter: remembers and passes through the last sample sent."""
    def __init__(self):
        self.value = 0
    def send(self,val,time=1):
        """Feed one sample; `time` (the elapsed interval) is ignored here."""
        self.value = val
        return self.value
class contRAvgFilt(passFilter):
    """Continuous-time one-pole lowpass: over one unit of time the state
    decays toward the input, keeping fraction `a` of its distance."""
    def __init__(self,a):
        self.alpha = math.log(a)
        self.value = 0
    def send(self,val,time=1):
        """Feed one sample that lasted `time` units; returns the filtered value."""
        keep = math.exp(self.alpha*time)
        self.value = val + (self.value - val)*keep
        return self.value
def getPerfSquareBuff(n,d=1):
    """Additively synthesize d cycles of a band-limited square wave into an
    n-sample buffer: sum odd harmonics 1, 3, 5, ... while they stay below
    the Nyquist limit (harmonic < n/(2d))."""
    out = [0 for k in range(n)]
    harmonic = 1
    while harmonic < n/d/2:
        for k in range(n):
            out[k] += math.sin(k*2*pi*d/n*harmonic)/harmonic
        harmonic += 2
    return out
def nearestDownSample(g,r=1):
    """Zero-order-hold resampler: repeat each input sample while the phase
    accumulator stays below 1, advancing it by r per emitted sample."""
    acc = 0
    for v in g:
        while acc < 1:
            yield v
            acc += r
        acc -= 1
def linearDownSample(g,r=1):
    """Linear-interpolating resampler: emits blends of the previous and
    current input while the phase accumulator is below 1 (the implicit
    sample before the first input is 0)."""
    prev = 0
    acc = 0
    for cur in g:
        while acc < 1:
            yield acc*cur + (1-acc)*prev
            acc += r
        prev = cur
        acc -= 1
def fsamp(f,s=[(-1,.5),(1,.5)],filt=None,r=48000):
    """Oscillator over a (level, duration) waveform description `s`, driven
    through a filter with exact sub-sample timing.

    `f` is the frequency as a number or a generator of numbers; `r` is the
    sample rate; `filt` defaults to a one-pole lowpass.  Each output sample,
    the waveform position advances by f/r, feeding every (level, dwell-time)
    chunk it crosses into the filter with its exact duration.
    NOTE(review): `s` is a shared mutable default -- it is only read here,
    but verify no caller mutates it.
    """
    if filt == None:
        filt = contRAvgFilt(1/r)
    a = 0      # time elapsed inside the current waveform segment
    i = 0      # index of the current waveform segment
    if type(f)==int or type(f)==float:
        # promote a constant frequency to a generator
        def g(v):
            while 1:
                yield v
        f = g(f)
    filtered = 0
    while 1:
        t = next(f)/r
        while t > 0:
            # advance by at most the remainder of the current segment
            dt = min(t,s[i][1]-a)
            a += dt
            t -= dt
            filt.send(s[i][0],dt)
            if a>=s[i][1]:
                a -= s[i][1]
                i = (i+1)%len(s)
        yield filt.value
#actual fm stuff
from filters import IIR
import numpy as np
def phaseModulate(g,d=.1,f=10000,sr=48000):
    """Phase-modulate quadrature carriers (frequency f at sample rate sr) by
    the real/imag parts of complex signal g, with modulation depth d."""
    phase = 0
    for sample in g:
        phase += f/sr
        left = nsin(phase + sample.real*d)
        right = nsin(phase + .25 + sample.imag*d)
        yield left + 1j*right
def modulate(g,d=0.01,f=10000,sr=48000):
    """Frequency-modulate quadrature carriers (base frequency f at sample
    rate sr) by complex signal g with depth d; the complex phase tracks the
    left/right channels in its real/imag parts."""
    t = .25
    for sample in g:
        t += (d*sample + 1 + 1j)*f/sr
        yield nsin(t.real) + 1j*nsin(t.imag)
"""def stereoEncode(g,c=10000,sr=48000):
t = 0
flt = IIR()
flt.setPolys([1],
for i in g:
t += (c+i.imag)/sr
yield nsin(t)+i.real
def stereoDecode(g,c=15000,sr=48000):
for i in g:
r =
"""
#def fm(
| 29.433581 | 183 | 0.490559 |
from itools import lmbdWr,lm
import itertools
from bisect import bisect_right
import brailleG as gr
def abs2(n):
return (n*n.conjugate()).real
def fsample(buf,m=1,b=0):
index = 0
y = 0
while 1:
index = (index+b+m*y)%len(buf)
y = yield buf[(int(index)+1)%len(buf)]*(index%1)+buf[int(index)]*(1-(index%1))
def fsine(a=1,m=1/48000,b=0):
s = 0
c = a
y = 0
while 1:
amt = b+m*y
s += c*amt
c -= s*amt
y = yield s
import math
pi = math.pi
eone = math.exp(2*pi)
buffer_size = 8192
sinBuffer = [math.sin(i*2*math.pi/4/buffer_size) for i in range(buffer_size+1)]
def nsin(a):
a = 4*buffer_size*(a%1)
if a<=buffer_size:
return sinBuffer[math.floor(a)]
elif a<=buffer_size*2:
return sinBuffer[math.floor(buffer_size-a)-1]
elif a<=buffer_size*3:
return -sinBuffer[math.floor(a-buffer_size*2)]
else:
return -sinBuffer[math.floor(buffer_size*3-a)-1]
def nsaw(a):
return (a%1)*2-1
def ntri(a):
return abs((a%1)-.5)*4-1
def nsquare(a,p=.5):
return ((a%1)<p)*2-1
lsin = lm(nsin)
lsaw = lm(nsaw)
ltri = lm(ntri)
lsqr = lm(nsquare)
def pulse(w=.5):
def p(v):
return ((v%1)<w)*2-1
return p
def tri(w=.5):
def t(v):
v %= 1
return 2*v/w-1 if v < w else 2*(w-v)/(1-w)+1
return t
_reserved = []
def polyvars(varstr):
return [Polynomial(i) for i in varstr]
class Polynomial:
def __init__(self,coef,var="x"):
if type(coef) == str:
var = coef
coef = [0,1]
self.a = coef
self.var = var
self.trim()
def trim(self):
while len(self.a):
if self.a[-1] == 0:
self.a = self.a[:-1]
else:
break
def simplified(self,tv=None):
if tv == None:
tv = self.var
if tv == self.var:
r = Polynomial([],tv)
x = Polynomial(tv)
xa = 1
for t in self.a:
if type(t) == Polynomial:
r += t.simplified(tv)*xa
else:
r += t*xa
xa *= x
else:
r = Polynomial([],tv)
x = Polynomial(self.var)
xa = 1
for t in self.a:
if type(t) == Polynomial:
r += t.simplified(tv).__mul__(xa)
else:
r += t*xa
xa *= x
return r
def __call__(self,vrs):
if type(vrs) != dict:
vrs = {self.var:vrs}
if self.var in vrs:
x = vrs[self.var]
v = 0
xa = 1
for t in self.a:
if type(t) == Polynomial:
t = t(vrs)
v += xa*t
xa *= x
return v
return Polynomial([t(vrs) if type(t) == Polynomial else t for t in self.a],self.var)
def __getitem__(self,i):
if i>=len(self):
return 0
return self.a[i]
def __setitem__(self,i,v):
if i>=len(self):
self.a += [0]*(i-len(self))+[v]
else:
self.a[i] = v
self.trim()
def __neg__(self):
return Polynomial([-i for i in self.a],self.var)
def __radd__(self,o):
return self.__add__(o)
def __add__(self,o):
if type(o) == Polynomial and o.var == self.var:
return self.padd(o)
return self.npadd(o)
def padd(self,o):
return Polynomial(sumPolyn(self.a,o.a),self.var)
def npadd(self,o):
if len(self.a):
return Polynomial([self.a[0]+o]+self.a[1:],self.var)
return Polynomial([o],self.var)
def __rsub__(self,o):
return -self.__sub__(o)
def __sub__(self,o):
if type(o) == Polynomial and o.var == self.var:
return self.psub(o)
return self.npsub(o)
def psub(self,o):
return self.padd(-o)
def npsub(self,o):
if len(self.a):
return Polynomial([self.a[0]-o]+self.a[1:],self.var)
return Polynomial([-o],self.var)
def __rmul__(self,o):
return self.__mul__(o)
def __mul__(self,o):
if type(o) == Polynomial and o.var == self.var:
return self.pmul(o)
return self.npmul(o)
def pmul(self,o):
return Polynomial(prodPolyn(self.a,o.a),self.var)
def npmul(self,o):
if len(self.a):
return Polynomial([e*o for e in self.a],self.var)
return Polynomial([],self.var)
def __repr__(self,var=None):
if var == None:
var = self.var
return f"p({var})="*0+" + ".join(((f"({self.a[i]})" if self.a[i] != 1 else ["1",""][i>0])+["",f"{var}"][i>0]+["",f"**{i}"][i>1] for i in range(len(self.a)) if self.a[i] != 0))
def deriv(self):
return Polynomial([self.a[i+1]*(i+1) for i in range(len(self.a)-1)],self.var)
def integ(self,k=0):
return Polynomial([k]+[self.a[i]*(1/(i+1)) for i in range(len(self.a))],self.var)
def convolve(self,o):
x = Polynomial('x')
t = Polynomial('t')
integrand = Polynomial([self(t-x)*o(t)],'t')
return integrand.simplified('t').integ()
def __len__(self):
return len(self.a)
def __eq__(self,o):
if type(o) == Polynomial:
if len(o) != len(self) or o.var != self.var:
return False
for i in range(len(self)):
if self.a[i] != o.a[i]:
return False
return True
if type(o) == float or type(o) == int or type(o) == complex:
return len(self) <= 1 and (self.a+[0])[0] == o
def __matmul__(self,o):
return self.convolve(o)
def plot(self,*args):
plotPoly(self.a,*args)
def evalPolyn(polyn, x):
    """Evaluate a coefficient list (lowest power first) at x."""
    total = 0
    power = 1  # running x**k
    for coef in polyn:
        total += power * coef
        power *= x
    return total
def sumPolyn(p1, p2):
    """Coefficient-wise sum of two coefficient lists of possibly different length."""
    out = []
    for i in range(max(len(p1), len(p2))):
        if i >= len(p1):
            out.append(p2[i])
        elif i >= len(p2):
            out.append(p1[i])
        else:
            out.append(p1[i] + p2[i])
    return out
def prodPolyn(p1, p2):
    """Product of two coefficient lists (discrete convolution of coefficients)."""
    if not p1 or not p2:
        return []
    out = [0] * (len(p1) + len(p2) - 1)
    for i, a in enumerate(p1):
        for j, b in enumerate(p2):
            out[i + j] += a * b
    return out
def composePolyn(p1, p2):
    """Return the coefficient list of p1(p2(x))."""
    power = [1]  # p2**k, starting at k == 0
    acc = []
    for coef in p1:
        acc = sumPolyn(acc, prodPolyn(power, [coef]))
        power = prodPolyn(power, p2)
    return acc
def fourierPolyn(p,freq):
    """Antiderivative of p(x)*exp(2j*pi*freq*x) by repeated integration by parts.

    :param list p: polynomial coefficient list (lowest power first)
    :param freq: nonzero frequency (divides 1, so freq == 0 is invalid here)
    :returns: coefficient list of the polynomial factor of the antiderivative

    ``mask`` is assigned but never used; it appears to be a leftover.
    """
    factor = 1/(2j*math.pi*freq)
    mask = [factor]
    result = [0 for i in p]
    for i in range(len(p)):
        # Integration by parts: each x**i term spreads down to lower powers
        # with alternating factors of the (complex) integration constant.
        facacc = factor
        for j in range(i,-1,-1):
            result[j] += facacc*p[i]
            facacc *= -factor*j
    return result
def evalFourierPolyn(p,freq,phase,low,high):
    """Evaluate a fourierPolyn antiderivative between ``low`` and ``high``.

    NOTE(review): ``eone`` is a module-level constant defined outside this
    excerpt — presumably exp(2*pi) or similar so that ``eone**(1j*t)`` is a
    unit oscillation; confirm against its definition.
    """
    l = evalPolyn(p,low)
    h = evalPolyn(p,high)
    return h*(eone**(1j*(freq*high+phase)))-l*(eone**(1j*(freq*low+phase)))
def convolvePolyn(p1,p2):
    # Unimplemented placeholder; convolution of piecewise pieces is handled
    # by convPolyFrags / PiecewizePoly.convolve instead.
    pass
def softGCD(a, b, f=.01):
    """Euclidean GCD with a fuzz tolerance ``f`` (for floats that never hit 0)."""
    while abs(b) > f:
        a, b = b, a % b
    return a
def convPolyFrags(p0,p1,l0,l1):
    """Convolve two finite polynomial fragments of lengths l0 and l1.

    :param list p0: coefficient list of the first fragment (support [0, l0))
    :param list p1: coefficient list of the second fragment (support [0, l1))
    :returns: a non-periodic PiecewizePoly with the three-piece (or two-piece
        when l0 == l1) result of the convolution.
    """
    # Normalize so the shorter fragment comes first.
    if l0 > l1:
        return convPolyFrags(p1,p0,l1,l0)
    times = [-l0,0,l1-l0]
    def xify(v):
        # Collapse a nested polynomial expression back to a plain poly in x.
        return Polynomial([v],'x').simplified('x')
    p_0 = Polynomial(p0,'x')
    p_1 = Polynomial(p1,'x')
    x = Polynomial('x')
    conv = p_0@p_1
    # Ramp-up, overlap, and ramp-down windows of the convolution integral.
    a,b,c = conv(l0+x)-conv(0),conv(l0+x)-conv(x),conv(l1)-conv(x)
    a,b,c = [xify(xify((a,b,c)[i])(x+times[i])) for i in range(3)]
    if l1 != l0:
        return PiecewizePoly([[],a.a,b.a,c.a,[]],[-math.inf]+times+[l1],0)
    return PiecewizePoly([[],a.a,c.a,[]],[-math.inf]+times[:2]+[l1],0)
def plotPoly(p,t0=0,t1=1,res=50):
    """Plot real and imaginary parts of a coefficient list over [t0, t1].

    :param list p: polynomial coefficient list (lowest power first)
    :param res: number of interior sample points (only used for degree >= 2,
        since linear pieces are fully determined by their endpoints)
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(nrows=1, ncols=1)
    st = p[0]
    end = evalPolyn(p,t1-t0)
    mid = []
    ts = []
    if len(p) > 2:
        for j in range(1,res):
            t = (t1-t0)*j/res
            ts += [t+t0]
            mid += [evalPolyn(p,t)]
    ts = [t0]+ts+[t1]
    ys = [st]+mid+[end]
    # Blue: real part; red: imaginary part.
    plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3,.3,1), linewidth=2)
    plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3,.3), linewidth=2)
    plt.show(block=0)
class PiecewizePoly:
    """A piecewise polynomial of one real variable.

    :ivar list times: sorted start time of each piece
    :ivar list polys: coefficient list of each piece, *relative to its own
        start time* (lowest power first)
    :ivar mod: period of the function; 0 means non-periodic

    Fixes relative to the original:
    * ``integ`` referenced an undefined name (``valePoly``) and never
      returned, which broke every caller (e.g. the module-level
      ``ptri = psqr.integ(0,4)``); the continuity chaining and the return
      statement are restored.
    * ``__init__`` no longer uses shared mutable default arguments.
    """
    def __init__(self, polys=None, times=None, mod=1):
        self.times = [0] if times is None else times
        self.polys = [[]] if polys is None else polys
        self.mod = mod
    def __call__(self,x):
        """Evaluate at x, wrapping into the period when periodic."""
        if self.mod != 0:
            x %= self.mod
        l = bisect_right(self.times,x)-1
        return evalPolyn(self.polys[l],x-self.times[l])
    def deriv(self):
        """Piecewise formal derivative (same knots, same period)."""
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [[]]
            for i in range(len(self.polys[p])-1):
                res_p[-1] += [self.polys[p][i+1]*(i+1)]
        return PiecewizePoly(res_p,res_t,self.mod)
    def integ(self,start=0,scale=1):
        """Piecewise antiderivative; ``start`` is the value at the first knot.

        Each piece is integrated independently, then the integration
        constants are chained so the result is continuous across knots.
        """
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [[start]]
            for i in range(len(self.polys[p])):
                res_p[-1] += [self.polys[p][i]/(i+1)*scale]
        for i in range(1,len(res_t)):
            # Start each piece where the previous (already-chained) piece ends.
            val = evalPolyn(res_p[i-1],res_t[i]-res_t[i-1])
            res_p[i][0] = val
        return PiecewizePoly(res_p,res_t,self.mod)
    def timeshift(self,s):
        """Shift a non-periodic function right by s (in place)."""
        assert self.mod==0
        for i in range(len(self.times)):
            self.times[i] -= s
        return self
    def timescale(self,s):
        """Stretch the time axis by s (in place)."""
        self.mod *= s
        for i in range(len(self.times)):
            self.times[i] *= s
            # Re-express each piece in the stretched local coordinate.
            self.polys[i] = composePolyn(self.polys[i],[0,1/s])
        return self
    def convolve(self,o,fudgefactor = .001):
        """Convolve piecewise functions by summing pairwise fragment convolutions.

        NOTE(review): ``fudgefactor`` is currently unused here.
        """
        ts = self.times + [self.mod if self.mod else math.inf]
        to = o.times + [o.mod if o.mod else math.inf]
        result = PiecewizePoly([[]],[-math.inf],0)
        for i in range(len(self.polys)):
            for j in range(len(o.polys)):
                pc = convPolyFrags(self.polys[i],o.polys[j],ts[i+1]-ts[i],to[j+1]-to[j])
                result += pc.timeshift(ts[i]-to[j])
        return result
    def __matmul__(self,o,fudgefactor = .001):
        """The @ operator performs convolution."""
        return self.convolve(o,fudgefactor)
    def __lmbdWr__(self):
        # NOTE(review): lmbdWr / iterWr are defined elsewhere in this file.
        return lmbdWr(self)
    def __iterWr__(self):
        return iterWr(iter(lmbdWr(self)))
    def bias(self):
        """Mean value over one period (DC offset)."""
        intg = self.integ()
        return (intg.end()-intg(0))/self.mod
    def unbiased(self):
        """Return a copy with the DC offset subtracted from every piece."""
        bias = self.bias()
        res_t = []
        res_p = []
        for p in range(len(self.polys)):
            res_t += [self.times[p]]
            res_p += [sumPolyn([-bias],self.polys[p])]
        return PiecewizePoly(res_p,res_t,self.mod)
    def graph(self,w=40,h=20,lo=-2,hi=2):
        """ASCII-graph one period (uses the module-level `gr` helper)."""
        gr.graph(self,0,self.mod,lo,hi,w,h)
    def plot(self,res=50):
        """Plot real (blue) and imaginary (red) parts, alternating piece shading."""
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(nrows=1, ncols=1)
        dash = 0
        for i in range(len(self.polys)):
            dash = 1-dash
            t0 = self.times[i]
            if t0 == -math.inf:
                t0 = self.times[i+1]-1
            t1 = (self.times+[self.mod if self.mod != 0 else self.times[-2]+1])[i+1]
            p = self.polys[i]
            st = (p+[0])[0]
            end = evalPolyn(p,t1-t0)
            mid = []
            ts = []
            if len(p) > 2:
                for j in range(1,res):
                    t = (t1-t0)*j/res
                    ts += [t+t0]
                    mid += [evalPolyn(p,t)]
            ts = [t0]+ts+[t1]
            ys = [st]+mid+[end]
            plt.plot(ts,[i.real for i in ys],linestyle='-',color=(.3*dash,.3*dash,1), linewidth=2)
            plt.plot(ts,[i.imag for i in ys],linestyle='-',color=(1,.3*dash,.3*dash), linewidth=2)
        plt.show(block=0)
    def mag2(self):
        """Mean squared value over one period.

        NOTE(review): the squared function is built with period mod+1 so that
        evaluating its integral at exactly ``mod`` does not wrap around.
        """
        sqd = PiecewizePoly([prodPolyn(p,p) for p in self.polys],[t for t in self.times],self.mod+1).integ()
        return (sqd(self.mod)-sqd(0))/self.mod
    def norm(self,v=.5):
        """Return a copy scaled to RMS amplitude v."""
        target = v
        factor = target/self.mag2()**.5
        return PiecewizePoly([[i*factor for i in p] for p in self.polys],[t for t in self.times],self.mod)
    def __add__(self,o,fudgefactor = .001):
        """Add another PiecewizePoly (merging knots, periods via soft LCM) or a constant."""
        if type(o) == PiecewizePoly:
            if self.mod == 0:
                # Non-periodic + non-periodic: merge the two knot sequences.
                assert o.mod == 0
                res_t = [-math.inf]
                res_p = [sumPolyn(self.polys[0],o.polys[0])]
                si = 0
                oi = 0
                sts = self.times + [math.inf]
                ots = o.times + [math.inf]
                sp = self.polys + [[]]
                op = o.polys + [[]]
                while si < len(self.times) and oi < len(o.times):
                    st,ot = sts[si+1],ots[oi+1]
                    if st < ot:
                        si += 1
                        res_t += [st]
                        # Re-base the other side's piece onto the new knot.
                        res_p += [sumPolyn(sp[si],
                                           composePolyn(op[oi],[st-ot,1]))]
                    elif st > ot:
                        oi += 1
                        res_t += [ot]
                        res_p += [sumPolyn(composePolyn(sp[si],[ot-st,1]),
                                           op[oi])]
                    else:
                        si += 1
                        oi += 1
                        res_t += [st]
                        res_p += [sumPolyn(sp[si],op[oi])]
                return PiecewizePoly(res_p,res_t,0)
            else:
                # Periodic + periodic: walk both knot streams over the soft LCM.
                assert o.mod != 0
                gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5)
                lcm = self.mod*o.mod/gcd
                t = 0
                res_t = []
                res_p = []
                sto = 0
                oto = 0
                si = 0
                oi = 0
                while t < lcm:
                    res_t += [t]
                    res_p += [sumPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),
                                       composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]
                    st = sto+(self.times+[self.times[0]+self.mod])[si+1]
                    ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]
                    t = min(st,ot)
                    if st <= t:
                        si += 1
                        if si >= len(self.polys):
                            si = 0
                            sto += self.mod
                    if ot <= t:
                        oi += 1
                        if oi >= len(o.polys):
                            oi = 0
                            oto += o.mod
                return PiecewizePoly(res_p,res_t,lcm)
        else:
            return PiecewizePoly([sumPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)
    def __mul__(self,o,fudgefactor = .001):
        """Multiply by another periodic PiecewizePoly (soft-LCM period) or a constant."""
        if type(o) == PiecewizePoly:
            gcd = softGCD(self.mod,o.mod,fudgefactor*(self.mod*o.mod)**.5)
            lcm = self.mod*o.mod/gcd
            t = 0
            res_t = []
            res_p = []
            sto = 0
            oto = 0
            si = 0
            oi = 0
            while t < lcm:
                res_t += [t]
                res_p += [prodPolyn(composePolyn(self.polys[si],[t-(self.times[si]+sto),1]),
                                    composePolyn(o.polys[oi],[t-(o.times[oi]+oto),1]))]
                st = sto+(self.times+[self.times[0]+self.mod])[si+1]
                ot = oto+(o.times+[o.times[0]+o.mod])[oi+1]
                t = min(st,ot)
                if st <= t:
                    si += 1
                    if si >= len(self.polys):
                        si = 0
                        sto += self.mod
                if ot <= t:
                    oi += 1
                    if oi >= len(o.polys):
                        oi = 0
                        oto += o.mod
            return PiecewizePoly(res_p,res_t,lcm)
        else:
            return PiecewizePoly([prodPolyn(p,[o]) for p in self.polys],[t for t in self.times],self.mod)
    def __radd__(self,o):
        return self.__add__(o)
    def __rmul__(self,o):
        return self.__mul__(o)
    def __sub__(self,o):
        return self.__add__(o.__mul__(-1))
    def __rsub__(self,o):
        return self.__mul__(-1).__add__(o)
    def t(self,v=1):
        """Return a copy with the time axis scaled by v (period becomes mod*v)."""
        return PiecewizePoly([[p[i]/(v**i) for i in range(len(p))] for p in self.polys],[t*v for t in self.times],self.mod*v)
    def isZero(self):
        """True if every coefficient of every piece is zero."""
        for i in self.polys:
            for j in i:
                if j != 0:
                    return False
        return True
    def end(self):
        """Value at the end of the period (evaluated on the last piece, no wrap)."""
        x = self.mod
        l = -1
        return evalPolyn(self.polys[l],x-self.times[l])
    def freqComponent(self,f):
        """Complex Fourier-series coefficient for integer harmonic f."""
        if f == 0:
            return self.bias()
        result = 0
        f /= self.mod
        for i in range(len(self.polys)):
            p = fourierPolyn(self.polys[i],f)
            result += evalFourierPolyn(p,f,f*self.times[i],0,(self.times+[self.mod])[i+1]-self.times[i])
        return result
    def graphSpectrum(self,w=20,h=10,both=True):
        """ASCII-graph the magnitude spectrum."""
        gr.graphLT(lambda x:abs(self.freqComponent(x)),both-h*2*both,h*(4-2*both)+both,0,1,w,h)
    def graphSpectrumLog(self,w=20,h=10,both = True,low=-10,hi=1):
        """ASCII-graph the log-magnitude spectrum."""
        gr.graphLT(lambda x: (lambda v: (math.log(v) if v!=0 else -1e300))(abs(self.freqComponent(x))),both-h*2*both,h*(4-2*both)+both,low,hi,w,h)
    def bandLimit(self,t,bl=5,neg=False):
        """Evaluate the band-limited (first ``bl`` harmonics) reconstruction at t."""
        tot = 0
        for i in range(neg*(1-bl),bl):
            tot += eone**(1j*i*t)*self.freqComponent(i)
        return tot
    def getBandlimitedBuffer(self,denominator,numerator = 1,ff=0,fnd=2,neg=False):
        """Render one full cycle of the band-limited wave into a sample buffer."""
        d = softGCD(numerator,denominator,ff)
        numerator=int(round(numerator/d))
        denominator=int(round(denominator/d))
        return [self.bandLimit(numerator*i*self.mod/denominator,int(denominator/numerator/fnd),neg) for i in range(numerator*denominator)]
    def bandConj(self,t,bl=5):
        """Band-limited evaluation keeping only the quadrature (imaginary) parts."""
        tot = 0
        re = self.real()
        im = self.imag()
        for i in range(0,bl):
            f = eone**(1j*i*t)
            tot += (f*re.freqComponent(i)).imag+(f*im.freqComponent(i)).imag*1j
        return tot
    def real(self):
        """Copy with every coefficient replaced by its real part."""
        return PiecewizePoly([[i.real for i in j]for j in self.polys],[t for t in self.times],self.mod)
    def imag(self):
        """Copy with every coefficient replaced by its imaginary part."""
        return PiecewizePoly([[i.imag for i in j]for j in self.polys],[t for t in self.times],self.mod)
    def oscope(self,w=40,h=20,s=.5+.5j,m=.5,n=256):
        """Braille-art X/Y oscilloscope view of one period (n sample points)."""
        scrn = gr.brailleScreen(w*2,h*4)
        for i in range(n):
            t = i*self.mod/n
            v = self(t).conjugate()*m+s
            if 0<=int(v.real*w*2)<w*2 and 0<=int(v.imag*h*4) < h*4:
                gr.brailleScreenSet(scrn,int(v.real*w*2),int(v.imag*h*4))
        gr.brailleScreenPrint(scrn)
def forever(v):
    """Infinite generator yielding v forever."""
    while True:
        yield v
# Cheap band-limited (BWL) waveform synthesis: integrate q times, then difference q times.
def idbwlPoly(p, rate=440/48000, q=1, d=1):
    """Integrated-difference band-limited synthesis of a periodic waveform.

    ``p`` is integrated ``q`` times (re-centered each pass), sampled while
    advancing the phase, then numerically differenced ``q`` times through a
    ``d``-sample delay line, which suppresses aliasing.  ``rate`` may be a
    plain number or a generator of per-sample phase increments.

    Fixed: the phase was advanced with ``t += rate`` *after* ``rate`` had
    been wrapped in a generator, which raised TypeError on the first step;
    each per-sample rate is now drawn once and used for both the phase step
    and the rate window feeding the difference normalization.
    """
    try:
        rate.__next__
    except AttributeError:
        rate = forever(rate)
    ds = [[0] * d for _ in range(q)]   # delay lines for each difference stage
    rates = [0] * d                    # window of the last d phase increments
    trate = 0                          # running sum of that window
    for _ in range(q):
        p = p.unbiased().integ()
    t = 0
    di = 0

    def step(t, di, trate):
        # One synthesis step: advance phase, sample, run the difference chain.
        di = (di + 1) % d
        cur = next(rate)
        t = (t + cur) % 1
        r = p(t)
        trate -= rates[di]
        rates[di] = cur
        trate += cur
        for i in range(q):
            r, ds[i][di] = (r - ds[i][di]) / trate, r
        return t, di, trate, r

    for _ in range(q * d):  # prime the delay lines; discard the output
        t, di, trate, _r = step(t, di, trate)
    while 1:
        t, di, trate, r = step(t, di, trate)
        yield r
def ditherPoly(p, rate=440/48000, dd=1):
    """Sample p at a fixed rate with up to ``dd`` sample-periods of random phase dither."""
    from random import random
    t = 0
    while True:
        t += rate
        jitter = dd * rate * random()
        yield p(t + jitter)
def gaussApprox(mean=0, spread=1, iters=3):
    """Approximate a Gaussian bump by self-convolving a rectangular pulse.

    By the central limit theorem, convolving ``iters`` unit-area rectangles
    of width spread/iters approaches a normal shape; the convolution power is
    computed by binary exponentiation over the bits of ``iters``.

    Fixed: removed a stray ``acc.plot()`` debug call that opened a matplotlib
    window on every squaring step.
    """
    s = spread / iters
    blip = PiecewizePoly([[], [1 / s], []], [-math.inf, 0, s], 0)
    acc = blip
    for bit in bin(iters)[3:]:
        acc @= acc
        if bit == '1':
            acc @= blip
    return acc.timeshift(mean)
def plinConnectDots(dat, speed=1):
    """Closed unit-speed linear interpolation through the (complex) points.

    Each segment runs from dat[i-1] to dat[i] (so the path wraps around) and
    lasts as long as the segment is long; the result's period is the total
    path length.

    Fixed: zero-length segments (consecutive duplicate points) previously
    raised ZeroDivisionError; they are now skipped.
    NOTE(review): ``speed`` is accepted but unused, matching the original.
    """
    polys = []
    times = []
    t = 0
    for i in range(len(dat)):
        leng = abs(dat[i - 1] - dat[i])
        if leng == 0:
            continue  # duplicate point: no segment to traverse
        polys += [[dat[i - 1], (dat[i] - dat[i - 1]) / leng]]
        times += [t]
        t += leng
    return PiecewizePoly(polys, times, t)
def pnlinConnectDots(dat, speed=1):
    """Like plinConnectDots, but normalized so the period is exactly 1."""
    looped = plinConnectDots(dat, speed)
    return looped.t(1 / looped.mod)
def papprox(dat,integ=2):
    """Smooth periodic piecewise-polynomial approximation through samples ``dat``.

    Takes ``integ`` rounds of finite differences of the data (recording the
    constants needed to undo them), builds a piecewise-constant function from
    the differenced samples, then integrates it back ``integ`` times.
    """
    # Finite-difference the samples `integ` times, saving integration constants.
    dcs = []
    for intg in range(integ):
        dcs += [dat[-1]/(intg+1)]
        ddat = [(-dat[i-1]+dat[i])/(intg+1) for i in range(len(dat))]
        dat = ddat
    res = PiecewizePoly([[i] for i in dat],[i for i in range(len(dat))],len(dat))
    for i in range(integ):
        res = res.integ(dcs[-i-1])
    return res
def ppulse(width=.5, amplitude=1):
    """Zero-mean rectangular pulse wave with duty cycle ``width`` (period 1).

    Fixed: the original passed a malformed nested coefficient list
    ([[0,[-1]],[width,[1]]]) to PiecewizePoly and ignored ``amplitude``;
    this builds the two-level waveform the same way the module-level psqr
    constant does.
    """
    return PiecewizePoly([[-amplitude], [amplitude]], [0, width]).unbiased()
# Canonical period-1 test waveforms: square, triangle, parabolic, saw.
psqr = PiecewizePoly([[-1],[1]],[0,.5])
# Integrating the square (slope scale 4: over half a period, .5 -> 2) gives a triangle.
ptri = psqr.integ(0,4).unbiased()
# Integrating the triangle (scale 8: .25*.5 = 1/8 peak) gives a parabolic wave.
ppar = ptri.integ(0,8)
psaw = PiecewizePoly([[1,-2]],[0])
# Decorative complex-valued outlines traced dot-to-dot (scaled to half size).
cf = pnlinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])*.5
cfi = plinConnectDots([-.75+1.5j,-.5+1j,.5+1j,.75+1.5j,1+1j,1-1j,-1j-1,1j-1])
# Append two extra downward strokes, extend the period, then renormalize.
cfi.polys += [[-1/3+.5j,-1j],[1/3+.5j,-1j]]
cfi.times += [cfi.mod,cfi.mod+.75]
cfi.mod += 1.5
cfi = cfi.t(1/cfi.mod)*.5
def reorderTimes(times, order, top):
    """Recompute knot times after permuting segments.

    :param list times: segment start times
    :param order: permutation (sequence of segment indices)
    :param top: overall period (end time of the last segment)
    :returns: new start times preserving each segment's length
    """
    lengths = []
    for i in range(len(times)):
        end = top if i == len(times) - 1 else times[i + 1]
        lengths.append(end - times[i])
    new_times = []
    t = 0
    for i in order:
        new_times.append(t)
        t += lengths[i]
    return new_times
def reorder(wave,goal,fs=20,wfd = lambda f,a,b: abs(abs2(a)-abs2(b))):
    """Brute-force search for the segment permutation of ``wave`` whose
    spectrum best matches ``goal``.

    :param wave: PiecewizePoly whose pieces are permuted
    :param goal: PiecewizePoly providing the target spectrum
    :param int fs: harmonics 1-fs .. fs-1 are compared
    :param wfd: per-harmonic distance weight; NOTE(review): the default uses
        ``abs2``, defined elsewhere in this file (presumably |z|^2 — confirm)

    WARNING: iterates over all len(wave.polys)! permutations.
    """
    l = [i for i in range(len(wave.polys))]
    goalF = [goal.freqComponent(i) for i in range(1-fs,fs)]
    best = wave
    bestD = 1e300
    for p in itertools.permutations(l):
        guess = PiecewizePoly([wave.polys[i] for i in p],reorderTimes(wave.times,p,wave.mod),wave.mod)
        guessF = [guess.freqComponent(i) for i in range(1-fs,fs)]
        d = 0
        for i in range(len(goalF)):
            d += wfd(1-fs+i,goalF[i],guessF[i])
        if d < bestD:
            best = guess
            bestD = d
    return best
def quickStar(n, s=2):
    """n-pointed star polygon traced with stride s around the unit circle, half scale."""
    verts = [eone ** (1j * k * s / n) for k in range(n)]
    return pnlinConnectDots(verts) * .5
def prettyStar(n, rl=.5):
    """Star alternating outer radius 1 and inner radius rl between points, half scale."""
    verts = [eone ** (1j * (k + .5 * j) / n) * [1, rl][j]
             for k in range(n) for j in range(2)]
    return pnlinConnectDots(verts) * .5
def getPSineApprox(sects=2,integs=12):
    """Iteratively build a piecewise-polynomial sine approximation.

    Starts from a ``sects``-step staircase sampled from sin (phase-offset so
    the result lands in phase after ``integs`` quarter-period rotations),
    then repeatedly integrates, re-centers, and re-normalizes; each pass
    smooths the staircase toward a sinusoid.
    """
    offs = integs%4
    guess = PiecewizePoly([[math.sin(((i+.5)/sects+offs/4)*2*math.pi)] for i in range(sects)],[i/sects for i in range(sects)]).unbiased()
    for i in range(integs):
        guess = guess.integ(0,1).unbiased().norm()
    return guess
def c(f, g):
    """Lazily map f over iterable g."""
    yield from map(f, g)
def x(n, g):
    """Scale every element of g by n."""
    yield from (n * item for item in g)
def p(n, g):
    """Offset every element of g by n."""
    yield from (n + item for item in g)
def const(n):
    """Yield n forever."""
    while True:
        yield n
def integ(g, a=0):
    """Running (prefix) sum of g, starting from accumulator a."""
    total = a
    for v in g:
        total += v
        yield total
def deriv(g):
    """First differences of iterator g (one element shorter than the input)."""
    prev = next(g)
    for cur in g:
        yield cur - prev
        prev = cur
def clamp(n, v=1):
    """Clamp n into the interval [-v, v]."""
    if n > v:
        return v
    if n < -v:
        return -v
    return n
def bderiv(g, b=1):
    """Bounded first differences: each step is clamped to [-b, b] and the
    unsent remainder is carried forward to later steps."""
    prev = next(g)
    backlog = 0
    for cur in g:
        backlog += cur - prev
        prev = cur
        step = min(max(backlog, -b), b)  # inlined clamp()
        yield step
        backlog -= step
def send(g1,g2):
    """Drive coroutine g1 with values from generator g2, yielding g1's outputs.

    The initial next(g1) primes the coroutine to its first yield point.
    """
    next(g1)
    while 1:
        yield g1.send(next(g2))
class passFilter:
    """Identity filter: passes each sample through while remembering the last one."""
    def __init__(self):
        self.value = 0
    def send(self, val, time=1):
        # `time` is accepted for interface compatibility with timed filters.
        self.value = val
        return val
class contRAvgFilt(passFilter):
    """Continuous-time exponential moving average; `a` is the per-unit-time decay."""
    def __init__(self, a):
        self.alpha = math.log(a)
        self.value = 0
    def send(self, val, time=1):
        decay = math.exp(self.alpha * time)
        self.value = val + (self.value - val) * decay
        return self.value
def getPerfSquareBuff(n, d=1):
    """Additive-synthesis square wave: sum odd harmonics below n/d/2 into an
    n-sample buffer (d cycles per buffer)."""
    out = [0] * n
    w = 1
    while w < n / d / 2:
        for i in range(n):
            out[i] += math.sin(i * 2 * math.pi * d / n * w) / w
        w += 2
    return out
def nearestDownSample(g, r=1):
    """Sample-and-hold resampler: repeats each input sample while the phase
    accumulator stays below 1, advancing it by r per output."""
    phase = 0
    for sample in g:
        while phase < 1:
            yield sample
            phase += r
        phase -= 1
def linearDownSample(g, r=1):
    """Linear-interpolation resampler between consecutive input samples
    (the stream is primed with an implicit leading 0)."""
    prev = 0
    phase = 0
    for sample in g:
        while phase < 1:
            yield phase * sample + (1 - phase) * prev
            phase += r
        prev = sample
        phase -= 1
def fsamp(f,s=[(-1,.5),(1,.5)],filt=None,r=48000):
    """Filtered step-waveform oscillator.

    :param f: frequency in Hz, either a number or a generator of per-sample
        frequencies
    :param s: waveform as (level, duration) segments; the default is a square
    :param filt: filter object with ``send(value, dt)`` / ``value``;
        defaults to a fresh contRAvgFilt(1/r)
    :param r: sample rate
    :yields: one filtered sample per iteration

    Each output sample advances ``f/r`` through the segment list, feeding the
    filter each segment level for exactly the time spent on it, so edges that
    fall between samples are averaged correctly.
    """
    if filt == None:
        filt = contRAvgFilt(1/r)
    a = 0   # time already spent inside the current segment
    i = 0   # index of the current segment
    if type(f)==int or type(f)==float:
        def g(v):
            while 1:
                yield v
        f = g(f)
    filtered = 0
    while 1:
        t = next(f)/r
        while t > 0:
            # Consume the remaining sample period segment by segment.
            dt = min(t,s[i][1]-a)
            a += dt
            t -= dt
            filt.send(s[i][0],dt)
            if a>=s[i][1]:
                a -= s[i][1]
                i = (i+1)%len(s)
        yield filt.value
#actual fm stuff
from filters import IIR
import numpy as np
def phaseModulate(g,d=.1,f=10000,sr=48000):
    """Phase-modulate a complex carrier of frequency f by the samples of g.

    :param g: generator of complex modulation samples (real part drives the
        in-phase channel, imaginary the quadrature channel)
    :param d: modulation depth
    :param sr: sample rate

    NOTE(review): relies on ``nsin``, defined elsewhere in this file —
    presumably a period-1 normalized sine (hence the .25 quadrature offset).
    """
    t = 0
    for i in g:
        t += f/sr
        yield nsin(t+i.real*d)+1j*(nsin(t+.25+i.imag*d))
def modulate(g,d=0.01,f=10000,sr=48000):
    """Frequency-modulate a complex carrier of base frequency f by g.

    The instantaneous phase increment is scaled by (1 + d*i) per sample, with
    independent real/imaginary phase tracks (started .25 apart in t's real
    part for quadrature).

    NOTE(review): relies on ``nsin``, defined elsewhere in this file —
    presumably a period-1 normalized sine.
    """
    t = .25
    for i in g:
        t += (d*i+1+1j)*f/sr
        yield (nsin(t.real)+1j*nsin(t.imag))
# TODO: fm() frequency-modulation helper was started but never implemented.
| true | true |
f71d7c018fbe014e4ecc1d9b47bd5d36caccc271 | 29,722 | py | Python | certbot-nginx/certbot_nginx/_internal/parser.py | robszumski/certbot | d7050132c7468cbb08d3d717685a76b91d3c921a | [
"Apache-2.0"
] | 1 | 2021-09-24T22:27:06.000Z | 2021-09-24T22:27:06.000Z | certbot-nginx/certbot_nginx/_internal/parser.py | robszumski/certbot | d7050132c7468cbb08d3d717685a76b91d3c921a | [
"Apache-2.0"
] | 1 | 2021-09-24T22:22:21.000Z | 2021-09-24T22:22:21.000Z | certbot-nginx/certbot_nginx/_internal/parser.py | LaudateCorpus1/certbot | 3ec8b552a25b75d00d7693d52e6df93e5bd02a5f | [
"Apache-2.0"
] | 1 | 2021-09-24T22:22:11.000Z | 2021-09-24T22:22:11.000Z | """NginxParser is a member object of the NginxConfigurator class."""
import copy
import functools
import glob
import logging
import re
import pyparsing
import six
from acme.magic_typing import Dict
from acme.magic_typing import List
from acme.magic_typing import Set
from acme.magic_typing import Tuple
from acme.magic_typing import Union
from certbot import errors
from certbot.compat import os
from certbot_nginx._internal import nginxparser
from certbot_nginx._internal import obj
logger = logging.getLogger(__name__)
class NginxParser(object):
    """Class handles the fine details of parsing the Nginx Configuration.

    :ivar str root: Normalized absolute path to the server root
        directory. Without trailing slash.
    :ivar dict parsed: Mapping of file paths to parsed trees

    """

    def __init__(self, root):
        self.parsed = {} # type: Dict[str, Union[List, nginxparser.UnspacedList]]
        self.root = os.path.abspath(root)
        self.config_root = self._find_config_root()

        # Parse nginx.conf and included files.
        # TODO: Check sites-available/ as well. For now, the configurator does
        # not enable sites from there.
        self.load()

    def load(self):
        """Loads Nginx files into a parsed tree.

        """
        self.parsed = {}
        self._parse_recursively(self.config_root)

    def _parse_recursively(self, filepath):
        """Parses nginx config files recursively by looking at 'include'
        directives inside 'http' and 'server' blocks. Note that this only
        reads Nginx files that potentially declare a virtual host.

        :param str filepath: The path to the files to parse, as a glob

        """
        # pylint: disable=too-many-nested-blocks
        filepath = self.abs_path(filepath)
        trees = self._parse_files(filepath)
        for tree in trees:
            for entry in tree:
                if _is_include_directive(entry):
                    # Parse the top-level included file
                    self._parse_recursively(entry[1])
                elif entry[0] == ['http'] or entry[0] == ['server']:
                    # Look for includes in the top-level 'http'/'server' context
                    for subentry in entry[1]:
                        if _is_include_directive(subentry):
                            self._parse_recursively(subentry[1])
                        elif entry[0] == ['http'] and subentry[0] == ['server']:
                            # Look for includes in a 'server' context within
                            # an 'http' context
                            for server_entry in subentry[1]:
                                if _is_include_directive(server_entry):
                                    self._parse_recursively(server_entry[1])

    def abs_path(self, path):
        """Converts a relative path to an absolute path relative to the root.
        Does nothing for paths that are already absolute.

        :param str path: The path

        :returns: The absolute path
        :rtype: str

        """
        if not os.path.isabs(path):
            return os.path.normpath(os.path.join(self.root, path))
        return os.path.normpath(path)

    def _build_addr_to_ssl(self):
        """Builds a map from address to whether it listens on ssl in any server block
        """
        servers = self._get_raw_servers()

        addr_to_ssl = {} # type: Dict[Tuple[str, str], bool]
        for filename in servers:
            for server, _ in servers[filename]:
                # Parse the server block to save addr info
                parsed_server = _parse_server_raw(server)
                for addr in parsed_server['addrs']:
                    addr_tuple = addr.normalized_tuple()
                    if addr_tuple not in addr_to_ssl:
                        addr_to_ssl[addr_tuple] = addr.ssl
                    # An address is considered ssl if ANY server block
                    # listens with ssl on it.
                    addr_to_ssl[addr_tuple] = addr.ssl or addr_to_ssl[addr_tuple]
        return addr_to_ssl

    def _get_raw_servers(self):
        # pylint: disable=cell-var-from-loop
        # type: () -> Dict
        """Get a map of unparsed all server blocks
        """
        servers = {} # type: Dict[str, Union[List, nginxparser.UnspacedList]]
        for filename in self.parsed:
            tree = self.parsed[filename]
            servers[filename] = []
            srv = servers[filename] # workaround undefined loop var in lambdas

            # Find all the server blocks
            _do_for_subarray(tree, lambda x: len(x) >= 2 and x[0] == ['server'],
                             lambda x, y: srv.append((x[1], y)))

            # Find 'include' statements in server blocks and append their trees
            for i, (server, path) in enumerate(servers[filename]):
                new_server = self._get_included_directives(server)
                servers[filename][i] = (new_server, path)
        return servers

    def get_vhosts(self):
        """Gets list of all 'virtual hosts' found in Nginx configuration.

        Technically this is a misnomer because Nginx does not have virtual
        hosts, it has 'server blocks'.

        :returns: List of :class:`~certbot_nginx._internal.obj.VirtualHost`
            objects found in configuration
        :rtype: list

        """
        enabled = True  # We only look at enabled vhosts for now
        servers = self._get_raw_servers()

        vhosts = []
        for filename in servers:
            for server, path in servers[filename]:
                # Parse the server block into a VirtualHost object

                parsed_server = _parse_server_raw(server)
                vhost = obj.VirtualHost(filename,
                                        parsed_server['addrs'],
                                        parsed_server['ssl'],
                                        enabled,
                                        parsed_server['names'],
                                        server,
                                        path)
                vhosts.append(vhost)

        self._update_vhosts_addrs_ssl(vhosts)

        return vhosts

    def _update_vhosts_addrs_ssl(self, vhosts):
        """Update a list of raw parsed vhosts to include global address sslishness
        """
        addr_to_ssl = self._build_addr_to_ssl()
        for vhost in vhosts:
            for addr in vhost.addrs:
                addr.ssl = addr_to_ssl[addr.normalized_tuple()]
                if addr.ssl:
                    vhost.ssl = True

    def _get_included_directives(self, block):
        """Returns array with the "include" directives expanded out by
        concatenating the contents of the included file to the block.

        :param list block:
        :rtype: list

        """
        result = copy.deepcopy(block)  # Copy the list to keep self.parsed idempotent
        for directive in block:
            if _is_include_directive(directive):
                included_files = glob.glob(
                    self.abs_path(directive[1]))
                for incl in included_files:
                    try:
                        result.extend(self.parsed[incl])
                    except KeyError:
                        # Included file was never parsed (e.g. unreadable);
                        # skip it rather than fail the whole expansion.
                        pass
        return result

    def _parse_files(self, filepath, override=False):
        """Parse files from a glob

        :param str filepath: Nginx config file path
        :param bool override: Whether to parse a file that has been parsed
        :returns: list of parsed tree structures
        :rtype: list

        """
        files = glob.glob(filepath) # nginx on unix calls glob(3) for this
                                    # XXX Windows nginx uses FindFirstFile, and
                                    # should have a narrower call here
        trees = []
        for item in files:
            if item in self.parsed and not override:
                continue
            try:
                with open(item) as _file:
                    parsed = nginxparser.load(_file)
                self.parsed[item] = parsed
                trees.append(parsed)
            except IOError:
                logger.warning("Could not open file: %s", item)
            except pyparsing.ParseException as err:
                logger.debug("Could not parse file: %s due to %s", item, err)
        return trees

    def _find_config_root(self):
        """Return the Nginx Configuration Root file."""
        location = ['nginx.conf']

        for name in location:
            if os.path.isfile(os.path.join(self.root, name)):
                return os.path.join(self.root, name)

        raise errors.NoInstallationError(
            "Could not find Nginx root configuration file (nginx.conf)")

    def filedump(self, ext='tmp', lazy=True):
        """Dumps parsed configurations into files.

        :param str ext: The file extension to use for the dumped files. If
            empty, this overrides the existing conf files.
        :param bool lazy: Only write files that have been modified

        """
        # Best-effort atomicity is enforced above us by reverter.py
        for filename in self.parsed:
            tree = self.parsed[filename]
            if ext:
                filename = filename + os.path.extsep + ext
            try:
                if lazy and not tree.is_dirty():
                    continue
                out = nginxparser.dumps(tree)
                logger.debug('Writing nginx conf tree to %s:\n%s', filename, out)
                with open(filename, 'w') as _file:
                    _file.write(out)

            except IOError:
                logger.error("Could not open file for writing: %s", filename)

    def parse_server(self, server):
        """Parses a list of server directives, accounting for global address sslishness.

        :param list server: list of directives in a server block
        :rtype: dict

        """
        addr_to_ssl = self._build_addr_to_ssl()
        parsed_server = _parse_server_raw(server)
        _apply_global_addr_ssl(addr_to_ssl, parsed_server)
        return parsed_server

    def has_ssl_on_directive(self, vhost):
        """Does vhost have ssl on for all ports?

        :param :class:`~certbot_nginx._internal.obj.VirtualHost` vhost: The vhost in question

        :returns: True if 'ssl on' directive is included
        :rtype: bool

        """
        server = vhost.raw
        for directive in server:
            if not directive:
                continue
            if _is_ssl_on_directive(directive):
                return True

        return False

    def add_server_directives(self, vhost, directives, insert_at_top=False):
        """Add directives to the server block identified by vhost.

        This method modifies vhost to be fully consistent with the new directives.

        ..note :: It's an error to try and add a nonrepeatable directive that already
            exists in the config block with a conflicting value.

        ..todo :: Doesn't match server blocks whose server_name directives are
            split across multiple conf files.

        :param :class:`~certbot_nginx._internal.obj.VirtualHost` vhost: The vhost
            whose information we use to match on

        :param list directives: The directives to add
        :param bool insert_at_top: True if the directives need to be inserted at the top
            of the server block instead of the bottom

        """
        self._modify_server_directives(vhost,
            functools.partial(_add_directives, directives, insert_at_top))

    def update_or_add_server_directives(self, vhost, directives, insert_at_top=False):
        """Add or replace directives in the server block identified by vhost.

        This method modifies vhost to be fully consistent with the new directives.

        ..note :: When a directive with the same name already exists in the
        config block, the first instance will be replaced. Otherwise, the directive
        will be appended/prepended to the config block as in add_server_directives.

        ..todo :: Doesn't match server blocks whose server_name directives are
            split across multiple conf files.

        :param :class:`~certbot_nginx._internal.obj.VirtualHost` vhost: The vhost
            whose information we use to match on

        :param list directives: The directives to add
        :param bool insert_at_top: True if the directives need to be inserted at the top
            of the server block instead of the bottom

        """
        self._modify_server_directives(vhost,
            functools.partial(_update_or_add_directives, directives, insert_at_top))

    def remove_server_directives(self, vhost, directive_name, match_func=None):
        """Remove all directives of type directive_name.

        :param :class:`~certbot_nginx._internal.obj.VirtualHost` vhost: The vhost
            to remove directives from
        :param string directive_name: The directive type to remove
        :param callable match_func: Function of the directive that returns true for directives
            to be deleted.
        """
        self._modify_server_directives(vhost,
            functools.partial(_remove_directives, directive_name, match_func))

    def _update_vhost_based_on_new_directives(self, vhost, directives_list):
        """Re-derive a vhost's addrs/ssl/names from an updated directive list."""
        new_server = self._get_included_directives(directives_list)
        parsed_server = self.parse_server(new_server)
        vhost.addrs = parsed_server['addrs']
        vhost.ssl = parsed_server['ssl']
        vhost.names = parsed_server['names']
        vhost.raw = new_server

    def _modify_server_directives(self, vhost, block_func):
        """Apply block_func to the parsed server block at vhost.path, then
        resynchronize the vhost object with the mutated tree."""
        filename = vhost.filep
        try:
            result = self.parsed[filename]
            # Walk down the parse tree to the server block's node.
            for index in vhost.path:
                result = result[index]
            if not isinstance(result, list) or len(result) != 2:
                raise errors.MisconfigurationError("Not a server block.")
            result = result[1]
            block_func(result)

            self._update_vhost_based_on_new_directives(vhost, result)
        except errors.MisconfigurationError as err:
            raise errors.MisconfigurationError("Problem in %s: %s" % (filename, str(err)))

    def duplicate_vhost(self, vhost_template, remove_singleton_listen_params=False,
                        only_directives=None):
        """Duplicate the vhost in the configuration files.

        :param :class:`~certbot_nginx._internal.obj.VirtualHost` vhost_template: The vhost
            whose information we copy
        :param bool remove_singleton_listen_params: If we should remove parameters
            from listen directives in the block that can only be used once per address
        :param list only_directives: If it exists, only duplicate the named directives. Only
            looks at first level of depth; does not expand includes.

        :returns: A vhost object for the newly created vhost
        :rtype: :class:`~certbot_nginx._internal.obj.VirtualHost`

        """
        # TODO: https://github.com/certbot/certbot/issues/5185
        # put it in the same file as the template, at the same level
        new_vhost = copy.deepcopy(vhost_template)

        enclosing_block = self.parsed[vhost_template.filep]
        for index in vhost_template.path[:-1]:
            enclosing_block = enclosing_block[index]
        raw_in_parsed = copy.deepcopy(enclosing_block[vhost_template.path[-1]])

        if only_directives is not None:
            new_directives = nginxparser.UnspacedList([])
            for directive in raw_in_parsed[1]:
                if directive and directive[0] in only_directives:
                    new_directives.append(directive)
            raw_in_parsed[1] = new_directives

            self._update_vhost_based_on_new_directives(new_vhost, new_directives)

        enclosing_block.append(raw_in_parsed)
        new_vhost.path[-1] = len(enclosing_block) - 1
        if remove_singleton_listen_params:
            for addr in new_vhost.addrs:
                addr.default = False
                addr.ipv6only = False
            for directive in enclosing_block[new_vhost.path[-1]][1]:
                if directive and directive[0] == 'listen':
                    # Exclude one-time use parameters which will cause an error if repeated.
                    # https://nginx.org/en/docs/http/ngx_http_core_module.html#listen
                    exclude = set(('default_server', 'default', 'setfib', 'fastopen', 'backlog',
                                   'rcvbuf', 'sndbuf', 'accept_filter', 'deferred', 'bind',
                                   'ipv6only', 'reuseport', 'so_keepalive'))

                    for param in exclude:
                        # See: github.com/certbot/certbot/pull/6223#pullrequestreview-143019225
                        keys = [x.split('=')[0] for x in directive]
                        if param in keys:
                            del directive[keys.index(param)]
        return new_vhost
def _parse_ssl_options(ssl_options):
    """Load and parse an NGINX TLS options file, returning [] on any failure."""
    if ssl_options is None:
        return []
    try:
        with open(ssl_options) as _file:
            return nginxparser.load(_file)
    except IOError:
        logger.warning("Missing NGINX TLS options file: %s", ssl_options)
    except pyparsing.ParseBaseException as err:
        logger.debug("Could not parse file: %s due to %s", ssl_options, err)
    return []
def _do_for_subarray(entry, condition, func, path=None):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if path is None:
path = []
if isinstance(entry, list):
if condition(entry):
func(entry, path)
else:
for index, item in enumerate(entry):
_do_for_subarray(item, condition, func, path + [index])
def get_best_match(target_name, names):
    """Finds the best match for target_name out of names using the Nginx
    name-matching rules (exact > longest wildcard starting with * >
    longest wildcard ending with * > regex).

    :param str target_name: The name to match
    :param set names: The candidate server names
    :returns: Tuple of (type of match, the name that matched)
    :rtype: tuple

    """
    buckets = {'exact': [], 'wildcard_start': [], 'wildcard_end': [], 'regex': []}
    for name in names:
        if _exact_match(target_name, name):
            buckets['exact'].append(name)
        elif _wildcard_match(target_name, name, True):
            buckets['wildcard_start'].append(name)
        elif _wildcard_match(target_name, name, False):
            buckets['wildcard_end'].append(name)
        elif _regex_match(target_name, name):
            buckets['regex'].append(name)

    if buckets['exact']:
        # There can be more than one exact match; e.g. eff.org, .eff.org
        return ('exact', min(buckets['exact'], key=len))
    if buckets['wildcard_start']:
        # Return the longest wildcard
        return ('wildcard_start', max(buckets['wildcard_start'], key=len))
    if buckets['wildcard_end']:
        # Return the longest wildcard
        return ('wildcard_end', max(buckets['wildcard_end'], key=len))
    if buckets['regex']:
        # Just return the first one for now
        return ('regex', buckets['regex'][0])
    return (None, None)
def _exact_match(target_name, name):
return name in (target_name, '.' + target_name)
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first not in ('*', ''):
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
return re.match(regex, target_name)
except re.error: # pragma: no cover
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
    """Checks if an nginx parsed entry is an 'include' directive.

    :param list entry: the parsed entry
    :returns: Whether it's an 'include' directive
    :rtype: bool

    """
    if not isinstance(entry, list) or len(entry) != 2:
        return False
    return entry[0] == 'include' and isinstance(entry[1], six.string_types)
def _is_ssl_on_directive(entry):
"""Checks if an nginx parsed entry is an 'ssl on' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'ssl on' directive
:rtype: bool
"""
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'ssl' and
entry[1] == 'on')
def _add_directives(directives, insert_at_top, block):
"""Adds directives to a config block."""
for directive in directives:
_add_directive(block, directive, insert_at_top)
if block and '\n' not in block[-1]: # could be " \n " or ["\n"] !
block.append(nginxparser.UnspacedList('\n'))
def _update_or_add_directives(directives, insert_at_top, block):
"""Adds or replaces directives in a config block."""
for directive in directives:
_update_or_add_directive(block, directive, insert_at_top)
if block and '\n' not in block[-1]: # could be " \n " or ["\n"] !
block.append(nginxparser.UnspacedList('\n'))
INCLUDE = 'include'
REPEATABLE_DIRECTIVES = set(['server_name', 'listen', INCLUDE, 'rewrite', 'add_header'])
COMMENT = ' managed by Certbot'
COMMENT_BLOCK = [' ', '#', COMMENT]
def comment_directive(block, location):
"""Add a ``#managed by Certbot`` comment to the end of the line at location.
:param list block: The block containing the directive to be commented
:param int location: The location within ``block`` of the directive to be commented
"""
next_entry = block[location + 1] if location + 1 < len(block) else None
if isinstance(next_entry, list) and next_entry:
if len(next_entry) >= 2 and next_entry[-2] == "#" and COMMENT in next_entry[-1]:
return
if isinstance(next_entry, nginxparser.UnspacedList):
next_entry = next_entry.spaced[0]
else:
next_entry = next_entry[0]
block.insert(location + 1, COMMENT_BLOCK[:])
if next_entry is not None and "\n" not in next_entry:
block.insert(location + 2, '\n')
def _comment_out_directive(block, location, include_location):
"""Comment out the line at location, with a note of explanation."""
comment_message = ' duplicated in {0}'.format(include_location)
# add the end comment
# create a dumpable object out of block[location] (so it includes the ;)
directive = block[location]
new_dir_block = nginxparser.UnspacedList([]) # just a wrapper
new_dir_block.append(directive)
dumped = nginxparser.dumps(new_dir_block)
commented = dumped + ' #' + comment_message # add the comment directly to the one-line string
new_dir = nginxparser.loads(commented) # reload into UnspacedList
# add the beginning comment
insert_location = 0
if new_dir[0].spaced[0] != new_dir[0][0]: # if there's whitespace at the beginning
insert_location = 1
new_dir[0].spaced.insert(insert_location, "# ") # comment out the line
new_dir[0].spaced.append(";") # directly add in the ;, because now dumping won't work properly
dumped = nginxparser.dumps(new_dir)
new_dir = nginxparser.loads(dumped) # reload into an UnspacedList
block[location] = new_dir[0] # set the now-single-line-comment directive back in place
def _find_location(block, directive_name, match_func=None):
"""Finds the index of the first instance of directive_name in block.
If no line exists, use None."""
return next((index for index, line in enumerate(block) \
if line and line[0] == directive_name and (match_func is None or match_func(line))), None)
def _is_whitespace_or_comment(directive):
"""Is this directive either a whitespace or comment directive?"""
return len(directive) == 0 or directive[0] == '#'
def _add_directive(block, directive, insert_at_top):
if not isinstance(directive, nginxparser.UnspacedList):
directive = nginxparser.UnspacedList(directive)
if _is_whitespace_or_comment(directive):
# whitespace or comment
block.append(directive)
return
location = _find_location(block, directive[0])
# Append or prepend directive. Fail if the name is not a repeatable directive name,
# and there is already a copy of that directive with a different value
# in the config file.
# handle flat include files
directive_name = directive[0]
def can_append(loc, dir_name):
""" Can we append this directive to the block? """
return loc is None or (isinstance(dir_name, six.string_types)
and dir_name in REPEATABLE_DIRECTIVES)
err_fmt = 'tried to insert directive "{0}" but found conflicting "{1}".'
# Give a better error message about the specific directive than Nginx's "fail to restart"
if directive_name == INCLUDE:
# in theory, we might want to do this recursively, but in practice, that's really not
# necessary because we know what file we're talking about (and if we don't recurse, we
# just give a worse error message)
included_directives = _parse_ssl_options(directive[1])
for included_directive in included_directives:
included_dir_loc = _find_location(block, included_directive[0])
included_dir_name = included_directive[0]
if (not _is_whitespace_or_comment(included_directive)
and not can_append(included_dir_loc, included_dir_name)):
if block[included_dir_loc] != included_directive:
raise errors.MisconfigurationError(err_fmt.format(included_directive,
block[included_dir_loc]))
_comment_out_directive(block, included_dir_loc, directive[1])
if can_append(location, directive_name):
if insert_at_top:
# Add a newline so the comment doesn't comment
# out existing directives
block.insert(0, nginxparser.UnspacedList('\n'))
block.insert(0, directive)
comment_directive(block, 0)
else:
block.append(directive)
comment_directive(block, len(block) - 1)
elif block[location] != directive:
raise errors.MisconfigurationError(err_fmt.format(directive, block[location]))
def _update_directive(block, directive, location):
block[location] = directive
comment_directive(block, location)
def _update_or_add_directive(block, directive, insert_at_top):
if not isinstance(directive, nginxparser.UnspacedList):
directive = nginxparser.UnspacedList(directive)
if _is_whitespace_or_comment(directive):
# whitespace or comment
block.append(directive)
return
location = _find_location(block, directive[0])
# we can update directive
if location is not None:
_update_directive(block, directive, location)
return
_add_directive(block, directive, insert_at_top)
def _is_certbot_comment(directive):
return '#' in directive and COMMENT in directive
def _remove_directives(directive_name, match_func, block):
"""Removes directives of name directive_name from a config block if match_func matches.
"""
while True:
location = _find_location(block, directive_name, match_func=match_func)
if location is None:
return
# if the directive was made by us, remove the comment following
if location + 1 < len(block) and _is_certbot_comment(block[location + 1]):
del block[location + 1]
del block[location]
def _apply_global_addr_ssl(addr_to_ssl, parsed_server):
"""Apply global sslishness information to the parsed server block
"""
for addr in parsed_server['addrs']:
addr.ssl = addr_to_ssl[addr.normalized_tuple()]
if addr.ssl:
parsed_server['ssl'] = True
def _parse_server_raw(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
addrs = set() # type: Set[obj.Addr]
ssl = False # type: bool
names = set() # type: Set[str]
apply_ssl_to_all_addrs = False
for directive in server:
if not directive:
continue
if directive[0] == 'listen':
addr = obj.Addr.fromstring(" ".join(directive[1:]))
if addr:
addrs.add(addr)
if addr.ssl:
ssl = True
elif directive[0] == 'server_name':
names.update(x.strip('"\'') for x in directive[1:])
elif _is_ssl_on_directive(directive):
ssl = True
apply_ssl_to_all_addrs = True
if apply_ssl_to_all_addrs:
for addr in addrs:
addr.ssl = True
return {
'addrs': addrs,
'ssl': ssl,
'names': names
}
| 38.903141 | 98 | 0.625866 | import copy
import functools
import glob
import logging
import re
import pyparsing
import six
from acme.magic_typing import Dict
from acme.magic_typing import List
from acme.magic_typing import Set
from acme.magic_typing import Tuple
from acme.magic_typing import Union
from certbot import errors
from certbot.compat import os
from certbot_nginx._internal import nginxparser
from certbot_nginx._internal import obj
logger = logging.getLogger(__name__)
class NginxParser(object):
def __init__(self, root):
self.parsed = {}
self.root = os.path.abspath(root)
self.config_root = self._find_config_root()
self.load()
def load(self):
self.parsed = {}
self._parse_recursively(self.config_root)
def _parse_recursively(self, filepath):
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
if not os.path.isabs(path):
return os.path.normpath(os.path.join(self.root, path))
return os.path.normpath(path)
def _build_addr_to_ssl(self):
servers = self._get_raw_servers()
addr_to_ssl = {}
for filename in servers:
for server, _ in servers[filename]:
parsed_server = _parse_server_raw(server)
for addr in parsed_server['addrs']:
addr_tuple = addr.normalized_tuple()
if addr_tuple not in addr_to_ssl:
addr_to_ssl[addr_tuple] = addr.ssl
addr_to_ssl[addr_tuple] = addr.ssl or addr_to_ssl[addr_tuple]
return addr_to_ssl
def _get_raw_servers(self):
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename]
_do_for_subarray(tree, lambda x: len(x) >= 2 and x[0] == ['server'],
lambda x, y: srv.append((x[1], y)))
for i, (server, path) in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = (new_server, path)
return servers
def get_vhosts(self):
enabled = True
servers = self._get_raw_servers()
vhosts = []
for filename in servers:
for server, path in servers[filename]:
parsed_server = _parse_server_raw(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server,
path)
vhosts.append(vhost)
self._update_vhosts_addrs_ssl(vhosts)
return vhosts
def _update_vhosts_addrs_ssl(self, vhosts):
addr_to_ssl = self._build_addr_to_ssl()
for vhost in vhosts:
for addr in vhost.addrs:
addr.ssl = addr_to_ssl[addr.normalized_tuple()]
if addr.ssl:
vhost.ssl = True
def _get_included_directives(self, block):
result = copy.deepcopy(block)
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warning("Could not open file: %s", item)
except pyparsing.ParseException as err:
logger.debug("Could not parse file: %s due to %s", item, err)
return trees
def _find_config_root(self):
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find Nginx root configuration file (nginx.conf)")
def filedump(self, ext='tmp', lazy=True):
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
if lazy and not tree.is_dirty():
continue
out = nginxparser.dumps(tree)
logger.debug('Writing nginx conf tree to %s:\n%s', filename, out)
with open(filename, 'w') as _file:
_file.write(out)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def parse_server(self, server):
addr_to_ssl = self._build_addr_to_ssl()
parsed_server = _parse_server_raw(server)
_apply_global_addr_ssl(addr_to_ssl, parsed_server)
return parsed_server
def has_ssl_on_directive(self, vhost):
server = vhost.raw
for directive in server:
if not directive:
continue
if _is_ssl_on_directive(directive):
return True
return False
def add_server_directives(self, vhost, directives, insert_at_top=False):
self._modify_server_directives(vhost,
functools.partial(_add_directives, directives, insert_at_top))
def update_or_add_server_directives(self, vhost, directives, insert_at_top=False):
self._modify_server_directives(vhost,
functools.partial(_update_or_add_directives, directives, insert_at_top))
def remove_server_directives(self, vhost, directive_name, match_func=None):
self._modify_server_directives(vhost,
functools.partial(_remove_directives, directive_name, match_func))
def _update_vhost_based_on_new_directives(self, vhost, directives_list):
new_server = self._get_included_directives(directives_list)
parsed_server = self.parse_server(new_server)
vhost.addrs = parsed_server['addrs']
vhost.ssl = parsed_server['ssl']
vhost.names = parsed_server['names']
vhost.raw = new_server
def _modify_server_directives(self, vhost, block_func):
filename = vhost.filep
try:
result = self.parsed[filename]
for index in vhost.path:
result = result[index]
if not isinstance(result, list) or len(result) != 2:
raise errors.MisconfigurationError("Not a server block.")
result = result[1]
block_func(result)
self._update_vhost_based_on_new_directives(vhost, result)
except errors.MisconfigurationError as err:
raise errors.MisconfigurationError("Problem in %s: %s" % (filename, str(err)))
def duplicate_vhost(self, vhost_template, remove_singleton_listen_params=False,
only_directives=None):
new_vhost = copy.deepcopy(vhost_template)
enclosing_block = self.parsed[vhost_template.filep]
for index in vhost_template.path[:-1]:
enclosing_block = enclosing_block[index]
raw_in_parsed = copy.deepcopy(enclosing_block[vhost_template.path[-1]])
if only_directives is not None:
new_directives = nginxparser.UnspacedList([])
for directive in raw_in_parsed[1]:
if directive and directive[0] in only_directives:
new_directives.append(directive)
raw_in_parsed[1] = new_directives
self._update_vhost_based_on_new_directives(new_vhost, new_directives)
enclosing_block.append(raw_in_parsed)
new_vhost.path[-1] = len(enclosing_block) - 1
if remove_singleton_listen_params:
for addr in new_vhost.addrs:
addr.default = False
addr.ipv6only = False
for directive in enclosing_block[new_vhost.path[-1]][1]:
if directive and directive[0] == 'listen':
exclude = set(('default_server', 'default', 'setfib', 'fastopen', 'backlog',
'rcvbuf', 'sndbuf', 'accept_filter', 'deferred', 'bind',
'ipv6only', 'reuseport', 'so_keepalive'))
for param in exclude:
s = [x.split('=')[0] for x in directive]
if param in keys:
del directive[keys.index(param)]
return new_vhost
def _parse_ssl_options(ssl_options):
if ssl_options is not None:
try:
with open(ssl_options) as _file:
return nginxparser.load(_file)
except IOError:
logger.warning("Missing NGINX TLS options file: %s", ssl_options)
except pyparsing.ParseBaseException as err:
logger.debug("Could not parse file: %s due to %s", ssl_options, err)
return []
def _do_for_subarray(entry, condition, func, path=None):
if path is None:
path = []
if isinstance(entry, list):
if condition(entry):
func(entry, path)
else:
for index, item in enumerate(entry):
_do_for_subarray(item, condition, func, path + [index])
def get_best_match(target_name, names):
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if exact:
match = min(exact, key=len)
return ('exact', match)
if wildcard_start:
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if wildcard_end:
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if regex:
match = regex[0]
return ('regex', match)
return (None, None)
def _exact_match(target_name, name):
return name in (target_name, '.' + target_name)
def _wildcard_match(target_name, name, start):
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
if not start:
parts.reverse()
match_parts.reverse()
first = match_parts.pop(0)
if first not in ('*', ''):
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
if len(name) < 2 or name[0] != '~':
return False
try:
regex = re.compile(name[1:])
return re.match(regex, target_name)
except re.error:
return False
def _is_include_directive(entry):
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'include' and
isinstance(entry[1], six.string_types))
def _is_ssl_on_directive(entry):
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'ssl' and
entry[1] == 'on')
def _add_directives(directives, insert_at_top, block):
for directive in directives:
_add_directive(block, directive, insert_at_top)
if block and '\n' not in block[-1]:
block.append(nginxparser.UnspacedList('\n'))
def _update_or_add_directives(directives, insert_at_top, block):
for directive in directives:
_update_or_add_directive(block, directive, insert_at_top)
if block and '\n' not in block[-1]:
block.append(nginxparser.UnspacedList('\n'))
INCLUDE = 'include'
REPEATABLE_DIRECTIVES = set(['server_name', 'listen', INCLUDE, 'rewrite', 'add_header'])
COMMENT = ' managed by Certbot'
COMMENT_BLOCK = [' ', '#', COMMENT]
def comment_directive(block, location):
next_entry = block[location + 1] if location + 1 < len(block) else None
if isinstance(next_entry, list) and next_entry:
if len(next_entry) >= 2 and next_entry[-2] == "#" and COMMENT in next_entry[-1]:
return
if isinstance(next_entry, nginxparser.UnspacedList):
next_entry = next_entry.spaced[0]
else:
next_entry = next_entry[0]
block.insert(location + 1, COMMENT_BLOCK[:])
if next_entry is not None and "\n" not in next_entry:
block.insert(location + 2, '\n')
def _comment_out_directive(block, location, include_location):
comment_message = ' duplicated in {0}'.format(include_location)
directive = block[location]
new_dir_block = nginxparser.UnspacedList([])
new_dir_block.append(directive)
dumped = nginxparser.dumps(new_dir_block)
commented = dumped + ' #' + comment_message
new_dir = nginxparser.loads(commented)
insert_location = 0
if new_dir[0].spaced[0] != new_dir[0][0]:
insert_location = 1
new_dir[0].spaced.insert(insert_location, "# ") # comment out the line
new_dir[0].spaced.append(";") # directly add in the ;, because now dumping won't work properly
dumped = nginxparser.dumps(new_dir)
new_dir = nginxparser.loads(dumped)
block[location] = new_dir[0]
def _find_location(block, directive_name, match_func=None):
return next((index for index, line in enumerate(block) \
if line and line[0] == directive_name and (match_func is None or match_func(line))), None)
def _is_whitespace_or_comment(directive):
return len(directive) == 0 or directive[0] == '#'
def _add_directive(block, directive, insert_at_top):
if not isinstance(directive, nginxparser.UnspacedList):
directive = nginxparser.UnspacedList(directive)
if _is_whitespace_or_comment(directive):
block.append(directive)
return
location = _find_location(block, directive[0])
directive_name = directive[0]
def can_append(loc, dir_name):
return loc is None or (isinstance(dir_name, six.string_types)
and dir_name in REPEATABLE_DIRECTIVES)
err_fmt = 'tried to insert directive "{0}" but found conflicting "{1}".'
if directive_name == INCLUDE:
# in theory, we might want to do this recursively, but in practice, that's really not
included_directives = _parse_ssl_options(directive[1])
for included_directive in included_directives:
included_dir_loc = _find_location(block, included_directive[0])
included_dir_name = included_directive[0]
if (not _is_whitespace_or_comment(included_directive)
and not can_append(included_dir_loc, included_dir_name)):
if block[included_dir_loc] != included_directive:
raise errors.MisconfigurationError(err_fmt.format(included_directive,
block[included_dir_loc]))
_comment_out_directive(block, included_dir_loc, directive[1])
if can_append(location, directive_name):
if insert_at_top:
# out existing directives
block.insert(0, nginxparser.UnspacedList('\n'))
block.insert(0, directive)
comment_directive(block, 0)
else:
block.append(directive)
comment_directive(block, len(block) - 1)
elif block[location] != directive:
raise errors.MisconfigurationError(err_fmt.format(directive, block[location]))
def _update_directive(block, directive, location):
block[location] = directive
comment_directive(block, location)
def _update_or_add_directive(block, directive, insert_at_top):
if not isinstance(directive, nginxparser.UnspacedList):
directive = nginxparser.UnspacedList(directive)
if _is_whitespace_or_comment(directive):
# whitespace or comment
block.append(directive)
return
location = _find_location(block, directive[0])
# we can update directive
if location is not None:
_update_directive(block, directive, location)
return
_add_directive(block, directive, insert_at_top)
def _is_certbot_comment(directive):
return '
def _remove_directives(directive_name, match_func, block):
while True:
location = _find_location(block, directive_name, match_func=match_func)
if location is None:
return
# if the directive was made by us, remove the comment following
if location + 1 < len(block) and _is_certbot_comment(block[location + 1]):
del block[location + 1]
del block[location]
def _apply_global_addr_ssl(addr_to_ssl, parsed_server):
for addr in parsed_server['addrs']:
addr.ssl = addr_to_ssl[addr.normalized_tuple()]
if addr.ssl:
parsed_server['ssl'] = True
def _parse_server_raw(server):
addrs = set() # type: Set[obj.Addr]
ssl = False # type: bool
names = set() # type: Set[str]
apply_ssl_to_all_addrs = False
for directive in server:
if not directive:
continue
if directive[0] == 'listen':
addr = obj.Addr.fromstring(" ".join(directive[1:]))
if addr:
addrs.add(addr)
if addr.ssl:
ssl = True
elif directive[0] == 'server_name':
names.update(x.strip('"\'') for x in directive[1:])
elif _is_ssl_on_directive(directive):
ssl = True
apply_ssl_to_all_addrs = True
if apply_ssl_to_all_addrs:
for addr in addrs:
addr.ssl = True
return {
'addrs': addrs,
'ssl': ssl,
'names': names
}
| true | true |
f71d7c6696990d261b06073315046765fa6f1c50 | 2,131 | py | Python | src/predict-binary.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | null | null | null | src/predict-binary.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | 8 | 2020-11-13T18:55:17.000Z | 2022-03-12T00:34:40.000Z | src/predict-binary.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | null | null | null | import os
import numpy as np
#os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
img_width, img_height = 48, 48
model_path = '../src/models/model.h5'
weights_path = '../src/models/weights'
model = load_model(model_path)
test_path = '../data/test'
def predict(file):
x = load_img(file, target_size=(img_width,img_height))
x = img_to_array(x)
x = np.expand_dims(x, axis=0)
array = model.predict(x)
result = array[0]
if result[0] > result[1]:
if result[0] > 0.9:
print("Predicted answer: Buy")
answer = 'buy'
print(result)
print(array)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
else:
if result[1] > 0.9:
print("Predicted answer: Sell")
answer = 'sell'
print(result)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
return answer
tb = 0
ts = 0
fb = 0
fs = 0
na = 0
for i, ret in enumerate(os.walk(test_path + '/buy')):
for i, filename in enumerate(ret[2]):
if filename.startswith("."):
continue
print("Label: buy")
result = predict(ret[0] + '/' + filename)
if result == "buy":
tb += 1
elif result == 'n/a':
print('no action')
na += 1
else:
fb += 1
for i, ret in enumerate(os.walk(test_path + '/sell')):
for i, filename in enumerate(ret[2]):
if filename.startswith("."):
continue
print("Label: sell")
result = predict(ret[0] + '/' + filename)
if result == "sell":
ts += 1
elif result == 'n/a':
print('no action')
na += 1
else:
fs += 1
"""
Check metrics
"""
print("True buy: ", tb)
print("True sell: ", ts)
print("False buy: ", fb) # important
print("False sell: ", fs)
print("No action", na)
precision = (tb+ts) / (tb + ts + fb + fs)
recall = tb / (tb + fs)
print("Precision: ", precision)
print("Recall: ", recall)
f_measure = (2 * recall * precision) / (recall + precision)
print("F-measure: ", f_measure)
| 22.670213 | 80 | 0.602065 | import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
img_width, img_height = 48, 48
model_path = '../src/models/model.h5'
weights_path = '../src/models/weights'
model = load_model(model_path)
test_path = '../data/test'
def predict(file):
x = load_img(file, target_size=(img_width,img_height))
x = img_to_array(x)
x = np.expand_dims(x, axis=0)
array = model.predict(x)
result = array[0]
if result[0] > result[1]:
if result[0] > 0.9:
print("Predicted answer: Buy")
answer = 'buy'
print(result)
print(array)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
else:
if result[1] > 0.9:
print("Predicted answer: Sell")
answer = 'sell'
print(result)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
return answer
tb = 0
ts = 0
fb = 0
fs = 0
na = 0
for i, ret in enumerate(os.walk(test_path + '/buy')):
for i, filename in enumerate(ret[2]):
if filename.startswith("."):
continue
print("Label: buy")
result = predict(ret[0] + '/' + filename)
if result == "buy":
tb += 1
elif result == 'n/a':
print('no action')
na += 1
else:
fb += 1
for i, ret in enumerate(os.walk(test_path + '/sell')):
for i, filename in enumerate(ret[2]):
if filename.startswith("."):
continue
print("Label: sell")
result = predict(ret[0] + '/' + filename)
if result == "sell":
ts += 1
elif result == 'n/a':
print('no action')
na += 1
else:
fs += 1
print("True buy: ", tb)
print("True sell: ", ts)
print("False buy: ", fb)
print("False sell: ", fs)
print("No action", na)
precision = (tb+ts) / (tb + ts + fb + fs)
recall = tb / (tb + fs)
print("Precision: ", precision)
print("Recall: ", recall)
f_measure = (2 * recall * precision) / (recall + precision)
print("F-measure: ", f_measure)
| true | true |
f71d7ce1e66742abde39bb83f3025bb50e93bbb1 | 56,871 | py | Python | src/python/tests/core/bot/tasks/fuzz_task_test.py | vschs007/clusterfuzz | 4b5e825abcd80d81d734a69b7457df59a6a9aa6e | [
"Apache-2.0"
] | null | null | null | src/python/tests/core/bot/tasks/fuzz_task_test.py | vschs007/clusterfuzz | 4b5e825abcd80d81d734a69b7457df59a6a9aa6e | [
"Apache-2.0"
] | null | null | null | src/python/tests/core/bot/tasks/fuzz_task_test.py | vschs007/clusterfuzz | 4b5e825abcd80d81d734a69b7457df59a6a9aa6e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fuzz_task tests."""
# pylint: disable=protected-access
from builtins import object
from builtins import range
import datetime
import mock
import os
import parameterized
import shutil
import tempfile
import time
import unittest
from pyfakefs import fake_filesystem_unittest
import six
from base import utils
from bot import testcase_manager
from bot.fuzzers import engine
from bot.fuzzers.libFuzzer import engine as libfuzzer_engine
from bot.tasks import fuzz_task
from bot.untrusted_runner import file_host
from build_management import build_manager
from chrome import crash_uploader
from crash_analysis.stack_parsing import stack_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb
from google_cloud_utils import big_query
from metrics import monitor
from metrics import monitoring_metrics
from system import environment
from tests.test_libs import helpers
from tests.test_libs import test_utils
from tests.test_libs import untrusted_runner_helpers
class TrackFuzzerRunResultTest(unittest.TestCase):
  """Test _track_fuzzer_run_result."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def _return_code_count(self, return_code):
    """Read the FUZZER_RETURN_CODE_COUNT counter for fuzzer 'name'."""
    return monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
        'fuzzer': 'name',
        'return_code': return_code
    })

  def test_fuzzer_run_result(self):
    """Ensure _track_fuzzer_run_result set the right metrics."""
    # (generated_testcase_count, expected_testcase_count, return_code) runs.
    runs = [
        (10, 100, 2),
        (100, 200, 2),
        (1000, 2000, 2),
        (1000, 500, 0),
        (0, 1000, -1),
        (0, 0, 2),
    ]
    for generated, expected, return_code in runs:
      fuzz_task._track_fuzzer_run_result('name', generated, expected,
                                         return_code)

    # Return codes are counted per (fuzzer, return_code) label pair.
    self.assertEqual(4, self._return_code_count(2))
    self.assertEqual(1, self._return_code_count(0))
    self.assertEqual(1, self._return_code_count(-1))

    # Ratios: 0.1 + 0.5 + 0.5 + 2.0 + 0.0 = 3.1 over 5 runs (the 0/0 run
    # contributes no ratio).
    testcase_count_ratio = (
        monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.get({
            'fuzzer': 'name'
        }))
    self.assertEqual(3.1, testcase_count_ratio.sum)
    self.assertEqual(5, testcase_count_ratio.count)

    # Distribution buckets hit: index 1 once, 3 once, 11 twice, 21 once.
    expected_buckets = [0] * 22
    for index, count in [(1, 1), (3, 1), (11, 2), (21, 1)]:
      expected_buckets[index] = count
    self.assertListEqual(expected_buckets, testcase_count_ratio.buckets)
class TrackBuildRunResultTest(unittest.TestCase):
  """Test _track_build_run_result."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def test_build_run_result(self):
    """Ensure _track_build_run_result set the right metrics."""
    for revision, is_bad in [(10000, True), (10001, True), (10002, False)]:
      fuzz_task._track_build_run_result('name', revision, is_bad)

    # Counts are keyed by (job, bad_build): two bad builds, one good one.
    bad_count = monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
        'job': 'name',
        'bad_build': True
    })
    good_count = monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
        'job': 'name',
        'bad_build': False
    })
    self.assertEqual(2, bad_count)
    self.assertEqual(1, good_count)
class TrackTestcaseRunResultTest(unittest.TestCase):
  """Test _track_testcase_run_result."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def test_testcase_run_result(self):
    """Ensure _track_testcase_run_result sets the right metrics."""
    fuzz_task._track_testcase_run_result('fuzzer', 'job', 2, 5)
    fuzz_task._track_testcase_run_result('fuzzer', 'job', 5, 10)

    # New crashes sum to 2 + 5 = 7, known crashes to 5 + 10 = 15, and both
    # totals are tracked per job and per fuzzer.
    expectations = [
        (monitoring_metrics.JOB_NEW_CRASH_COUNT, {'job': 'job'}, 7),
        (monitoring_metrics.JOB_KNOWN_CRASH_COUNT, {'job': 'job'}, 15),
        (monitoring_metrics.FUZZER_NEW_CRASH_COUNT, {'fuzzer': 'fuzzer'}, 7),
        (monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT, {'fuzzer': 'fuzzer'},
         15),
    ]
    for metric, labels, expected_value in expectations:
      self.assertEqual(expected_value, metric.get(labels))
class TruncateFuzzerOutputTest(unittest.TestCase):
  """Truncate fuzzer output tests."""

  def test_no_truncation(self):
    """No truncation when the output already fits within the limit."""
    self.assertEqual('aaaa', fuzz_task.truncate_fuzzer_output('aaaa', 10))

  def test_truncation(self):
    """Output longer than the limit keeps its head and tail around a marker."""
    self.assertEqual(
        '123456\n...truncated...\n54321',
        fuzz_task.truncate_fuzzer_output(
            '123456xxxxxxxxxxxxxxxxxxxxxxxxxxx54321', 28))

  def test_error(self):
    """Error if limit is too low."""
    # Call the function directly inside assertRaises. Wrapping the call in
    # self.assertEqual (as before) also raised AssertionError whenever the
    # function *returned* a wrong value instead of raising, so the test could
    # pass without exercising the intended failure mode.
    with self.assertRaises(AssertionError):
      fuzz_task.truncate_fuzzer_output('123456xxxxxx54321', 10)
class TrackFuzzTimeTest(unittest.TestCase):
  """Test _TrackFuzzTime."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def _test(self, timeout):
    """Run a 5-second mock fuzz session and verify the recorded total time."""
    clock = helpers.MockTime()
    with fuzz_task._TrackFuzzTime('fuzzer', 'job', clock) as tracker:
      clock.advance(5)
      tracker.timeout = timeout

    recorded_time = monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.get({
        'fuzzer': 'fuzzer',
        'timeout': timeout
    })
    self.assertEqual(5, recorded_time)

  def test_success(self):
    """Fuzz time is reported when the session completes without a timeout."""
    self._test(False)

  def test_timeout(self):
    """Fuzz time is reported under the timeout label when it times out."""
    self._test(True)
class GetFuzzerMetadataFromOutputTest(unittest.TestCase):
  """Tests for get_fuzzer_metadata_from_output."""

  def test_no_metadata(self):
    """Output without any metadata lines yields an empty dict."""
    for data in ('abc\ndef\n123123', ''):
      self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {})

  def test_metadata(self):
    """Only well-formed 'metadata::key: value' lines are parsed."""
    data = ('abc\n'
            'def\n'
            'metadata:invalid: invalid\n'
            'metadat::invalid: invalid\n'
            'metadata::foo: bar\n'
            '123123\n'
            'metadata::blah: 1\n'
            'metadata::test:abcd\n'
            'metadata::test2: def\n')
    expected = {'blah': '1', 'test': 'abcd', 'test2': 'def', 'foo': 'bar'}
    self.assertDictEqual(
        fuzz_task.get_fuzzer_metadata_from_output(data), expected)
class GetRegressionTest(unittest.TestCase):
  """Tests for get_regression."""

  def setUp(self):
    helpers.patch(self, ['build_management.build_manager.is_custom_binary'])

  def _check(self, is_custom, one_time_crasher_flag, expected):
    """Stub is_custom_binary and verify get_regression's result."""
    self.mock.is_custom_binary.return_value = is_custom
    self.assertEqual(expected, fuzz_task.get_regression(one_time_crasher_flag))

  def test_one_time_crasher(self):
    """One-time crashers get 'NA': no regression range applies."""
    self._check(is_custom=False, one_time_crasher_flag=True, expected='NA')

  def test_custom_binary(self):
    """Custom binaries get 'NA': no revision history to bisect."""
    self._check(is_custom=True, one_time_crasher_flag=False, expected='NA')

  def test_reproducible_non_custom_binary(self):
    """Reproducible crashes on non-custom binaries get an empty range."""
    self._check(is_custom=False, one_time_crasher_flag=False, expected='')
class GetFixedOrMinimizedKeyTest(unittest.TestCase):
  """Tests for get_fixed_or_minimized_key."""

  def test_one_time_crasher(self):
    """One-time crashers map to the sentinel 'NA' key."""
    result = fuzz_task.get_fixed_or_minimized_key(True)
    self.assertEqual('NA', result)

  def test_reproducible(self):
    """Reproducible crashes map to an empty key."""
    result = fuzz_task.get_fixed_or_minimized_key(False)
    self.assertEqual('', result)
class CrashInitTest(fake_filesystem_unittest.TestCase):
  """Test Crash.__init__ (via Crash.from_testcase_manager_crash)."""

  def setUp(self):
    helpers.patch(self, [
        'chrome.crash_uploader.FileMetadataInfo',
        'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
        'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
        'bot.testcase_manager.get_additional_command_line_flags',
        'bot.testcase_manager.get_command_line_for_application',
        'base.utils.get_crash_stacktrace_output',
        'crash_analysis.crash_analyzer.ignore_stacktrace',
        'crash_analysis.crash_analyzer.is_security_issue',
    ])
    helpers.patch_environ(self)
    test_utils.set_up_pyfakefs(self)
    self.mock.get_command_line_for_application.return_value = 'cmd'
    # Canned stack-analysis result returned by the mocked get_crash_data.
    dummy_state = stack_analyzer.StackAnalyzerState()
    dummy_state.crash_type = 'type'
    dummy_state.crash_address = 'address'
    dummy_state.crash_state = 'state'
    dummy_state.crash_stacktrace = 'orig_trace'
    dummy_state.frames = ['frame 1', 'frame 2']
    self.mock.get_crash_data.return_value = dummy_state
    self.mock.get_crash_stacktrace_output.return_value = 'trace'
    self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
        'fuzzed_key', True, 'absolute_path', 'archive_filename')
    environment.set_value('FILTER_FUNCTIONAL_BUGS', False)
    # Unsymbolized stacktrace file on the fake filesystem.
    with open('/stack_file_path', 'w') as f:
      f.write('unsym')

  def test_error(self):
    """Test failing to reading stacktrace file."""
    # A missing stacktrace file makes Crash construction return None.
    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
                               '/no_stack_file'))
    self.assertIsNone(crash)

  def _test_crash(self, should_be_ignored, security_flag):
    """Build a Crash from a testcase_manager.Crash and verify every field.

    Args:
      should_be_ignored: Value the mocked ignore_stacktrace returns.
      security_flag: Value the mocked is_security_issue returns.

    Returns:
      The constructed fuzz_task.Crash.
    """
    # Reset mocks so per-call assertions below only see this invocation.
    self.mock.get_command_line_for_application.reset_mock()
    self.mock.get_crash_data.reset_mock()
    self.mock.get_crash_stacktrace_output.reset_mock()
    self.mock.is_security_issue.reset_mock()
    self.mock.ignore_stacktrace.reset_mock()
    self.mock.is_security_issue.return_value = security_flag
    self.mock.ignore_stacktrace.return_value = should_be_ignored
    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
                               '/stack_file_path'))
    # Fields copied straight from the testcase_manager.Crash.
    self.assertEqual('dir/path-http-name', crash.file_path)
    self.assertEqual(123, crash.crash_time)
    self.assertEqual(11, crash.return_code)
    self.assertListEqual(['res'], crash.resource_list)
    self.assertEqual('ges', crash.gestures)
    # '-http-' in the file name turns on http_flag.
    self.assertEqual('path-http-name', crash.filename)
    self.assertTrue(crash.http_flag)
    self.assertEqual('cmd', crash.application_command_line)
    self.mock.get_command_line_for_application.assert_called_once_with(
        'dir/path-http-name', needs_http=True)
    # Raw file content is kept as the unsymbolized stacktrace.
    self.assertEqual('unsym', crash.unsymbolized_crash_stacktrace)
    # Fields derived from the mocked stack analysis.
    self.assertEqual('type', crash.crash_type)
    self.assertEqual('address', crash.crash_address)
    self.assertEqual('state', crash.crash_state)
    self.assertListEqual(['frame 1', 'frame 2'], crash.crash_frames)
    self.mock.get_crash_data.assert_called_once_with('unsym')
    self.assertEqual('trace', crash.crash_stacktrace)
    self.mock.get_crash_stacktrace_output.assert_called_once_with(
        'cmd', 'orig_trace', 'unsym')
    self.assertEqual(security_flag, crash.security_flag)
    self.mock.is_security_issue.assert_called_once_with('unsym', 'type',
                                                        'address')
    # The dedup key combines type, state and security flag.
    self.assertEqual('type,state,%s' % security_flag, crash.key)
    self.assertEqual(should_be_ignored, crash.should_be_ignored)
    self.mock.ignore_stacktrace.assert_called_once_with('orig_trace')
    # Not archived yet: fuzzed_key only exists after
    # archive_testcase_in_blobstore().
    self.assertFalse(hasattr(crash, 'fuzzed_key'))
    return crash

  def _test_validity_and_get_functional_crash(self):
    """Test validity of different crashes and return functional crash."""
    security_crash = self._test_crash(
        should_be_ignored=False, security_flag=True)
    self.assertIsNone(security_crash.get_error())
    self.assertTrue(security_crash.is_valid())
    ignored_crash = self._test_crash(should_be_ignored=True, security_flag=True)
    self.assertIn('False crash', ignored_crash.get_error())
    self.assertFalse(ignored_crash.is_valid())
    functional_crash = self._test_crash(
        should_be_ignored=False, security_flag=False)
    return functional_crash

  def test_valid_functional_bug(self):
    """Test valid because of functional bug."""
    functional_crash = self._test_validity_and_get_functional_crash()
    self.assertIsNone(functional_crash.get_error())
    self.assertTrue(functional_crash.is_valid())

  def test_invalid_functional_bug(self):
    """Test invalid because of functional bug."""
    # With FILTER_FUNCTIONAL_BUGS set, non-security crashes become invalid.
    environment.set_value('FILTER_FUNCTIONAL_BUGS', True)
    functional_crash = self._test_validity_and_get_functional_crash()
    self.assertIn('Functional crash', functional_crash.get_error())
    self.assertFalse(functional_crash.is_valid())

  def test_hydrate_fuzzed_key(self):
    """Test hydrating fuzzed_key."""
    crash = self._test_crash(should_be_ignored=False, security_flag=True)
    self.assertFalse(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())
    crash.archive_testcase_in_blobstore()
    self.assertTrue(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())
    # Values come from the mocked archive_testcase_and_dependencies_in_gcs.
    self.assertEqual('fuzzed_key', crash.fuzzed_key)
    self.assertTrue(crash.archived)
    self.assertEqual('absolute_path', crash.absolute_path)
    self.assertEqual('archive_filename', crash.archive_filename)

  def test_hydrate_fuzzed_key_failure(self):
    """Test fail to hydrate fuzzed_key."""
    # Simulate the GCS archive step failing.
    self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (None,
                                                                       False,
                                                                       None,
                                                                       None)
    crash = self._test_crash(should_be_ignored=False, security_flag=True)
    self.assertFalse(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())
    crash.archive_testcase_in_blobstore()
    # Archiving was attempted but failed, so the crash becomes invalid.
    self.assertTrue(crash.is_archived())
    self.assertIn('Unable to store testcase in blobstore', crash.get_error())
    self.assertFalse(crash.is_valid())
    self.assertIsNone(crash.fuzzed_key)
    self.assertFalse(crash.archived)
    self.assertIsNone(crash.absolute_path)
    self.assertIsNone(crash.archive_filename)

  def test_args_from_testcase_manager(self):
    """Test args from testcase_manager.Crash."""
    testcase_manager_crash = testcase_manager.Crash('path', 0, 0, [], [],
                                                    '/stack_file_path')
    self.mock.get_additional_command_line_flags.return_value = 'minimized'
    environment.set_value('APP_ARGS', 'app')
    crash = fuzz_task.Crash.from_testcase_manager_crash(testcase_manager_crash)
    # APP_ARGS and the additional flags are joined with a space.
    self.assertEqual('app minimized', crash.arguments)
class CrashGroupTest(unittest.TestCase):
  """Test CrashGroup."""

  def setUp(self):
    helpers.patch(self, [
        'bot.tasks.fuzz_task.find_main_crash',
        'datastore.data_handler.find_testcase',
        'datastore.data_handler.get_project_name',
    ])
    self.mock.get_project_name.return_value = 'some_project'
    self.crashes = [self._make_crash('g1'), self._make_crash('g2')]
    self.context = mock.MagicMock(
        test_timeout=99, fuzzer_name='test', fuzz_target=None)
    self.reproducible_testcase = self._make_testcase(
        project_name='some_project',
        bug_information='',
        one_time_crasher_flag=False)
    self.unreproducible_testcase = self._make_testcase(
        project_name='some_project',
        bug_information='',
        one_time_crasher_flag=True)

  def _make_crash(self, gestures):
    """Return a mock crash whose identity fields are fixed except gestures."""
    crash = mock.MagicMock(
        crash_type='type',
        crash_state='state',
        security_flag=True,
        file_path='file_path',
        http_flag=True,
        gestures=gestures)
    return crash

  def _make_testcase(self,
                     project_name,
                     bug_information,
                     one_time_crasher_flag,
                     timestamp=None):
    """Make testcase.

    Args:
      project_name: Project the testcase belongs to.
      bug_information: Associated bug id ('' for none).
      one_time_crasher_flag: Whether the crash is unreproducible.
      timestamp: Testcase timestamp; defaults to the current time.
          (Bug fix: the previous `timestamp=datetime.datetime.now()` default
          was evaluated once at class-definition time, so every call shared
          the same stale timestamp. A None sentinel evaluates per call.)

    Returns:
      A data_types.Testcase populated with the given fields.
    """
    if timestamp is None:
      timestamp = datetime.datetime.now()
    testcase = data_types.Testcase()
    testcase.timestamp = timestamp
    testcase.one_time_crasher_flag = one_time_crasher_flag
    testcase.bug_information = bug_information
    testcase.project_name = project_name
    return testcase

  def test_no_existing_testcase(self):
    """is_new=True and should_create_testcase=True when there's no existing
        testcase."""
    self.mock.find_testcase.return_value = None
    self.mock.find_main_crash.return_value = self.crashes[0], True
    group = fuzz_task.CrashGroup(self.crashes, self.context)
    self.assertTrue(group.should_create_testcase())
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertIsNone(group.existing_testcase)
    self.assertEqual(self.crashes[0], group.main_crash)
    self.assertTrue(group.is_new())

  def test_has_existing_reproducible_testcase(self):
    """should_create_testcase=False when there's an existing reproducible
        testcase."""
    self.mock.find_testcase.return_value = self.reproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], True)
    group = fuzz_task.CrashGroup(self.crashes, self.context)
    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertFalse(group.should_create_testcase())
    self.assertTrue(group.has_existing_reproducible_testcase())

  def test_reproducible_crash(self):
    """should_create_testcase=True when the group is reproducible."""
    self.mock.find_testcase.return_value = self.unreproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], False)
    group = fuzz_task.CrashGroup(self.crashes, self.context)
    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertTrue(group.should_create_testcase())
    self.assertFalse(group.has_existing_reproducible_testcase())
    self.assertFalse(group.one_time_crasher_flag)

  def test_has_existing_unreproducible_testcase(self):
    """should_create_testcase=False when the unreproducible testcase already
        exists."""
    self.mock.find_testcase.return_value = self.unreproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], True)
    group = fuzz_task.CrashGroup(self.crashes, self.context)
    self.assertFalse(group.should_create_testcase())
    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertFalse(group.has_existing_reproducible_testcase())
    self.assertTrue(group.one_time_crasher_flag)
class FindMainCrashTest(unittest.TestCase):
  """Test find_main_crash."""

  def setUp(self):
    helpers.patch(self, [
        'bot.testcase_manager.test_for_reproducibility',
    ])
    self.crashes = [
        self._make_crash('g1'),
        self._make_crash('g2'),
        self._make_crash('g3'),
        self._make_crash('g4')
    ]
    # Crashes considered reproducible by the fake below; tests set this.
    self.reproducible_crashes = []

    # pylint: disable=unused-argument
    def test_for_repro(fuzzer_name,
                       full_fuzzer_name,
                       file_path,
                       state,
                       security_flag,
                       test_timeout,
                       http_flag,
                       gestures,
                       arguments=None):
      """Mock test_for_reproducibility: reproducible iff gestures match one
      of self.reproducible_crashes."""
      for c in self.reproducible_crashes:
        if c.gestures == gestures:
          return True
      return False

    self.mock.test_for_reproducibility.side_effect = test_for_repro

  def _make_crash(self, gestures):
    """Return a mock crash distinguished only by its gestures."""
    crash = mock.MagicMock(
        file_path='file_path',
        crash_state='state',
        security_flag=True,
        test_timeout=999,
        gestures=gestures)
    return crash

  def test_reproducible_crash(self):
    """Find that the 2nd crash is reproducible."""
    for c in self.crashes:
      c.is_valid.return_value = True
    # crashes[0] is invalid, so reproduction starts from crashes[1].
    self.crashes[0].is_valid.return_value = False
    self.reproducible_crashes = [self.crashes[2]]
    self.assertEqual((self.crashes[2], False),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))
    # Archiving stops once a reproducible main crash is found, so
    # crashes[3] is never archived.
    self.crashes[0].archive_testcase_in_blobstore.assert_called_once_with()
    self.crashes[1].archive_testcase_in_blobstore.assert_called_once_with()
    self.crashes[2].archive_testcase_in_blobstore.assert_called_once_with()
    self.crashes[3].archive_testcase_in_blobstore.assert_not_called()
    # Calls for self.crashes[1] and self.crashes[2].
    self.assertEqual(2, self.mock.test_for_reproducibility.call_count)

  def test_unreproducible_crash(self):
    """No reproducible crash. Find the first valid one."""
    for c in self.crashes:
      c.is_valid.return_value = True
    self.crashes[0].is_valid.return_value = False
    self.reproducible_crashes = []
    # (crash, True) means the chosen main crash is a one-time crasher.
    self.assertEqual((self.crashes[1], True),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))
    for c in self.crashes:
      c.archive_testcase_in_blobstore.assert_called_once_with()
    # Calls for every crash except self.crashes[0] because it's invalid.
    self.assertEqual(
        len(self.crashes) - 1, self.mock.test_for_reproducibility.call_count)

  def test_no_valid_crash(self):
    """No valid crash."""
    for c in self.crashes:
      c.is_valid.return_value = False
    self.reproducible_crashes = []
    self.assertEqual((None, None),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))
    for c in self.crashes:
      c.archive_testcase_in_blobstore.assert_called_once_with()
    # No valid crashes, so reproduction is never attempted.
    self.assertEqual(0, self.mock.test_for_reproducibility.call_count)
@test_utils.with_cloud_emulators('datastore')
class ProcessCrashesTest(fake_filesystem_unittest.TestCase):
  """Test process_crashes."""

  def setUp(self):
    helpers.patch(self, [
        'chrome.crash_uploader.get_symbolized_stack_bytes',
        'bot.tasks.task_creation.create_tasks',
        'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
        'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
        'build_management.revisions.get_real_revision',
        'bot.testcase_manager.get_command_line_for_application',
        'bot.testcase_manager.test_for_reproducibility',
        'base.utils.get_crash_stacktrace_output',
        'crash_analysis.crash_analyzer.ignore_stacktrace',
        'crash_analysis.crash_analyzer.is_security_issue',
        'datastore.data_handler.get_issue_tracker_name',
        'datastore.data_handler.get_project_name',
        'google.appengine.api.app_identity.get_application_id',
        'google_cloud_utils.big_query.Client.insert',
        'google_cloud_utils.big_query.get_api_client', 'time.sleep', 'time.time'
    ])
    test_utils.set_up_pyfakefs(self)
    # Fixed clock so BigQuery row keys ('...:987:N') are deterministic.
    self.mock.time.return_value = 987
    self.mock.get_issue_tracker_name.return_value = 'some_issue_tracker'
    self.mock.get_project_name.return_value = 'some_project'
    self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
        'fuzzed_key', True, 'absolute_path', 'archive_filename')

  def _make_crash(self, trace, state='state'):
    """Make a Crash whose stacktrace output is `trace` and crash state is
    `state`, backed by canned mock analysis results."""
    self.mock.get_real_revision.return_value = 'this.is.fake.ver'
    self.mock.get_command_line_for_application.return_value = 'cmd'
    dummy_state = stack_analyzer.StackAnalyzerState()
    dummy_state.crash_type = 'type'
    dummy_state.crash_address = 'address'
    dummy_state.crash_state = state
    dummy_state.crash_stacktrace = 'orig_trace'
    dummy_state.crash_frames = ['frame 1', 'frame 2']
    self.mock.get_crash_data.return_value = dummy_state
    self.mock.get_symbolized_stack_bytes.return_value = 'f00df00d'
    self.mock.get_crash_stacktrace_output.return_value = trace
    self.mock.is_security_issue.return_value = True
    self.mock.ignore_stacktrace.return_value = False
    with open('/stack_file_path', 'w') as f:
      f.write('unsym')
    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], ['ges'],
                               '/stack_file_path'))
    return crash

  def test_existing_unreproducible_testcase(self):
    """Test existing unreproducible testcase."""
    crashes = [self._make_crash('c1'), self._make_crash('c2')]
    self.mock.test_for_reproducibility.return_value = False
    # Seed the datastore with a matching unreproducible testcase on another
    # job, plus an UNREPRODUCIBLE variant for 'job'.
    existing_testcase = data_types.Testcase()
    existing_testcase.crash_stacktrace = 'existing'
    existing_testcase.crash_type = crashes[0].crash_type
    existing_testcase.crash_state = crashes[0].crash_state
    existing_testcase.security_flag = crashes[0].security_flag
    existing_testcase.one_time_crasher_flag = True
    existing_testcase.job_type = 'existing_job'
    existing_testcase.timestamp = datetime.datetime.now()
    existing_testcase.project_name = 'some_project'
    existing_testcase.put()
    variant = data_types.TestcaseVariant()
    variant.status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
    variant.job_type = 'job'
    variant.testcase_id = existing_testcase.key.id()
    variant.put()
    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name='some_project',
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            disable_ubsan=True,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    # Both crashes dedupe into the existing testcase, so nothing is new.
    self.assertEqual(0, new_crash_count)
    self.assertEqual(2, known_crash_count)
    self.assertEqual(1, len(groups))
    self.assertEqual(2, len(groups[0].crashes))
    self.assertFalse(groups[0].is_new())
    self.assertEqual(crashes[0].crash_type, groups[0].main_crash.crash_type)
    self.assertEqual(crashes[0].crash_state, groups[0].main_crash.crash_state)
    self.assertEqual(crashes[0].security_flag,
                     groups[0].main_crash.security_flag)
    # No new testcase is created; the existing one is untouched.
    testcases = list(data_types.Testcase.query())
    self.assertEqual(1, len(testcases))
    self.assertEqual('existing', testcases[0].crash_stacktrace)
    # The variant is upgraded from UNREPRODUCIBLE to FLAKY and populated.
    variant = data_handler.get_testcase_variant(existing_testcase.key.id(),
                                                'job')
    self.assertEqual(data_types.TestcaseVariantStatus.FLAKY, variant.status)
    self.assertEqual('fuzzed_key', variant.reproducer_key)
    self.assertEqual(1234, variant.revision)
    self.assertEqual('type', variant.crash_type)
    self.assertEqual('state', variant.crash_state)
    self.assertEqual(True, variant.security_flag)
    self.assertEqual(True, variant.is_similar)

  @parameterized.parameterized.expand(['some_project', 'chromium'])
  def test_create_many_groups(self, project_name):
    """Test creating many groups."""
    self.mock.get_project_name.return_value = project_name
    # Make the first BigQuery row of each batch fail to insert.
    self.mock.insert.return_value = {'insertErrors': [{'index': 0}]}
    # TODO(metzman): Add a separate test for strategies.
    r2_stacktrace = ('r2\ncf::fuzzing_strategies: value_profile\n')
    crashes = [
        self._make_crash('r1', state='reproducible1'),
        self._make_crash(r2_stacktrace, state='reproducible1'),
        self._make_crash('r3', state='reproducible1'),
        self._make_crash('r4', state='reproducible2'),
        self._make_crash('u1', state='unreproducible1'),
        self._make_crash('u2', state='unreproducible2'),
        self._make_crash('u3', state='unreproducible2'),
        self._make_crash('u4', state='unreproducible3')
    ]
    self.mock.test_for_reproducibility.side_effect = [
        False,  # For r1. It returns False. So, r1 is demoted.
        True,  # For r2. It returns True. So, r2 becomes primary for its group.
        True,  # For r4.
        False,  # For u1.
        False,  # For u2.
        False,  # For u3.
        False
    ]  # For u4.
    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name=project_name,
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            disable_ubsan=False,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    self.assertEqual(5, new_crash_count)
    self.assertEqual(3, known_crash_count)
    self.assertEqual(5, len(groups))
    self.assertEqual([
        'reproducible1', 'reproducible2', 'unreproducible1', 'unreproducible2',
        'unreproducible3'
    ], [group.main_crash.crash_state for group in groups])
    self.assertEqual([True, True, True, True, True],
                     [group.is_new() for group in groups])
    self.assertEqual([3, 1, 1, 2, 1], [len(group.crashes) for group in groups])
    testcases = list(data_types.Testcase.query())
    self.assertEqual(5, len(testcases))
    self.assertSetEqual(
        set([r2_stacktrace, 'r4', 'u1', 'u2', 'u4']),
        set(t.crash_stacktrace for t in testcases))
    # Only r2's stacktrace carries a fuzzing-strategies marker.
    self.assertSetEqual(
        set([
            '{"fuzzing_strategies": ["value_profile"]}', None, None, None, None
        ]), set(t.additional_metadata for t in testcases))
    # r2 is a reproducible crash, so r3 doesn't
    # invoke archive_testcase_in_blobstore. Therefore, the
    # archive_testcase_in_blobstore is called `len(crashes) - 1`.
    self.assertEqual(
        len(crashes) - 1,
        self.mock.archive_testcase_and_dependencies_in_gcs.call_count)
    # Check only the desired testcases were saved.
    actual_crash_infos = [group.main_crash.crash_info for group in groups]
    if project_name != 'chromium':
      # Crash report info is only uploaded for chromium.
      expected_crash_infos = [None] * len(actual_crash_infos)
    else:
      expected_saved_crash_info = crash_uploader.CrashReportInfo(
          product='Chrome_' + environment.platform().lower().capitalize(),
          version='this.is.fake.ver',
          serialized_crash_stack_frames='f00df00d')
      expected_crash_infos = [
          expected_saved_crash_info,  # r2 is main crash for group r1,r2,r3
          expected_saved_crash_info,  # r4 is main crash for its own group
          None,  # u1 is not reproducible
          None,  # u2, u3 are not reproducible
          None,  # u4 is not reproducible
      ]
    self.assertEqual(len(expected_crash_infos), len(actual_crash_infos))
    for expected, actual in zip(expected_crash_infos, actual_crash_infos):
      if not expected:
        self.assertIsNone(actual)
        continue
      self.assertEqual(expected.product, actual.product)
      self.assertEqual(expected.version, actual.version)
      self.assertEqual(expected.serialized_crash_stack_frames,
                       actual.serialized_crash_stack_frames)

    def _make_big_query_json(crash, reproducible_flag, new_flag, testcase_id):
      """Build the expected BigQuery row dict for a crash."""
      return {
          'crash_type': crash.crash_type,
          'crash_state': crash.crash_state,
          'created_at': 987,
          'platform': 'platform',
          'crash_time_in_ms': int(crash.crash_time * 1000),
          'parent_fuzzer_name': 'engine',
          'fuzzer_name': 'engine_binary',
          'job_type': 'job',
          'security_flag': crash.security_flag,
          'reproducible_flag': reproducible_flag,
          'revision': '1234',
          'new_flag': new_flag,
          'project': project_name,
          'testcase_id': testcase_id
      }

    def _get_testcase_id(crash):
      """Look up the stored testcase id matching a crash, or None."""
      rows = list(
          data_types.Testcase.query(
              data_types.Testcase.crash_type == crash.crash_type,
              data_types.Testcase.crash_state == crash.crash_state,
              data_types.Testcase.security_flag == crash.security_flag))
      if not rows:
        return None
      return str(rows[0].key.id())

    # Calls to write 5 groups of crashes to BigQuery.
    self.assertEqual(5, self.mock.insert.call_count)
    self.mock.insert.assert_has_calls([
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[0], True, False, None),
                '%s:bot:987:0' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[1], True, True,
                                     _get_testcase_id(crashes[1])),
                '%s:bot:987:1' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[2], True, False, None),
                '%s:bot:987:2' % crashes[0].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[3], True, True,
                                     _get_testcase_id(crashes[3])),
                '%s:bot:987:0' % crashes[3].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[4], False, True,
                                     _get_testcase_id(crashes[4])),
                '%s:bot:987:0' % crashes[4].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[5], False, True,
                                     _get_testcase_id(crashes[5])),
                '%s:bot:987:0' % crashes[5].key),
            big_query.Insert(
                _make_big_query_json(crashes[6], False, False, None),
                '%s:bot:987:1' % crashes[5].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[7], False, True,
                                     _get_testcase_id(crashes[7])),
                '%s:bot:987:0' % crashes[7].key)
        ]),
    ])
class WriteCrashToBigQueryTest(unittest.TestCase):
  """Tests for write_crashes_to_big_query."""

  def setUp(self):
    self.client = mock.Mock(spec_set=big_query.Client)
    helpers.patch(self, [
        'system.environment.get_value',
        'datastore.data_handler.get_project_name',
        'google_cloud_utils.big_query.Client',
        'time.time',
    ])
    monitor.metrics_store().reset_for_testing()

    self.mock.get_project_name.return_value = 'some_project'
    self.mock.get_value.return_value = 'bot'
    self.mock.Client.return_value = self.client
    # Fixed clock so row keys ('key:bot:99:N') are deterministic.
    self.mock.time.return_value = 99

    self.crashes = [
        self._make_crash('c1'),
        self._make_crash('c2'),
        self._make_crash('c3')
    ]
    newly_created_testcase = mock.MagicMock()
    newly_created_testcase.key.id.return_value = 't'
    self.group = mock.MagicMock(
        crashes=self.crashes,
        main_crash=self.crashes[0],
        one_time_crasher_flag=False,
        newly_created_testcase=newly_created_testcase)
    self.group.is_new.return_value = True

  def _create_context(self, job_type, platform_id):
    """Build a fuzz_task.Context for the given job type and platform."""
    return fuzz_task.Context(
        project_name='some_project',
        bot_name='bot',
        job_type=job_type,
        fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
        redzone=32,
        disable_ubsan=False,
        platform_id=platform_id,
        crash_revision=1234,
        fuzzer_name='engine',
        window_argument='windows_args',
        fuzzer_metadata={},
        testcases_metadata={},
        timeout_multiplier=1.0,
        test_timeout=5,
        thread_wait_timeout=6,
        data_directory='data')

  def _make_crash(self, state):
    """Return a mock crash distinguished only by its crash state."""
    return mock.Mock(
        crash_type='type',
        crash_state=state,
        crash_time=111,
        security_flag=True,
        key='key')

  def _json(self, job, platform, state, new_flag, testcase_id):
    """Build the expected BigQuery row dict for one crash."""
    return {
        'crash_type': 'type',
        'crash_state': state,
        'created_at': 99,
        'platform': platform,
        'crash_time_in_ms': 111000,
        'parent_fuzzer_name': 'engine',
        'fuzzer_name': 'engine_binary',
        'job_type': job,
        'security_flag': True,
        'reproducible_flag': True,
        'revision': '1234',
        'new_flag': new_flag,
        'project': 'some_project',
        'testcase_id': testcase_id
    }

  def _get_write_counts(self):
    """Return (success, failure) values of the BigQuery write counter."""
    success = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({'success': True})
    failure = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({'success': False})
    return success, failure

  def _assert_inserted_rows(self, job, platform):
    """Verify the exact batch of rows sent to the BigQuery client."""
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with([
        big_query.Insert(
            self._json(job, platform, 'c1', True, 't'), 'key:bot:99:0'),
        big_query.Insert(
            self._json(job, platform, 'c2', False, None), 'key:bot:99:1'),
        big_query.Insert(
            self._json(job, platform, 'c3', False, None), 'key:bot:99:2')
    ])

  def test_all_succeed(self):
    """All three rows are written successfully."""
    self.client.insert.return_value = {}
    fuzz_task.write_crashes_to_big_query(
        self.group, self._create_context('job', 'linux'))

    self.assertEqual((3, 0), self._get_write_counts())
    self._assert_inserted_rows('job', 'linux')

  def test_succeed(self):
    """One insert error is counted as a failure, the rest as successes."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    fuzz_task.write_crashes_to_big_query(
        self.group, self._create_context('job', 'linux'))

    self.assertEqual((2, 1), self._get_write_counts())
    self._assert_inserted_rows('job', 'linux')

  def test_chromeos_platform(self):
    """A ChromeOS job reports platform 'chrome' in the rows."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    fuzz_task.write_crashes_to_big_query(
        self.group, self._create_context('job_chromeos', 'linux'))

    self.assertEqual((2, 1), self._get_write_counts())
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with([
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c1', True, 't'),
            'key:bot:99:0'),
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c2', False, None),
            'key:bot:99:1'),
        big_query.Insert(
            self._json('job_chromeos', 'chrome', 'c3', False, None),
            'key:bot:99:2')
    ])

  def test_exception(self):
    """An insert exception counts every row as a failure."""
    self.client.insert.side_effect = Exception('error')
    fuzz_task.write_crashes_to_big_query(
        self.group, self._create_context('job', 'linux'))

    self.assertEqual((0, 3), self._get_write_counts())
class ConvertGroupsToCrashesTest(unittest.TestCase):
  """Test convert_groups_to_crashes.

  Bug fix: this class previously inherited from `object`, so the unittest
  runner never collected it (the test silently never ran), and the
  `self.assertEqual` it calls would not even exist on a plain object.
  """

  def test_convert(self):
    """Test converting groups into crash summary dicts."""
    groups = [
        mock.Mock(
            crashes=[mock.Mock(), mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t1', crash_state='s1', security_flag=True)),
        mock.Mock(
            crashes=[mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t2', crash_state='s2', security_flag=False)),
    ]
    groups[0].is_new.return_value = False
    groups[1].is_new.return_value = True
    self.assertEqual([
        {
            'is_new': False,
            'count': 2,
            'crash_type': 't1',
            'crash_state': 's1',
            'security_flag': True
        },
        {
            'is_new': True,
            'count': 1,
            'crash_type': 't2',
            'crash_state': 's2',
            'security_flag': False
        },
    ], fuzz_task.convert_groups_to_crashes(groups))
class TestCorpusSync(fake_filesystem_unittest.TestCase):
  """Test GcsCorpus syncing to/from GCS."""

  def setUp(self):
    helpers.patch(self, [
        'fuzzing.corpus_manager.FuzzTargetCorpus.rsync_to_disk',
        'fuzzing.corpus_manager.FuzzTargetCorpus.upload_files',
        'google_cloud_utils.storage.last_updated',
    ])
    helpers.patch_environ(self)
    os.environ['FAIL_RETRIES'] = '1'
    os.environ['CORPUS_BUCKET'] = 'bucket'
    self.mock.rsync_to_disk.return_value = True
    test_utils.set_up_pyfakefs(self)
    # /dir is the corpus directory, /dir1 holds the .child_sync marker.
    self.fs.create_dir('/dir')
    self.fs.create_dir('/dir1')

  def _write_corpus_files(self, *args, **kwargs):  # pylint: disable=unused-argument
    """Fake rsync_to_disk side effect that materializes two corpus files."""
    self.fs.create_file('/dir/a')
    self.fs.create_file('/dir/b')
    return True

  def test_sync(self):
    """Test corpus sync."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
    self.mock.rsync_to_disk.side_effect = self._write_corpus_files
    self.assertTrue(corpus.sync_from_gcs())
    # Syncing leaves a timestamp marker used to skip redundant syncs.
    self.assertTrue(os.path.exists('/dir1/.child_sync'))
    self.assertEqual(('/dir',), self.mock.rsync_to_disk.call_args[0][1:])
    # A file added after the sync is reported as new until uploaded.
    self.fs.create_file('/dir/c')
    self.assertListEqual(['/dir/c'], corpus.get_new_files())
    corpus.upload_files(corpus.get_new_files())
    self.assertEqual((['/dir/c'],), self.mock.upload_files.call_args[0][1:])
    self.assertListEqual([], corpus.get_new_files())

  def test_no_sync(self):
    """Test no corpus sync when bundle is not updated since last sync."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    # GCS bundle is older than the local sync marker: rsync is skipped.
    self.mock.last_updated.return_value = (
        datetime.datetime.utcnow() - datetime.timedelta(days=1))
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(0, self.mock.rsync_to_disk.call_count)

  def test_sync_with_failed_last_update(self):
    """Test corpus sync when failed to get last update info from gcs."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    # Unknown last-update time: sync anyway to be safe.
    self.mock.last_updated.return_value = None
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(1, self.mock.rsync_to_disk.call_count)
@test_utils.with_cloud_emulators('datastore')
class RecordFuzzTargetTest(unittest.TestCase):
  """Tests for record_fuzz_target."""

  def setUp(self):
    helpers.patch_environ(self)
    helpers.patch(self, [
        'base.utils.is_oss_fuzz',
        'base.utils.utcnow',
    ])
    self.mock.is_oss_fuzz.return_value = False
    # Freeze "now" so FuzzTargetJob.last_run can be asserted exactly.
    self.mock.utcnow.return_value = datetime.datetime(2018, 1, 1)

  def test_record_fuzz_target(self):
    """Test that record_fuzz_target works."""
    fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')

    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertDictEqual({
        'binary': 'child',
        'engine': 'libFuzzer',
        'project': 'test-project',
    }, fuzz_target.to_dict())

    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
    self.assertEqual('child', fuzz_target.project_qualified_name())

  def test_record_fuzz_target_existing(self):
    """Test that record_fuzz_target works when updating an existing entity."""
    data_types.FuzzTarget(
        binary='child', engine='libFuzzer', project='test-project').put()
    data_types.FuzzTargetJob(
        fuzz_target_name='libFuzzer_child',
        job='job',
        engine='libFuzzer',
        last_run=datetime.datetime(2017, 12, 31, 0, 0)).put()

    fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')

    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertDictEqual({
        'binary': 'child',
        'engine': 'libFuzzer',
        'project': 'test-project',
    }, fuzz_target.to_dict())

    # last_run must be advanced to the (mocked) current time.
    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
    self.assertEqual('child', fuzz_target.project_qualified_name())

  def test_record_fuzz_target_no_binary_name(self):
    """Test recording fuzz target with no binary."""
    # Passing None to binary_name is an error. We shouldn't create any
    # FuzzTargets as a result.
    fuzz_task.record_fuzz_target('libFuzzer', None, 'job')

    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertIsNone(fuzz_target)

    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertIsNone(job_mapping)

  @parameterized.parameterized.expand(['child', 'proj_child'])
  def test_record_fuzz_target_ossfuzz(self, binary_name):
    """Test that record_fuzz_target works with OSS-Fuzz projects."""
    # On OSS-Fuzz, target names are qualified with the project name taken
    # from the job's PROJECT_NAME environment.
    self.mock.is_oss_fuzz.return_value = True
    data_types.Job(name='job', environment_string='PROJECT_NAME = proj\n').put()

    fuzz_task.record_fuzz_target('libFuzzer', binary_name, 'job')

    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_proj_child').get()
    self.assertDictEqual({
        'binary': binary_name,
        'engine': 'libFuzzer',
        'project': 'proj',
    }, fuzz_target.to_dict())

    job_mapping = ndb.Key(data_types.FuzzTargetJob,
                          'libFuzzer_proj_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_proj_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_proj_child', fuzz_target.fully_qualified_name())
    self.assertEqual('proj_child', fuzz_target.project_qualified_name())
@test_utils.with_cloud_emulators('datastore')
class DoEngineFuzzingTest(fake_filesystem_unittest.TestCase):
  """do_engine_fuzzing tests."""

  def setUp(self):
    helpers.patch_environ(self)
    # Patch corpus sync, uploads and stats so the session runs fully
    # offline against the fake filesystem.
    helpers.patch(self, [
        'bot.fuzzers.engine_common.current_timestamp',
        'bot.tasks.fuzz_task.GcsCorpus.sync_from_gcs',
        'bot.tasks.fuzz_task.GcsCorpus.upload_files',
        'build_management.revisions.get_component_list',
        'bot.testcase_manager.upload_log',
        'bot.testcase_manager.upload_testcase',
        'metrics.fuzzer_stats.upload_stats',
    ])
    test_utils.set_up_pyfakefs(self)

    os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
    os.environ['FUZZ_INPUTS'] = '/fuzz-inputs'
    os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs-disk'
    os.environ['BUILD_DIR'] = '/build_dir'
    os.environ['MAX_TESTCASES'] = '2'
    os.environ['AUTOMATIC_LABELS'] = 'auto_label,auto_label1'
    os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component,auto_component1'

    # Fake build with the target binary plus its issue-metadata sidecar
    # files (.labels/.owners/.components).
    self.fs.create_file('/build_dir/test_target')
    self.fs.create_file(
        '/build_dir/test_target.labels', contents='label1\nlabel2')
    self.fs.create_file(
        '/build_dir/test_target.owners', contents='owner1@email.com')
    self.fs.create_file(
        '/build_dir/test_target.components', contents='component1\ncomponent2')
    self.fs.create_file('/input')

    self.mock.sync_from_gcs.return_value = True
    self.mock.upload_files.return_value = True
    self.mock.get_component_list.return_value = [{
        'component': 'component',
        'link_text': 'rev',
    }]
    # Fixed timestamp so the expected log time is deterministic (epoch).
    self.mock.current_timestamp.return_value = 0.0

  def test_basic(self):
    """Test basic fuzzing session."""
    session = fuzz_task.FuzzingSession('libFuzzer', 'libfuzzer_asan_test', 60)
    session.testcase_directory = os.environ['FUZZ_INPUTS']
    session.data_directory = '/data_dir'

    os.environ['FUZZ_TARGET'] = 'test_target'
    os.environ['APP_REVISION'] = '1'

    # Mock engine returning one crash per fuzz() call; MAX_TESTCASES=2
    # means two rounds, hence two crashes/logs/testcase uploads below.
    expected_crashes = [engine.Crash('/input', 'stack', ['args'], 1.0)]

    engine_impl = mock.Mock()
    engine_impl.name = 'libFuzzer'
    engine_impl.prepare.return_value = engine.FuzzOptions(
        '/corpus', ['arg'], {
            'strategy_1': 1,
            'strategy_2': 50,
        })
    engine_impl.fuzz.side_effect = lambda *_: engine.FuzzResult(
        'logs', ['cmd'], expected_crashes, {'stat': 1}, 42.0)

    crashes, fuzzer_metadata = session.do_engine_fuzzing(engine_impl)

    # Sidecar-file metadata is merged with the AUTOMATIC_* environment.
    self.assertDictEqual({
        'fuzzer_binary_name':
            'test_target',
        'issue_components':
            'component1,component2,auto_component,auto_component1',
        'issue_labels':
            'label1,label2,auto_label,auto_label1',
        'issue_owners':
            'owner1@email.com',
    }, fuzzer_metadata)

    log_time = datetime.datetime(1970, 1, 1, 0, 0)
    log_call = mock.call(
        'Component revisions (build r1):\n'
        'component: rev\n\n'
        'Return code: 1\n\n'
        'Command: cmd\nBot: None\nTime ran: 42.0\n\n'
        'logs\n'
        'cf::fuzzing_strategies: strategy_1:1,strategy_2:50', log_time)
    self.mock.upload_log.assert_has_calls([log_call, log_call])
    self.mock.upload_testcase.assert_has_calls([
        mock.call('/input', log_time),
        mock.call('/input', log_time),
    ])

    self.assertEqual(2, len(crashes))
    for i in range(2):
      self.assertEqual('/input', crashes[i].file_path)
      self.assertEqual(1, crashes[i].return_code)
      self.assertEqual('stack', crashes[i].unsymbolized_crash_stacktrace)
      self.assertEqual(1.0, crashes[i].crash_time)
      self.assertEqual('args', crashes[i].arguments)

    # Each round uploads a TestcaseRun stats record including the
    # strategies reported by prepare().
    for i in range(2):
      upload_args = self.mock.upload_stats.call_args_list[i][0][0]
      testcase_run = upload_args[0]
      self.assertDictEqual({
          'build_revision': 1,
          'command': ['cmd'],
          'fuzzer': u'libFuzzer_test_target',
          'job': 'libfuzzer_asan_test',
          'kind': 'TestcaseRun',
          'stat': 1,
          'strategy_strategy_1': 1,
          'strategy_strategy_2': 50,
          'timestamp': 0.0,
      }, testcase_run.data)
class UntrustedRunEngineFuzzerTest(
    untrusted_runner_helpers.UntrustedRunnerIntegrationTest):
  """Engine fuzzing tests for untrusted."""

  def setUp(self):
    """Set up."""
    super(UntrustedRunEngineFuzzerTest, self).setUp()
    environment.set_value('JOB_NAME', 'libfuzzer_asan_job')

    # Job whose build/revision URLs point at the public test-data bucket.
    job = data_types.Job(
        name='libfuzzer_asan_job',
        environment_string=(
            'RELEASE_BUILD_BUCKET_PATH = '
            'gs://clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-([0-9]+).zip\n'
            'REVISION_VARS_URL = https://commondatastorage.googleapis.com/'
            'clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-%s.srcmap.json\n'))
    job.put()

    self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS'))
    environment.set_value('USE_MINIJAIL', False)

  def tearDown(self):
    super(UntrustedRunEngineFuzzerTest, self).tearDown()
    shutil.rmtree(self.temp_dir, ignore_errors=True)

  def test_run_engine_fuzzer(self):
    """Test running engine fuzzer."""
    self._setup_env(job_type='libfuzzer_asan_job')
    environment.set_value('FUZZ_TEST_TIMEOUT', 3600)
    build_manager.setup_build()

    corpus_directory = os.path.join(self.temp_dir, 'corpus')
    testcase_directory = os.path.join(self.temp_dir, 'artifacts')
    # Directories must exist on the untrusted worker's filesystem.
    os.makedirs(file_host.rebase_to_worker_root(corpus_directory))
    os.makedirs(file_host.rebase_to_worker_root(testcase_directory))

    result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(
        libfuzzer_engine.LibFuzzerEngine(), 'test_fuzzer', corpus_directory,
        testcase_directory)
    self.assertIn(
        'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
        result.logs)
    self.assertEqual(1, len(result.crashes))
    # Crash artifacts are copied back under the trusted host's ROOT_DIR.
    self.assertTrue(result.crashes[0].input_path.startswith(
        os.environ['ROOT_DIR']))
    self.assertTrue(os.path.exists(result.crashes[0].input_path))
    self.assertIsInstance(result.stats.get('number_of_executed_units'), int)
    self.assertIsInstance(result.stats.get('oom_count'), int)
    self.assertIsInstance(
        result.stats.get('strategy_selection_method'), six.string_types)

    self.assertDictEqual({'fuzzer_binary_name': 'test_fuzzer'}, fuzzer_metadata)
class AddIssueMetadataFromEnvironmentTest(unittest.TestCase):
  """Tests for _add_issue_metadata_from_environment."""

  def setUp(self):
    helpers.patch_environ(self)

  def test_add_no_existing(self):
    """Test adding issue metadata when there are none existing."""
    # Both the base variable and its numbered variants (_1, ...) are read.
    os.environ['AUTOMATIC_LABELS'] = 'auto_label'
    os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1'
    os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component'
    os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1'

    metadata = {}
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_components': 'auto_component,auto_component1',
        'issue_labels': 'auto_label,auto_label1',
    }, metadata)

  def test_add_append(self):
    """Test adding issue metadata when there are already existing metadata."""
    os.environ['AUTOMATIC_LABELS'] = 'auto_label'
    os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1'
    os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component'
    os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1'

    # Existing values are preserved; environment values are appended.
    metadata = {
        'issue_components': 'existing_component',
        'issue_labels': 'existing_label'
    }
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_components':
            'existing_component,auto_component,auto_component1',
        'issue_labels':
            'existing_label,auto_label,auto_label1',
    }, metadata)

  def test_add_numeric(self):
    """Tests adding a numeric label."""
    # Numeric-looking values must survive as strings, not be coerced.
    os.environ['AUTOMATIC_LABELS'] = '123'

    metadata = {}
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_labels': '123',
    }, metadata)
| 36.738372 | 84 | 0.672153 |
from builtins import object
from builtins import range
import datetime
import mock
import os
import parameterized
import shutil
import tempfile
import time
import unittest
from pyfakefs import fake_filesystem_unittest
import six
from base import utils
from bot import testcase_manager
from bot.fuzzers import engine
from bot.fuzzers.libFuzzer import engine as libfuzzer_engine
from bot.tasks import fuzz_task
from bot.untrusted_runner import file_host
from build_management import build_manager
from chrome import crash_uploader
from crash_analysis.stack_parsing import stack_analyzer
from datastore import data_handler
from datastore import data_types
from datastore import ndb
from google_cloud_utils import big_query
from metrics import monitor
from metrics import monitoring_metrics
from system import environment
from tests.test_libs import helpers
from tests.test_libs import test_utils
from tests.test_libs import untrusted_runner_helpers
class TrackFuzzerRunResultTest(unittest.TestCase):
  """Tests for _track_fuzzer_run_result metrics."""

  def setUp(self):
    # Start from a clean metrics store so counters are exact.
    monitor.metrics_store().reset_for_testing()

  def test_fuzzer_run_result(self):
    """Return-code counts and testcase-count ratios are tracked."""
    # Arguments: (fuzzer, generated_testcase_count, expected_testcase_count,
    # return_code) — inferred from the assertions below; TODO confirm against
    # fuzz_task._track_fuzzer_run_result's signature.
    fuzz_task._track_fuzzer_run_result('name', 10, 100, 2)
    fuzz_task._track_fuzzer_run_result('name', 100, 200, 2)
    fuzz_task._track_fuzzer_run_result('name', 1000, 2000, 2)
    fuzz_task._track_fuzzer_run_result('name', 1000, 500, 0)
    fuzz_task._track_fuzzer_run_result('name', 0, 1000, -1)
    fuzz_task._track_fuzzer_run_result('name', 0, 0, 2)

    self.assertEqual(
        4,
        monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
            'fuzzer': 'name',
            'return_code': 2
        }))
    self.assertEqual(
        1,
        monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
            'fuzzer': 'name',
            'return_code': 0
        }))
    self.assertEqual(
        1,
        monitoring_metrics.FUZZER_RETURN_CODE_COUNT.get({
            'fuzzer': 'name',
            'return_code': -1
        }))

    # Ratios recorded: 0.1 + 0.5 + 0.5 + 2.0 + 0.0 = 3.1 over 5 samples
    # (the 0/0 call contributes no ratio).
    testcase_count_ratio = (
        monitoring_metrics.FUZZER_TESTCASE_COUNT_RATIO.get({
            'fuzzer': 'name'
        }))
    self.assertEqual(3.1, testcase_count_ratio.sum)
    self.assertEqual(5, testcase_count_ratio.count)

    # Histogram: ratios 0.0, 0.1, 0.5 (twice) and 2.0 land in buckets
    # 1, 3, 11 and 21 respectively.
    expected_buckets = [0 for _ in range(22)]
    expected_buckets[1] = 1
    expected_buckets[3] = 1
    expected_buckets[11] = 2
    expected_buckets[21] = 1
    self.assertListEqual(expected_buckets, testcase_count_ratio.buckets)
class TrackBuildRunResultTest(unittest.TestCase):
  """Tests for _track_build_run_result metrics."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def test_build_run_result(self):
    """Bad-build counts are keyed by job name and bad_build flag."""
    fuzz_task._track_build_run_result('name', 10000, True)
    fuzz_task._track_build_run_result('name', 10001, True)
    fuzz_task._track_build_run_result('name', 10002, False)

    self.assertEqual(
        2,
        monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
            'job': 'name',
            'bad_build': True
        }))
    self.assertEqual(
        1,
        monitoring_metrics.JOB_BAD_BUILD_COUNT.get({
            'job': 'name',
            'bad_build': False
        }))
class TrackTestcaseRunResultTest(unittest.TestCase):
  """Tests for _track_testcase_run_result metrics."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def test_testcase_run_result(self):
    """New/known crash counts accumulate per job and per fuzzer."""
    fuzz_task._track_testcase_run_result('fuzzer', 'job', 2, 5)
    fuzz_task._track_testcase_run_result('fuzzer', 'job', 5, 10)

    # new counts: 2 + 5 = 7; known counts: 5 + 10 = 15.
    self.assertEqual(7,
                     monitoring_metrics.JOB_NEW_CRASH_COUNT.get({
                         'job': 'job'
                     }))
    self.assertEqual(
        15, monitoring_metrics.JOB_KNOWN_CRASH_COUNT.get({
            'job': 'job'
        }))
    self.assertEqual(
        7, monitoring_metrics.FUZZER_NEW_CRASH_COUNT.get({
            'fuzzer': 'fuzzer'
        }))
    self.assertEqual(
        15, monitoring_metrics.FUZZER_KNOWN_CRASH_COUNT.get({
            'fuzzer': 'fuzzer'
        }))
class TruncateFuzzerOutputTest(unittest.TestCase):
  """Tests for truncate_fuzzer_output."""

  def test_no_truncation(self):
    """Output already within the limit is returned unchanged."""
    self.assertEqual('aaaa', fuzz_task.truncate_fuzzer_output('aaaa', 10))

  def test_truncation(self):
    """Long output keeps its head and tail around a truncation marker."""
    long_output = '123456xxxxxxxxxxxxxxxxxxxxxxxxxxx54321'
    truncated = fuzz_task.truncate_fuzzer_output(long_output, 28)
    self.assertEqual('123456\n...truncated...\n54321', truncated)

  def test_error(self):
    """A limit too small to hold the marker triggers an assertion."""
    with self.assertRaises(AssertionError):
      self.assertEqual(
          '', fuzz_task.truncate_fuzzer_output('123456xxxxxx54321', 10))
class TrackFuzzTimeTest(unittest.TestCase):
  """Tests for the _TrackFuzzTime context manager."""

  def setUp(self):
    monitor.metrics_store().reset_for_testing()

  def _test(self, timeout):
    """Run the tracker over a mocked 5-second window and check the metric."""
    time_module = helpers.MockTime()
    with fuzz_task._TrackFuzzTime('fuzzer', 'job', time_module) as tracker:
      time_module.advance(5)
      tracker.timeout = timeout

    fuzzer_total_time = monitoring_metrics.FUZZER_TOTAL_FUZZ_TIME.get({
        'fuzzer': 'fuzzer',
        'timeout': timeout
    })
    self.assertEqual(5, fuzzer_total_time)

  def test_success(self):
    self._test(False)

  def test_timeout(self):
    self._test(True)
class GetFuzzerMetadataFromOutputTest(unittest.TestCase):
  """Tests for get_fuzzer_metadata_from_output."""

  def test_no_metadata(self):
    """Output without metadata:: markers yields an empty dict."""
    data = 'abc\ndef\n123123'
    self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {})

    data = ''
    self.assertDictEqual(fuzz_task.get_fuzzer_metadata_from_output(data), {})

  def test_metadata(self):
    """Only well-formed 'metadata::key:' lines are parsed; values are
    stripped and malformed markers are ignored."""
    data = ('abc\n'
            'def\n'
            'metadata:invalid: invalid\n'
            'metadat::invalid: invalid\n'
            'metadata::foo: bar\n'
            '123123\n'
            'metadata::blah: 1\n'
            'metadata::test:abcd\n'
            'metadata::test2: def\n')
    self.assertDictEqual(
        fuzz_task.get_fuzzer_metadata_from_output(data), {
            'blah': '1',
            'test': 'abcd',
            'test2': 'def',
            'foo': 'bar'
        })
class GetRegressionTest(unittest.TestCase):
  """Tests for get_regression."""

  def setUp(self):
    helpers.patch(self, ['build_management.build_manager.is_custom_binary'])

  def _get_regression(self, custom_binary, one_time_crasher_flag):
    """Call get_regression with the build type patched accordingly."""
    self.mock.is_custom_binary.return_value = custom_binary
    return fuzz_task.get_regression(one_time_crasher_flag)

  def test_one_time_crasher(self):
    """One-time crashers get 'NA' as their regression range."""
    self.assertEqual('NA', self._get_regression(False, True))

  def test_custom_binary(self):
    """Custom binaries get 'NA' regardless of reproducibility."""
    self.assertEqual('NA', self._get_regression(True, False))

  def test_reproducible_non_custom_binary(self):
    """Reproducible crashes on regular builds start with an empty range."""
    self.assertEqual('', self._get_regression(False, False))
class GetFixedOrMinimizedKeyTest(unittest.TestCase):
  """Tests for get_fixed_or_minimized_key."""

  def test_one_time_crasher(self):
    """One-time crashers are marked 'NA'."""
    result = fuzz_task.get_fixed_or_minimized_key(True)
    self.assertEqual('NA', result)

  def test_reproducible(self):
    """Reproducible crashes start with an empty key."""
    result = fuzz_task.get_fixed_or_minimized_key(False)
    self.assertEqual('', result)
class CrashInitTest(fake_filesystem_unittest.TestCase):
  """Tests for building fuzz_task.Crash from a testcase_manager.Crash."""

  def setUp(self):
    helpers.patch(self, [
        'chrome.crash_uploader.FileMetadataInfo',
        'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
        'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
        'bot.testcase_manager.get_additional_command_line_flags',
        'bot.testcase_manager.get_command_line_for_application',
        'base.utils.get_crash_stacktrace_output',
        'crash_analysis.crash_analyzer.ignore_stacktrace',
        'crash_analysis.crash_analyzer.is_security_issue',
    ])
    helpers.patch_environ(self)
    test_utils.set_up_pyfakefs(self)

    self.mock.get_command_line_for_application.return_value = 'cmd'
    # Canned stack-analysis result returned by the mocked get_crash_data.
    dummy_state = stack_analyzer.StackAnalyzerState()
    dummy_state.crash_type = 'type'
    dummy_state.crash_address = 'address'
    dummy_state.crash_state = 'state'
    dummy_state.crash_stacktrace = 'orig_trace'
    dummy_state.frames = ['frame 1', 'frame 2']
    self.mock.get_crash_data.return_value = dummy_state
    self.mock.get_crash_stacktrace_output.return_value = 'trace'
    self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
        'fuzzed_key', True, 'absolute_path', 'archive_filename')

    environment.set_value('FILTER_FUNCTIONAL_BUGS', False)

    # Unsymbolized stacktrace file read by Crash.from_testcase_manager_crash.
    with open('/stack_file_path', 'w') as f:
      f.write('unsym')

  def test_error(self):
    """A missing stacktrace file yields no Crash object."""
    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
                               '/no_stack_file'))
    self.assertIsNone(crash)

  def _test_crash(self, should_be_ignored, security_flag):
    """Build a Crash under the given analyzer verdicts and verify every
    derived field; returns the crash for further checks."""
    # Reset mocks so the assert_called_once_with checks below are exact
    # per invocation of this helper.
    self.mock.get_command_line_for_application.reset_mock()
    self.mock.get_crash_data.reset_mock()
    self.mock.get_crash_stacktrace_output.reset_mock()
    self.mock.is_security_issue.reset_mock()
    self.mock.ignore_stacktrace.reset_mock()

    self.mock.is_security_issue.return_value = security_flag
    self.mock.ignore_stacktrace.return_value = should_be_ignored

    crash = fuzz_task.Crash.from_testcase_manager_crash(
        testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], 'ges',
                               '/stack_file_path'))

    self.assertEqual('dir/path-http-name', crash.file_path)
    self.assertEqual(123, crash.crash_time)
    self.assertEqual(11, crash.return_code)
    self.assertListEqual(['res'], crash.resource_list)
    self.assertEqual('ges', crash.gestures)

    # '-http-' in the filename flips http_flag.
    self.assertEqual('path-http-name', crash.filename)
    self.assertTrue(crash.http_flag)

    self.assertEqual('cmd', crash.application_command_line)
    self.mock.get_command_line_for_application.assert_called_once_with(
        'dir/path-http-name', needs_http=True)

    self.assertEqual('unsym', crash.unsymbolized_crash_stacktrace)

    self.assertEqual('type', crash.crash_type)
    self.assertEqual('address', crash.crash_address)
    self.assertEqual('state', crash.crash_state)
    self.assertListEqual(['frame 1', 'frame 2'], crash.crash_frames)
    self.mock.get_crash_data.assert_called_once_with('unsym')

    self.assertEqual('trace', crash.crash_stacktrace)
    self.mock.get_crash_stacktrace_output.assert_called_once_with(
        'cmd', 'orig_trace', 'unsym')

    self.assertEqual(security_flag, crash.security_flag)
    self.mock.is_security_issue.assert_called_once_with('unsym', 'type',
                                                        'address')

    # Grouping key combines type, state and security flag.
    self.assertEqual('type,state,%s' % security_flag, crash.key)

    self.assertEqual(should_be_ignored, crash.should_be_ignored)
    self.mock.ignore_stacktrace.assert_called_once_with('orig_trace')

    # fuzzed_key only exists after archive_testcase_in_blobstore().
    self.assertFalse(hasattr(crash, 'fuzzed_key'))
    return crash

  def _test_validity_and_get_functional_crash(self):
    """Check validity of security/ignored crashes; return a functional one."""
    security_crash = self._test_crash(
        should_be_ignored=False, security_flag=True)
    self.assertIsNone(security_crash.get_error())
    self.assertTrue(security_crash.is_valid())

    ignored_crash = self._test_crash(should_be_ignored=True, security_flag=True)
    self.assertIn('False crash', ignored_crash.get_error())
    self.assertFalse(ignored_crash.is_valid())

    functional_crash = self._test_crash(
        should_be_ignored=False, security_flag=False)
    return functional_crash

  def test_valid_functional_bug(self):
    """Functional crashes are valid when functional-bug filtering is off."""
    functional_crash = self._test_validity_and_get_functional_crash()

    self.assertIsNone(functional_crash.get_error())
    self.assertTrue(functional_crash.is_valid())

  def test_invalid_functional_bug(self):
    """Functional crashes are invalid when FILTER_FUNCTIONAL_BUGS is set."""
    environment.set_value('FILTER_FUNCTIONAL_BUGS', True)
    functional_crash = self._test_validity_and_get_functional_crash()

    self.assertIn('Functional crash', functional_crash.get_error())
    self.assertFalse(functional_crash.is_valid())

  def test_hydrate_fuzzed_key(self):
    """Archiving in blobstore populates fuzzed_key and archive fields."""
    crash = self._test_crash(should_be_ignored=False, security_flag=True)

    self.assertFalse(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())

    crash.archive_testcase_in_blobstore()
    self.assertTrue(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())

    self.assertEqual('fuzzed_key', crash.fuzzed_key)
    self.assertTrue(crash.archived)
    self.assertEqual('absolute_path', crash.absolute_path)
    self.assertEqual('archive_filename', crash.archive_filename)

  def test_hydrate_fuzzed_key_failure(self):
    """A failed blobstore archive invalidates the crash."""
    self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (None,
                                                                       False,
                                                                       None,
                                                                       None)
    crash = self._test_crash(should_be_ignored=False, security_flag=True)

    self.assertFalse(crash.is_archived())
    self.assertIsNone(crash.get_error())
    self.assertTrue(crash.is_valid())

    crash.archive_testcase_in_blobstore()
    self.assertTrue(crash.is_archived())
    self.assertIn('Unable to store testcase in blobstore', crash.get_error())
    self.assertFalse(crash.is_valid())

    self.assertIsNone(crash.fuzzed_key)
    self.assertFalse(crash.archived)
    self.assertIsNone(crash.absolute_path)
    self.assertIsNone(crash.archive_filename)

  def test_args_from_testcase_manager(self):
    """Crash arguments combine APP_ARGS with the minimized flags."""
    testcase_manager_crash = testcase_manager.Crash('path', 0, 0, [], [],
                                                    '/stack_file_path')
    self.mock.get_additional_command_line_flags.return_value = 'minimized'
    environment.set_value('APP_ARGS', 'app')

    crash = fuzz_task.Crash.from_testcase_manager_crash(testcase_manager_crash)
    self.assertEqual('app minimized', crash.arguments)
class CrashGroupTest(unittest.TestCase):
  """Tests for CrashGroup.

  Bug fixed here: _make_testcase used `timestamp=datetime.datetime.now()` as
  a default argument, which Python evaluates once at class-definition time.
  Every defaulted testcase therefore shared a single, increasingly stale
  timestamp. The default is now a None sentinel resolved per call, which is
  backward-compatible (explicit callers are unaffected).
  """

  def setUp(self):
    helpers.patch(self, [
        'bot.tasks.fuzz_task.find_main_crash',
        'datastore.data_handler.find_testcase',
        'datastore.data_handler.get_project_name',
    ])

    self.mock.get_project_name.return_value = 'some_project'
    self.crashes = [self._make_crash('g1'), self._make_crash('g2')]
    self.context = mock.MagicMock(
        test_timeout=99, fuzzer_name='test', fuzz_target=None)
    self.reproducible_testcase = self._make_testcase(
        project_name='some_project',
        bug_information='',
        one_time_crasher_flag=False)
    self.unreproducible_testcase = self._make_testcase(
        project_name='some_project',
        bug_information='',
        one_time_crasher_flag=True)

  def _make_crash(self, gestures):
    """Return a mock crash with fixed type/state and the given gestures."""
    crash = mock.MagicMock(
        crash_type='type',
        crash_state='state',
        security_flag=True,
        file_path='file_path',
        http_flag=True,
        gestures=gestures)
    return crash

  def _make_testcase(self,
                     project_name,
                     bug_information,
                     one_time_crasher_flag,
                     timestamp=None):
    """Build a Testcase entity.

    Args:
      project_name: value for Testcase.project_name.
      bug_information: value for Testcase.bug_information.
      one_time_crasher_flag: value for Testcase.one_time_crasher_flag.
      timestamp: creation time; defaults to the current time at call time.

    Returns:
      A populated data_types.Testcase (not persisted).
    """
    if timestamp is None:
      # Evaluated per call, not once at definition time.
      timestamp = datetime.datetime.now()
    testcase = data_types.Testcase()
    testcase.timestamp = timestamp
    testcase.one_time_crasher_flag = one_time_crasher_flag
    testcase.bug_information = bug_information
    testcase.project_name = project_name
    return testcase

  def test_no_existing_testcase(self):
    """A new crash signature produces a new, creatable group."""
    self.mock.find_testcase.return_value = None
    self.mock.find_main_crash.return_value = self.crashes[0], True

    group = fuzz_task.CrashGroup(self.crashes, self.context)

    self.assertTrue(group.should_create_testcase())
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)

    self.assertIsNone(group.existing_testcase)
    self.assertEqual(self.crashes[0], group.main_crash)
    self.assertTrue(group.is_new())

  def test_has_existing_reproducible_testcase(self):
    """An existing reproducible testcase suppresses creation."""
    self.mock.find_testcase.return_value = self.reproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], True)

    group = fuzz_task.CrashGroup(self.crashes, self.context)

    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertFalse(group.should_create_testcase())
    self.assertTrue(group.has_existing_reproducible_testcase())

  def test_reproducible_crash(self):
    """A now-reproducible crash supersedes an unreproducible testcase."""
    self.mock.find_testcase.return_value = self.unreproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], False)

    group = fuzz_task.CrashGroup(self.crashes, self.context)

    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertTrue(group.should_create_testcase())
    self.assertFalse(group.has_existing_reproducible_testcase())
    self.assertFalse(group.one_time_crasher_flag)

  def test_has_existing_unreproducible_testcase(self):
    """Unreproducible crash with existing unreproducible testcase: no-op."""
    self.mock.find_testcase.return_value = self.unreproducible_testcase
    self.mock.find_main_crash.return_value = (self.crashes[0], True)

    group = fuzz_task.CrashGroup(self.crashes, self.context)

    self.assertFalse(group.should_create_testcase())
    self.assertEqual(self.crashes[0].gestures, group.main_crash.gestures)
    self.mock.find_main_crash.assert_called_once_with(
        self.crashes, 'test', 'test', self.context.test_timeout)
    self.assertFalse(group.is_new())
    self.assertFalse(group.has_existing_reproducible_testcase())
    self.assertTrue(group.one_time_crasher_flag)
class FindMainCrashTest(unittest.TestCase):
  """Tests for find_main_crash."""

  def setUp(self):
    helpers.patch(self, [
        'bot.testcase_manager.test_for_reproducibility',
    ])
    self.crashes = [
        self._make_crash('g1'),
        self._make_crash('g2'),
        self._make_crash('g3'),
        self._make_crash('g4')
    ]
    # Crashes listed here are treated as reproducible by the mocked
    # test_for_reproducibility (matched by gestures).
    self.reproducible_crashes = []

    # pylint: disable=unused-argument
    def test_for_repro(fuzzer_name,
                       full_fuzzer_name,
                       file_path,
                       state,
                       security_flag,
                       test_timeout,
                       http_flag,
                       gestures,
                       arguments=None):
      """Mock implementation keyed only on the crash's gestures."""
      for c in self.reproducible_crashes:
        if c.gestures == gestures:
          return True
      return False

    self.mock.test_for_reproducibility.side_effect = test_for_repro

  def _make_crash(self, gestures):
    """Return a mock crash distinguished by its gestures."""
    crash = mock.MagicMock(
        file_path='file_path',
        crash_state='state',
        security_flag=True,
        test_timeout=999,
        gestures=gestures)
    return crash

  def test_reproducible_crash(self):
    """find_main_crash returns the first reproducible valid crash."""
    # Invalid crashes (crashes[0]) are skipped for reproduction testing
    # but still archived.
    for c in self.crashes:
      c.is_valid.return_value = True
    self.crashes[0].is_valid.return_value = False
    self.reproducible_crashes = [self.crashes[2]]

    self.assertEqual((self.crashes[2], False),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))

    self.crashes[0].archive_testcase_in_blobstore.assert_called_once_with()
    self.crashes[1].archive_testcase_in_blobstore.assert_called_once_with()
    self.crashes[2].archive_testcase_in_blobstore.assert_called_once_with()
    # Search stops at the first reproducible crash; crashes[3] untouched.
    self.crashes[3].archive_testcase_in_blobstore.assert_not_called()

    # Only crashes[1] and crashes[2] were tested (crashes[0] is invalid).
    self.assertEqual(2, self.mock.test_for_reproducibility.call_count)

  def test_unreproducible_crash(self):
    """With no reproducible crash, the first valid one wins (one-time)."""
    for c in self.crashes:
      c.is_valid.return_value = True
    self.crashes[0].is_valid.return_value = False
    self.reproducible_crashes = []

    self.assertEqual((self.crashes[1], True),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))

    for c in self.crashes:
      c.archive_testcase_in_blobstore.assert_called_once_with()

    # Every valid crash was tried (all but the invalid crashes[0]).
    self.assertEqual(
        len(self.crashes) - 1, self.mock.test_for_reproducibility.call_count)

  def test_no_valid_crash(self):
    """All-invalid input yields no main crash at all."""
    for c in self.crashes:
      c.is_valid.return_value = False
    self.reproducible_crashes = []

    self.assertEqual((None, None),
                     fuzz_task.find_main_crash(self.crashes, 'test', 'test',
                                               99))

    for c in self.crashes:
      c.archive_testcase_in_blobstore.assert_called_once_with()

    self.assertEqual(0, self.mock.test_for_reproducibility.call_count)
@test_utils.with_cloud_emulators('datastore')
class ProcessCrashesTest(fake_filesystem_unittest.TestCase):
def setUp(self):
helpers.patch(self, [
'chrome.crash_uploader.get_symbolized_stack_bytes',
'bot.tasks.task_creation.create_tasks',
'bot.tasks.setup.archive_testcase_and_dependencies_in_gcs',
'crash_analysis.stack_parsing.stack_analyzer.get_crash_data',
'build_management.revisions.get_real_revision',
'bot.testcase_manager.get_command_line_for_application',
'bot.testcase_manager.test_for_reproducibility',
'base.utils.get_crash_stacktrace_output',
'crash_analysis.crash_analyzer.ignore_stacktrace',
'crash_analysis.crash_analyzer.is_security_issue',
'datastore.data_handler.get_issue_tracker_name',
'datastore.data_handler.get_project_name',
'google.appengine.api.app_identity.get_application_id',
'google_cloud_utils.big_query.Client.insert',
'google_cloud_utils.big_query.get_api_client', 'time.sleep', 'time.time'
])
test_utils.set_up_pyfakefs(self)
self.mock.time.return_value = 987
self.mock.get_issue_tracker_name.return_value = 'some_issue_tracker'
self.mock.get_project_name.return_value = 'some_project'
self.mock.archive_testcase_and_dependencies_in_gcs.return_value = (
'fuzzed_key', True, 'absolute_path', 'archive_filename')
def _make_crash(self, trace, state='state'):
self.mock.get_real_revision.return_value = 'this.is.fake.ver'
self.mock.get_command_line_for_application.return_value = 'cmd'
dummy_state = stack_analyzer.StackAnalyzerState()
dummy_state.crash_type = 'type'
dummy_state.crash_address = 'address'
dummy_state.crash_state = state
dummy_state.crash_stacktrace = 'orig_trace'
dummy_state.crash_frames = ['frame 1', 'frame 2']
self.mock.get_crash_data.return_value = dummy_state
self.mock.get_symbolized_stack_bytes.return_value = 'f00df00d'
self.mock.get_crash_stacktrace_output.return_value = trace
self.mock.is_security_issue.return_value = True
self.mock.ignore_stacktrace.return_value = False
with open('/stack_file_path', 'w') as f:
f.write('unsym')
crash = fuzz_task.Crash.from_testcase_manager_crash(
testcase_manager.Crash('dir/path-http-name', 123, 11, ['res'], ['ges'],
'/stack_file_path'))
return crash
def test_existing_unreproducible_testcase(self):
crashes = [self._make_crash('c1'), self._make_crash('c2')]
self.mock.test_for_reproducibility.return_value = False
existing_testcase = data_types.Testcase()
existing_testcase.crash_stacktrace = 'existing'
existing_testcase.crash_type = crashes[0].crash_type
existing_testcase.crash_state = crashes[0].crash_state
existing_testcase.security_flag = crashes[0].security_flag
existing_testcase.one_time_crasher_flag = True
existing_testcase.job_type = 'existing_job'
existing_testcase.timestamp = datetime.datetime.now()
existing_testcase.project_name = 'some_project'
existing_testcase.put()
variant = data_types.TestcaseVariant()
variant.status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
variant.job_type = 'job'
variant.testcase_id = existing_testcase.key.id()
variant.put()
new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
crashes=crashes,
context=fuzz_task.Context(
project_name='some_project',
bot_name='bot',
job_type='job',
fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
redzone=111,
disable_ubsan=True,
platform_id='platform',
crash_revision=1234,
fuzzer_name='fuzzer',
window_argument='win_args',
fuzzer_metadata={},
testcases_metadata={},
timeout_multiplier=1,
test_timeout=2,
thread_wait_timeout=3,
data_directory='/data'))
self.assertEqual(0, new_crash_count)
self.assertEqual(2, known_crash_count)
self.assertEqual(1, len(groups))
self.assertEqual(2, len(groups[0].crashes))
self.assertFalse(groups[0].is_new())
self.assertEqual(crashes[0].crash_type, groups[0].main_crash.crash_type)
self.assertEqual(crashes[0].crash_state, groups[0].main_crash.crash_state)
self.assertEqual(crashes[0].security_flag,
groups[0].main_crash.security_flag)
testcases = list(data_types.Testcase.query())
self.assertEqual(1, len(testcases))
self.assertEqual('existing', testcases[0].crash_stacktrace)
variant = data_handler.get_testcase_variant(existing_testcase.key.id(),
'job')
self.assertEqual(data_types.TestcaseVariantStatus.FLAKY, variant.status)
self.assertEqual('fuzzed_key', variant.reproducer_key)
self.assertEqual(1234, variant.revision)
self.assertEqual('type', variant.crash_type)
self.assertEqual('state', variant.crash_state)
self.assertEqual(True, variant.security_flag)
self.assertEqual(True, variant.is_similar)
  @parameterized.parameterized.expand(['some_project', 'chromium'])
  def test_create_many_groups(self, project_name):
    """Test processing crashes that form multiple groups, for both a regular
    project and chromium (which additionally saves crash-report info)."""
    self.mock.get_project_name.return_value = project_name
    # Make the first row of every BigQuery batch fail so both success and
    # failure accounting paths are exercised.
    self.mock.insert.return_value = {'insertErrors': [{'index': 0}]}

    # TODO(metzman): Add a separate test for strategies.
    r2_stacktrace = ('r2\ncf::fuzzing_strategies: value_profile\n')

    # r* crashes are meant to be reproducible; u* crashes are not.
    crashes = [
        self._make_crash('r1', state='reproducible1'),
        self._make_crash(r2_stacktrace, state='reproducible1'),
        self._make_crash('r3', state='reproducible1'),
        self._make_crash('r4', state='reproducible2'),
        self._make_crash('u1', state='unreproducible1'),
        self._make_crash('u2', state='unreproducible2'),
        self._make_crash('u3', state='unreproducible2'),
        self._make_crash('u4', state='unreproducible3')
    ]

    self.mock.test_for_reproducibility.side_effect = [
        False,  # For r1. It returns False. So, r1 is demoted.
        True,  # For r2. It returns True. So, r2 becomes primary for its group.
        True,  # For r4.
        False,  # For u1.
        False,  # For u2.
        False,  # For u3.
        False
    ]  # For u4.

    new_crash_count, known_crash_count, groups = fuzz_task.process_crashes(
        crashes=crashes,
        context=fuzz_task.Context(
            project_name=project_name,
            bot_name='bot',
            job_type='job',
            fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
            redzone=111,
            disable_ubsan=False,
            platform_id='platform',
            crash_revision=1234,
            fuzzer_name='fuzzer',
            window_argument='win_args',
            fuzzer_metadata={},
            testcases_metadata={},
            timeout_multiplier=1,
            test_timeout=2,
            thread_wait_timeout=3,
            data_directory='/data'))
    self.assertEqual(5, new_crash_count)
    self.assertEqual(3, known_crash_count)

    # One group per distinct (state, security) pair; all of them are new.
    self.assertEqual(5, len(groups))
    self.assertEqual([
        'reproducible1', 'reproducible2', 'unreproducible1', 'unreproducible2',
        'unreproducible3'
    ], [group.main_crash.crash_state for group in groups])
    self.assertEqual([True, True, True, True, True],
                     [group.is_new() for group in groups])
    self.assertEqual([3, 1, 1, 2, 1], [len(group.crashes) for group in groups])

    testcases = list(data_types.Testcase.query())
    self.assertEqual(5, len(testcases))
    self.assertSetEqual(
        set([r2_stacktrace, 'r4', 'u1', 'u2', 'u4']),
        set(t.crash_stacktrace for t in testcases))

    # Only r2's stacktrace carried strategy info, so only its testcase gets
    # the fuzzing_strategies metadata.
    self.assertSetEqual(
        set([
            '{"fuzzing_strategies": ["value_profile"]}', None, None, None, None
        ]), set(t.additional_metadata for t in testcases))

    # r2 is a reproducible crash in r3's group, so r3 is never archived.
    self.assertEqual(
        len(crashes) - 1,
        self.mock.archive_testcase_and_dependencies_in_gcs.call_count)

    # Crash-report info is only saved for the chromium project, and only for
    # the reproducible groups.
    actual_crash_infos = [group.main_crash.crash_info for group in groups]
    if project_name != 'chromium':
      expected_crash_infos = [None] * len(actual_crash_infos)
    else:
      expected_saved_crash_info = crash_uploader.CrashReportInfo(
          product='Chrome_' + environment.platform().lower().capitalize(),
          version='this.is.fake.ver',
          serialized_crash_stack_frames='f00df00d')
      expected_crash_infos = [
          expected_saved_crash_info,  # reproducible1 (r2)
          expected_saved_crash_info,  # reproducible2 (r4)
          None,  # unreproducible1
          None,  # unreproducible2
          None,  # unreproducible3
      ]

    self.assertEqual(len(expected_crash_infos), len(actual_crash_infos))
    for expected, actual in zip(expected_crash_infos, actual_crash_infos):
      if not expected:
        self.assertIsNone(actual)
        continue

      self.assertEqual(expected.product, actual.product)
      self.assertEqual(expected.version, actual.version)
      self.assertEqual(expected.serialized_crash_stack_frames,
                       actual.serialized_crash_stack_frames)

    def _make_big_query_json(crash, reproducible_flag, new_flag, testcase_id):
      """Return the BigQuery row expected for |crash|."""
      return {
          'crash_type': crash.crash_type,
          'crash_state': crash.crash_state,
          'created_at': 987,
          'platform': 'platform',
          'crash_time_in_ms': int(crash.crash_time * 1000),
          'parent_fuzzer_name': 'engine',
          'fuzzer_name': 'engine_binary',
          'job_type': 'job',
          'security_flag': crash.security_flag,
          'reproducible_flag': reproducible_flag,
          'revision': '1234',
          'new_flag': new_flag,
          'project': project_name,
          'testcase_id': testcase_id
      }

    def _get_testcase_id(crash):
      """Return the id of the stored testcase matching |crash|, or None."""
      rows = list(
          data_types.Testcase.query(
              data_types.Testcase.crash_type == crash.crash_type,
              data_types.Testcase.crash_state == crash.crash_state,
              data_types.Testcase.security_flag == crash.security_flag))
      if not rows:
        return None
      return str(rows[0].key.id())

    # One insert call per group, batched by group membership.
    self.assertEqual(5, self.mock.insert.call_count)
    self.mock.insert.assert_has_calls([
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[0], True, False, None),
                '%s:bot:987:0' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[1], True, True,
                                     _get_testcase_id(crashes[1])),
                '%s:bot:987:1' % crashes[0].key),
            big_query.Insert(
                _make_big_query_json(crashes[2], True, False, None),
                '%s:bot:987:2' % crashes[0].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[3], True, True,
                                     _get_testcase_id(crashes[3])),
                '%s:bot:987:0' % crashes[3].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[4], False, True,
                                     _get_testcase_id(crashes[4])),
                '%s:bot:987:0' % crashes[4].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[5], False, True,
                                     _get_testcase_id(crashes[5])),
                '%s:bot:987:0' % crashes[5].key),
            big_query.Insert(
                _make_big_query_json(crashes[6], False, False, None),
                '%s:bot:987:1' % crashes[5].key)
        ]),
        mock.call(mock.ANY, [
            big_query.Insert(
                _make_big_query_json(crashes[7], False, True,
                                     _get_testcase_id(crashes[7])),
                '%s:bot:987:0' % crashes[7].key)
        ]),
    ])
class WriteCrashToBigQueryTest(unittest.TestCase):
  """Tests for fuzz_task.write_crashes_to_big_query.

  The four test methods previously triplicated the metric-reading and
  expected-insert construction; that logic is factored into the private
  helpers _write_group_and_get_counts and _expected_inserts. Assertions and
  behavior are unchanged.
  """

  def setUp(self):
    """Mock the BigQuery client, environment lookups and the clock."""
    self.client = mock.Mock(spec_set=big_query.Client)
    helpers.patch(self, [
        'system.environment.get_value',
        'datastore.data_handler.get_project_name',
        'google_cloud_utils.big_query.Client',
        'time.time',
    ])
    monitor.metrics_store().reset_for_testing()

    self.mock.get_project_name.return_value = 'some_project'
    self.mock.get_value.return_value = 'bot'
    self.mock.Client.return_value = self.client
    self.mock.time.return_value = 99
    self.crashes = [
        self._make_crash('c1'),
        self._make_crash('c2'),
        self._make_crash('c3')
    ]

    newly_created_testcase = mock.MagicMock()
    newly_created_testcase.key.id.return_value = 't'
    self.group = mock.MagicMock(
        crashes=self.crashes,
        main_crash=self.crashes[0],
        one_time_crasher_flag=False,
        newly_created_testcase=newly_created_testcase)
    self.group.is_new.return_value = True

  def _create_context(self, job_type, platform_id):
    """Return a fuzz_task.Context for |job_type| running on |platform_id|."""
    return fuzz_task.Context(
        project_name='some_project',
        bot_name='bot',
        job_type=job_type,
        fuzz_target=data_types.FuzzTarget(engine='engine', binary='binary'),
        redzone=32,
        disable_ubsan=False,
        platform_id=platform_id,
        crash_revision=1234,
        fuzzer_name='engine',
        window_argument='windows_args',
        fuzzer_metadata={},
        testcases_metadata={},
        timeout_multiplier=1.0,
        test_timeout=5,
        thread_wait_timeout=6,
        data_directory='data')

  def _make_crash(self, state):
    """Return a mock crash with the given crash |state|."""
    crash = mock.Mock(
        crash_type='type',
        crash_state=state,
        crash_time=111,
        security_flag=True,
        key='key')
    return crash

  def _json(self, job, platform, state, new_flag, testcase_id):
    """Return the BigQuery row expected for a crash with these attributes."""
    return {
        'crash_type': 'type',
        'crash_state': state,
        'created_at': 99,
        'platform': platform,
        'crash_time_in_ms': 111000,
        'parent_fuzzer_name': 'engine',
        'fuzzer_name': 'engine_binary',
        'job_type': job,
        'security_flag': True,
        'reproducible_flag': True,
        'revision': '1234',
        'new_flag': new_flag,
        'project': 'some_project',
        'testcase_id': testcase_id
    }

  def _write_group_and_get_counts(self, context):
    """Write self.group to BigQuery and return the (success, failure)
    values of the BIG_QUERY_WRITE_COUNT metric."""
    fuzz_task.write_crashes_to_big_query(self.group, context)

    success_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': True
    })
    failure_count = monitoring_metrics.BIG_QUERY_WRITE_COUNT.get({
        'success': False
    })
    return success_count, failure_count

  def _expected_inserts(self, job, platform):
    """Return the three Insert rows expected for self.crashes."""
    return [
        big_query.Insert(
            self._json(job, platform, 'c1', True, 't'), 'key:bot:99:0'),
        big_query.Insert(
            self._json(job, platform, 'c2', False, None), 'key:bot:99:1'),
        big_query.Insert(
            self._json(job, platform, 'c3', False, None), 'key:bot:99:2')
    ]

  def test_all_succeed(self):
    """Every row is inserted successfully."""
    self.client.insert.return_value = {}
    context = self._create_context('job', 'linux')
    success_count, failure_count = self._write_group_and_get_counts(context)

    self.assertEqual(3, success_count)
    self.assertEqual(0, failure_count)
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with(
        self._expected_inserts('job', 'linux'))

  def test_succeed(self):
    """One row fails; the other two are counted as successes."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    context = self._create_context('job', 'linux')
    success_count, failure_count = self._write_group_and_get_counts(context)

    self.assertEqual(2, success_count)
    self.assertEqual(1, failure_count)
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with(
        self._expected_inserts('job', 'linux'))

  def test_chromeos_platform(self):
    """ChromeOS jobs are reported with platform 'chrome' regardless of the
    bot's own platform id."""
    self.client.insert.return_value = {'insertErrors': [{'index': 1}]}
    context = self._create_context('job_chromeos', 'linux')
    success_count, failure_count = self._write_group_and_get_counts(context)

    self.assertEqual(2, success_count)
    self.assertEqual(1, failure_count)
    self.mock.Client.assert_called_once_with(
        dataset_id='main', table_id='crashes$19700101')
    self.client.insert.assert_called_once_with(
        self._expected_inserts('job_chromeos', 'chrome'))

  def test_exception(self):
    """An exception from the client marks every row as failed."""
    self.client.insert.side_effect = Exception('error')
    context = self._create_context('job', 'linux')
    success_count, failure_count = self._write_group_and_get_counts(context)

    self.assertEqual(0, success_count)
    self.assertEqual(3, failure_count)
class ConvertGroupsToCrashesTest(unittest.TestCase):
  """Tests for convert_groups_to_crashes.

  Bug fix: this class previously subclassed `object`, so unittest never
  collected it and `self.assertEqual` did not even exist on the instance —
  the test was silently dead. It must derive from unittest.TestCase.
  """

  def test_convert(self):
    """Groups are flattened into plain dicts with count and is_new flags."""
    groups = [
        mock.Mock(
            crashes=[mock.Mock(), mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t1', crash_state='s1', security_flag=True)),
        mock.Mock(
            crashes=[mock.Mock()],
            main_crash=mock.Mock(
                crash_type='t2', crash_state='s2', security_flag=False)),
    ]
    groups[0].is_new.return_value = False
    groups[1].is_new.return_value = True

    self.assertEqual([
        {
            'is_new': False,
            'count': 2,
            'crash_type': 't1',
            'crash_state': 's1',
            'security_flag': True
        },
        {
            'is_new': True,
            'count': 1,
            'crash_type': 't2',
            'crash_state': 's2',
            'security_flag': False
        },
    ], fuzz_task.convert_groups_to_crashes(groups))
class TestCorpusSync(fake_filesystem_unittest.TestCase):
  """Tests for GcsCorpus sync between GCS and the local (fake) filesystem."""

  def setUp(self):
    """Patch corpus-manager/storage calls and set up pyfakefs directories."""
    helpers.patch(self, [
        'fuzzing.corpus_manager.FuzzTargetCorpus.rsync_to_disk',
        'fuzzing.corpus_manager.FuzzTargetCorpus.upload_files',
        'google_cloud_utils.storage.last_updated',
    ])

    helpers.patch_environ(self)

    os.environ['FAIL_RETRIES'] = '1'
    os.environ['CORPUS_BUCKET'] = 'bucket'

    self.mock.rsync_to_disk.return_value = True
    test_utils.set_up_pyfakefs(self)

    self.fs.create_dir('/dir')
    self.fs.create_dir('/dir1')

  def _write_corpus_files(self, *args, **kwargs):
    """rsync_to_disk side effect that simulates downloaded corpus files."""
    self.fs.create_file('/dir/a')
    self.fs.create_file('/dir/b')
    return True

  def test_sync(self):
    """Sync pulls files down, tracks new local files, and uploads them."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')

    self.mock.rsync_to_disk.side_effect = self._write_corpus_files
    self.assertTrue(corpus.sync_from_gcs())
    self.assertTrue(os.path.exists('/dir1/.child_sync'))
    self.assertEqual(('/dir',), self.mock.rsync_to_disk.call_args[0][1:])
    self.fs.create_file('/dir/c')
    self.assertListEqual(['/dir/c'], corpus.get_new_files())

    corpus.upload_files(corpus.get_new_files())
    self.assertEqual((['/dir/c'],), self.mock.upload_files.call_args[0][1:])

    # Once uploaded, the file is no longer considered new.
    self.assertListEqual([], corpus.get_new_files())

  def test_no_sync(self):
    """No rsync happens when GCS hasn't changed since the last sync stamp."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    self.mock.last_updated.return_value = (
        datetime.datetime.utcnow() - datetime.timedelta(days=1))
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(0, self.mock.rsync_to_disk.call_count)

  def test_sync_with_failed_last_update(self):
    """A failed last-updated lookup (None) forces a full rsync."""
    corpus = fuzz_task.GcsCorpus('parent', 'child', '/dir', '/dir1')
    utils.write_data_to_file(time.time(), '/dir1/.child_sync')
    self.mock.last_updated.return_value = None
    self.assertTrue(corpus.sync_from_gcs())
    self.assertEqual(1, self.mock.rsync_to_disk.call_count)
@test_utils.with_cloud_emulators('datastore')
class RecordFuzzTargetTest(unittest.TestCase):
  """Tests for record_fuzz_target."""

  def setUp(self):
    """Pin the clock and default to a non-OSS-Fuzz deployment."""
    helpers.patch_environ(self)
    helpers.patch(self, [
        'base.utils.is_oss_fuzz',
        'base.utils.utcnow',
    ])
    self.mock.is_oss_fuzz.return_value = False
    self.mock.utcnow.return_value = datetime.datetime(2018, 1, 1)

  def test_record_fuzz_target(self):
    """Recording a target creates FuzzTarget and FuzzTargetJob entities."""
    fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')
    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertDictEqual({
        'binary': 'child',
        'engine': 'libFuzzer',
        'project': 'test-project',
    }, fuzz_target.to_dict())

    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
    self.assertEqual('child', fuzz_target.project_qualified_name())

  def test_record_fuzz_target_existing(self):
    """Recording an existing target updates its job mapping's last_run."""
    data_types.FuzzTarget(
        binary='child', engine='libFuzzer', project='test-project').put()
    data_types.FuzzTargetJob(
        fuzz_target_name='libFuzzer_child',
        job='job',
        engine='libFuzzer',
        last_run=datetime.datetime(2017, 12, 31, 0, 0)).put()

    fuzz_task.record_fuzz_target('libFuzzer', 'child', 'job')
    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertDictEqual({
        'binary': 'child',
        'engine': 'libFuzzer',
        'project': 'test-project',
    }, fuzz_target.to_dict())

    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_child', fuzz_target.fully_qualified_name())
    self.assertEqual('child', fuzz_target.project_qualified_name())

  def test_record_fuzz_target_no_binary_name(self):
    """A missing binary name must not create any FuzzTarget or
    FuzzTargetJob entities as a result."""
    fuzz_task.record_fuzz_target('libFuzzer', None, 'job')

    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_child').get()
    self.assertIsNone(fuzz_target)

    job_mapping = ndb.Key(data_types.FuzzTargetJob, 'libFuzzer_child/job').get()
    self.assertIsNone(job_mapping)

  @parameterized.parameterized.expand(['child', 'proj_child'])
  def test_record_fuzz_target_ossfuzz(self, binary_name):
    """On OSS-Fuzz, targets are qualified with the job's PROJECT_NAME."""
    self.mock.is_oss_fuzz.return_value = True
    data_types.Job(name='job', environment_string='PROJECT_NAME = proj\n').put()

    fuzz_task.record_fuzz_target('libFuzzer', binary_name, 'job')
    fuzz_target = ndb.Key(data_types.FuzzTarget, 'libFuzzer_proj_child').get()
    self.assertDictEqual({
        'binary': binary_name,
        'engine': 'libFuzzer',
        'project': 'proj',
    }, fuzz_target.to_dict())

    job_mapping = ndb.Key(data_types.FuzzTargetJob,
                          'libFuzzer_proj_child/job').get()
    self.assertDictEqual({
        'fuzz_target_name': 'libFuzzer_proj_child',
        'job': 'job',
        'engine': 'libFuzzer',
        'last_run': datetime.datetime(2018, 1, 1, 0, 0),
        'weight': 1.0,
    }, job_mapping.to_dict())

    self.assertEqual('libFuzzer_proj_child', fuzz_target.fully_qualified_name())
    self.assertEqual('proj_child', fuzz_target.project_qualified_name())
@test_utils.with_cloud_emulators('datastore')
class DoEngineFuzzingTest(fake_filesystem_unittest.TestCase):
  """Tests for FuzzingSession.do_engine_fuzzing."""

  def setUp(self):
    """Set up a fake filesystem with a fuzz target, its metadata files, and
    mocks for corpus sync, log/testcase upload and stats upload."""
    helpers.patch_environ(self)
    helpers.patch(self, [
        'bot.fuzzers.engine_common.current_timestamp',
        'bot.tasks.fuzz_task.GcsCorpus.sync_from_gcs',
        'bot.tasks.fuzz_task.GcsCorpus.upload_files',
        'build_management.revisions.get_component_list',
        'bot.testcase_manager.upload_log',
        'bot.testcase_manager.upload_testcase',
        'metrics.fuzzer_stats.upload_stats',
    ])
    test_utils.set_up_pyfakefs(self)

    os.environ['JOB_NAME'] = 'libfuzzer_asan_test'
    os.environ['FUZZ_INPUTS'] = '/fuzz-inputs'
    os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs-disk'
    os.environ['BUILD_DIR'] = '/build_dir'
    os.environ['MAX_TESTCASES'] = '2'
    os.environ['AUTOMATIC_LABELS'] = 'auto_label,auto_label1'
    os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component,auto_component1'

    # The target binary plus its .labels/.owners/.components sidecar files.
    self.fs.create_file('/build_dir/test_target')
    self.fs.create_file(
        '/build_dir/test_target.labels', contents='label1\nlabel2')
    self.fs.create_file(
        '/build_dir/test_target.owners', contents='owner1@email.com')
    self.fs.create_file(
        '/build_dir/test_target.components', contents='component1\ncomponent2')
    self.fs.create_file('/input')

    self.mock.sync_from_gcs.return_value = True
    self.mock.upload_files.return_value = True
    self.mock.get_component_list.return_value = [{
        'component': 'component',
        'link_text': 'rev',
    }]
    self.mock.current_timestamp.return_value = 0.0

  def test_basic(self):
    """Engine fuzzing produces crashes, metadata, logs and stats for each of
    the MAX_TESTCASES (2) fuzzing rounds."""
    session = fuzz_task.FuzzingSession('libFuzzer', 'libfuzzer_asan_test', 60)
    session.testcase_directory = os.environ['FUZZ_INPUTS']
    session.data_directory = '/data_dir'

    os.environ['FUZZ_TARGET'] = 'test_target'
    os.environ['APP_REVISION'] = '1'

    expected_crashes = [engine.Crash('/input', 'stack', ['args'], 1.0)]

    engine_impl = mock.Mock()
    engine_impl.name = 'libFuzzer'
    engine_impl.prepare.return_value = engine.FuzzOptions(
        '/corpus', ['arg'], {
            'strategy_1': 1,
            'strategy_2': 50,
        })
    engine_impl.fuzz.side_effect = lambda *_: engine.FuzzResult(
        'logs', ['cmd'], expected_crashes, {'stat': 1}, 42.0)

    crashes, fuzzer_metadata = session.do_engine_fuzzing(engine_impl)

    # Sidecar file contents are merged with the AUTOMATIC_* env variables.
    self.assertDictEqual({
        'fuzzer_binary_name':
            'test_target',
        'issue_components':
            'component1,component2,auto_component,auto_component1',
        'issue_labels':
            'label1,label2,auto_label,auto_label1',
        'issue_owners':
            'owner1@email.com',
    }, fuzzer_metadata)

    # The log is uploaded once per round with component revisions, command
    # details and the strategy line appended.
    log_time = datetime.datetime(1970, 1, 1, 0, 0)
    log_call = mock.call(
        'Component revisions (build r1):\n'
        'component: rev\n\n'
        'Return code: 1\n\n'
        'Command: cmd\nBot: None\nTime ran: 42.0\n\n'
        'logs\n'
        'cf::fuzzing_strategies: strategy_1:1,strategy_2:50', log_time)
    self.mock.upload_log.assert_has_calls([log_call, log_call])
    self.mock.upload_testcase.assert_has_calls([
        mock.call('/input', log_time),
        mock.call('/input', log_time),
    ])

    self.assertEqual(2, len(crashes))
    for i in range(2):
      self.assertEqual('/input', crashes[i].file_path)
      self.assertEqual(1, crashes[i].return_code)
      self.assertEqual('stack', crashes[i].unsymbolized_crash_stacktrace)
      self.assertEqual(1.0, crashes[i].crash_time)
      self.assertEqual('args', crashes[i].arguments)

    # TestcaseRun stats are uploaded once per round with strategy columns.
    for i in range(2):
      upload_args = self.mock.upload_stats.call_args_list[i][0][0]
      testcase_run = upload_args[0]
      self.assertDictEqual({
          'build_revision': 1,
          'command': ['cmd'],
          'fuzzer': u'libFuzzer_test_target',
          'job': 'libfuzzer_asan_test',
          'kind': 'TestcaseRun',
          'stat': 1,
          'strategy_strategy_1': 1,
          'strategy_strategy_2': 50,
          'timestamp': 0.0,
      }, testcase_run.data)
class UntrustedRunEngineFuzzerTest(
    untrusted_runner_helpers.UntrustedRunnerIntegrationTest):
  """Integration tests for run_engine_fuzzer through the untrusted runner."""

  def setUp(self):
    """Set up a libFuzzer ASan job backed by real test builds in GCS."""
    super(UntrustedRunEngineFuzzerTest, self).setUp()
    environment.set_value('JOB_NAME', 'libfuzzer_asan_job')

    job = data_types.Job(
        name='libfuzzer_asan_job',
        environment_string=(
            'RELEASE_BUILD_BUCKET_PATH = '
            'gs://clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-([0-9]+).zip\n'
            'REVISION_VARS_URL = https://commondatastorage.googleapis.com/'
            'clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-%s.srcmap.json\n'))
    job.put()

    self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS'))
    environment.set_value('USE_MINIJAIL', False)

  def tearDown(self):
    """Remove the temp directory created in setUp."""
    super(UntrustedRunEngineFuzzerTest, self).tearDown()
    shutil.rmtree(self.temp_dir, ignore_errors=True)

  def test_run_engine_fuzzer(self):
    """Running a real libFuzzer target remotely returns crashes, stats and
    metadata rebased onto the trusted host's paths."""
    self._setup_env(job_type='libfuzzer_asan_job')
    environment.set_value('FUZZ_TEST_TIMEOUT', 3600)

    build_manager.setup_build()

    corpus_directory = os.path.join(self.temp_dir, 'corpus')
    testcase_directory = os.path.join(self.temp_dir, 'artifacts')
    os.makedirs(file_host.rebase_to_worker_root(corpus_directory))
    os.makedirs(file_host.rebase_to_worker_root(testcase_directory))

    result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(
        libfuzzer_engine.LibFuzzerEngine(), 'test_fuzzer', corpus_directory,
        testcase_directory)
    self.assertIn(
        'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
        result.logs)
    self.assertEqual(1, len(result.crashes))
    self.assertTrue(result.crashes[0].input_path.startswith(
        os.environ['ROOT_DIR']))
    self.assertTrue(os.path.exists(result.crashes[0].input_path))
    self.assertIsInstance(result.stats.get('number_of_executed_units'), int)
    self.assertIsInstance(result.stats.get('oom_count'), int)
    self.assertIsInstance(
        result.stats.get('strategy_selection_method'), six.string_types)

    self.assertDictEqual({'fuzzer_binary_name': 'test_fuzzer'}, fuzzer_metadata)
class AddIssueMetadataFromEnvironmentTest(unittest.TestCase):
  """Tests for _add_issue_metadata_from_environment."""

  def setUp(self):
    helpers.patch_environ(self)

  @staticmethod
  def _populate_automatic_env():
    """Set the AUTOMATIC_* variables shared by the append tests."""
    os.environ['AUTOMATIC_LABELS'] = 'auto_label'
    os.environ['AUTOMATIC_LABELS_1'] = 'auto_label1'
    os.environ['AUTOMATIC_COMPONENTS'] = 'auto_component'
    os.environ['AUTOMATIC_COMPONENTS_1'] = 'auto_component1'

  def test_add_no_existing(self):
    """Environment metadata is added when the dict starts empty."""
    self._populate_automatic_env()

    metadata = {}
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_components': 'auto_component,auto_component1',
        'issue_labels': 'auto_label,auto_label1',
    }, metadata)

  def test_add_append(self):
    """Environment metadata is appended after pre-existing values."""
    self._populate_automatic_env()

    metadata = {
        'issue_components': 'existing_component',
        'issue_labels': 'existing_label'
    }
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_components':
            'existing_component,auto_component,auto_component1',
        'issue_labels':
            'existing_label,auto_label,auto_label1',
    }, metadata)

  def test_add_numeric(self):
    """Numeric-looking values are carried through as strings."""
    os.environ['AUTOMATIC_LABELS'] = '123'

    metadata = {}
    fuzz_task._add_issue_metadata_from_environment(metadata)
    self.assertDictEqual({
        'issue_labels': '123',
    }, metadata)
| true | true |
f71d7d078b07d7f6a9b3225e4d976e6989b07bd6 | 9,268 | py | Python | faassupervisor/faas/aws_lambda/udocker.py | WinstonN/faas-supervisor | 9dadf7a34459a5c66d2b2828e8a79badc55a8758 | [
"Apache-2.0"
] | null | null | null | faassupervisor/faas/aws_lambda/udocker.py | WinstonN/faas-supervisor | 9dadf7a34459a5c66d2b2828e8a79badc55a8758 | [
"Apache-2.0"
] | null | null | null | faassupervisor/faas/aws_lambda/udocker.py | WinstonN/faas-supervisor | 9dadf7a34459a5c66d2b2828e8a79badc55a8758 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) GRyCAP - I3M - UPV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In this module are defined all the methods and classes used
to manage a udocker container in the lambda environment."""
import subprocess
from faassupervisor.exceptions import ContainerImageNotFoundError
from faassupervisor.utils import SysUtils, FileUtils, ConfigUtils
from faassupervisor.logger import get_logger
from faassupervisor.exceptions import ContainerTimeoutExpiredWarning
from faassupervisor.faas.aws_lambda.function import get_function_ip
def _parse_cont_env_var(key, value):
return ["--env", str(key) + '=' + str(value)] if key and value else []
class Udocker():
    """Class in charge of managing the udocker binary."""

    # File where the container's stdout/stderr is captured during execution.
    _CONTAINER_OUTPUT_FILE = SysUtils.join_paths(FileUtils.get_tmp_dir(), "container-stdout")
    # Name given to the single container managed by this class.
    _CONTAINER_NAME = "udocker_container"
    # Interpreter used to run user scripts inside the container.
    _SCRIPT_EXEC = "/bin/sh"

    def __init__(self, lambda_instance):
        """Initialize the udocker run command for 'lambda_instance'.

        Raises ContainerImageNotFoundError when the configuration does not
        define a container image.
        """
        self.lambda_instance = lambda_instance
        # Create required udocker folder
        FileUtils.create_folder(SysUtils.get_env_var("UDOCKER_DIR"))
        # Init the udocker command that will be executed
        self.udocker_exec = [SysUtils.get_env_var("UDOCKER_EXEC")]
        self.cont_cmd = self.udocker_exec + ["--quiet", "run"]

        self.cont_img_id = ConfigUtils.read_cfg_var('container').get('image')
        if not self.cont_img_id:
            raise ContainerImageNotFoundError()

    def _list_udocker_images_cmd(self):
        """Command listing the locally available udocker images."""
        return self.udocker_exec + ["images"]

    def _load_udocker_image_cmd(self):
        """Command loading the container image from a local file."""
        return self.udocker_exec + ["load", "-i", self.cont_img_id]

    def _download_udocker_image_cmd(self):
        """Command pulling the container image (debug mode enabled)."""
        return self.udocker_exec + ["-D", "pull", self.cont_img_id]

    def _list_udocker_containers_cmd(self):
        """Command listing the locally created udocker containers."""
        return self.udocker_exec + ["ps"]

    def _create_udocker_container_cmd(self):
        """Command creating the named container from the configured image."""
        return self.udocker_exec + ["create", f"--name={self._CONTAINER_NAME}", self.cont_img_id]

    def _set_udocker_container_execution_mode_cmd(self):
        """Command setting the container execution mode to F1 (Fakechroot)."""
        return self.udocker_exec + ["setup", "--execmode=F1", self._CONTAINER_NAME]

    def _is_container_image_downloaded(self):
        """Return True if the configured image appears in 'udocker images'."""
        cmd_out = SysUtils.execute_cmd_and_return_output(self._list_udocker_images_cmd())
        return self.cont_img_id in cmd_out

    def _load_local_container_image(self):
        """Load the container image from the local file system."""
        get_logger().info("Loading container image '%s'", self.cont_img_id)
        SysUtils.execute_cmd(self._load_udocker_image_cmd())

    def _download_container_image(self):
        """Pull the container image from its registry (Docker Hub)."""
        get_logger().info("Pulling container '%s' from Docker Hub", self.cont_img_id)
        SysUtils.execute_cmd(self._download_udocker_image_cmd())

    def _is_container_available(self):
        """Return True if the named container appears in 'udocker ps'."""
        cmd_out = SysUtils.execute_cmd_and_return_output(self._list_udocker_containers_cmd())
        return self._CONTAINER_NAME in cmd_out

    def _create_image(self):
        """Make the container image available, loading it locally when an
        IMAGE_FILE is provided or pulling it otherwise."""
        if self._is_container_image_downloaded():
            get_logger().info("Container image '%s' already available", self.cont_img_id)
        else:
            if SysUtils.is_var_in_env("IMAGE_FILE"):
                self._load_local_container_image()
            else:
                self._download_container_image()

    def _create_container(self):
        """Create the container (if needed) and set its execution mode."""
        if self._is_container_available():
            get_logger().info("Container already available")
        else:
            get_logger().info("Creating container based on image '%s'.", self.cont_img_id)
            SysUtils.execute_cmd(self._create_udocker_container_cmd())
        SysUtils.execute_cmd(self._set_udocker_container_execution_mode_cmd())

    def _create_command(self):
        """Finish building the udocker run command: volumes, environment
        variables and the entrypoint/arguments to execute."""
        self._add_container_volumes()
        self._add_container_environment_variables()
        # Container running script
        if hasattr(self.lambda_instance, 'script_path'):
            # Add script in memory as entrypoint
            self.cont_cmd += [(f"--entrypoint={self._SCRIPT_EXEC} "
                               f"{self.lambda_instance.script_path}"),
                              self._CONTAINER_NAME]
        # Container with args
        elif hasattr(self.lambda_instance, 'cmd_args'):
            # Add args
            self.cont_cmd += [self._CONTAINER_NAME]
            self.cont_cmd += self.lambda_instance.cmd_args
        # Script to be executed every time (if defined)
        elif hasattr(self.lambda_instance, 'init_script_path'):
            # Add init script
            self.cont_cmd += [(f"--entrypoint={self._SCRIPT_EXEC} "
                               f"{self.lambda_instance.init_script_path}"),
                              self._CONTAINER_NAME]
        # Only container
        else:
            self.cont_cmd += [self._CONTAINER_NAME]

    def _add_container_volumes(self):
        """Mount the I/O temp dirs, required system paths and, when an extra
        payload is defined, the function's permanent folder."""
        self.cont_cmd.extend(["-v", SysUtils.get_env_var("TMP_INPUT_DIR")])
        self.cont_cmd.extend(["-v", SysUtils.get_env_var("TMP_OUTPUT_DIR")])
        self.cont_cmd.extend(["-v", "/dev", "-v", "/mnt", "-v", "/proc", "-v", "/etc/hosts", "--nosysdirs"])
        if SysUtils.is_var_in_env('EXTRA_PAYLOAD'):
            self.cont_cmd.extend(["-v", self.lambda_instance.PERMANENT_FOLDER])

    def _add_cont_env_vars(self):
        """Forward all user-defined container environment variables."""
        for key, value in SysUtils.get_cont_env_vars().items():
            self.cont_cmd.extend(_parse_cont_env_var(key, value))

    def _add_input_file(self):
        """Expose the input file path to the container."""
        self.cont_cmd.extend(_parse_cont_env_var("INPUT_FILE_PATH",
                                                 SysUtils.get_env_var("INPUT_FILE_PATH")))

    def _add_output_dir(self):
        """Expose the temporary output directory path to the container."""
        self.cont_cmd.extend(_parse_cont_env_var("TMP_OUTPUT_DIR",
                                                 SysUtils.get_env_var("TMP_OUTPUT_DIR")))

    def _add_storage_object_key(self):
        """Expose the storage object key of the input file (if any)."""
        self.cont_cmd.extend(_parse_cont_env_var("STORAGE_OBJECT_KEY",
                                                 SysUtils.get_env_var("STORAGE_OBJECT_KEY")))

    def _add_extra_payload_path(self):
        """Expose the extra payload path to the container."""
        self.cont_cmd.extend(_parse_cont_env_var("EXTRA_PAYLOAD",
                                                 SysUtils.get_env_var("EXTRA_PAYLOAD")))

    def _add_function_request_id(self):
        """Expose the current Lambda request id to the container."""
        self.cont_cmd.extend(_parse_cont_env_var("REQUEST_ID",
                                                 self.lambda_instance.get_request_id()))

    def _add_aws_access_keys(self):
        """Forward the Lambda execution role's temporary AWS credentials."""
        self.cont_cmd.extend(_parse_cont_env_var("AWS_ACCESS_KEY_ID",
                                                 SysUtils.get_env_var("AWS_ACCESS_KEY_ID")))
        self.cont_cmd.extend(_parse_cont_env_var("AWS_SECRET_ACCESS_KEY",
                                                 SysUtils.get_env_var("AWS_SECRET_ACCESS_KEY")))
        self.cont_cmd.extend(_parse_cont_env_var("AWS_SESSION_TOKEN",
                                                 SysUtils.get_env_var("AWS_SESSION_TOKEN")))

    def _add_function_ip(self):
        """Expose the function instance IP to the container."""
        self.cont_cmd.extend(_parse_cont_env_var("INSTANCE_IP", get_function_ip()))

    def _add_container_environment_variables(self):
        """Add every '--env' pair needed by the container command."""
        self._add_function_request_id()
        self._add_function_ip()
        self._add_aws_access_keys()
        self._add_cont_env_vars()
        self._add_input_file()
        self._add_output_dir()
        self._add_storage_object_key()
        self._add_extra_payload_path()

    def prepare_container(self):
        """Prepares the environment to execute the udocker container."""
        self._create_image()
        self._create_container()
        self._create_command()

    def launch_udocker_container(self):
        """Launches the udocker container.

        If the execution time of the container exceeds the defined execution time,
        the container is killed and a warning is raised.

        Returns the container's captured stdout/stderr as bytes.
        """
        remaining_seconds = self.lambda_instance.get_remaining_time_in_seconds()
        get_logger().info("Executing udocker container. Timeout set to '%d' seconds",
                          remaining_seconds)
        get_logger().debug("Udocker command: '%s'", self.cont_cmd)
        with open(self._CONTAINER_OUTPUT_FILE, "wb") as out:
            with subprocess.Popen(self.cont_cmd,
                                  stderr=subprocess.STDOUT,
                                  stdout=out,
                                  start_new_session=True) as process:
                try:
                    process.wait(timeout=remaining_seconds)
                except subprocess.TimeoutExpired:
                    get_logger().info("Stopping process '%s'", process)
                    process.kill()
                    # Surface the timeout to the supervisor as a warning.
                    raise ContainerTimeoutExpiredWarning()
        udocker_output = b''
        if FileUtils.is_file(self._CONTAINER_OUTPUT_FILE):
            udocker_output = FileUtils.read_file(self._CONTAINER_OUTPUT_FILE, file_mode="rb")
        return udocker_output
| 45.431373 | 108 | 0.65451 |
import subprocess
from faassupervisor.exceptions import ContainerImageNotFoundError
from faassupervisor.utils import SysUtils, FileUtils, ConfigUtils
from faassupervisor.logger import get_logger
from faassupervisor.exceptions import ContainerTimeoutExpiredWarning
from faassupervisor.faas.aws_lambda.function import get_function_ip
def _parse_cont_env_var(key, value):
return ["--env", str(key) + '=' + str(value)] if key and value else []
class Udocker():
    """Wrapper around the udocker CLI used to run the user's container.

    Assembles the ``udocker run`` invocation (volumes, environment variables,
    entrypoint) from the Lambda instance and the supervisor configuration, and
    executes it with the remaining Lambda execution time as timeout.
    """
    # File that captures the container's combined stdout/stderr.
    _CONTAINER_OUTPUT_FILE = SysUtils.join_paths(FileUtils.get_tmp_dir(), "container-stdout")
    _CONTAINER_NAME = "udocker_container"
    _SCRIPT_EXEC = "/bin/sh"
    def __init__(self, lambda_instance):
        """Resolve the configured image and prepare the base run command.

        Raises ContainerImageNotFoundError when no image is configured.
        """
        self.lambda_instance = lambda_instance
        FileUtils.create_folder(SysUtils.get_env_var("UDOCKER_DIR"))
        self.udocker_exec = [SysUtils.get_env_var("UDOCKER_EXEC")]
        # Base command; volume/env/entrypoint arguments are appended later
        # by _create_command().
        self.cont_cmd = self.udocker_exec + ["--quiet", "run"]
        self.cont_img_id = ConfigUtils.read_cfg_var('container').get('image')
        if not self.cont_img_id:
            raise ContainerImageNotFoundError()
    def _list_udocker_images_cmd(self):
        """Return the command listing locally available udocker images."""
        return self.udocker_exec + ["images"]
    def _load_udocker_image_cmd(self):
        """Return the command loading the image from a local file."""
        return self.udocker_exec + ["load", "-i", self.cont_img_id]
    def _download_udocker_image_cmd(self):
        """Return the command pulling the image from Docker Hub (debug mode)."""
        return self.udocker_exec + ["-D", "pull", self.cont_img_id]
    def _list_udocker_containers_cmd(self):
        """Return the command listing created udocker containers."""
        return self.udocker_exec + ["ps"]
    def _create_udocker_container_cmd(self):
        """Return the command creating the named container from the image."""
        return self.udocker_exec + ["create", f"--name={self._CONTAINER_NAME}", self.cont_img_id]
    def _set_udocker_container_execution_mode_cmd(self):
        """Return the command switching the container to execution mode F1."""
        return self.udocker_exec + ["setup", "--execmode=F1", self._CONTAINER_NAME]
    def _is_container_image_downloaded(self):
        """Check whether the configured image shows up in `udocker images`."""
        cmd_out = SysUtils.execute_cmd_and_return_output(self._list_udocker_images_cmd())
        return self.cont_img_id in cmd_out
    def _load_local_container_image(self):
        """Load the container image from a local file."""
        get_logger().info("Loading container image '%s'", self.cont_img_id)
        SysUtils.execute_cmd(self._load_udocker_image_cmd())
    def _download_container_image(self):
        """Pull the container image from Docker Hub."""
        get_logger().info("Pulling container '%s' from Docker Hub", self.cont_img_id)
        SysUtils.execute_cmd(self._download_udocker_image_cmd())
    def _is_container_available(self):
        """Check whether the named container has already been created."""
        cmd_out = SysUtils.execute_cmd_and_return_output(self._list_udocker_containers_cmd())
        return self._CONTAINER_NAME in cmd_out
    def _create_image(self):
        """Ensure the image is present: load from IMAGE_FILE if set, else pull."""
        if self._is_container_image_downloaded():
            get_logger().info("Container image '%s' already available", self.cont_img_id)
        else:
            if SysUtils.is_var_in_env("IMAGE_FILE"):
                self._load_local_container_image()
            else:
                self._download_container_image()
    def _create_container(self):
        """Create the container (if needed) and set its execution mode."""
        if self._is_container_available():
            get_logger().info("Container already available")
        else:
            get_logger().info("Creating container based on image '%s'.", self.cont_img_id)
            SysUtils.execute_cmd(self._create_udocker_container_cmd())
        SysUtils.execute_cmd(self._set_udocker_container_execution_mode_cmd())
    def _create_command(self):
        """Append volumes, environment and entrypoint to the run command.

        The entrypoint is chosen by attribute precedence on the Lambda
        instance: script_path, then cmd_args, then init_script_path; when
        none is present the container's own entrypoint is used.
        """
        self._add_container_volumes()
        self._add_container_environment_variables()
        if hasattr(self.lambda_instance, 'script_path'):
            self.cont_cmd += [(f"--entrypoint={self._SCRIPT_EXEC} "
                               f"{self.lambda_instance.script_path}"),
                              self._CONTAINER_NAME]
        elif hasattr(self.lambda_instance, 'cmd_args'):
            self.cont_cmd += [self._CONTAINER_NAME]
            self.cont_cmd += self.lambda_instance.cmd_args
        elif hasattr(self.lambda_instance, 'init_script_path'):
            self.cont_cmd += [(f"--entrypoint={self._SCRIPT_EXEC} "
                               f"{self.lambda_instance.init_script_path}"),
                              self._CONTAINER_NAME]
        else:
            self.cont_cmd += [self._CONTAINER_NAME]
    def _add_container_volumes(self):
        """Mount the tmp input/output dirs and required host system paths."""
        self.cont_cmd.extend(["-v", SysUtils.get_env_var("TMP_INPUT_DIR")])
        self.cont_cmd.extend(["-v", SysUtils.get_env_var("TMP_OUTPUT_DIR")])
        self.cont_cmd.extend(["-v", "/dev", "-v", "/mnt", "-v", "/proc", "-v", "/etc/hosts", "--nosysdirs"])
        if SysUtils.is_var_in_env('EXTRA_PAYLOAD'):
            self.cont_cmd.extend(["-v", self.lambda_instance.PERMANENT_FOLDER])
    def _add_cont_env_vars(self):
        """Forward every configured container environment variable."""
        for key, value in SysUtils.get_cont_env_vars().items():
            self.cont_cmd.extend(_parse_cont_env_var(key, value))
    def _add_input_file(self):
        self.cont_cmd.extend(_parse_cont_env_var("INPUT_FILE_PATH",
                                                 SysUtils.get_env_var("INPUT_FILE_PATH")))
    def _add_output_dir(self):
        self.cont_cmd.extend(_parse_cont_env_var("TMP_OUTPUT_DIR",
                                                 SysUtils.get_env_var("TMP_OUTPUT_DIR")))
    def _add_storage_object_key(self):
        self.cont_cmd.extend(_parse_cont_env_var("STORAGE_OBJECT_KEY",
                                                 SysUtils.get_env_var("STORAGE_OBJECT_KEY")))
    def _add_extra_payload_path(self):
        self.cont_cmd.extend(_parse_cont_env_var("EXTRA_PAYLOAD",
                                                 SysUtils.get_env_var("EXTRA_PAYLOAD")))
    def _add_function_request_id(self):
        self.cont_cmd.extend(_parse_cont_env_var("REQUEST_ID",
                                                 self.lambda_instance.get_request_id()))
    def _add_aws_access_keys(self):
        # Propagate the Lambda execution-role credentials into the container.
        self.cont_cmd.extend(_parse_cont_env_var("AWS_ACCESS_KEY_ID",
                                                 SysUtils.get_env_var("AWS_ACCESS_KEY_ID")))
        self.cont_cmd.extend(_parse_cont_env_var("AWS_SECRET_ACCESS_KEY",
                                                 SysUtils.get_env_var("AWS_SECRET_ACCESS_KEY")))
        self.cont_cmd.extend(_parse_cont_env_var("AWS_SESSION_TOKEN",
                                                 SysUtils.get_env_var("AWS_SESSION_TOKEN")))
    def _add_function_ip(self):
        self.cont_cmd.extend(_parse_cont_env_var("INSTANCE_IP", get_function_ip()))
    def _add_container_environment_variables(self):
        """Append all '--env' pairs to the run command."""
        self._add_function_request_id()
        self._add_function_ip()
        self._add_aws_access_keys()
        self._add_cont_env_vars()
        self._add_input_file()
        self._add_output_dir()
        self._add_storage_object_key()
        self._add_extra_payload_path()
    def prepare_container(self):
        """Make image and container available and build the full run command."""
        self._create_image()
        self._create_container()
        self._create_command()
    def launch_udocker_container(self):
        """Run the container, bounded by the remaining Lambda time.

        The container's output is captured in _CONTAINER_OUTPUT_FILE. On
        timeout the process is killed and ContainerTimeoutExpiredWarning is
        raised. Returns the captured output as bytes (b'' if no file).
        """
        remaining_seconds = self.lambda_instance.get_remaining_time_in_seconds()
        get_logger().info("Executing udocker container. Timeout set to '%d' seconds",
                          remaining_seconds)
        get_logger().debug("Udocker command: '%s'", self.cont_cmd)
        with open(self._CONTAINER_OUTPUT_FILE, "wb") as out:
            with subprocess.Popen(self.cont_cmd,
                                  stderr=subprocess.STDOUT,
                                  stdout=out,
                                  start_new_session=True) as process:
                try:
                    process.wait(timeout=remaining_seconds)
                except subprocess.TimeoutExpired:
                    get_logger().info("Stopping process '%s'", process)
                    process.kill()
                    raise ContainerTimeoutExpiredWarning()
        udocker_output = b''
        if FileUtils.is_file(self._CONTAINER_OUTPUT_FILE):
            udocker_output = FileUtils.read_file(self._CONTAINER_OUTPUT_FILE, file_mode="rb")
        return udocker_output
| true | true |
f71d7d40882346a998d1a79c9228b8c5ed1adc9f | 2,158 | py | Python | docs/conf.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | 1 | 2021-07-07T08:35:46.000Z | 2021-07-07T08:35:46.000Z | docs/conf.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | null | null | null | docs/conf.py | lengstrom/3db | 1d4a19600bcc43cf5637f7ee1d16887cba6308d5 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = '3DB'
copyright = '2021, 3DB Team'
author = '3DB Team'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinxcontrib.images'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
    'collapse_navigation': False,
    # -1 expands the sidebar navigation tree to unlimited depth.
    'navigation_depth': -1,
    'includehidden': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Document members in source order rather than alphabetically.
autodoc_member_order = 'bysource'
# Modules autodoc replaces with mocks because they are not importable in the
# docs build environment.
autodoc_mock_imports = ['bpy', 'imagenet_c', 'cv2']
| 33.2 | 79 | 0.664504 |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
project = '3DB'
copyright = '2021, 3DB Team'
author = '3DB Team'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinxcontrib.images'
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'collapse_navigation': False,
'navigation_depth': -1,
'includehidden': True,
}
html_static_path = ['_static']
autodoc_member_order = 'bysource'
autodoc_mock_imports = ['bpy', 'imagenet_c', 'cv2']
| true | true |
f71d7ddcd38a4dbbb52b93b7f2fc440f98a1ed76 | 9,835 | py | Python | grr/client/components/chipsec_support/grr_chipsec_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/components/chipsec_support/grr_chipsec_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/components/chipsec_support/grr_chipsec_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Test Chipsec client actions."""
import collections
import sys
import mock
from chipsec.helper import oshelper
from grr.client import vfs
from grr.client.components.chipsec_support.actions import chipsec_types
from grr.lib import flags
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
class MockUnknownChipsetError(RuntimeError):
  """Stand-in for chipsec's UnknownChipsetError used by these tests."""
class MockSPI(mock.MagicMock):
  """Fake chipsec SPI HAL: a single 64KiB flash region full of 0xFF bytes."""

  def get_SPI_region(self, unused_region):  # pylint: disable=invalid-name
    # One region spanning the whole mocked flash, regardless of the name.
    start, limit, flags = 0, 0xffff, 0
    return (start, limit, flags)

  def read_spi(self, unused_offset, size):
    # Erased-flash pattern, independent of the requested offset.
    erased_byte = "\xff"
    return erased_byte * size
class UnsupportedChipset(mock.MagicMock):
  """Chipset double whose init() always reports an unknown platform."""

  def init(self, unused_platform, unused_load_driver):
    raise MockUnknownChipsetError(
        "Unsupported Platform: VID = 0x0000, DID = 0x0000")
class FailingOsHelperChipset(mock.MagicMock):
  """Chipset double whose init() fails inside the OS helper layer."""

  def init(self, unused_platform, unused_load_driver):
    raise oshelper.OsHelperError(
        "Unable to open /sys/bus/pci/devices/0000:00:00.0/config", -1)
class GRRChipsecTest(client_test_lib.EmptyActionTest):
  """Generic test class for GRR-Chipsec actions.

  Installs a fake ``chipsec`` package into ``sys.modules`` before importing
  the client action module, so tests can run without real hardware access.
  """
  def setUp(self):
    # Mock the interface for Chipsec.
    self.chipsec_mock = mock.MagicMock()
    self.chipsec_mock.chipset = mock.MagicMock()
    self.chipsec_mock.chipset.UnknownChipsetError = MockUnknownChipsetError
    self.chipsec_mock.hal = mock.MagicMock()
    self.chipsec_mock.logger = mock.MagicMock()
    mock_modules = {
        "chipsec": self.chipsec_mock,
        "chipsec.hal": self.chipsec_mock.hal,
    }
    # Patch sys.modules so the import below resolves to the mocks.
    self.chipsec_patch = mock.patch.dict(sys.modules, mock_modules)
    self.chipsec_patch.start()
    # Import the ClientAction to test with the Chipsec mock in place.
    # pylint: disable=g-import-not-at-top, unused-variable
    from grr.client.components.chipsec_support.actions import grr_chipsec
    # pylint: enable=g-import-not-at-top, unused-variable
    # Keep a reference to the module so child classes may mock its content.
    self.grr_chipsec_module = grr_chipsec
    self.grr_chipsec_module.chipset = self.chipsec_mock.chipset
    self.grr_chipsec_module.logger = self.chipsec_mock.logger
  def tearDown(self):
    # Undo the sys.modules patch installed in setUp.
    self.chipsec_patch.stop()
class TestChipsecDumpFlashImage(GRRChipsecTest):
  """Test the client dump flash image action."""
  def setUp(self):
    super(TestChipsecDumpFlashImage, self).setUp()
    # Serve flash reads through the MockSPI fake (64KiB of 0xFF bytes).
    self.chipsec_mock.hal.spi = mock.MagicMock()
    self.chipsec_mock.hal.spi.SPI = MockSPI
    self.grr_chipsec_module.spi = self.chipsec_mock.hal.spi
  def testDumpFlashImage(self):
    """Test the basic dump."""
    args = chipsec_types.DumpFlashImageRequest()
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), "\xff" * 0x10000)
  def testDumpFlashImageVerbose(self):
    """Test the basic dump with the verbose mode enabled."""
    args = chipsec_types.DumpFlashImageRequest(log_level=1)
    result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
    with vfs.VFSOpen(result.path) as image:
      self.assertEqual(image.read(0x20000), "\xff" * 0x10000)
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
  def testDumpFlashImageUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
  def testDumpFlashImageUnknownChipsetVerbose(self):
    """Test unknown chipset with verbose mode.
    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpFlashImageRequest(log_level=1)
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
    # Use assertNotEqual/assertEqual: the plural aliases are deprecated and
    # were removed in Python 3.12.
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEqual(len(self.results[0].logs), 0)
    self.assertEqual(self.results[0].path.path, "")
  def testDumpFlashImageOsHelperErrorChipset(self):
    """If an exception is raised by the helper layer, handle it."""
    self.chipsec_mock.chipset.cs = FailingOsHelperChipset
    args = chipsec_types.DumpFlashImageRequest()
    self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
class MockACPI(object):
  """Fake chipsec ACPI HAL serving a fixed set of in-memory tables."""

  def __init__(self, unused_chipset):
    # Maps table signature -> list of table addresses. A defaultdict mimics
    # the behaviour of tableList in Chipsec: unknown signatures resolve to an
    # empty list instead of raising KeyError. Built directly as a defaultdict
    # (the previous code built a plain dict and immediately rebound it); the
    # misleading single-value parentheses around addresses are dropped too.
    # pylint: disable=invalid-name
    self.tableList = collections.defaultdict(list, {
        "DSDT": [0xAABBCCDDEEFF0011],
        "FACP": [0x1100FFEEDDCCBBAA],
        "XSDT": [0x1122334455667788],
        "SSDT": [0x1234567890ABCDEF, 0x2234567890ABCDEF,
                 0x3234567890ABCDEF],
    })
    # pylint: enable=invalid-name
    # Maps table address -> (header, content) blobs.
    self.table_content = {
        0xAABBCCDDEEFF0011: ("\xFF" * 0xFF, "\xEE" * 0xFF),
        0x1100FFEEDDCCBBAA: ("\xEE" * 0xFF, "\xFF" * 0xFF),
        0x1122334455667788: ("\xAB" * 0xFF, "\xCD" * 0xFF),
        0x1234567890ABCDEF: ("\xEF" * 0xFF, "\xFE" * 0xFF),
        0x2234567890ABCDEF: ("\xDC" * 0xFF, "\xBA" * 0xFF),
        0x3234567890ABCDEF: ("\xAA" * 0xFF, "\xBB" * 0xFF)
    }

  def get_ACPI_table(self, name):  # pylint: disable=invalid-name
    """Return a list of (header, content) pairs for tables matching *name*."""
    return [self.table_content[address] for address in self.tableList[name]]
class MockACPIReadingRestrictedArea(object):
  """ACPI double that simulates a /dev/mem read rejected by the kernel."""

  def __init__(self, unused_chipset):
    # Constructing the HAL itself fails, as a restricted /dev/mem read would.
    raise OSError("Operation not permitted")

  def get_ACPI_table(self, unused_name):  # pylint: disable=invalid-name
    return []
class TestDumpACPITable(GRRChipsecTest):
  """Test the client ACPI table dump action."""
  def setUp(self):
    super(TestDumpACPITable, self).setUp()
    # Serve ACPI reads through the MockACPI fake tables.
    self.chipsec_mock.hal.acpi = mock.MagicMock()
    self.chipsec_mock.hal.acpi.ACPI = MockACPI
    self.grr_chipsec_module.acpi = self.chipsec_mock.hal.acpi
  def testDumpValidSingleACPITable(self):
    """Tests basic valid ACPI table dump."""
    args = chipsec_types.DumpACPITableRequest(table_signature="DSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(len(result.acpi_tables), 1)
    self.assertEqual(result.acpi_tables[0].table_address, 0xAABBCCDDEEFF0011)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xFF" * 0xFF + "\xEE" * 0xFF)
  def testDumpValidMultipleACPITables(self):
    """Tests valid ACPI table dump that would yield several tables."""
    args = chipsec_types.DumpACPITableRequest(table_signature="SSDT")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(len(result.acpi_tables), 3)
    self.assertEqual(result.acpi_tables[0].table_address, 0x1234567890ABCDEF)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xEF" * 0xFF + "\xFE" * 0xFF)
    self.assertEqual(result.acpi_tables[1].table_address, 0x2234567890ABCDEF)
    self.assertEqual(result.acpi_tables[1].table_blob,
                     "\xDC" * 0xFF + "\xBA" * 0xFF)
    self.assertEqual(result.acpi_tables[2].table_address, 0x3234567890ABCDEF)
    self.assertEqual(result.acpi_tables[2].table_blob,
                     "\xAA" * 0xFF + "\xBB" * 0xFF)
  def testDumpValidSingleACPITableVerbose(self):
    """Tests valid ACPI table dump with verbose mode enabled."""
    args = chipsec_types.DumpACPITableRequest(
        table_signature="XSDT", logging=True)
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertEqual(result.acpi_tables[0].table_address, 0x1122334455667788)
    self.assertEqual(result.acpi_tables[0].table_blob,
                     "\xAB" * 0xFF + "\xCD" * 0xFF)
    # assertNotEqual: the plural alias is deprecated, removed in Python 3.12.
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
  def testDumpInvalidACPITable(self):
    """Tests dumping invalid ACPI table."""
    args = chipsec_types.DumpACPITableRequest(table_signature="INVALID_TABLE")
    result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
    self.assertNotEqual(len(result.logs), 0)
  def testDumpACPITableUnknownChipset(self):
    """By default, if the chipset is unknown, no exception is raised."""
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
  def testDumpACPITableUnknownChipsetVerbose(self):
    """Tests unknown chipset with verbose mode.
    If the chipset is unknown but verbose enabled, no exception is raised
    and at least one response should be returned with non-empty logs.
    """
    self.chipsec_mock.chipset.cs = UnsupportedChipset
    args = chipsec_types.DumpACPITableRequest(
        table_signature="FACP", logging=True)
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEqual(len(self.results[0].logs), 0)
  def testDumpACPITableTriggeringDevMemError(self):
    """Tests the condition where OSError is triggered due to using /dev/mem.
    No exception should be raised, and the log describing the error should be
    returned.
    """
    # NOTE(review): this assigns chipsec_mock.acpi, while setUp wires the
    # action module to chipsec_mock.hal.acpi — confirm this override actually
    # reaches grr_chipsec_module.acpi.
    self.chipsec_mock.acpi.ACPI = MockACPIReadingRestrictedArea
    args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
    self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
    self.assertGreaterEqual(len(self.results), 1)
    self.assertNotEqual(len(self.results[0].logs), 0)
def main(argv):
  """Program entry point; delegates to the GRR test runner."""
  test_lib.main(argv)
if __name__ == "__main__":
  flags.StartMain(main)
| 38.268482 | 78 | 0.729029 |
import collections
import sys
import mock
from chipsec.helper import oshelper
from grr.client import vfs
from grr.client.components.chipsec_support.actions import chipsec_types
from grr.lib import flags
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
class MockUnknownChipsetError(RuntimeError):
pass
class MockSPI(mock.MagicMock):
def get_SPI_region(self, unused_region):
return (0, 0xffff, 0)
def read_spi(self, unused_offset, size):
return "\xff" * size
class UnsupportedChipset(mock.MagicMock):
def init(self, unused_platform, unused_load_driver):
msg = "Unsupported Platform: VID = 0x0000, DID = 0x0000"
raise MockUnknownChipsetError(msg)
class FailingOsHelperChipset(mock.MagicMock):
def init(self, unused_platform, unused_load_driver):
msg = "Unable to open /sys/bus/pci/devices/0000:00:00.0/config"
raise oshelper.OsHelperError(msg, -1)
class GRRChipsecTest(client_test_lib.EmptyActionTest):
def setUp(self):
self.chipsec_mock = mock.MagicMock()
self.chipsec_mock.chipset = mock.MagicMock()
self.chipsec_mock.chipset.UnknownChipsetError = MockUnknownChipsetError
self.chipsec_mock.hal = mock.MagicMock()
self.chipsec_mock.logger = mock.MagicMock()
mock_modules = {
"chipsec": self.chipsec_mock,
"chipsec.hal": self.chipsec_mock.hal,
}
self.chipsec_patch = mock.patch.dict(sys.modules, mock_modules)
self.chipsec_patch.start()
from grr.client.components.chipsec_support.actions import grr_chipsec
self.grr_chipsec_module = grr_chipsec
self.grr_chipsec_module.chipset = self.chipsec_mock.chipset
self.grr_chipsec_module.logger = self.chipsec_mock.logger
def tearDown(self):
self.chipsec_patch.stop()
class TestChipsecDumpFlashImage(GRRChipsecTest):
def setUp(self):
super(TestChipsecDumpFlashImage, self).setUp()
self.chipsec_mock.hal.spi = mock.MagicMock()
self.chipsec_mock.hal.spi.SPI = MockSPI
self.grr_chipsec_module.spi = self.chipsec_mock.hal.spi
def testDumpFlashImage(self):
args = chipsec_types.DumpFlashImageRequest()
result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
with vfs.VFSOpen(result.path) as image:
self.assertEqual(image.read(0x20000), "\xff" * 0x10000)
def testDumpFlashImageVerbose(self):
args = chipsec_types.DumpFlashImageRequest(log_level=1)
result = self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)[0]
with vfs.VFSOpen(result.path) as image:
self.assertEqual(image.read(0x20000), "\xff" * 0x10000)
self.assertNotEqual(self.chipsec_mock.logger.logger.call_count, 0)
def testDumpFlashImageUnknownChipset(self):
self.chipsec_mock.chipset.cs = UnsupportedChipset
args = chipsec_types.DumpFlashImageRequest()
self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
def testDumpFlashImageUnknownChipsetVerbose(self):
self.chipsec_mock.chipset.cs = UnsupportedChipset
args = chipsec_types.DumpFlashImageRequest(log_level=1)
self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)
self.assertGreaterEqual(len(self.results), 1)
self.assertNotEquals(len(self.results[0].logs), 0)
self.assertEquals(self.results[0].path.path, "")
def testDumpFlashImageOsHelperErrorChipset(self):
self.chipsec_mock.chipset.cs = FailingOsHelperChipset
args = chipsec_types.DumpFlashImageRequest()
self.RunAction(self.grr_chipsec_module.DumpFlashImage, args)
class MockACPI(object):
def __init__(self, unused_chipset):
self.tableList = {
"DSDT": [(0xAABBCCDDEEFF0011)],
"FACP": [(0x1100FFEEDDCCBBAA)],
"XSDT": [(0x1122334455667788)],
"SSDT": [(0x1234567890ABCDEF), (0x2234567890ABCDEF),
(0x3234567890ABCDEF)]
}
self.tableList = collections.defaultdict(list, self.tableList)
self.table_content = {
0xAABBCCDDEEFF0011: ("\xFF" * 0xFF, "\xEE" * 0xFF),
0x1100FFEEDDCCBBAA: ("\xEE" * 0xFF, "\xFF" * 0xFF),
0x1122334455667788: ("\xAB" * 0xFF, "\xCD" * 0xFF),
0x1234567890ABCDEF: ("\xEF" * 0xFF, "\xFE" * 0xFF),
0x2234567890ABCDEF: ("\xDC" * 0xFF, "\xBA" * 0xFF),
0x3234567890ABCDEF: ("\xAA" * 0xFF, "\xBB" * 0xFF)
}
def get_ACPI_table(self, name):
return [self.table_content[address] for address in self.tableList[name]]
class MockACPIReadingRestrictedArea(object):
def __init__(self, unused_chipset):
raise OSError("Operation not permitted")
def get_ACPI_table(self, unused_name):
return []
class TestDumpACPITable(GRRChipsecTest):
def setUp(self):
super(TestDumpACPITable, self).setUp()
self.chipsec_mock.hal.acpi = mock.MagicMock()
self.chipsec_mock.hal.acpi.ACPI = MockACPI
self.grr_chipsec_module.acpi = self.chipsec_mock.hal.acpi
def testDumpValidSingleACPITable(self):
args = chipsec_types.DumpACPITableRequest(table_signature="DSDT")
result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
self.assertEqual(len(result.acpi_tables), 1)
self.assertEqual(result.acpi_tables[0].table_address, 0xAABBCCDDEEFF0011)
self.assertEqual(result.acpi_tables[0].table_blob,
"\xFF" * 0xFF + "\xEE" * 0xFF)
def testDumpValidMultipleACPITables(self):
args = chipsec_types.DumpACPITableRequest(table_signature="SSDT")
result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
self.assertEqual(len(result.acpi_tables), 3)
self.assertEqual(result.acpi_tables[0].table_address, 0x1234567890ABCDEF)
self.assertEqual(result.acpi_tables[0].table_blob,
"\xEF" * 0xFF + "\xFE" * 0xFF)
self.assertEqual(result.acpi_tables[1].table_address, 0x2234567890ABCDEF)
self.assertEqual(result.acpi_tables[1].table_blob,
"\xDC" * 0xFF + "\xBA" * 0xFF)
self.assertEqual(result.acpi_tables[2].table_address, 0x3234567890ABCDEF)
self.assertEqual(result.acpi_tables[2].table_blob,
"\xAA" * 0xFF + "\xBB" * 0xFF)
def testDumpValidSingleACPITableVerbose(self):
args = chipsec_types.DumpACPITableRequest(
table_signature="XSDT", logging=True)
result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
self.assertEqual(result.acpi_tables[0].table_address, 0x1122334455667788)
self.assertEqual(result.acpi_tables[0].table_blob,
"\xAB" * 0xFF + "\xCD" * 0xFF)
self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)
def testDumpInvalidACPITable(self):
args = chipsec_types.DumpACPITableRequest(table_signature="INVALID_TABLE")
result = self.RunAction(self.grr_chipsec_module.DumpACPITable, args)[0]
self.assertNotEquals(len(result.logs), 0)
def testDumpACPITableUnknownChipset(self):
self.chipsec_mock.chipset.cs = UnsupportedChipset
args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
def testDumpACPITableUnknownChipsetVerbose(self):
self.chipsec_mock.chipset.cs = UnsupportedChipset
args = chipsec_types.DumpACPITableRequest(
table_signature="FACP", logging=True)
self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
self.assertNotEquals(self.chipsec_mock.logger.logger.call_count, 0)
self.assertGreaterEqual(len(self.results), 1)
self.assertNotEquals(len(self.results[0].logs), 0)
def testDumpACPITableTriggeringDevMemError(self):
self.chipsec_mock.acpi.ACPI = MockACPIReadingRestrictedArea
args = chipsec_types.DumpACPITableRequest(table_signature="FACP")
self.RunAction(self.grr_chipsec_module.DumpACPITable, args)
self.assertGreaterEqual(len(self.results), 1)
self.assertNotEquals(len(self.results[0].logs), 0)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| true | true |
f71d7e44e4a12e49ae2b689e7ef43ea4b00aac94 | 3,032 | py | Python | statsSend/jenkins/jenkinsStatisticsSender.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | 2 | 2017-07-04T14:30:35.000Z | 2017-07-04T16:04:53.000Z | statsSend/jenkins/jenkinsStatisticsSender.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | null | null | null | statsSend/jenkins/jenkinsStatisticsSender.py | luigiberrettini/build-deploy-stats | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import xml.etree.ElementTree
from datetime import datetime, timedelta, timezone
from dateutil import parser
from statsSend.session import Session
from statsSend.utils import print_exception
from statsSend.urlBuilder import UrlBuilder
from statsSend.jenkins.jenkinsJob import JenkinsJob
class JenkinsStatisticsSender:
    """Collects Jenkins build statistics and forwards them to a reporter.

    The root job is resolved through the Jenkins XML API; its buildable
    descendants are walked through the JSON API, reporting one activity per
    build executed since the configured timestamp.
    """

    # Reference instant for converting aware datetimes to POSIX timestamps.
    epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
    one_second = timedelta(seconds=1)

    def __init__(self, settings, reporter):
        """Configure the API session factories from *settings*.

        *settings* is a mapping with server_url, user/credentials, API
        suffixes, page_size, job_name and since_timestamp entries.
        """
        self.xml_api_url_suffix = settings['xml_api_url_suffix']
        self.json_api_url_suffix = settings['json_api_url_suffix']
        page_size = int(settings['page_size'])
        url_builder_factory = lambda suffix: UrlBuilder(settings['server_url'], '', suffix, page_size)
        headers = {'Accept': 'application/json'}
        user = settings['user']
        password_or_auth_token = settings['password_or_auth_token']
        verify_ssl_certs = settings['verify_ssl_certs']
        self.session_factory = lambda suffix: Session(url_builder_factory(suffix), headers, user, password_or_auth_token, verify_ssl_certs)
        self.job_name = settings['job_name']
        # Milliseconds since the epoch (Jenkins build timestamps are in ms).
        self.since_posix_timestamp = (parser.parse(settings['since_timestamp']) - self.epoch) // self.one_second * 1000
        self.reporter = reporter

    async def send(self):
        """Report categories (when supported) and all recent build activities."""
        root_job_resource = await self.retrieve_root_job_resource()
        # Category reporting is optional: only some reporters implement it.
        if ("report_categories" in dir(self.reporter)):
            async with self.session_factory(self.json_api_url_suffix) as session:
                try:
                    job = JenkinsJob(session, root_job_resource)
                    categories = [job.to_category() async for job in job.retrieve_buildable_descendants()]
                    self.reporter.report_categories(categories)
                except Exception:
                    print_exception('Error sending categories')
        async with self.session_factory(self.json_api_url_suffix) as session:
            try:
                job = JenkinsJob(session, root_job_resource)
                async for job in job.retrieve_buildable_descendants():
                    for build in job.retrieve_builds_since_posix_timestamp(self.since_posix_timestamp):
                        try:
                            activity = build.to_activity()
                            self.reporter.report_activity(activity)
                        except Exception:
                            # Keep going: one bad build must not abort the run.
                            print_exception('Error reporting activity')
            except Exception:
                print_exception('Error reporting activities')

    async def retrieve_root_job_resource(self):
        """Return the URL of the configured root job via the Jenkins XML API."""
        async with self.session_factory(self.xml_api_url_suffix) as session:
            resource = "?xpath=//job[name='{:s}']/url".format(self.job_name)
            async with session.get_resource(resource) as response:
                text = await response.text()
                url_node = xml.etree.ElementTree.fromstring(text)
                return url_node.text
import xml.etree.ElementTree
from datetime import datetime, timedelta, timezone
from dateutil import parser
from statsSend.session import Session
from statsSend.utils import print_exception
from statsSend.urlBuilder import UrlBuilder
from statsSend.jenkins.jenkinsJob import JenkinsJob
class JenkinsStatisticsSender:
epoch = datetime(1970, 1, 1, tzinfo = timezone.utc)
one_second = timedelta(seconds = 1)
def __init__(self, settings, reporter):
self.xml_api_url_suffix = settings['xml_api_url_suffix']
self.json_api_url_suffix = settings['json_api_url_suffix']
page_size = int(settings['page_size'])
url_builder_factory = lambda suffix: UrlBuilder(settings['server_url'], '', suffix, page_size)
headers = { 'Accept': 'application/json'}
user = settings['user']
password_or_auth_token = settings['password_or_auth_token']
verify_ssl_certs = settings['verify_ssl_certs']
self.session_factory = lambda suffix: Session(url_builder_factory(suffix), headers, user, password_or_auth_token, verify_ssl_certs)
self.job_name = settings['job_name']
self.since_posix_timestamp = (parser.parse(settings['since_timestamp']) - self.epoch) // self.one_second * 1000
self.reporter = reporter
async def send(self):
root_job_resource = await self.retrieve_root_job_resource()
if ("report_categories" in dir(self.reporter)):
async with self.session_factory(self.json_api_url_suffix) as session:
try:
job = JenkinsJob(session, root_job_resource)
categories = [job.to_category() async for job in job.retrieve_buildable_descendants()]
self.reporter.report_categories(categories)
except Exception as err:
print_exception('Error sending categories')
async with self.session_factory(self.json_api_url_suffix) as session:
try:
job = JenkinsJob(session, root_job_resource)
async for job in job.retrieve_buildable_descendants():
for build in job.retrieve_builds_since_posix_timestamp(self.since_posix_timestamp):
try:
activity = build.to_activity()
self.reporter.report_activity(activity)
except Exception as err:
print_exception('Error reporting activity')
except Exception as err:
print_exception('Error reporting activities')
async def retrieve_root_job_resource(self):
async with self.session_factory(self.xml_api_url_suffix) as session:
resource = "?xpath=//job[name='{:s}']/url".format(self.job_name)
async with session.get_resource(resource) as response:
text = await response.text()
url_node = xml.etree.ElementTree.fromstring(text)
return url_node.text | true | true |
f71d7f51e7ed402cc9736ce8ab91a8fab33564d5 | 1,467 | py | Python | general_tools/obs_tools.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 1 | 2020-11-25T04:07:37.000Z | 2020-11-25T04:07:37.000Z | general_tools/obs_tools.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 52 | 2018-10-25T05:49:30.000Z | 2022-03-16T22:31:57.000Z | general_tools/obs_tools.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | null | null | null | import os
import markdown2
from bs4 import BeautifulSoup
def get_obs_chapter_data(repo_dir, chapter_num):
    """Parse an Open Bible Stories chapter markdown file.

    Looks for ``<repo_dir>/content/<chapter_num>.md`` and falls back to
    ``<repo_dir>/<chapter_num>.md``. Returns a dict with:
      - 'title': text of the first <h1>, or None if the file/heading is missing
      - 'frames': list of {'image', 'text'} dicts, one per image paragraph
      - 'bible_reference': text of the trailing paragraph, when present
    """
    obs_chapter_data = {
        'title': None,
        'frames': [],
        'bible_reference': None
    }
    obs_chapter_file = os.path.join(repo_dir, 'content', f'{chapter_num}.md')
    if not os.path.isfile(obs_chapter_file):
        obs_chapter_file = os.path.join(repo_dir, f'{chapter_num}.md')
    if os.path.isfile(obs_chapter_file):
        soup = BeautifulSoup(markdown2.markdown_path(obs_chapter_file), 'html.parser')
        # Guard against chapters without an <h1> heading (was soup.h1.text,
        # which raised AttributeError when no heading existed).
        obs_chapter_data['title'] = soup.h1.text if soup.h1 else None
        paragraphs = soup.find_all('p')
        frame = {
            'image': '',
            'text': ''
        }
        for idx, p in enumerate(paragraphs):
            if p.img:
                # Drop any query string from the image URL.
                src = p.img['src'].split('?')[0]
                if frame['image']:
                    # A new image starts a new frame; flush the previous one.
                    obs_chapter_data['frames'].append(frame)
                    frame = {
                        'image': '',
                        'text': ''
                    }
                frame['image'] = src
                p.img.extract()
            if p.text:
                # The last paragraph with text (after at least one frame text)
                # is the chapter's bible reference, not frame content.
                if idx == len(paragraphs) - 1 and frame['text']:
                    obs_chapter_data['bible_reference'] = p.text
                else:
                    frame['text'] += str(p)
        if frame['image']:
            obs_chapter_data['frames'].append(frame)
    return obs_chapter_data
| 34.116279 | 86 | 0.512611 | import os
import markdown2
from bs4 import BeautifulSoup
def get_obs_chapter_data(repo_dir, chapter_num):
obs_chapter_data = {
'title': None,
'frames': [],
'bible_reference': None
}
obs_chapter_file = os.path.join(repo_dir, 'content', f'{chapter_num}.md')
if not os.path.isfile(obs_chapter_file):
obs_chapter_file = os.path.join(repo_dir, f'{chapter_num}.md')
if os.path.isfile(obs_chapter_file):
print(obs_chapter_file)
soup = BeautifulSoup(markdown2.markdown_path(obs_chapter_file), 'html.parser')
obs_chapter_data['title'] = soup.h1.text
paragraphs = soup.find_all('p')
frame = {
'image': '',
'text': ''
}
for idx, p in enumerate(paragraphs):
if p.img:
src = p.img['src'].split('?')[0]
if frame['image']:
obs_chapter_data['frames'].append(frame)
frame = {
'image': '',
'text': ''
}
frame['image'] = src
p.img.extract()
if p.text:
if idx == len(paragraphs) - 1 and frame['text']:
obs_chapter_data['bible_reference'] = p.text
else:
frame['text'] += str(p)
if frame['image']:
obs_chapter_data['frames'].append(frame)
return obs_chapter_data
| true | true |
f71d7fa0ae2eee68a918e572b3e0809b5157731c | 434 | py | Python | awx/main/tests/unit/models/test_credential.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 1 | 2019-03-07T11:54:50.000Z | 2019-03-07T11:54:50.000Z | awx/main/tests/unit/models/test_credential.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 1 | 2020-04-09T08:43:28.000Z | 2020-04-09T08:43:28.000Z | awx/main/tests/unit/models/test_credential.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 2 | 2018-09-03T19:10:02.000Z | 2019-06-12T07:07:16.000Z | # -*- coding: utf-8 -*-
from awx.main.models import Credential, CredentialType
def test_unique_hash_with_unicode():
    """unique_hash(display=True) must round-trip non-ASCII text.

    Uses a credential-type name with umlauts, a credential name with mixed
    accented characters, and an emoji vault_id to confirm none of them are
    mangled in the displayed hash string.
    """
    ct = CredentialType(name=u'Väult', kind='vault')
    cred = Credential(
        id=4,
        name=u'Iñtërnâtiônàlizætiøn',
        credential_type=ct,
        inputs={
            u'vault_id': u'🐉🐉🐉'
        },
        credential_type_id=42
    )
    # Display form embeds the type name and the vault_id verbatim.
    assert cred.unique_hash(display=True) == u'Väult (id=🐉🐉🐉)'
| 24.111111 | 62 | 0.596774 |
from awx.main.models import Credential, CredentialType
def test_unique_hash_with_unicode():
ct = CredentialType(name=u'Väult', kind='vault')
cred = Credential(
id=4,
name=u'Iñtërnâtiônàlizætiøn',
credential_type=ct,
inputs={
u'vault_id': u'🐉🐉🐉'
},
credential_type_id=42
)
assert cred.unique_hash(display=True) == u'Väult (id=🐉🐉🐉)'
| true | true |
f71d7fe2e8cb699b6a0e6e27b481f5b65d9878c7 | 5,051 | py | Python | tests/test_warnings.py | fao89/drf-spectacular | 0dbd9d1c065f10aa1f8d472abd4795c05fb7b6c6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_warnings.py | fao89/drf-spectacular | 0dbd9d1c065f10aa1f8d472abd4795c05fb7b6c6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_warnings.py | fao89/drf-spectacular | 0dbd9d1c065f10aa1f8d472abd4795c05fb7b6c6 | [
"BSD-3-Clause"
] | 1 | 2021-06-14T16:42:20.000Z | 2021-06-14T16:42:20.000Z | from django.db import models
from rest_framework import mixins, serializers, views, viewsets
from rest_framework.authentication import BaseAuthentication
from rest_framework.decorators import action
from rest_framework.views import APIView
from drf_spectacular.utils import extend_schema
from tests import generate_schema
def test_serializer_name_reuse(warnings):
    """Two distinct serializers that share the class name XSerializer.

    Registers two viewsets whose serializers have colliding component names;
    schema generation must complete (the `warnings` fixture presumably
    captures the expected collision warning — TODO confirm fixture behavior).
    """
    from rest_framework import routers
    from drf_spectacular.generators import SchemaGenerator
    router = routers.SimpleRouter()
    def x1():
        class XSerializer(serializers.Serializer):
            uuid = serializers.UUIDField()
        return XSerializer
    def x2():
        class XSerializer(serializers.Serializer):
            # NOTE(review): field class (not instance) assigned here —
            # presumably intentional for this test; verify.
            integer = serializers.IntegerField
        return XSerializer
    class X1Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
        serializer_class = x1()
    router.register('x1', X1Viewset, basename='x1')
    class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
        serializer_class = x2()
    router.register('x2', X2Viewset, basename='x2')
    generator = SchemaGenerator(patterns=router.urls)
    generator.get_schema(request=None, public=True)
def test_owned_serializer_naming_override_with_ref_name_collision(warnings):
    """ref_name override that collides with another serializer's implicit name.

    YSerializer explicitly claims ref_name 'X', which XSerializer already
    occupies; generation must still complete.
    """
    class XSerializer(serializers.Serializer):
        x = serializers.UUIDField()
    class YSerializer(serializers.Serializer):
        x = serializers.IntegerField()
        class Meta:
            ref_name = 'X' # already used above
    class XAPIView(APIView):
        @extend_schema(request=XSerializer, responses=YSerializer)
        def post(self, request):
            pass # pragma: no cover
    generate_schema('x', view=XAPIView)
def test_no_queryset_warn(capsys):
    """A ModelViewSet without a queryset should emit a 'no queryset' warning."""
    class X1Serializer(serializers.Serializer):
        uuid = serializers.UUIDField()
    class X1Viewset(viewsets.ReadOnlyModelViewSet):
        serializer_class = X1Serializer
    generate_schema('x1', X1Viewset)
    # Warning text is written to stderr during schema generation.
    assert 'no queryset' in capsys.readouterr().err
def test_path_param_not_in_model(capsys):
    """A URL path parameter that matches no model field warns 'no such field'."""
    class M3(models.Model):
        pass # pragma: no cover
    class XSerializer(serializers.Serializer):
        uuid = serializers.UUIDField()
    class XViewset(viewsets.ReadOnlyModelViewSet):
        serializer_class = XSerializer
        queryset = M3.objects.none()
        # 'ephemeral' appears in the route but is not a field on M3.
        @action(detail=True, url_path='meta/(?P<ephemeral>[^/.]+)', methods=['POST'])
        def meta_param(self, request, ephemeral, pk):
            pass # pragma: no cover
    generate_schema('x1', XViewset)
    assert 'no such field' in capsys.readouterr().err
def test_no_authentication_scheme_registered(capsys):
    """A custom auth class without an OpenApiAuthenticationExtension warns."""
    class XAuth(BaseAuthentication):
        pass # pragma: no cover
    class XSerializer(serializers.Serializer):
        uuid = serializers.UUIDField()
    class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
        serializer_class = XSerializer
        authentication_classes = [XAuth]
    generate_schema('x', XViewset)
    assert 'no OpenApiAuthenticationExtension registered' in capsys.readouterr().err
def test_serializer_not_found(capsys):
    """A viewset with no serializer at all warns about the failed lookup."""
    class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
        pass # pragma: no cover
    generate_schema('x', XViewset)
    assert 'Exception raised while getting serializer' in capsys.readouterr().err
def test_extend_schema_unknown_class(capsys):
    """An unsupported class inside the responses dict triggers a warning."""
    class DoesNotCompute:
        pass # pragma: no cover
    class X1Viewset(viewsets.GenericViewSet):
        @extend_schema(responses={200: DoesNotCompute})
        def list(self, request):
            pass # pragma: no cover
    generate_schema('x1', X1Viewset)
    assert 'Expected either a serializer' in capsys.readouterr().err
def test_extend_schema_unknown_class2(capsys):
    """An unsupported class passed directly as responses triggers a warning."""
    class DoesNotCompute:
        pass # pragma: no cover
    class X1Viewset(viewsets.GenericViewSet):
        @extend_schema(responses=DoesNotCompute)
        def list(self, request):
            pass # pragma: no cover
    generate_schema('x1', X1Viewset)
    assert 'Expected either a serializer' in capsys.readouterr().err
def test_no_serializer_class_on_apiview(capsys):
    """A bare APIView gives the generator nothing to infer a serializer from."""
    class XView(views.APIView):
        def get(self, request):
            pass # pragma: no cover
    generate_schema('x', view=XView)
    assert 'Unable to guess serializer for' in capsys.readouterr().err
def test_unable_to_follow_field_source_through_intermediate_property_warning(warnings):
    """Field source traversal through an unannotated property emits a warning.

    The ReadOnlyField source 'x.y' passes through model property `x`, which
    has no return type hint, so the generator cannot resolve `y`.
    """
    class FailingFieldSourceTraversalModel1(models.Model):
        @property
        def x(self): # missing type hint emits warning
            return # pragma: no cover
    class XSerializer(serializers.ModelSerializer):
        x = serializers.ReadOnlyField(source='x.y')
        class Meta:
            model = FailingFieldSourceTraversalModel1
            fields = '__all__'
    class XAPIView(APIView):
        @extend_schema(responses=XSerializer)
        def get(self, request):
            pass # pragma: no cover
    generate_schema('x', view=XAPIView)
| 30.065476 | 87 | 0.706593 | from django.db import models
from rest_framework import mixins, serializers, views, viewsets
from rest_framework.authentication import BaseAuthentication
from rest_framework.decorators import action
from rest_framework.views import APIView
from drf_spectacular.utils import extend_schema
from tests import generate_schema
def test_serializer_name_reuse(warnings):
from rest_framework import routers
from drf_spectacular.generators import SchemaGenerator
router = routers.SimpleRouter()
def x1():
class XSerializer(serializers.Serializer):
uuid = serializers.UUIDField()
return XSerializer
def x2():
class XSerializer(serializers.Serializer):
integer = serializers.IntegerField
return XSerializer
class X1Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = x1()
router.register('x1', X1Viewset, basename='x1')
class X2Viewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = x2()
router.register('x2', X2Viewset, basename='x2')
generator = SchemaGenerator(patterns=router.urls)
generator.get_schema(request=None, public=True)
def test_owned_serializer_naming_override_with_ref_name_collision(warnings):
class XSerializer(serializers.Serializer):
x = serializers.UUIDField()
class YSerializer(serializers.Serializer):
x = serializers.IntegerField()
class Meta:
ref_name = 'X'
class XAPIView(APIView):
@extend_schema(request=XSerializer, responses=YSerializer)
def post(self, request):
pass
generate_schema('x', view=XAPIView)
def test_no_queryset_warn(capsys):
class X1Serializer(serializers.Serializer):
uuid = serializers.UUIDField()
class X1Viewset(viewsets.ReadOnlyModelViewSet):
serializer_class = X1Serializer
generate_schema('x1', X1Viewset)
assert 'no queryset' in capsys.readouterr().err
def test_path_param_not_in_model(capsys):
class M3(models.Model):
pass
class XSerializer(serializers.Serializer):
uuid = serializers.UUIDField()
class XViewset(viewsets.ReadOnlyModelViewSet):
serializer_class = XSerializer
queryset = M3.objects.none()
@action(detail=True, url_path='meta/(?P<ephemeral>[^/.]+)', methods=['POST'])
def meta_param(self, request, ephemeral, pk):
pass
generate_schema('x1', XViewset)
assert 'no such field' in capsys.readouterr().err
def test_no_authentication_scheme_registered(capsys):
class XAuth(BaseAuthentication):
pass
class XSerializer(serializers.Serializer):
uuid = serializers.UUIDField()
class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = XSerializer
authentication_classes = [XAuth]
generate_schema('x', XViewset)
assert 'no OpenApiAuthenticationExtension registered' in capsys.readouterr().err
def test_serializer_not_found(capsys):
class XViewset(mixins.ListModelMixin, viewsets.GenericViewSet):
pass
generate_schema('x', XViewset)
assert 'Exception raised while getting serializer' in capsys.readouterr().err
def test_extend_schema_unknown_class(capsys):
class DoesNotCompute:
pass
class X1Viewset(viewsets.GenericViewSet):
@extend_schema(responses={200: DoesNotCompute})
def list(self, request):
pass
generate_schema('x1', X1Viewset)
assert 'Expected either a serializer' in capsys.readouterr().err
def test_extend_schema_unknown_class2(capsys):
class DoesNotCompute:
pass
class X1Viewset(viewsets.GenericViewSet):
@extend_schema(responses=DoesNotCompute)
def list(self, request):
pass
generate_schema('x1', X1Viewset)
assert 'Expected either a serializer' in capsys.readouterr().err
def test_no_serializer_class_on_apiview(capsys):
class XView(views.APIView):
def get(self, request):
pass
generate_schema('x', view=XView)
assert 'Unable to guess serializer for' in capsys.readouterr().err
def test_unable_to_follow_field_source_through_intermediate_property_warning(warnings):
class FailingFieldSourceTraversalModel1(models.Model):
@property
def x(self):
return
class XSerializer(serializers.ModelSerializer):
x = serializers.ReadOnlyField(source='x.y')
class Meta:
model = FailingFieldSourceTraversalModel1
fields = '__all__'
class XAPIView(APIView):
@extend_schema(responses=XSerializer)
def get(self, request):
pass
generate_schema('x', view=XAPIView)
| true | true |
f71d7ff644e054571c43b78fa96b7e2e5f88fb9d | 22,213 | py | Python | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | 2 | 2020-08-01T18:19:05.000Z | 2021-06-30T06:37:23.000Z | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | null | null | null | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | 5 | 2018-11-24T16:29:39.000Z | 2021-12-10T03:07:07.000Z | import numpy as np
import warnings
import baselines.common.tf_util as U
import tensorflow as tf
import time
from baselines.common import zipsame, colorize
from contextlib import contextmanager
from collections import deque
from baselines import logger
from baselines.common.cg import cg
from baselines.pomis2.memory import Memory
from baselines.common.centralized_sampler import traj_segment_generator
from baselines.pois.utils import cluster_rewards
@contextmanager
def timed(msg):
    """Print `msg`, run the wrapped block, then report elapsed wall-clock time."""
    print(colorize(msg, color='magenta'))
    start = time.time()
    yield
    elapsed = time.time() - start
    print(colorize('done in %.3f seconds' % elapsed, color='magenta'))
def update_epsilon(delta_bound, epsilon_old, max_increase=2.):
    """Parabolic step-size update.

    Grow the step by `max_increase` while the bound improvement is large
    relative to the current step; otherwise jump to the vertex of the
    parabola interpolating the observed improvement.
    """
    growth_threshold = (1. - 1. / (2 * max_increase)) * epsilon_old
    if delta_bound > growth_threshold:
        return epsilon_old * max_increase
    # Vertex of the interpolating parabola.
    return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))
def line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):
    """Line search along `natural_gradient` with parabolic step updates.

    Repeatedly evaluates the bound at theta_init + step*alpha*direction,
    enlarging or interpolating the step via update_epsilon, and returns
    (theta, step, bound_gain, iterations) for the last improving point.
    Rolls back on NaN bounds.
    """
    step = 1.
    prev_step = 0.
    prev_gain = -np.inf
    baseline = evaluate_bound()
    prev_theta = theta_init
    for ite in range(max_line_search_ite):
        candidate = theta_init + step * alpha * natural_gradient
        set_parameter(candidate)
        value = evaluate_bound()
        if np.isnan(value):
            warnings.warn('Got NaN bound value: rolling back!')
            return prev_theta, prev_step, prev_gain, ite + 1
        gain = value - baseline
        prev_step = step
        step = update_epsilon(gain, prev_step)
        # Stop once the gain stalls; a still-negative gain means no step helped.
        if gain <= prev_gain + delta_bound_tol:
            if prev_gain < 0.:
                return theta_init, 0., 0., ite + 1
            return prev_theta, prev_step, prev_gain, ite + 1
        prev_gain = gain
        prev_theta = candidate
    return prev_theta, prev_step, prev_gain, ite + 1
def line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):
    """Bisection line search along `natural_gradient`.

    Doubles the step while it keeps improving the bound, then bisects the
    [lo, hi] bracket. Returns (theta, step, bound_gain, iterations) for the
    best point found; NaN evaluations shrink the bracket after a warning.
    """
    lo = 0.
    hi = None
    baseline = evaluate_loss()
    delta_bound_old = 0.
    best_theta = theta_init
    best_ite = 0
    best_gain = 0.
    best_step = 0.
    step = 1.
    for ite in range(max_line_search_ite):
        candidate = theta_init + step * natural_gradient * alpha
        set_parameter(candidate)
        value = evaluate_loss()
        gain = value - baseline
        if np.isnan(value):
            warnings.warn('Got NaN bound value: rolling back!')
        if np.isnan(value) or gain <= best_gain:
            # No improvement (or NaN): shrink from above.
            hi = step
        else:
            # Improvement: record it and raise the lower bracket.
            lo = step
            best_theta = candidate
            best_gain = gain
            best_ite = ite
            best_step = step
        prev_step = step
        step = step * 2 if hi is None else (lo + hi) / 2.
        if abs(prev_step - step) < 1e-12:
            break
    return best_theta, best_step, best_gain, best_ite + 1
def optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):
    """Maximize the surrogate bound offline via iterated line searches.

    Starting from `theta_init`, repeatedly evaluates the bound and its
    gradient, optionally preconditions with a natural gradient, and takes a
    line-search step until the gradient norm or the per-iteration bound
    improvement falls below tolerance (NaNs roll back to the last good theta).

    :param theta_init: initial flat parameter vector
    :param set_parameter: callable writing a flat parameter vector to the policy
    :param line_search: callable(theta, alpha, direction, set_parameter,
        evaluate_loss) returning (theta_new, epsilon, delta_bound, num_ls)
    :param evaluate_loss: callable returning the current bound value
    :param evaluate_gradient: callable returning the current bound gradient
    :param evaluate_natural_gradient: optional callable mapping the gradient
        to a natural-gradient direction
    :param gradient_tol: stop when the gradient norm drops below this
    :param bound_tol: stop when one iteration improves the bound less than this
    :param max_offline_ite: maximum number of offline iterations
    :return: (theta, improvement) — final parameters and total bound gain
    """
    theta = theta_old = theta_init
    improvement = improvement_old = 0.
    set_parameter(theta)
    # (Removed: dead, commented-out L-BFGS experiment that previously sat here.)
    fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'
    titlestr = '%6s %10s %10s %18s %18s %18s %18s'
    print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))
    for i in range(max_offline_ite):
        bound = evaluate_loss()
        gradient = evaluate_gradient()
        # NaNs mean the last step was numerically unstable: roll back.
        if np.any(np.isnan(gradient)):
            warnings.warn('Got NaN gradient! Stopping!')
            set_parameter(theta_old)
            return theta_old, improvement
        if np.isnan(bound):
            warnings.warn('Got NaN bound! Stopping!')
            set_parameter(theta_old)
            return theta_old, improvement_old
        if evaluate_natural_gradient is not None:
            natural_gradient = evaluate_natural_gradient(gradient)
        else:
            natural_gradient = gradient
        # A negative inner product means the preconditioned direction is not
        # an ascent direction; fall back to the vanilla gradient.
        if np.dot(gradient, natural_gradient) < 0:
            warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')
            natural_gradient = gradient
        gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))
        if gradient_norm < gradient_tol:
            print('stopping - gradient norm < gradient_tol')
            return theta, improvement
        # Base step size normalized by the squared gradient norm.
        alpha = 1. / gradient_norm ** 2
        theta_old = theta
        improvement_old = improvement
        theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)
        set_parameter(theta)
        improvement += delta_bound
        print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))
        if delta_bound < bound_tol:
            print('stopping - delta bound < bound_tol')
            return theta, improvement
    return theta, improvement
def learn(env, make_policy, *,
          n_episodes,
          horizon,
          delta,
          gamma,
          max_iters,
          sampler=None,
          use_natural_gradient=False, #can be 'exact', 'approximate'
          fisher_reg=1e-2,
          iw_method='is',
          iw_norm='none',
          bound='J',
          line_search_type='parabola',
          save_weights=0,
          improvement_tol=0.,
          center_return=False,
          render_after=None,
          max_offline_iters=100,
          callback=None,
          clipping=False,
          entropy='none',
          positive_return=False,
          reward_clustering='none',
          capacity=10,
          warm_start=True):
    """Run POIS-style policy optimization with a multiple-importance-sampling memory.

    Builds a TF graph computing per-episode balance-heuristic importance
    weights of the target policy against up to `capacity` stored behavioral
    policies, a surrogate bound over the weighted return, then alternates:
    sample `n_episodes` trajectories, push them into the memory, and optimize
    the bound offline via `optimize_offline` with the chosen line search.

    NOTE(review): `improvement_tol`, `iw_norm` and `positive_return` are
    accepted but never read in this body — presumably legacy knobs; confirm.
    """
    np.set_printoptions(precision=3)
    # NOTE(review): max_samples is computed but unused below.
    max_samples = horizon * n_episodes
    if line_search_type == 'binary':
        line_search = line_search_binary
    elif line_search_type == 'parabola':
        line_search = line_search_parabola
    else:
        raise ValueError()
    # Building the environment
    ob_space = env.observation_space
    ac_space = env.action_space
    # Creating the memory buffer
    memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,
                    ob_space=ob_space, ac_space=ac_space)
    # Building the target policy and saving its parameters
    pi = make_policy('pi', ob_space, ac_space)
    all_var_list = pi.get_trainable_variables()
    var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]
    shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]
    n_parameters = sum(shapes)
    # Building a set of behavioral policies
    behavioral_policies = memory.build_policies(make_policy, pi)
    # Placeholders
    ob_ = ob = U.get_placeholder_cached(name='ob')
    ac_ = pi.pdtype.sample_placeholder([None], name='ac')
    mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')
    rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')
    disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')
    clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))
    gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), name='gradient')
    iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')
    active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')
    losses_with_name = []
    # Total number of trajectories
    N_total = tf.reduce_sum(active_policies) * n_episodes
    # Split operations
    disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])
    rew_split = tf.reshape(rew_ * mask_, [-1, horizon])
    mask_split = tf.reshape(mask_, [-1, horizon])
    # Policy densities
    target_log_pdf = pi.pd.logp(ac_) * mask_
    target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])
    behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies]) # Shape is (capacity, ntraj*horizon)
    behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])
    # Compute renyi divergencies and sum over time, then exponentiate
    emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])
    emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))
    # Compute arithmetic and harmonic mean of emp_d2
    emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)
    emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)
    emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)
    # Return processing: clipping, centering, discounting
    ep_return = clustered_rew_ #tf.reduce_sum(mask_split * disc_rew_split, axis=1)
    if clipping:
        rew_split = tf.clip_by_value(rew_split, -1, 1)
    if center_return:
        ep_return = ep_return - tf.reduce_mean(ep_return)
        rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))
    discounter = [pow(gamma, i) for i in range(0, horizon)] # Decreasing gamma
    discounter_tf = tf.constant(discounter)
    disc_rew_split = rew_split * discounter_tf
    # Reward statistics
    return_mean = tf.reduce_mean(ep_return)
    return_std = U.reduce_std(ep_return)
    return_max = tf.reduce_max(ep_return)
    return_min = tf.reduce_min(ep_return)
    return_abs_max = tf.reduce_max(tf.abs(ep_return))
    return_step_max = tf.reduce_max(tf.abs(rew_split)) # Max step reward
    return_step_mean = tf.abs(tf.reduce_mean(rew_split))
    positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))
    negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))
    return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)
    losses_with_name.extend([(return_mean, 'InitialReturnMean'),
                             (return_max, 'InitialReturnMax'),
                             (return_min, 'InitialReturnMin'),
                             (return_std, 'InitialReturnStd'),
                             (emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),
                             (emp_d2_harmonic, 'EmpiricalD2Harmonic'),
                             (return_step_max, 'ReturnStepMax'),
                             (return_step_maxmin, 'ReturnStepMaxmin')])
    if iw_method == 'is':
        # Sum the log prob over time. Shapes: target(Nep, H), behav (Cap, Nep, H)
        target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)
        behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)
        # To avoid numerical instability, compute the inversed ratio
        log_ratio = target_log_pdf_split - behavioral_log_pdfs_split
        inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)
        iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)
        # Compute also the balance-heuristic weights
        iw_split = tf.reshape(iw, (memory.capacity, -1))
        iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)
        losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))
        losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))
        losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))
        losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))
        # Get the probability by exponentiation
        #target_pdf_episode = tf.exp(target_log_pdf_episode)
        #behavioral_pdf_episode = tf.exp(behavioral_log_pdf_episode)
        # Get the denominator by averaging over behavioral policies
        #behavioral_pdf_mixture = tf.reduce_mean(behavioral_pdf_episode, axis=0) + 1e-24
        #iw = target_pdf_episode / behavioral_pdf_mixture
        iwn = iw / n_episodes
        # Compute the J
        w_return_mean = tf.reduce_sum(ep_return * iwn)
        # Empirical D2 of the mixture and relative ESS
        ess_renyi_arithmetic = N_total / emp_d2_arithmetic
        ess_renyi_harmonic = N_total / emp_d2_harmonic
        # Log quantities
        losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),
                                 (tf.reduce_min(iw), 'MinIW'),
                                 (tf.reduce_mean(iw), 'MeanIW'),
                                 (U.reduce_std(iw), 'StdIW'),
                                 (tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),
                                 (tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),
                                 (ess_renyi_arithmetic, 'ESSRenyiArithmetic'),
                                 (ess_renyi_harmonic, 'ESSRenyiHarmonic')])
    else:
        raise NotImplementedError()
    if bound == 'J':
        bound_ = w_return_mean
    elif bound == 'max-d2-harmonic':
        bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max
    elif bound == 'max-d2-arithmetic':
        bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max
    else:
        raise NotImplementedError()
    # Policy entropy for exploration
    ent = pi.pd.entropy()
    meanent = tf.reduce_mean(ent)
    losses_with_name.append((meanent, 'MeanEntropy'))
    # Add policy entropy bonus
    if entropy != 'none':
        scheme, v1, v2 = entropy.split(':')
        if scheme == 'step':
            entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))
            losses_with_name.append((entcoeff, 'EntropyCoefficient'))
            entbonus = entcoeff * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'lin':
            ip = tf.cast(iter_number_ / max_iters, tf.float32)
            entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))
            losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))
            entbonus = entcoeff_decay * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'exp':
            ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)
            losses_with_name.append((ent_f, 'EntropyCoefficient'))
            bound_ = bound_ + ent_f * meanent
        else:
            raise Exception('Unrecognized entropy scheme.')
    losses_with_name.append((w_return_mean, 'ReturnMeanIW'))
    losses_with_name.append((bound_, 'Bound'))
    losses, loss_names = map(list, zip(*losses_with_name))
    '''
    if use_natural_gradient:
        p = tf.placeholder(dtype=tf.float32, shape=[None])
        target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)
        grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)
        dot_product = tf.reduce_sum(grad_logprob * p)
        hess_logprob = U.flatgrad(dot_product, var_list)
        compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])
    '''
    # NOTE(review): the block above is disabled, so compute_linear_operator is
    # undefined — calling learn(use_natural_gradient=True) would NameError below.
    assert_ops = tf.group(*tf.get_collection('asserts'))
    print_ops = tf.group(*tf.get_collection('prints'))
    compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])
    compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)
    #compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])
    set_parameter = U.SetFromFlat(var_list)
    get_parameter = U.GetFlat(var_list)
    policy_reinit = tf.variables_initializer(var_list)
    if sampler is None:
        seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)
        sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
    U.initialize()
    # Starting optimizing
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=n_episodes)
    rewbuffer = deque(maxlen=n_episodes)
    while True:
        iters_so_far += 1
        # NOTE(review): hard-coded switch to undiscounted sampling at iter 50 —
        # presumably an experiment artifact; confirm before reuse.
        if iters_so_far == 50:
            print('=== CHANGED GAMMA TO 1.0')
            seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)
            sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
        if render_after is not None and iters_so_far % render_after == 0:
            if hasattr(env, 'render'):
                # NOTE(review): `render` is not defined in this module — this
                # branch would NameError if ever taken; confirm.
                render(env, pi, horizon)
        if callback:
            callback(locals(), globals())
        if iters_so_far >= max_iters:
            print('Finished...')
            break
        logger.log('********** Iteration %i ************' % iters_so_far)
        theta = get_parameter()
        with timed('sampling'):
            seg = sampler.collect(theta)
        lens, rets = seg['ep_lens'], seg['ep_rets']
        lenbuffer.extend(lens)
        rewbuffer.extend(rets)
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        # Adding batch of trajectories to memory
        memory.add_trajectory_batch(seg)
        # Get multiple batches from memory
        seg_with_memory = memory.get_trajectories()
        # Get clustered reward
        reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))
        ep_reward = np.sum(reward_matrix, axis=1)
        ep_reward = cluster_rewards(ep_reward, reward_clustering)
        args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],
                                                                                          seg_with_memory['ac'],
                                                                                          seg_with_memory['rew'],
                                                                                          seg_with_memory['disc_rew'],
                                                                                          ep_reward,
                                                                                          seg_with_memory['mask'],
                                                                                          iters_so_far,
                                                                                          memory.get_active_policies_mask())
        def evaluate_loss():
            loss = compute_bound(*args)
            return loss[0]
        def evaluate_gradient():
            gradient = compute_grad(*args)
            return gradient[0]
        if use_natural_gradient:
            def evaluate_fisher_vector_prod(x):
                return compute_linear_operator(x, *args)[0] + fisher_reg * x
            def evaluate_natural_gradient(g):
                return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)
        else:
            evaluate_natural_gradient = None
        with timed('summaries before'):
            logger.record_tabular("Iteration", iters_so_far)
            logger.record_tabular("InitialBound", evaluate_loss())
            logger.record_tabular("EpLenMean", np.mean(lenbuffer))
            logger.record_tabular("EpRewMean", np.mean(rewbuffer))
            logger.record_tabular("EpThisIter", len(lens))
            logger.record_tabular("EpisodesSoFar", episodes_so_far)
            logger.record_tabular("TimestepsSoFar", timesteps_so_far)
            logger.record_tabular("TimeElapsed", time.time() - tstart)
        if save_weights > 0 and iters_so_far % save_weights == 0:
            logger.record_tabular('Weights', str(get_parameter()))
            import pickle
            # NOTE(review): file handle is never closed — consider a `with` block.
            file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')
            pickle.dump(theta, file)
        if not warm_start or memory.get_current_load() == capacity:
            # Optimize
            with timed("offline optimization"):
                theta, improvement = optimize_offline(theta,
                                                      set_parameter,
                                                      line_search,
                                                      evaluate_loss,
                                                      evaluate_gradient,
                                                      evaluate_natural_gradient,
                                                      max_offline_ite=max_offline_iters)
            set_parameter(theta)
            print(theta)
            with timed('summaries after'):
                meanlosses = np.array(compute_losses(*args))
                for (lossname, lossval) in zip(loss_names, meanlosses):
                    logger.record_tabular(lossname, lossval)
        else:
            # Reinitialize the policy
            tf.get_default_session().run(policy_reinit)
        logger.dump_tabular()
    env.close()
| 41.597378 | 183 | 0.620267 | import numpy as np
import warnings
import baselines.common.tf_util as U
import tensorflow as tf
import time
from baselines.common import zipsame, colorize
from contextlib import contextmanager
from collections import deque
from baselines import logger
from baselines.common.cg import cg
from baselines.pomis2.memory import Memory
from baselines.common.centralized_sampler import traj_segment_generator
from baselines.pois.utils import cluster_rewards
@contextmanager
def timed(msg):
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize('done in %.3f seconds'%(time.time() - tstart), color='magenta'))
def update_epsilon(delta_bound, epsilon_old, max_increase=2.):
if delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old:
return epsilon_old * max_increase
else:
return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))
def line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):
epsilon = 1.
epsilon_old = 0.
delta_bound_old = -np.inf
bound_init = evaluate_bound()
theta_old = theta_init
for i in range(max_line_search_ite):
theta = theta_init + epsilon * alpha * natural_gradient
set_parameter(theta)
bound = evaluate_bound()
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
return theta_old, epsilon_old, delta_bound_old, i + 1
delta_bound = bound - bound_init
epsilon_old = epsilon
epsilon = update_epsilon(delta_bound, epsilon_old)
if delta_bound <= delta_bound_old + delta_bound_tol:
if delta_bound_old < 0.:
return theta_init, 0., 0., i+1
else:
return theta_old, epsilon_old, delta_bound_old, i+1
delta_bound_old = delta_bound
theta_old = theta
return theta_old, epsilon_old, delta_bound_old, i+1
def line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):
low = 0.
high = None
bound_init = evaluate_loss()
delta_bound_old = 0.
theta_opt = theta_init
i_opt = 0
delta_bound_opt = 0.
epsilon_opt = 0.
epsilon = 1.
for i in range(max_line_search_ite):
theta = theta_init + epsilon * natural_gradient * alpha
set_parameter(theta)
bound = evaluate_loss()
delta_bound = bound - bound_init
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
if np.isnan(bound) or delta_bound <= delta_bound_opt:
high = epsilon
else:
low = epsilon
theta_opt = theta
delta_bound_opt = delta_bound
i_opt = i
epsilon_opt = epsilon
epsilon_old = epsilon
if high is None:
epsilon *= 2
else:
epsilon = (low + high) / 2.
if abs(epsilon_old - epsilon) < 1e-12:
break
return theta_opt, epsilon_opt, delta_bound_opt, i_opt+1
def optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):
    """Offline (multi-step) maximization of the surrogate bound.

    Repeatedly evaluates the bound and its gradient at the current
    parameters and takes one line-search step along the (natural) gradient,
    stopping when the gradient norm or the per-iteration improvement drops
    below its tolerance, when NaNs appear, or after ``max_offline_ite``
    iterations.

    Args:
        theta_init: flat parameter vector to start from.
        set_parameter: callable writing a flat vector into the policy.
        line_search: one of the ``line_search_*`` functions above.
        evaluate_loss: zero-arg callable returning the bound at the
            parameters last passed to ``set_parameter``.
        evaluate_gradient: zero-arg callable returning the flat gradient.
        evaluate_natural_gradient: optional callable mapping the vanilla
            gradient to the natural gradient; ``None`` disables it.
        gradient_tol: stop when the gradient norm falls below this.
        bound_tol: stop when one iteration improves the bound by less.
        max_offline_ite: hard cap on offline iterations.

    Returns:
        (theta, improvement): final parameters and cumulative improvement.
    """
    theta = theta_old = theta_init
    improvement = improvement_old = 0.
    set_parameter(theta)

    # Format strings for the per-iteration progress table printed below.
    fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'
    titlestr = '%6s %10s %10s %18s %18s %18s %18s'
    print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))

    for i in range(max_offline_ite):
        bound = evaluate_loss()
        gradient = evaluate_gradient()

        if np.any(np.isnan(gradient)):
            warnings.warn('Got NaN gradient! Stopping!')
            set_parameter(theta_old)
            # NOTE(review): this branch returns the *current* ``improvement``
            # while the NaN-bound branch below returns ``improvement_old``;
            # looks inconsistent -- confirm which rollback value is intended.
            return theta_old, improvement

        if np.isnan(bound):
            warnings.warn('Got NaN bound! Stopping!')
            set_parameter(theta_old)
            return theta_old, improvement_old

        if evaluate_natural_gradient is not None:
            natural_gradient = evaluate_natural_gradient(gradient)
        else:
            natural_gradient = gradient

        # Fall back to the vanilla gradient when the natural gradient is
        # not an ascent direction.
        if np.dot(gradient, natural_gradient) < 0:
            warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')
            natural_gradient = gradient

        gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))
        if gradient_norm < gradient_tol:
            print('stopping - gradient norm < gradient_tol')
            return theta, improvement

        # Base step size; the line search rescales it through epsilon.
        alpha = 1. / gradient_norm ** 2

        theta_old = theta
        improvement_old = improvement
        theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)
        set_parameter(theta)

        improvement += delta_bound
        print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))

        if delta_bound < bound_tol:
            print('stopping - delta bound < bound_tol')
            return theta, improvement

    return theta, improvement
def learn(env, make_policy, *,
          n_episodes,
          horizon,
          delta,
          gamma,
          max_iters,
          sampler=None,
          use_natural_gradient=False,
          fisher_reg=1e-2,
          iw_method='is',
          iw_norm='none',
          bound='J',
          line_search_type='parabola',
          save_weights=0,
          improvement_tol=0.,
          center_return=False,
          render_after=None,
          max_offline_iters=100,
          callback=None,
          clipping=False,
          entropy='none',
          positive_return=False,
          reward_clustering='none',
          capacity=10,
          warm_start=True):
    """Policy optimization with multiple importance sampling (POIS-style).

    Builds a TF1 graph that scores trajectories stored in a replay
    ``Memory`` under the current target policy and up to ``capacity``
    behavioral policy snapshots, forms an importance-sampled return
    estimate ``bound_`` (optionally penalized by a Renyi-2 ESS term and/or
    an entropy bonus), then loops: sample ``n_episodes`` trajectories,
    store them, and maximize the bound offline with ``optimize_offline``.

    Only ``iw_method='is'`` is implemented. ``iw_norm``,
    ``positive_return`` and ``improvement_tol`` are accepted but unused
    below -- presumably kept for interface compatibility; confirm before
    removing.
    """
    np.set_printoptions(precision=3)
    max_samples = horizon * n_episodes  # NOTE(review): computed but never used below

    # Select the line-search routine used by the offline optimizer.
    if line_search_type == 'binary':
        line_search = line_search_binary
    elif line_search_type == 'parabola':
        line_search = line_search_parabola
    else:
        raise ValueError()

    ob_space = env.observation_space
    ac_space = env.action_space

    # Replay memory holding the last ``capacity`` trajectory batches,
    # each paired with a snapshot of the policy that generated it.
    memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,
                    ob_space=ob_space, ac_space=ac_space)

    pi = make_policy('pi', ob_space, ac_space)

    # Only variables in the 'pol*' sub-scope are optimized.
    all_var_list = pi.get_trainable_variables()
    var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]
    shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]
    n_parameters = sum(shapes)

    # Snapshot policies acting as the behavioral distributions in memory.
    behavioral_policies = memory.build_policies(make_policy, pi)

    # --- Graph inputs -------------------------------------------------------
    ob_ = ob = U.get_placeholder_cached(name='ob')
    ac_ = pi.pdtype.sample_placeholder([None], name='ac')
    mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')
    rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')
    disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')
    clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))
    gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), name='gradient')  # NOTE(review): unused
    iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')
    active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')
    losses_with_name = []

    # Number of episodes contributed by the currently active policies.
    N_total = tf.reduce_sum(active_policies) * n_episodes

    # Reshape flat per-step tensors into (episode, step) matrices.
    disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])
    rew_split = tf.reshape(rew_ * mask_, [-1, horizon])
    mask_split = tf.reshape(mask_, [-1, horizon])

    # Per-step log-likelihoods under target and behavioral policies.
    target_log_pdf = pi.pd.logp(ac_) * mask_
    target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])
    behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies])
    behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])

    # Empirical exponentiated Renyi-2 divergence between target and each
    # behavioral policy, accumulated over the horizon per episode.
    emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])
    emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))
    emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)
    emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)
    emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)

    ep_return = clustered_rew_
    if clipping:
        rew_split = tf.clip_by_value(rew_split, -1, 1)

    if center_return:
        ep_return = ep_return - tf.reduce_mean(ep_return)
        rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))

    # Re-discount per-step rewards (overwrites disc_rew_split built above).
    discounter = [pow(gamma, i) for i in range(0, horizon)]
    discounter_tf = tf.constant(discounter)
    disc_rew_split = rew_split * discounter_tf

    # Return statistics (on the clustered episode returns).
    return_mean = tf.reduce_mean(ep_return)
    return_std = U.reduce_std(ep_return)
    return_max = tf.reduce_max(ep_return)
    return_min = tf.reduce_min(ep_return)
    return_abs_max = tf.reduce_max(tf.abs(ep_return))
    return_step_max = tf.reduce_max(tf.abs(rew_split))
    return_step_mean = tf.abs(tf.reduce_mean(rew_split))
    positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))
    negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))
    return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)

    losses_with_name.extend([(return_mean, 'InitialReturnMean'),
                             (return_max, 'InitialReturnMax'),
                             (return_min, 'InitialReturnMin'),
                             (return_std, 'InitialReturnStd'),
                             (emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),
                             (emp_d2_harmonic, 'EmpiricalD2Harmonic'),
                             (return_step_max, 'ReturnStepMax'),
                             (return_step_maxmin, 'ReturnStepMaxmin')])

    if iw_method == 'is':
        # Multiple importance sampling: each episode's weight is the inverse
        # of the mixture likelihood over the active behavioral policies.
        target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)
        behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)
        log_ratio = target_log_pdf_split - behavioral_log_pdfs_split
        inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)

        iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)

        # Diagnostics of the weights, grouped by behavioral policy.
        iw_split = tf.reshape(iw, (memory.capacity, -1))
        iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)
        losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))
        losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))
        losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))
        losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))

        # Self-normalized estimate of the expected return under pi.
        iwn = iw / n_episodes
        w_return_mean = tf.reduce_sum(ep_return * iwn)

        # Effective sample sizes derived from the Renyi-2 divergences.
        ess_renyi_arithmetic = N_total / emp_d2_arithmetic
        ess_renyi_harmonic = N_total / emp_d2_harmonic

        losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),
                                 (tf.reduce_min(iw), 'MinIW'),
                                 (tf.reduce_mean(iw), 'MeanIW'),
                                 (U.reduce_std(iw), 'StdIW'),
                                 (tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),
                                 (tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),
                                 (ess_renyi_arithmetic, 'ESSRenyiArithmetic'),
                                 (ess_renyi_harmonic, 'ESSRenyiHarmonic')])
    else:
        raise NotImplementedError()

    # Surrogate objective: plain IS return, or penalized by an ESS term.
    if bound == 'J':
        bound_ = w_return_mean
    elif bound == 'max-d2-harmonic':
        bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max
    elif bound == 'max-d2-arithmetic':
        bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max
    else:
        raise NotImplementedError()

    # Optional entropy bonus; ``entropy`` encodes 'scheme:v1:v2'.
    ent = pi.pd.entropy()
    meanent = tf.reduce_mean(ent)
    losses_with_name.append((meanent, 'MeanEntropy'))

    if entropy != 'none':
        scheme, v1, v2 = entropy.split(':')
        if scheme == 'step':
            # Constant coefficient v1 for the first v2 iterations, then 0.
            entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))
            losses_with_name.append((entcoeff, 'EntropyCoefficient'))
            entbonus = entcoeff * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'lin':
            # Linear decay from v1 to v2 over max_iters iterations.
            ip = tf.cast(iter_number_ / max_iters, tf.float32)
            entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))
            losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))
            entbonus = entcoeff_decay * meanent
            bound_ = bound_ + entbonus
        elif scheme == 'exp':
            # Coefficient shrinks as the mean IW drifts away from 1.
            ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)
            losses_with_name.append((ent_f, 'EntropyCoefficient'))
            bound_ = bound_ + ent_f * meanent
        else:
            raise Exception('Unrecognized entropy scheme.')

    losses_with_name.append((w_return_mean, 'ReturnMeanIW'))
    losses_with_name.append((bound_, 'Bound'))
    losses, loss_names = map(list, zip(*losses_with_name))

    # Graph-collection side channels for debug asserts/prints.
    assert_ops = tf.group(*tf.get_collection('asserts'))
    print_ops = tf.group(*tf.get_collection('prints'))

    # Compiled evaluation functions (feed order matches ``args`` below).
    # NOTE(review): compute_lossandgrad is built but never called below.
    compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])
    compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])
    compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)

    set_parameter = U.SetFromFlat(var_list)
    get_parameter = U.GetFlat(var_list)
    policy_reinit = tf.variables_initializer(var_list)

    # Default sampler: a sequential on-policy trajectory generator wrapped
    # in a minimal object exposing the ``collect(theta)`` interface.
    if sampler is None:
        seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)
        sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()

    U.initialize()

    # --- Training loop ------------------------------------------------------
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=n_episodes)
    rewbuffer = deque(maxlen=n_episodes)

    while True:
        iters_so_far += 1

        # HACK: hard-coded experiment -- after 50 iterations, resample with
        # undiscounted returns (gamma=1.0). TODO confirm this is intended
        # for general use and not a leftover from a specific run.
        if iters_so_far == 50:
            print('=== CHANGED GAMMA TO 1.0')
            seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)
            sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()

        if render_after is not None and iters_so_far % render_after == 0:
            if hasattr(env, 'render'):
                render(env, pi, horizon)

        if callback:
            callback(locals(), globals())

        if iters_so_far >= max_iters:
            print('Finished...')
            break

        logger.log('********** Iteration %i ************' % iters_so_far)

        theta = get_parameter()

        with timed('sampling'):
            seg = sampler.collect(theta)

        lens, rets = seg['ep_lens'], seg['ep_rets']
        lenbuffer.extend(lens)
        rewbuffer.extend(rets)
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)

        # Add the fresh batch and read back everything in memory.
        memory.add_trajectory_batch(seg)
        seg_with_memory = memory.get_trajectories()

        # Per-episode discounted returns, optionally clustered.
        reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))
        ep_reward = np.sum(reward_matrix, axis=1)
        ep_reward = cluster_rewards(ep_reward, reward_clustering)

        # NOTE: this rebinds ``ob`` and ``active_policies`` (shadowing the
        # placeholder tensors) with the concrete feed values for this
        # iteration; ``args`` is the positional feed tuple.
        args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],
                                                                                          seg_with_memory['ac'],
                                                                                          seg_with_memory['rew'],
                                                                                          seg_with_memory['disc_rew'],
                                                                                          ep_reward,
                                                                                          seg_with_memory['mask'],
                                                                                          iters_so_far,
                                                                                          memory.get_active_policies_mask())

        def evaluate_loss():
            # Bound at the parameters last written via set_parameter.
            loss = compute_bound(*args)
            return loss[0]

        def evaluate_gradient():
            # Flat gradient of the bound w.r.t. the policy parameters.
            gradient = compute_grad(*args)
            return gradient[0]

        if use_natural_gradient:
            def evaluate_fisher_vector_prod(x):
                # Regularized Fisher-vector product for conjugate gradient.
                return compute_linear_operator(x, *args)[0] + fisher_reg * x

            def evaluate_natural_gradient(g):
                return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)
        else:
            evaluate_natural_gradient = None

        with timed('summaries before'):
            logger.record_tabular("Iteration", iters_so_far)
            logger.record_tabular("InitialBound", evaluate_loss())
            logger.record_tabular("EpLenMean", np.mean(lenbuffer))
            logger.record_tabular("EpRewMean", np.mean(rewbuffer))
            logger.record_tabular("EpThisIter", len(lens))
            logger.record_tabular("EpisodesSoFar", episodes_so_far)
            logger.record_tabular("TimestepsSoFar", timesteps_so_far)
            logger.record_tabular("TimeElapsed", time.time() - tstart)

        if save_weights > 0 and iters_so_far % save_weights == 0:
            logger.record_tabular('Weights', str(get_parameter()))
            import pickle
            # NOTE(review): file handle (shadows builtin ``file`` on py2) is
            # never closed; consider a ``with`` block.
            file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')
            pickle.dump(theta, file)

        # With warm_start, skip optimization (and reinit the policy) until
        # the memory is full of trajectory batches.
        if not warm_start or memory.get_current_load() == capacity:
            with timed("offline optimization"):
                theta, improvement = optimize_offline(theta,
                                                      set_parameter,
                                                      line_search,
                                                      evaluate_loss,
                                                      evaluate_gradient,
                                                      evaluate_natural_gradient,
                                                      max_offline_ite=max_offline_iters)

            set_parameter(theta)
            print(theta)

            with timed('summaries after'):
                meanlosses = np.array(compute_losses(*args))
                for (lossname, lossval) in zip(loss_names, meanlosses):
                    logger.record_tabular(lossname, lossval)
        else:
            tf.get_default_session().run(policy_reinit)

        logger.dump_tabular()

    env.close()
| true | true |
f71d8034256591679beb03fa527db6b8505101de | 2,465 | py | Python | pymobiledevice3/services/web_protocol/cdp_server.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | 1 | 2022-01-20T16:53:15.000Z | 2022-01-20T16:53:15.000Z | pymobiledevice3/services/web_protocol/cdp_server.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | null | null | null | pymobiledevice3/services/web_protocol/cdp_server.py | iOSForensics/pymobiledevice3 | 6b148f4e58cc51cb44c18935913a3e6cec5b60d5 | [
"MIT"
] | null | null | null | import asyncio
import uuid
from fastapi import FastAPI, WebSocket
from fastapi.logger import logger
from pymobiledevice3.services.web_protocol.cdp_target import CdpTarget
from pymobiledevice3.services.web_protocol.session_protocol import SessionProtocol
from pymobiledevice3.services.webinspector import WirTypes
app = FastAPI()
@app.on_event("startup")
async def startup_event():
    """Connect to the device's WebInspector service once, at server startup."""
    app.state.inspector.connect()
@app.get('/json')
async def available_targets():
    """DevTools discovery endpoint: list the inspectable web pages.

    Refreshes the inspector's page list, then returns one Chrome-style
    target descriptor per page whose type is WEB or WEB_PAGE.
    """
    app.state.inspector.get_open_pages()
    target_list = []
    for app_id in app.state.inspector.application_pages:
        for page_id, page in app.state.inspector.application_pages[app_id].items():
            # Skip non-web targets (e.g. service workers, automation pages).
            if page.type_ not in (WirTypes.WEB, WirTypes.WEB_PAGE):
                continue
            debugger_url = f'ws://localhost:9222/devtools/page/{page_id}'
            target_list.append({
                'description': '',
                'id': page_id,
                'title': page.web_title,
                'type': 'page',
                'url': page.web_url,
                'webSocketDebuggerUrl': debugger_url,
                'devtoolsFrontendUrl': f'/devtools/inspector.html?{debugger_url}',
            })
    return target_list
@app.get('/json/version')
def version():
    """DevTools discovery endpoint: browser/protocol metadata.

    The literal values appear chosen so that CDP clients accept the
    endpoint as Chrome-compatible -- confirm before changing any of them.
    The debugger URL points at the active inspector connection.
    """
    return {
        'Browser': 'Safari',
        'Protocol-Version': '1.1',
        'User-Agent': 'pymobiledevice3',
        'V8-Version': '7.2.233',
        'WebKit-Version': '537.36 (@cfede9db1d154de0468cb0538479f34c0755a0f4)',
        'webSocketDebuggerUrl': f'ws://localhost:9222/devtools/browser/{app.state.inspector.connection_id}'
    }
async def from_cdp(target: CdpTarget, websocket):
    """Pump JSON messages from the CDP client websocket into the target."""
    async for msg in websocket.iter_json():
        logger.debug(f'CDP INPUT: {msg}')
        await target.send(msg)
async def to_cdp(target: CdpTarget, websocket):
    """Forward messages produced by the inspector target to the CDP client."""
    while True:
        reply = await target.receive()
        logger.debug(f'CDP OUTPUT: {reply}')
        await websocket.send_json(reply)
@app.websocket('/devtools/page/{page_id}')
async def page_debugger(websocket: WebSocket, page_id: str):
    """Bridge a CDP websocket client to the WebInspector page ``page_id``.

    Creates a fresh inspector session for the page, accepts the websocket,
    then pumps messages in both directions until either side stops.
    """
    application, page = app.state.inspector.find_page_id(page_id)
    # Uppercased UUID -- presumably the WebInspector protocol expects
    # uppercase session ids; TODO confirm.
    session_id = str(uuid.uuid4()).upper()
    protocol = SessionProtocol(app.state.inspector, session_id, application, page, method_prefix='')
    target = await CdpTarget.create(protocol)
    await websocket.accept()
    await asyncio.gather(
        from_cdp(target, websocket),
        to_cdp(target, websocket),
    )
| 32.866667 | 111 | 0.666531 | import asyncio
import uuid
from fastapi import FastAPI, WebSocket
from fastapi.logger import logger
from pymobiledevice3.services.web_protocol.cdp_target import CdpTarget
from pymobiledevice3.services.web_protocol.session_protocol import SessionProtocol
from pymobiledevice3.services.webinspector import WirTypes
app = FastAPI()
@app.on_event("startup")
async def startup_event():
    """Connect to the device's WebInspector service once, at server startup."""
    app.state.inspector.connect()
@app.get('/json')
async def available_targets():
    """DevTools discovery endpoint: list inspectable WEB/WEB_PAGE targets."""
    app.state.inspector.get_open_pages()
    targets = []
    for app_id in app.state.inspector.application_pages:
        for page_id, page in app.state.inspector.application_pages[app_id].items():
            # Skip non-web targets.
            if page.type_ not in (WirTypes.WEB, WirTypes.WEB_PAGE):
                continue
            targets.append({
                'description': '',
                'id': page_id,
                'title': page.web_title,
                'type': 'page',
                'url': page.web_url,
                'webSocketDebuggerUrl': f'ws://localhost:9222/devtools/page/{page_id}',
                'devtoolsFrontendUrl': f'/devtools/inspector.html?ws://localhost:9222/devtools/page/{page_id}',
            })
    return targets
@app.get('/json/version')
def version():
    """DevTools discovery endpoint: Chrome-style browser/protocol metadata.

    The literal values are part of the wire contract with CDP clients --
    confirm before changing any of them.
    """
    return {
        'Browser': 'Safari',
        'Protocol-Version': '1.1',
        'User-Agent': 'pymobiledevice3',
        'V8-Version': '7.2.233',
        'WebKit-Version': '537.36 (@cfede9db1d154de0468cb0538479f34c0755a0f4)',
        'webSocketDebuggerUrl': f'ws://localhost:9222/devtools/browser/{app.state.inspector.connection_id}'
    }
async def from_cdp(target: CdpTarget, websocket):
    """Pump JSON messages from the CDP client websocket into the target."""
    async for message in websocket.iter_json():
        logger.debug(f'CDP INPUT: {message}')
        await target.send(message)
async def to_cdp(target: CdpTarget, websocket):
    """Forward messages produced by the inspector target to the CDP client."""
    while True:
        message = await target.receive()
        logger.debug(f'CDP OUTPUT: {message}')
        await websocket.send_json(message)
@app.websocket('/devtools/page/{page_id}')
async def page_debugger(websocket: WebSocket, page_id: str):
    """Bridge a CDP websocket client to the WebInspector page ``page_id``."""
    application, page = app.state.inspector.find_page_id(page_id)
    # Uppercased UUID -- presumably required by the WebInspector protocol;
    # TODO confirm.
    session_id = str(uuid.uuid4()).upper()
    protocol = SessionProtocol(app.state.inspector, session_id, application, page, method_prefix='')
    target = await CdpTarget.create(protocol)
    await websocket.accept()
    await asyncio.gather(
        from_cdp(target, websocket),
        to_cdp(target, websocket),
    )
| true | true |
f71d8093d149be92f99d6a9576f78859def80b28 | 10,421 | py | Python | nmt_rouge.py | shengqiangzhang/transformer-pointer-generator-for-english-dataset- | 1ace22017647f06521db36de1ed99e02bce91355 | [
"MIT"
] | 7 | 2021-01-04T07:44:47.000Z | 2022-02-27T08:48:48.000Z | nmt_rouge.py | shengqiangzhang/transformer-pointer-generator-for-english-dataset- | 1ace22017647f06521db36de1ed99e02bce91355 | [
"MIT"
] | 1 | 2022-03-19T06:22:06.000Z | 2022-03-19T06:22:06.000Z | nmt_rouge.py | shengqiangzhang/transformer-pointer-generator-for-english-dataset- | 1ace22017647f06521db36de1ed99e02bce91355 | [
"MIT"
] | 2 | 2021-12-06T13:48:24.000Z | 2021-12-19T18:08:36.000Z | """ROUGE metric implementation.
Copy from tf_seq2seq/seq2seq/metrics/rouge.py.
This is a modified and slightly extended verison of
https://github.com/miso-belica/sumy/blob/valid/sumy/evaluation/rouge.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import numpy as np
#pylint: disable=C0103
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _split_into_words(sentences):
"""Splits multiple sentences into words and flattens the result"""
return list(itertools.chain(*[_.split(" ") for _ in sentences]))
def _get_word_ngrams(n, sentences):
    """Word-level n-grams over all of *sentences* taken together.

    Requires a non-empty sentence list and ``n > 0``.
    """
    assert len(sentences) > 0
    assert n > 0
    return _get_ngrams(n, _split_into_words(sentences))
def _len_lcs(x, y):
    """Length of the longest common subsequence of *x* and *y*.

    Runs the O(len(x) * len(y)) DP of :func:`_lcs` and reads the final cell.
    """
    return _lcs(x, y)[len(x), len(y)]
def _lcs(x, y):
"""
Computes the length of the longest common subsequence (lcs) between two
strings. The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
def _recon_lcs(x, y):
    """Reconstruct one longest common subsequence of *x* and *y*.

    The previous implementation recursed once per table step (depth up to
    ``len(x) + len(y)``), which raises RecursionError on long token
    sequences. This version walks the DP table iteratively along the same
    path, so it returns the identical tuple without a depth limit.

    Args:
        x: sequence of words
        y: sequence of words

    Returns:
        tuple: the elements of one LCS of x and y, in order.
    """
    table = _lcs(x, y)
    i, j = len(x), len(y)
    subseq = []
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            subseq.append(x[i - 1])
            i -= 1
            j -= 1
        elif table[i - 1, j] > table[i, j - 1]:
            # Same tie-breaking as the recursive version: drop from x only
            # when that strictly improves the table value, else drop from y.
            i -= 1
        else:
            j -= 1
    subseq.reverse()
    return tuple(subseq)
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    """ROUGE-N of two text collections of sentences.

    Args:
        evaluated_sentences: sentences picked by the summarizer.
        reference_sentences: sentences from the reference set.
        n: n-gram order (defaults to 2).

    Returns:
        (f1, precision, recall) computed on overlapping word n-grams.

    Raises:
        ValueError: if either collection is empty.
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    hyp_ngrams = _get_word_ngrams(n, evaluated_sentences)
    ref_ngrams = _get_word_ngrams(n, reference_sentences)
    overlap_count = len(hyp_ngrams.intersection(ref_ngrams))

    # Guard against empty n-gram sets to avoid division by zero.
    precision = overlap_count / len(hyp_ngrams) if hyp_ngrams else 0.0
    recall = overlap_count / len(ref_ngrams) if ref_ngrams else 0.0

    # The 1e-8 keeps the harmonic mean defined when both terms are zero.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
    return f1_score, precision, recall
def _f_p_r_lcs(llcs, m, n):
"""
Computes the LCS-based F-measure score
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs, p_lcs, r_lcs
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
    """Sentence-level ROUGE-L of two text collections of sentences.

    Flattens both collections into word lists, computes the length of
    their longest common subsequence, and converts it to an LCS-based
    (f, precision, recall) triple via :func:`_f_p_r_lcs`.

    Raises:
        ValueError: if either collection is empty.
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    ref_words = _split_into_words(reference_sentences)
    hyp_words = _split_into_words(evaluated_sentences)
    lcs_length = _len_lcs(hyp_words, ref_words)
    return _f_p_r_lcs(lcs_length, len(ref_words), len(hyp_words))
def _union_lcs(evaluated_sentences, reference_sentence):
    """Union-LCS score of one reference sentence against all candidates.

    Computes LCS(reference_sentence, c) for every candidate sentence c,
    takes the union of the matched words, and divides the union size by
    the sum of the individual LCS lengths.

    NOTE(review): the ROUGE paper's LCS_u(r_i, C) divides by m (the
    reference sentence length); this implementation divides by the
    combined LCS length instead -- preserved here, confirm before
    "fixing" since it changes summary-level scores.

    Fixed: when there is no common subsequence at all the old code raised
    ZeroDivisionError; this version returns 0.0 instead.

    Args:
        evaluated_sentences: sentences picked by the summarizer.
        reference_sentence: one sentence from the reference summaries.

    Returns:
        float: the union-LCS score (0.0 when there is no overlap).

    Raises:
        ValueError: if ``evaluated_sentences`` is empty.
    """
    if len(evaluated_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    lcs_union = set()
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for eval_s in evaluated_sentences:
        evaluated_words = _split_into_words([eval_s])
        lcs = set(_recon_lcs(reference_words, evaluated_words))
        combined_lcs_length += len(lcs)
        lcs_union = lcs_union.union(lcs)

    if combined_lcs_length == 0:
        # No word of the reference appears in any candidate sentence.
        return 0.0
    return len(lcs_union) / combined_lcs_length
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
    """Summary-level ROUGE-L of two text collections of sentences.

    Sums the union-LCS score of each reference sentence against the whole
    candidate summary, then normalizes by the total word counts through
    :func:`_f_p_r_lcs`.

    Raises:
        ValueError: if either collection is empty.
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    # Total word counts of the reference and candidate summaries.
    m = len(_split_into_words(reference_sentences))
    n = len(_split_into_words(evaluated_sentences))

    union_lcs_total = 0
    for reference_sentence in reference_sentences:
        union_lcs_total += _union_lcs(evaluated_sentences, reference_sentence)
    return _f_p_r_lcs(union_lcs_total, m, n)
def rouge(hypotheses, references):
    """Average ROUGE scores over paired hypothesis/reference strings.

    Each hypothesis is scored against its reference individually and the
    (f, p, r) triples are averaged per metric.

    Returns:
        dict mapping 'rouge_{1,2,l}/{f,p,r}_score' to the averaged values.
    """
    pairs = list(zip(hypotheses, references))

    # ROUGE-1 / ROUGE-2 F1, precision, recall averages.
    rouge_1_f, rouge_1_p, rouge_1_r = map(
        np.mean, zip(*(rouge_n([hyp], [ref], 1) for hyp, ref in pairs)))
    rouge_2_f, rouge_2_p, rouge_2_r = map(
        np.mean, zip(*(rouge_n([hyp], [ref], 2) for hyp, ref in pairs)))

    # Sentence-level ROUGE-L averages.
    rouge_l_f, rouge_l_p, rouge_l_r = map(
        np.mean, zip(*(rouge_l_sentence_level([hyp], [ref]) for hyp, ref in pairs)))

    return {
        "rouge_1/f_score": rouge_1_f,
        "rouge_1/r_score": rouge_1_r,
        "rouge_1/p_score": rouge_1_p,
        "rouge_2/f_score": rouge_2_f,
        "rouge_2/r_score": rouge_2_r,
        "rouge_2/p_score": rouge_2_p,
        "rouge_l/f_score": rouge_l_f,
        "rouge_l/r_score": rouge_l_r,
        "rouge_l/p_score": rouge_l_p,
    }
| 29.521246 | 80 | 0.696286 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import numpy as np
def _get_ngrams(n, text):
    """Return the set of n-grams (as tuples) in the token sequence *text*."""
    ngram_set = set()
    text_length = len(text)
    max_index_ngram_start = text_length - n
    for i in range(max_index_ngram_start + 1):
        ngram_set.add(tuple(text[i:i + n]))
    return ngram_set
def _split_into_words(sentences):
    """Split every sentence on single spaces and flatten into one word list."""
    return list(itertools.chain(*[_.split(" ") for _ in sentences]))
def _get_word_ngrams(n, sentences):
    """Word n-grams over all *sentences* combined; requires input and n > 0."""
    assert len(sentences) > 0
    assert n > 0
    words = _split_into_words(sentences)
    return _get_ngrams(n, words)
def _len_lcs(x, y):
    """Length of the longest common subsequence of sequences *x* and *y*."""
    table = _lcs(x, y)
    n, m = len(x), len(y)
    return table[n, m]
def _lcs(x, y):
    """O(nm) dynamic-programming table for the LCS of *x* and *y*.

    Returns:
        dict mapping (i, j) -> LCS length of ``x[:i]`` and ``y[:j]``.
    """
    n, m = len(x), len(y)
    table = dict()
    for i in range(n + 1):
        for j in range(m + 1):
            if i == 0 or j == 0:
                table[i, j] = 0
            elif x[i - 1] == y[j - 1]:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i - 1, j], table[i, j - 1])
    return table
def _recon_lcs(x, y):
    """Reconstruct one longest common subsequence of *x* and *y*.

    The previous implementation recursed once per DP-table step (depth up
    to ``len(x) + len(y)``), raising RecursionError on long sequences.
    This iterative backtrack follows the same path and returns the
    identical tuple.

    Args:
        x: sequence of words
        y: sequence of words

    Returns:
        tuple: the elements of one LCS of x and y, in order.
    """
    table = _lcs(x, y)
    i, j = len(x), len(y)
    subseq = []
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            subseq.append(x[i - 1])
            i -= 1
            j -= 1
        elif table[i - 1, j] > table[i, j - 1]:
            # Same tie-breaking as the recursive version.
            i -= 1
        else:
            j -= 1
    subseq.reverse()
    return tuple(subseq)
def rouge_n(evaluated_sentences, reference_sentences, n=2):
    """ROUGE-N of two sentence collections.

    Returns:
        (f1, precision, recall) computed on overlapping word n-grams.

    Raises:
        ValueError: if either collection is empty.
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
    reference_ngrams = _get_word_ngrams(n, reference_sentences)
    reference_count = len(reference_ngrams)
    evaluated_count = len(evaluated_ngrams)
    # n-grams appearing in both the hypothesis and the reference.
    overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
    overlapping_count = len(overlapping_ngrams)
    # Guard empty n-gram sets to avoid division by zero.
    if evaluated_count == 0:
        precision = 0.0
    else:
        precision = overlapping_count / evaluated_count
    if reference_count == 0:
        recall = 0.0
    else:
        recall = overlapping_count / reference_count
    # 1e-8 keeps the harmonic mean defined when precision == recall == 0.
    f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
    return f1_score, precision, recall
def _f_p_r_lcs(llcs, m, n):
    """LCS-based F-measure.

    Args:
        llcs: LCS length (or union-LCS sum).
        m: number of words in the reference summary.
        n: number of words in the candidate summary.

    Returns:
        (f, precision, recall); 1e-12 terms guard divisions by zero.
    """
    r_lcs = llcs / m
    p_lcs = llcs / n
    beta = p_lcs / (r_lcs + 1e-12)
    num = (1 + (beta**2)) * r_lcs * p_lcs
    denom = r_lcs + ((beta**2) * p_lcs)
    f_lcs = num / (denom + 1e-12)
    return f_lcs, p_lcs, r_lcs
def rouge_l_sentence_level(evaluated_sentences, reference_sentences):
    """Sentence-level ROUGE-L: (f, p, r) from the LCS of all words.

    Raises:
        ValueError: if either collection is empty.
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    reference_words = _split_into_words(reference_sentences)
    evaluated_words = _split_into_words(evaluated_sentences)
    m = len(reference_words)
    n = len(evaluated_words)
    lcs = _len_lcs(evaluated_words, reference_words)
    return _f_p_r_lcs(lcs, m, n)
def _union_lcs(evaluated_sentences, reference_sentence):
    """Union-LCS score of *reference_sentence* against all candidate sentences.

    Fixed: when there is no common subsequence at all the old code divided
    by zero; this version returns 0.0 instead.

    Raises:
        ValueError: if ``evaluated_sentences`` is empty.
    """
    if len(evaluated_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    lcs_union = set()
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for eval_s in evaluated_sentences:
        evaluated_words = _split_into_words([eval_s])
        lcs = set(_recon_lcs(reference_words, evaluated_words))
        combined_lcs_length += len(lcs)
        lcs_union = lcs_union.union(lcs)
    if combined_lcs_length == 0:
        # No word of the reference appears in any candidate sentence.
        return 0.0
    union_lcs_count = len(lcs_union)
    union_lcs_value = union_lcs_count / combined_lcs_length
    return union_lcs_value
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
    """Summary-level ROUGE-L: union-LCS of each reference sentence vs candidates."""
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")
    # Total word counts of the reference and candidate summaries.
    m = len(_split_into_words(reference_sentences))
    n = len(_split_into_words(evaluated_sentences))
    union_lcs_sum_across_all_references = 0
    for ref_s in reference_sentences:
        union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,
                                                          ref_s)
    return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)
def rouge(hypotheses, references):
    """Average ROUGE-1, ROUGE-2 and sentence-level ROUGE-L over aligned pairs.

    Args:
        hypotheses: generated summaries, aligned index-by-index with
            ``references``.
        references: gold summaries.

    Returns:
        Dict mapping ``"rouge_{1,2,l}/{f,r,p}_score"`` to the mean score
        across all pairs.
    """
    unigram_scores = [
        rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
    ]
    rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*unigram_scores))

    bigram_scores = [
        rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)
    ]
    rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*bigram_scores))

    lcs_scores = [
        rouge_l_sentence_level([hyp], [ref])
        for hyp, ref in zip(hypotheses, references)
    ]
    rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*lcs_scores))

    return {
        "rouge_1/f_score": rouge_1_f,
        "rouge_1/r_score": rouge_1_r,
        "rouge_1/p_score": rouge_1_p,
        "rouge_2/f_score": rouge_2_f,
        "rouge_2/r_score": rouge_2_r,
        "rouge_2/p_score": rouge_2_p,
        "rouge_l/f_score": rouge_l_f,
        "rouge_l/r_score": rouge_l_r,
        "rouge_l/p_score": rouge_l_p,
    }
| true | true |
f71d816cb43be30dc40087bc3974ad2ed7599d34 | 17,478 | py | Python | readthedocs/api/v3/serializers.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | 1 | 2021-07-01T01:31:58.000Z | 2021-07-01T01:31:58.000Z | readthedocs/api/v3/serializers.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | 4 | 2020-06-05T22:35:21.000Z | 2021-12-13T20:10:17.000Z | readthedocs/api/v3/serializers.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.projects.constants import LANGUAGES, PROGRAMMING_LANGUAGES, REPO_CHOICES
from readthedocs.projects.models import Project, EnvironmentVariable
from readthedocs.redirects.models import Redirect, TYPE_CHOICES as REDIRECT_TYPE_CHOICES
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
    """Base class for ``_links`` serializers: builds absolute site URLs."""
    def _absolute_url(self, path):
        # Plain HTTP only under local development (DEBUG); otherwise HTTPS.
        scheme = 'http' if settings.DEBUG else 'https'
        domain = settings.PRODUCTION_DOMAIN
        # urlunparse((scheme, netloc, path, params, query, fragment))
        return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
class BuildCreateSerializer(serializers.ModelSerializer):
"""
Used when triggering (create action) a ``Build`` for a specific ``Version``.
This serializer validates that no field is sent at all in the request.
"""
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
"""
Render ``Build.config`` property without modifying it.
.. note::
Any change on the output of that property will be reflected here,
which may produce incompatible changes in the API.
"""
def to_representation(self, instance):
# For now, we want to return the ``config`` object as it is without
# manipulating it.
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
    """API representation of a documentation build.

    Renames model fields for the public API (``date`` -> ``created``,
    ``length`` -> ``duration``) and derives ``finished``/``success``.
    The raw build config is only rendered when expanded (``?expand=config``).
    """
    project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
    version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
    created = serializers.DateTimeField(source='date')
    finished = serializers.SerializerMethodField()
    success = serializers.SerializerMethodField()
    duration = serializers.IntegerField(source='length')
    # source='*' hands the whole Build instance to the nested serializer.
    state = BuildStateSerializer(source='*')
    _links = BuildLinksSerializer(source='*')
    expandable_fields = dict(
        config=(
            BuildConfigSerializer,
            dict(
                source='config',
            ),
        ),
    )
    class Meta:
        model = Build
        fields = [
            'id',
            'version',
            'project',
            'created',
            'finished',
            'duration',
            'state',
            'success',
            'error',
            'commit',
            '_links',
        ]
    def get_finished(self, obj):
        # Finish time is not stored on the model; derive it as start + length.
        # Implicitly returns ``None`` while the build has no length yet.
        if obj.date and obj.length:
            return obj.date + datetime.timedelta(seconds=obj.length)
    def get_success(self, obj):
        """
        Return ``None`` if the build is not finished.
        This is needed because ``default=True`` in the model field.
        """
        if obj.finished:
            return obj.success
        return None
class PrivacyLevelSerializer(serializers.Serializer):
code = serializers.CharField(source='privacy_level')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.privacy_level.title()
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionURLsSerializer(serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
privacy_level = PrivacyLevelSerializer(source='*')
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
expandable_fields = dict(
last_build=(
BuildSerializer,
dict(
source='last_build',
),
),
)
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'privacy_level',
'type',
'downloads',
'urls',
'_links',
]
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('htmlzip', 'pdf', 'epub'):
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
class VersionUpdateSerializer(serializers.ModelSerializer):
"""
Used when modifying (update action) a ``Version``.
It only allows to make the Version active/non-active and private/public.
"""
class Meta:
model = Version
fields = [
'active',
'privacy_level',
]
class LanguageSerializer(serializers.Serializer):
    """Render a documentation language code as ``{code, name}``."""
    code = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()

    def get_code(self, language):
        # The serialized object *is* the language code string.
        return language

    def get_name(self, language):
        # LANGUAGES is a sequence of (code, human-readable name) pairs;
        # the first matching code wins.
        return next(
            (name for code, name in LANGUAGES if code == language),
            'Unknown',
        )
class ProgrammingLanguageSerializer(serializers.Serializer):
    """Render a programming-language code as ``{code, name}``."""
    code = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()

    def get_code(self, programming_language):
        # The serialized object *is* the programming-language code string.
        return programming_language

    def get_name(self, programming_language):
        # PROGRAMMING_LANGUAGES is a sequence of (code, name) pairs;
        # the first matching code wins.
        return next(
            (
                name
                for code, name in PROGRAMMING_LANGUAGES
                if code == programming_language
            ),
            'Unknown',
        )
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
"""Serializer with all the user-facing URLs under Read the Docs."""
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_superprojects__parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializer(FlexFieldsModelSerializer):
"""Serializer used to Import a Project."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
class ProjectSerializer(FlexFieldsModelSerializer):
    """Full API representation of a Project.

    Nests language/repository/privacy sub-serializers over the same instance
    (``source='*'``), links related resources via ``_links``, and optionally
    expands ``active_versions``.
    """
    homepage = serializers.SerializerMethodField()
    language = LanguageSerializer()
    programming_language = ProgrammingLanguageSerializer()
    repository = RepositorySerializer(source='*')
    privacy_level = PrivacyLevelSerializer(source='*')
    urls = ProjectURLsSerializer(source='*')
    subproject_of = serializers.SerializerMethodField()
    translation_of = serializers.SerializerMethodField()
    default_branch = serializers.CharField(source='get_default_branch')
    tags = serializers.StringRelatedField(many=True)
    users = UserSerializer(many=True)
    _links = ProjectLinksSerializer(source='*')
    # TODO: adapt these fields with the proper names in the db and then remove
    # them from here
    created = serializers.DateTimeField(source='pub_date')
    modified = serializers.DateTimeField(source='modified_date')
    expandable_fields = dict(
        active_versions=(
            VersionSerializer,
            dict(
                # NOTE: this has to be a Model method, can't be a
                # ``SerializerMethodField`` as far as I know
                source='active_versions',
                many=True,
            ),
        ),
    )
    class Meta:
        model = Project
        fields = [
            'id',
            'name',
            'slug',
            'created',
            'modified',
            'language',
            'programming_language',
            'homepage',
            'repository',
            'default_version',
            'default_branch',
            'privacy_level',
            'subproject_of',
            'translation_of',
            'users',
            'urls',
            'tags',
            # NOTE: ``expandable_fields`` must not be included here. Otherwise,
            # they will be tried to be rendered and fail
            # 'users',
            # 'active_versions',
            '_links',
        ]
    def get_homepage(self, obj):
        # Overridden only to return ``None`` when the project_url is ``''``
        return obj.project_url or None
    def get_translation_of(self, obj):
        # Implicitly returns ``None`` for projects that are not translations.
        if obj.main_language_project:
            return self.__class__(obj.main_language_project).data
    def get_subproject_of(self, obj):
        # ``superprojects.first()`` is ``None`` when this project is not a
        # subproject; attribute access on it raises AttributeError. Catch only
        # that (a bare ``except Exception`` hid unrelated serialization bugs).
        try:
            return self.__class__(obj.superprojects.first().parent).data
        except AttributeError:
            return None
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
    """Override RedirectSerializerBase to sanitize the empty fields."""
    from_url = serializers.SerializerMethodField()
    to_url = serializers.SerializerMethodField()
    def get_from_url(self, obj):
        # Overridden only to return ``None`` when ``from_url`` is ``''``
        return obj.from_url or None
    def get_to_url(self, obj):
        # Overridden only to return ``None`` when ``to_url`` is ``''``
        return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'project',
'_links',
]
| 28.419512 | 89 | 0.612198 | import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.projects.constants import LANGUAGES, PROGRAMMING_LANGUAGES, REPO_CHOICES
from readthedocs.projects.models import Project, EnvironmentVariable
from readthedocs.redirects.models import Redirect, TYPE_CHOICES as REDIRECT_TYPE_CHOICES
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
def _absolute_url(self, path):
scheme = 'http' if settings.DEBUG else 'https'
domain = settings.PRODUCTION_DOMAIN
return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
class BuildCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
def to_representation(self, instance):
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='date')
finished = serializers.SerializerMethodField()
success = serializers.SerializerMethodField()
duration = serializers.IntegerField(source='length')
state = BuildStateSerializer(source='*')
_links = BuildLinksSerializer(source='*')
expandable_fields = dict(
config=(
BuildConfigSerializer,
dict(
source='config',
),
),
)
class Meta:
model = Build
fields = [
'id',
'version',
'project',
'created',
'finished',
'duration',
'state',
'success',
'error',
'commit',
'_links',
]
def get_finished(self, obj):
if obj.date and obj.length:
return obj.date + datetime.timedelta(seconds=obj.length)
def get_success(self, obj):
if obj.finished:
return obj.success
return None
class PrivacyLevelSerializer(serializers.Serializer):
code = serializers.CharField(source='privacy_level')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.privacy_level.title()
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionURLsSerializer(serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
privacy_level = PrivacyLevelSerializer(source='*')
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
expandable_fields = dict(
last_build=(
BuildSerializer,
dict(
source='last_build',
),
),
)
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'privacy_level',
'type',
'downloads',
'urls',
'_links',
]
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('htmlzip', 'pdf', 'epub'):
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
class VersionUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Version
fields = [
'active',
'privacy_level',
]
class LanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, language):
return language
def get_name(self, language):
for code, name in LANGUAGES:
if code == language:
return name
return 'Unknown'
class ProgrammingLanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return 'Unknown'
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_superprojects__parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializer(FlexFieldsModelSerializer):
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
class ProjectSerializer(FlexFieldsModelSerializer):
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source='*')
privacy_level = PrivacyLevelSerializer(source='*')
urls = ProjectURLsSerializer(source='*')
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source='get_default_branch')
tags = serializers.StringRelatedField(many=True)
users = UserSerializer(many=True)
_links = ProjectLinksSerializer(source='*')
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
expandable_fields = dict(
active_versions=(
VersionSerializer,
dict(
# ``SerializerMethodField`` as far as I know
source='active_versions',
many=True,
),
),
)
class Meta:
model = Project
fields = [
'id',
'name',
'slug',
'created',
'modified',
'language',
'programming_language',
'homepage',
'repository',
'default_version',
'default_branch',
'privacy_level',
'subproject_of',
'translation_of',
'users',
'urls',
'tags',
# NOTE: ``expandable_fields`` must not be included here. Otherwise,
# they will be tried to be rendered and fail
# 'users',
# 'active_versions',
'_links',
]
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
return self.__class__(obj.main_language_project).data
def get_subproject_of(self, obj):
try:
return self.__class__(obj.superprojects.first().parent).data
except Exception:
return None
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.from_url or None
def get_to_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'project',
'_links',
]
| true | true |
f71d817086ba45a81765922769a674dd85882342 | 393 | py | Python | modules/post-exploitation/koadic.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 4,391 | 2015-05-12T19:30:45.000Z | 2022-03-30T13:39:27.000Z | modules/post-exploitation/koadic.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 340 | 2015-05-14T13:50:44.000Z | 2022-01-13T14:40:14.000Z | modules/post-exploitation/koadic.py | decidedlygray/ptf | f17f50606fac5ef30f42c0b5e0fa57b58f696b99 | [
"FTL"
] | 1,290 | 2015-05-13T00:24:58.000Z | 2022-03-30T08:20:22.000Z | AUTHOR="Jared Haight (@jaredhaight)"
# PTF (PenTesters Framework) module metadata: declares how to install/update
# Koadic C3 by cloning its GitHub repository and installing pip requirements.
DESCRIPTION="This module will install/update Koadic C3 (COM Command and Control Framework)"
# Install via "git clone" of REPOSITORY_LOCATION into INSTALL_LOCATION.
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/zerosum0x0/koadic"
INSTALL_LOCATION="koadic"
# Comma-separated OS package dependencies, per distro family.
DEBIAN="python,python-pip"
ARCHLINUX="python,python-pip"  # normalized spacing to match the other fields
BYPASS_UPDATE="NO"
# Shell commands PTF runs after cloning/updating the repository.
AFTER_COMMANDS="cd {INSTALL_LOCATION},pip install -r ./requirements.txt"
| 20.684211 | 91 | 0.78117 | AUTHOR="Jared Haight (@jaredhaight)"
DESCRIPTION="This module will install/update Koadic C3 (COM Command and Control Framework)"
INSTALL_TYPE="GIT"
REPOSITORY_LOCATION="https://github.com/zerosum0x0/koadic"
INSTALL_LOCATION="koadic"
DEBIAN="python,python-pip"
ARCHLINUX = "python,python-pip"
BYPASS_UPDATE="NO"
AFTER_COMMANDS="cd {INSTALL_LOCATION},pip install -r ./requirements.txt"
| true | true |
f71d8259f8b4b230a161329e0a9ccdf462508567 | 851 | py | Python | ml-agents/tests/yamato/standalone_build_tests.py | carlos-aguayo/ml-agents | 8ad72c08d7d9408c4dda4601beec7f8e82f911ed | [
"Apache-2.0"
] | 2 | 2020-12-01T20:46:13.000Z | 2021-03-07T16:17:45.000Z | ml-agents/tests/yamato/standalone_build_tests.py | carlos-aguayo/ml-agents | 8ad72c08d7d9408c4dda4601beec7f8e82f911ed | [
"Apache-2.0"
] | null | null | null | ml-agents/tests/yamato/standalone_build_tests.py | carlos-aguayo/ml-agents | 8ad72c08d7d9408c4dda4601beec7f8e82f911ed | [
"Apache-2.0"
] | null | null | null | import sys
import argparse
from .yamato_utils import get_base_path, run_standalone_build
def main(scene_path):
base_path = get_base_path()
print(f"Running in base path {base_path}")
executable_name = None
if scene_path is not None:
executable_name = scene_path.strip(".unity")
executable_name = executable_name.split("/")[-1]
executable_name = "testPlayer-" + executable_name
returncode = run_standalone_build(
base_path, verbose=True, output_path=executable_name, scene_path=scene_path
)
if returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(returncode)
# CLI entry point: ``python standalone_build_tests.py [--scene Path/To.unity]``
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Optional scene to build; None builds the project's default scenes.
    parser.add_argument("--scene", default=None)
    args = parser.parse_args()
    main(args.scene)
| 25.029412 | 83 | 0.683901 | import sys
import argparse
from .yamato_utils import get_base_path, run_standalone_build
def main(scene_path):
base_path = get_base_path()
print(f"Running in base path {base_path}")
executable_name = None
if scene_path is not None:
executable_name = scene_path.strip(".unity")
executable_name = executable_name.split("/")[-1]
executable_name = "testPlayer-" + executable_name
returncode = run_standalone_build(
base_path, verbose=True, output_path=executable_name, scene_path=scene_path
)
if returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(returncode)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scene", default=None)
args = parser.parse_args()
main(args.scene)
| true | true |
f71d82961476e1a195d28dbf87387de8e1e5007b | 13,636 | py | Python | stix2/test/v20/test_properties.py | emmanvg/cti-python-stix2 | d4c01157352552e2ec05bcf7b0f79ef96e34022d | [
"BSD-3-Clause"
] | 9 | 2017-07-03T19:01:23.000Z | 2020-02-20T19:22:49.000Z | stix2/test/v20/test_properties.py | emmanvg/cti-python-stix2 | d4c01157352552e2ec05bcf7b0f79ef96e34022d | [
"BSD-3-Clause"
] | 1 | 2017-05-30T14:13:27.000Z | 2017-05-30T14:13:27.000Z | stix2/test/v20/test_properties.py | emmanvg/cti-python-stix2 | d4c01157352552e2ec05bcf7b0f79ef96e34022d | [
"BSD-3-Clause"
] | 1 | 2017-05-26T19:17:14.000Z | 2017-05-26T19:17:14.000Z | import uuid
import pytest
import stix2
from stix2.exceptions import (
AtLeastOnePropertyError, CustomContentError, DictionaryKeyError,
)
from stix2.properties import (
BinaryProperty, BooleanProperty, DictionaryProperty,
EmbeddedObjectProperty, EnumProperty, ExtensionsProperty, FloatProperty,
HashesProperty, HexProperty, IDProperty, IntegerProperty, ListProperty,
Property, ReferenceProperty, STIXObjectProperty, StringProperty,
TimestampProperty, TypeProperty,
)
from stix2.v20.common import MarkingProperty
from . import constants
def test_property():
    """A bare Property is optional and passes values through clean() unchanged."""
    prop = Property()
    assert prop.required is False
    for value in ('foo', 3):
        assert prop.clean(value) == value
def test_basic_clean():
    """A subclass may override clean() to validate; failures raise ValueError."""
    class FortyTwoOnly(Property):
        def clean(self, value):
            # Guard clause: reject everything except the magic value.
            if value != 42:
                raise ValueError("Must be 42")
            return value

    prop = FortyTwoOnly()
    assert prop.clean(42) == 42
    with pytest.raises(ValueError):
        prop.clean(41)
def test_property_default():
    """A subclass may supply default(); its value is returned as-is."""
    class WithDefault(Property):
        def default(self):
            return 77

    assert WithDefault().default() == 77
def test_fixed_property():
    """A fixed-value property accepts only its exact value (the string "2.0")."""
    p = Property(fixed="2.0")
    assert p.clean("2.0")
    with pytest.raises(ValueError):
        assert p.clean("x") is False
    with pytest.raises(ValueError):
        # Even the numerically-equal float is rejected: the fixed value is a str.
        assert p.clean(2.0) is False
    assert p.default() == "2.0"
    # The default of a fixed property must itself pass validation.
    assert p.clean(p.default())
def test_list_property():
    """ListProperty cleans non-empty lists and rejects empty ones."""
    prop = ListProperty(StringProperty)
    assert prop.clean(['abc', 'xyz'])
    with pytest.raises(ValueError):
        prop.clean([])
def test_string_property():
prop = StringProperty()
assert prop.clean('foobar')
assert prop.clean(1)
assert prop.clean([1, 2, 3])
def test_type_property():
prop = TypeProperty('my-type')
assert prop.clean('my-type')
with pytest.raises(ValueError):
prop.clean('not-my-type')
assert prop.clean(prop.default())
ID_PROP = IDProperty('my-type', spec_version="2.0")
MY_ID = 'my-type--232c9d3f-49fc-4440-bb01-607f638778e7'
@pytest.mark.parametrize(
"value", [
MY_ID,
'my-type--00000000-0000-4000-8000-000000000000',
],
)
def test_id_property_valid(value):
assert ID_PROP.clean(value) == value
CONSTANT_IDS = [
constants.ATTACK_PATTERN_ID,
constants.CAMPAIGN_ID,
constants.COURSE_OF_ACTION_ID,
constants.IDENTITY_ID,
constants.INDICATOR_ID,
constants.INTRUSION_SET_ID,
constants.MALWARE_ID,
constants.MARKING_DEFINITION_ID,
constants.OBSERVED_DATA_ID,
constants.RELATIONSHIP_ID,
constants.REPORT_ID,
constants.SIGHTING_ID,
constants.THREAT_ACTOR_ID,
constants.TOOL_ID,
constants.VULNERABILITY_ID,
]
CONSTANT_IDS.extend(constants.MARKING_IDS)
CONSTANT_IDS.extend(constants.RELATIONSHIP_IDS)
@pytest.mark.parametrize("value", CONSTANT_IDS)
def test_id_property_valid_for_type(value):
type = value.split('--', 1)[0]
assert IDProperty(type=type, spec_version="2.0").clean(value) == value
def test_id_property_wrong_type():
    """An ID with the wrong type prefix is rejected with a precise message."""
    expected = "must start with 'my-type--'."
    with pytest.raises(ValueError) as excinfo:
        ID_PROP.clean('not-my-type--232c9d3f-49fc-4440-bb01-607f638778e7')
    assert str(excinfo.value) == expected
@pytest.mark.parametrize(
"value", [
'my-type--foo',
# Not a v4 UUID
'my-type--00000000-0000-0000-0000-000000000000',
'my-type--' + str(uuid.uuid1()),
'my-type--' + str(uuid.uuid3(uuid.NAMESPACE_DNS, "example.org")),
'my-type--' + str(uuid.uuid5(uuid.NAMESPACE_DNS, "example.org")),
],
)
def test_id_property_not_a_valid_hex_uuid(value):
with pytest.raises(ValueError):
ID_PROP.clean(value)
def test_id_property_default():
default = ID_PROP.default()
assert ID_PROP.clean(default) == default
@pytest.mark.parametrize(
"value", [
2,
-1,
3.14,
False,
],
)
def test_integer_property_valid(value):
int_prop = IntegerProperty()
assert int_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
-1,
-100,
-5 * 6,
],
)
def test_integer_property_invalid_min_with_constraints(value):
int_prop = IntegerProperty(min=0, max=180)
with pytest.raises(ValueError) as excinfo:
int_prop.clean(value)
assert "minimum value is" in str(excinfo.value)
@pytest.mark.parametrize(
"value", [
181,
200,
50 * 6,
],
)
def test_integer_property_invalid_max_with_constraints(value):
int_prop = IntegerProperty(min=0, max=180)
with pytest.raises(ValueError) as excinfo:
int_prop.clean(value)
assert "maximum value is" in str(excinfo.value)
@pytest.mark.parametrize(
"value", [
"something",
StringProperty(),
],
)
def test_integer_property_invalid(value):
int_prop = IntegerProperty()
with pytest.raises(ValueError):
int_prop.clean(value)
@pytest.mark.parametrize(
"value", [
2,
-1,
3.14,
False,
],
)
def test_float_property_valid(value):
int_prop = FloatProperty()
assert int_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
"something",
StringProperty(),
],
)
def test_float_property_invalid(value):
int_prop = FloatProperty()
with pytest.raises(ValueError):
int_prop.clean(value)
@pytest.mark.parametrize(
"value", [
True,
False,
'True',
'False',
'true',
'false',
'TRUE',
'FALSE',
'T',
'F',
't',
'f',
1,
0,
],
)
def test_boolean_property_valid(value):
bool_prop = BooleanProperty()
assert bool_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
'abc',
['false'],
{'true': 'true'},
2,
-1,
],
)
def test_boolean_property_invalid(value):
bool_prop = BooleanProperty()
with pytest.raises(ValueError):
bool_prop.clean(value)
def test_reference_property():
ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
assert ref_prop.clean("my-type--00000000-0000-4000-8000-000000000000")
with pytest.raises(ValueError):
ref_prop.clean("foo")
# This is not a valid V4 UUID
with pytest.raises(ValueError):
ref_prop.clean("my-type--00000000-0000-0000-0000-000000000000")
def test_reference_property_specific_type():
ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
with pytest.raises(ValueError):
ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf")
assert ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf") == \
"my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf"
@pytest.mark.parametrize(
"value", [
'2017-01-01T12:34:56Z',
'2017-01-01 12:34:56',
'Jan 1 2017 12:34:56',
],
)
def test_timestamp_property_valid(value):
ts_prop = TimestampProperty()
assert ts_prop.clean(value) == constants.FAKE_TIME
def test_timestamp_property_invalid():
ts_prop = TimestampProperty()
with pytest.raises(ValueError):
ts_prop.clean(1)
with pytest.raises(ValueError):
ts_prop.clean("someday sometime")
def test_binary_property():
bin_prop = BinaryProperty()
assert bin_prop.clean("TG9yZW0gSXBzdW0=")
with pytest.raises(ValueError):
bin_prop.clean("foobar")
def test_hex_property():
hex_prop = HexProperty()
assert hex_prop.clean("4c6f72656d20497073756d")
with pytest.raises(ValueError):
hex_prop.clean("foobar")
@pytest.mark.parametrize(
"d", [
{'description': 'something'},
[('abc', 1), ('bcd', 2), ('cde', 3)],
],
)
def test_dictionary_property_valid(d):
dict_prop = DictionaryProperty(spec_version="2.0")
assert dict_prop.clean(d)
@pytest.mark.parametrize(
"d", [
[{'a': 'something'}, "Invalid dictionary key a: (shorter than 3 characters)."],
[
{'a'*300: 'something'}, "Invalid dictionary key aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaa: (longer than 256 characters).",
],
[
{'Hey!': 'something'}, "Invalid dictionary key Hey!: (contains characters other than lowercase a-z, "
"uppercase A-Z, numerals 0-9, hyphen (-), or underscore (_)).",
],
],
)
def test_dictionary_property_invalid_key(d):
dict_prop = DictionaryProperty(spec_version="2.0")
with pytest.raises(DictionaryKeyError) as excinfo:
dict_prop.clean(d[0])
assert str(excinfo.value) == d[1]
@pytest.mark.parametrize(
    "d", [
        # TODO: This error message could be made more helpful. The error is caused
        # because `json.loads()` doesn't like the *single* quotes around the key
        # name, even though they are valid in a Python dictionary. While technically
        # accurate (a string is not a dictionary), if we want to be able to load
        # string-encoded "dictionaries" that are, we need a better error message
        # or an alternative to `json.loads()` ... and preferably *not* `eval()`. :-)
        # Changing the following to `'{"description": "something"}'` does not cause
        # any ValueError to be raised.
        ("{'description': 'something'}", "The dictionary property must contain a dictionary"),
    ],
)
def test_dictionary_property_invalid(d):
    """Non-dict values (here a dict-looking string) are rejected with a clear error."""
    dict_prop = DictionaryProperty(spec_version="2.0")
    with pytest.raises(ValueError) as excinfo:
        dict_prop.clean(d[0])
    assert str(excinfo.value) == d[1]
def test_property_list_of_dictionary():
@stix2.v20.CustomObject(
'x-new-obj', [
('property1', ListProperty(DictionaryProperty(spec_version="2.0"), required=True)),
],
)
class NewObj():
pass
test_obj = NewObj(property1=[{'foo': 'bar'}])
assert test_obj.property1[0]['foo'] == 'bar'
@pytest.mark.parametrize(
"value", [
{"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
[('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
],
)
def test_hashes_property_valid(value):
hash_prop = HashesProperty()
assert hash_prop.clean(value)
@pytest.mark.parametrize(
"value", [
{"MD5": "a"},
{"SHA-256": "2dfb1bcc980200c6706feee399d41b3f"},
],
)
def test_hashes_property_invalid(value):
hash_prop = HashesProperty()
with pytest.raises(ValueError):
hash_prop.clean(value)
def test_embedded_property():
emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
mime = stix2.v20.EmailMIMEComponent(
content_type="text/plain; charset=utf-8",
content_disposition="inline",
body="Cats are funny!",
)
assert emb_prop.clean(mime)
with pytest.raises(ValueError):
emb_prop.clean("string")
@pytest.mark.parametrize(
"value", [
['a', 'b', 'c'],
('a', 'b', 'c'),
'b',
],
)
def test_enum_property_valid(value):
enum_prop = EnumProperty(value)
assert enum_prop.clean('b')
def test_enum_property_invalid():
enum_prop = EnumProperty(['a', 'b', 'c'])
with pytest.raises(ValueError):
enum_prop.clean('z')
def test_extension_property_valid():
    """A known extension for the enclosing type passes validation."""
    ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
    # Use clean() explicitly, consistent with every other ExtensionsProperty
    # test in this module (the original called the property instance directly).
    assert ext_prop.clean({
        'windows-pebinary-ext': {
            'pe_type': 'exe',
        },
    })
def test_extension_property_invalid1():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
with pytest.raises(ValueError):
ext_prop.clean(1)
def test_extension_property_invalid2():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
with pytest.raises(CustomContentError):
ext_prop.clean(
{
'foobar-ext': {
'pe_type': 'exe',
},
},
)
def test_extension_property_invalid_type():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='indicator')
with pytest.raises(CustomContentError) as excinfo:
ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
},
},
)
assert "Can't parse unknown extension" in str(excinfo.value)
def test_extension_at_least_one_property_constraint():
with pytest.raises(AtLeastOnePropertyError):
stix2.v20.TCPExt()
def test_marking_property_error():
mark_prop = MarkingProperty()
with pytest.raises(ValueError) as excinfo:
mark_prop.clean('my-marking')
assert str(excinfo.value) == "must be a Statement, TLP Marking or a registered marking."
def test_stix_property_not_compliant_spec():
    """A 2.0 bundle property rejects objects that declare a spec_version.

    Passing spec_version explicitly marks the Indicator as belonging to a
    different spec flavor, which 2.0 bundles refuse to contain.
    """
    # This is a 2.0 test only...
    indicator = stix2.v20.Indicator(spec_version="2.0", allow_custom=True, **constants.INDICATOR_KWARGS)
    stix_prop = STIXObjectProperty(spec_version="2.0")
    with pytest.raises(ValueError) as excinfo:
        stix_prop.clean(indicator)
    assert "Spec version 2.0 bundles don't yet support containing objects of a different spec version." in str(excinfo.value)
| 26.022901 | 125 | 0.650704 | import uuid
import pytest
import stix2
from stix2.exceptions import (
AtLeastOnePropertyError, CustomContentError, DictionaryKeyError,
)
from stix2.properties import (
BinaryProperty, BooleanProperty, DictionaryProperty,
EmbeddedObjectProperty, EnumProperty, ExtensionsProperty, FloatProperty,
HashesProperty, HexProperty, IDProperty, IntegerProperty, ListProperty,
Property, ReferenceProperty, STIXObjectProperty, StringProperty,
TimestampProperty, TypeProperty,
)
from stix2.v20.common import MarkingProperty
from . import constants
def test_property():
p = Property()
assert p.required is False
assert p.clean('foo') == 'foo'
assert p.clean(3) == 3
def test_basic_clean():
class Prop(Property):
def clean(self, value):
if value == 42:
return value
else:
raise ValueError("Must be 42")
p = Prop()
assert p.clean(42) == 42
with pytest.raises(ValueError):
p.clean(41)
def test_property_default():
class Prop(Property):
def default(self):
return 77
p = Prop()
assert p.default() == 77
def test_fixed_property():
p = Property(fixed="2.0")
assert p.clean("2.0")
with pytest.raises(ValueError):
assert p.clean("x") is False
with pytest.raises(ValueError):
assert p.clean(2.0) is False
assert p.default() == "2.0"
assert p.clean(p.default())
def test_list_property():
p = ListProperty(StringProperty)
assert p.clean(['abc', 'xyz'])
with pytest.raises(ValueError):
p.clean([])
def test_string_property():
prop = StringProperty()
assert prop.clean('foobar')
assert prop.clean(1)
assert prop.clean([1, 2, 3])
def test_type_property():
prop = TypeProperty('my-type')
assert prop.clean('my-type')
with pytest.raises(ValueError):
prop.clean('not-my-type')
assert prop.clean(prop.default())
ID_PROP = IDProperty('my-type', spec_version="2.0")
MY_ID = 'my-type--232c9d3f-49fc-4440-bb01-607f638778e7'
@pytest.mark.parametrize(
"value", [
MY_ID,
'my-type--00000000-0000-4000-8000-000000000000',
],
)
def test_id_property_valid(value):
assert ID_PROP.clean(value) == value
CONSTANT_IDS = [
constants.ATTACK_PATTERN_ID,
constants.CAMPAIGN_ID,
constants.COURSE_OF_ACTION_ID,
constants.IDENTITY_ID,
constants.INDICATOR_ID,
constants.INTRUSION_SET_ID,
constants.MALWARE_ID,
constants.MARKING_DEFINITION_ID,
constants.OBSERVED_DATA_ID,
constants.RELATIONSHIP_ID,
constants.REPORT_ID,
constants.SIGHTING_ID,
constants.THREAT_ACTOR_ID,
constants.TOOL_ID,
constants.VULNERABILITY_ID,
]
CONSTANT_IDS.extend(constants.MARKING_IDS)
CONSTANT_IDS.extend(constants.RELATIONSHIP_IDS)
@pytest.mark.parametrize("value", CONSTANT_IDS)
def test_id_property_valid_for_type(value):
type = value.split('--', 1)[0]
assert IDProperty(type=type, spec_version="2.0").clean(value) == value
def test_id_property_wrong_type():
with pytest.raises(ValueError) as excinfo:
ID_PROP.clean('not-my-type--232c9d3f-49fc-4440-bb01-607f638778e7')
assert str(excinfo.value) == "must start with 'my-type--'."
@pytest.mark.parametrize(
"value", [
'my-type--foo',
'my-type--00000000-0000-0000-0000-000000000000',
'my-type--' + str(uuid.uuid1()),
'my-type--' + str(uuid.uuid3(uuid.NAMESPACE_DNS, "example.org")),
'my-type--' + str(uuid.uuid5(uuid.NAMESPACE_DNS, "example.org")),
],
)
def test_id_property_not_a_valid_hex_uuid(value):
with pytest.raises(ValueError):
ID_PROP.clean(value)
def test_id_property_default():
default = ID_PROP.default()
assert ID_PROP.clean(default) == default
@pytest.mark.parametrize(
"value", [
2,
-1,
3.14,
False,
],
)
def test_integer_property_valid(value):
int_prop = IntegerProperty()
assert int_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
-1,
-100,
-5 * 6,
],
)
def test_integer_property_invalid_min_with_constraints(value):
int_prop = IntegerProperty(min=0, max=180)
with pytest.raises(ValueError) as excinfo:
int_prop.clean(value)
assert "minimum value is" in str(excinfo.value)
@pytest.mark.parametrize(
"value", [
181,
200,
50 * 6,
],
)
def test_integer_property_invalid_max_with_constraints(value):
int_prop = IntegerProperty(min=0, max=180)
with pytest.raises(ValueError) as excinfo:
int_prop.clean(value)
assert "maximum value is" in str(excinfo.value)
@pytest.mark.parametrize(
"value", [
"something",
StringProperty(),
],
)
def test_integer_property_invalid(value):
int_prop = IntegerProperty()
with pytest.raises(ValueError):
int_prop.clean(value)
@pytest.mark.parametrize(
"value", [
2,
-1,
3.14,
False,
],
)
def test_float_property_valid(value):
int_prop = FloatProperty()
assert int_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
"something",
StringProperty(),
],
)
def test_float_property_invalid(value):
int_prop = FloatProperty()
with pytest.raises(ValueError):
int_prop.clean(value)
@pytest.mark.parametrize(
"value", [
True,
False,
'True',
'False',
'true',
'false',
'TRUE',
'FALSE',
'T',
'F',
't',
'f',
1,
0,
],
)
def test_boolean_property_valid(value):
bool_prop = BooleanProperty()
assert bool_prop.clean(value) is not None
@pytest.mark.parametrize(
"value", [
'abc',
['false'],
{'true': 'true'},
2,
-1,
],
)
def test_boolean_property_invalid(value):
bool_prop = BooleanProperty()
with pytest.raises(ValueError):
bool_prop.clean(value)
def test_reference_property():
ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
assert ref_prop.clean("my-type--00000000-0000-4000-8000-000000000000")
with pytest.raises(ValueError):
ref_prop.clean("foo")
with pytest.raises(ValueError):
ref_prop.clean("my-type--00000000-0000-0000-0000-000000000000")
def test_reference_property_specific_type():
ref_prop = ReferenceProperty(valid_types="my-type", spec_version="2.0")
with pytest.raises(ValueError):
ref_prop.clean("not-my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf")
assert ref_prop.clean("my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf") == \
"my-type--8a8e8758-f92c-4058-ba38-f061cd42a0cf"
@pytest.mark.parametrize(
"value", [
'2017-01-01T12:34:56Z',
'2017-01-01 12:34:56',
'Jan 1 2017 12:34:56',
],
)
def test_timestamp_property_valid(value):
ts_prop = TimestampProperty()
assert ts_prop.clean(value) == constants.FAKE_TIME
def test_timestamp_property_invalid():
ts_prop = TimestampProperty()
with pytest.raises(ValueError):
ts_prop.clean(1)
with pytest.raises(ValueError):
ts_prop.clean("someday sometime")
def test_binary_property():
bin_prop = BinaryProperty()
assert bin_prop.clean("TG9yZW0gSXBzdW0=")
with pytest.raises(ValueError):
bin_prop.clean("foobar")
def test_hex_property():
hex_prop = HexProperty()
assert hex_prop.clean("4c6f72656d20497073756d")
with pytest.raises(ValueError):
hex_prop.clean("foobar")
@pytest.mark.parametrize(
"d", [
{'description': 'something'},
[('abc', 1), ('bcd', 2), ('cde', 3)],
],
)
def test_dictionary_property_valid(d):
dict_prop = DictionaryProperty(spec_version="2.0")
assert dict_prop.clean(d)
@pytest.mark.parametrize(
"d", [
[{'a': 'something'}, "Invalid dictionary key a: (shorter than 3 characters)."],
[
{'a'*300: 'something'}, "Invalid dictionary key aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
"aaaaaaaaaaaaaaaaaaaaaaa: (longer than 256 characters).",
],
[
{'Hey!': 'something'}, "Invalid dictionary key Hey!: (contains characters other than lowercase a-z, "
"uppercase A-Z, numerals 0-9, hyphen (-), or underscore (_)).",
],
],
)
def test_dictionary_property_invalid_key(d):
dict_prop = DictionaryProperty(spec_version="2.0")
with pytest.raises(DictionaryKeyError) as excinfo:
dict_prop.clean(d[0])
assert str(excinfo.value) == d[1]
@pytest.mark.parametrize(
"d", [
# name, even though they are valid in a Python dictionary. While technically
# accurate (a string is not a dictionary), if we want to be able to load
# string-encoded "dictionaries" that are, we need a better error message
# or an alternative to `json.loads()` ... and preferably *not* `eval()`. :-)
# Changing the following to `'{"description": "something"}'` does not cause
# any ValueError to be raised.
("{'description': 'something'}", "The dictionary property must contain a dictionary"),
],
)
def test_dictionary_property_invalid(d):
dict_prop = DictionaryProperty(spec_version="2.0")
with pytest.raises(ValueError) as excinfo:
dict_prop.clean(d[0])
assert str(excinfo.value) == d[1]
def test_property_list_of_dictionary():
@stix2.v20.CustomObject(
'x-new-obj', [
('property1', ListProperty(DictionaryProperty(spec_version="2.0"), required=True)),
],
)
class NewObj():
pass
test_obj = NewObj(property1=[{'foo': 'bar'}])
assert test_obj.property1[0]['foo'] == 'bar'
@pytest.mark.parametrize(
"value", [
{"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
[('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
],
)
def test_hashes_property_valid(value):
hash_prop = HashesProperty()
assert hash_prop.clean(value)
@pytest.mark.parametrize(
"value", [
{"MD5": "a"},
{"SHA-256": "2dfb1bcc980200c6706feee399d41b3f"},
],
)
def test_hashes_property_invalid(value):
hash_prop = HashesProperty()
with pytest.raises(ValueError):
hash_prop.clean(value)
def test_embedded_property():
emb_prop = EmbeddedObjectProperty(type=stix2.v20.EmailMIMEComponent)
mime = stix2.v20.EmailMIMEComponent(
content_type="text/plain; charset=utf-8",
content_disposition="inline",
body="Cats are funny!",
)
assert emb_prop.clean(mime)
with pytest.raises(ValueError):
emb_prop.clean("string")
@pytest.mark.parametrize(
"value", [
['a', 'b', 'c'],
('a', 'b', 'c'),
'b',
],
)
def test_enum_property_valid(value):
enum_prop = EnumProperty(value)
assert enum_prop.clean('b')
def test_enum_property_invalid():
enum_prop = EnumProperty(['a', 'b', 'c'])
with pytest.raises(ValueError):
enum_prop.clean('z')
def test_extension_property_valid():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
assert ext_prop({
'windows-pebinary-ext': {
'pe_type': 'exe',
},
})
def test_extension_property_invalid1():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
with pytest.raises(ValueError):
ext_prop.clean(1)
def test_extension_property_invalid2():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='file')
with pytest.raises(CustomContentError):
ext_prop.clean(
{
'foobar-ext': {
'pe_type': 'exe',
},
},
)
def test_extension_property_invalid_type():
ext_prop = ExtensionsProperty(spec_version="2.0", enclosing_type='indicator')
with pytest.raises(CustomContentError) as excinfo:
ext_prop.clean(
{
'windows-pebinary-ext': {
'pe_type': 'exe',
},
},
)
assert "Can't parse unknown extension" in str(excinfo.value)
def test_extension_at_least_one_property_constraint():
with pytest.raises(AtLeastOnePropertyError):
stix2.v20.TCPExt()
def test_marking_property_error():
mark_prop = MarkingProperty()
with pytest.raises(ValueError) as excinfo:
mark_prop.clean('my-marking')
assert str(excinfo.value) == "must be a Statement, TLP Marking or a registered marking."
def test_stix_property_not_compliant_spec():
indicator = stix2.v20.Indicator(spec_version="2.0", allow_custom=True, **constants.INDICATOR_KWARGS)
stix_prop = STIXObjectProperty(spec_version="2.0")
with pytest.raises(ValueError) as excinfo:
stix_prop.clean(indicator)
assert "Spec version 2.0 bundles don't yet support containing objects of a different spec version." in str(excinfo.value)
| true | true |
f71d846b2e00c72a66c869bc58ec2cd34e47a382 | 1,024 | py | Python | main.py | yuntan/crypkograph | 7715e195a1503f15d2e024dff5daf07c85cb63b1 | [
"MIT"
] | 26 | 2018-06-03T16:48:37.000Z | 2021-12-28T08:20:07.000Z | main.py | yuntan/crypkograph | 7715e195a1503f15d2e024dff5daf07c85cb63b1 | [
"MIT"
] | 1 | 2018-06-06T17:41:10.000Z | 2018-06-06T17:41:10.000Z | main.py | yuntan/crypkograph | 7715e195a1503f15d2e024dff5daf07c85cb63b1 | [
"MIT"
] | 7 | 2018-06-06T15:15:03.000Z | 2021-12-28T08:20:06.000Z | from os import mkdir
from bottle import route, get, request, static_file, run
from settings import PORT, DIR_CACHE, DIR_GRAPH
from crypkograph import render_graph
@route('/')
@route('/index.html')
def serve_html():
return static_file('index.html', '.')
@route('/static/<filename:path>')
def serve_static(filename):
return static_file(filename, 'static')
@route('/generated/<filename:re:.*\.gv\.(png|pdf)>')
def serve_generated(filename):
ext = filename.split('.')[-1]
if ext == 'png':
return static_file(filename, DIR_GRAPH, mimetype='image/png')
elif ext == 'pdf':
return static_file(filename, DIR_GRAPH, download=filename)
# /api/render?owner_addr={owner_addr}
@get('/api/render')
def render():
owner_addr = request.query['owner_addr']
if not owner_addr:
raise Exception()
render_graph(owner_addr, subdir=DIR_GRAPH)
if __name__ == '__main__':
try:
mkdir(DIR_CACHE)
except FileExistsError:
pass
run(host='0.0.0.0', port=PORT)
| 22.755556 | 69 | 0.673828 | from os import mkdir
from bottle import route, get, request, static_file, run
from settings import PORT, DIR_CACHE, DIR_GRAPH
from crypkograph import render_graph
@route('/')
@route('/index.html')
def serve_html():
return static_file('index.html', '.')
@route('/static/<filename:path>')
def serve_static(filename):
return static_file(filename, 'static')
@route('/generated/<filename:re:.*\.gv\.(png|pdf)>')
def serve_generated(filename):
ext = filename.split('.')[-1]
if ext == 'png':
return static_file(filename, DIR_GRAPH, mimetype='image/png')
elif ext == 'pdf':
return static_file(filename, DIR_GRAPH, download=filename)
@get('/api/render')
def render():
owner_addr = request.query['owner_addr']
if not owner_addr:
raise Exception()
render_graph(owner_addr, subdir=DIR_GRAPH)
if __name__ == '__main__':
try:
mkdir(DIR_CACHE)
except FileExistsError:
pass
run(host='0.0.0.0', port=PORT)
| true | true |
f71d84bd199bff2d65b7447164541e907a3f533b | 3,063 | py | Python | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 111 | 2016-05-18T04:18:18.000Z | 2021-11-03T02:05:19.000Z | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 1,103 | 2016-05-29T05:17:53.000Z | 2022-03-31T18:12:40.000Z | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 47 | 2016-05-29T20:48:37.000Z | 2021-11-12T09:40:40.000Z | from libweasyl import ratings
from libweasyl.cache import region
from weasyl import define as d
from weasyl import profile
from weasyl import searchtag
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
    """
    Returns True if the submission, character, or journal contains a search tag
    that the user has blocked, else False.

    Exactly one of `submitid`, `charid`, or `journalid` is expected; when the
    first two are falsy, `journalid` is assumed. Content the viewer authored
    themselves is never blocked (content.userid != user).
    """
    if not userid:
        # Anonymous viewers have no blocked tags.
        return False
    if submitid:
        map_table = "searchmapsubmit"
        content_table = "submission"
        id_field = "submitid"
        target = submitid
    elif charid:
        map_table = "searchmapchar"
        content_table = "character"
        id_field = "charid"
        target = charid
    else:
        map_table = "searchmapjournal"
        content_table = "journal"
        id_field = "journalid"
        target = journalid
    # Table/column names come only from the fixed mapping above (never from
    # user input), so str.format here is safe; the ids are bound as parameters.
    # A tag blocks content when the content's rating is at or above the
    # blocktag's threshold rating (see module comment).
    query = """
        SELECT EXISTS (
            SELECT 0 FROM {map_table} searchmap
                INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
            WHERE searchmap.targetid = %(id)s
                AND content.userid != %(user)s
                AND searchmap.tagid IN (
                    SELECT blocktag.tagid FROM blocktag
                    WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
    """.format(map_table=map_table, content_table=content_table, id_field=id_field)
    return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
    """Return True if any blocked-tag entry applies to content at this rating.

    An entry applies when the content's rating is at or above the entry's
    threshold rating and the entry's tagid is among the content's tags.
    """
    for blocked in blocked_tags:
        if rating >= blocked['rating'] and blocked['tagid'] in tags:
            return True
    return False
def select(userid):
    """Return the user's blocked tags as [{"title", "rating"}], sorted by title."""
    # NOTE(review): d.execute interpolates %i itself (project-specific helper);
    # userid is an integer from the session, matching this module's older
    # query style — confirm before switching to engine.execute parameters.
    return [{
        "title": i[0],
        "rating": i[1],
    } for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
                         " INNER JOIN blocktag bt ON st.tagid = bt.tagid"
                         " WHERE bt.userid = %i"
                         " ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def select_ids(userid):
    """Return [{"tagid", "rating"}] for the user's blocked tags.

    Results are memoized per-user via the cache region; insert() and remove()
    call select_ids.invalidate(userid) after mutating the table.
    """
    return [
        dict(row)
        for row in d.engine.execute(
            'SELECT tagid, rating FROM blocktag WHERE userid = %(user)s',
            user=userid)
    ]
def insert(userid, title, rating):
    """Block the tag `title` for `userid` at `rating` and above.

    Unknown rating codes fall back to GENERAL. The user must be allowed to
    view the chosen rating. Re-blocking an existing tag is a no-op
    (ON CONFLICT DO NOTHING). Invalidates the cached select_ids result.
    """
    if rating not in ratings.CODE_MAP:
        rating = ratings.GENERAL.code
    profile.check_user_rating_allowed(userid, rating)
    d.engine.execute(
        'INSERT INTO blocktag (userid, tagid, rating) VALUES (%(user)s, %(tag)s, %(rating)s) ON CONFLICT DO NOTHING',
        user=userid, tag=searchtag.get_or_create(title), rating=rating)
    select_ids.invalidate(userid)
def remove(userid, title):
    """Unblock the tag `title` for `userid` and invalidate the cached ids.

    A title with no matching searchtag row simply deletes nothing.
    """
    d.engine.execute(
        "DELETE FROM blocktag WHERE (userid, tagid) = (%(user)s, (SELECT tagid FROM searchtag WHERE title = %(tag)s))",
        user=userid,
        tag=d.get_search_tag(title),
    )
    select_ids.invalidate(userid)
| 31.57732 | 119 | 0.627163 | from libweasyl import ratings
from libweasyl.cache import region
from weasyl import define as d
from weasyl import profile
from weasyl import searchtag
def check(userid, submitid=None, charid=None, journalid=None):
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['tagid'] in tags for b in blocked_tags)
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def select_ids(userid):
return [
dict(row)
for row in d.engine.execute(
'SELECT tagid, rating FROM blocktag WHERE userid = %(user)s',
user=userid)
]
def insert(userid, title, rating):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
d.engine.execute(
'INSERT INTO blocktag (userid, tagid, rating) VALUES (%(user)s, %(tag)s, %(rating)s) ON CONFLICT DO NOTHING',
user=userid, tag=searchtag.get_or_create(title), rating=rating)
select_ids.invalidate(userid)
def remove(userid, title):
d.engine.execute(
"DELETE FROM blocktag WHERE (userid, tagid) = (%(user)s, (SELECT tagid FROM searchtag WHERE title = %(tag)s))",
user=userid,
tag=d.get_search_tag(title),
)
select_ids.invalidate(userid)
| true | true |
f71d8544221be449972f7b45e2325ab14304a5db | 293 | py | Python | CodeChef/PRACTICE/Factorial - FCTRL.py | IshanManchanda/competitive-python | e9ee5fc48eaea0497ec13e8af5f89da6ac050526 | [
"MIT"
] | 6 | 2019-06-06T15:16:05.000Z | 2020-07-30T01:56:53.000Z | CodeChef/PRACTICE/Factorial - FCTRL.py | IshanManchanda/competitive-python | e9ee5fc48eaea0497ec13e8af5f89da6ac050526 | [
"MIT"
] | null | null | null | CodeChef/PRACTICE/Factorial - FCTRL.py | IshanManchanda/competitive-python | e9ee5fc48eaea0497ec13e8af5f89da6ac050526 | [
"MIT"
] | 1 | 2020-03-22T21:41:53.000Z | 2020-03-22T21:41:53.000Z | def main():
from sys import stdin, stdout
rl = stdin.readline
pl = stdout.write
int1 = int
str1 = str
xr = range
sum1 = sum
arr = [5]
for k in xr(1, 13):
arr[k] = arr[k - 1] * 5
for _ in xr(int1(rl())):
n = int1(rl())
c = sum1(n / i for i in arr)
pl(str1(c) + "\n")
main()
| 15.421053 | 30 | 0.549488 | def main():
from sys import stdin, stdout
rl = stdin.readline
pl = stdout.write
int1 = int
str1 = str
xr = range
sum1 = sum
arr = [5]
for k in xr(1, 13):
arr[k] = arr[k - 1] * 5
for _ in xr(int1(rl())):
n = int1(rl())
c = sum1(n / i for i in arr)
pl(str1(c) + "\n")
main()
| true | true |
f71d86f566cba857829800b557598795af8fd8ab | 1,145 | py | Python | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 19 | 2016-01-26T18:36:38.000Z | 2022-03-12T02:32:01.000Z | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 3 | 2016-01-29T19:43:25.000Z | 2019-03-11T20:21:11.000Z | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 2 | 2016-03-01T09:23:07.000Z | 2020-04-01T21:53:51.000Z | from abc import ABCMeta
import logging
import json
class BaseObject(object):
    """Abstract base class for simple value objects.

    Subclasses declare ``properties`` -- a list of attribute names -- and
    inherit keyword-based construction, dict loading, JSON rendering and
    property-wise equality from this class.
    """
    __metaclass__ = ABCMeta

    def __init__(self, **kwargs):
        """Initialise every declared property from ``kwargs``, using the
        empty string for properties that were not supplied."""
        self.log = logging.getLogger("irchooky")
        for name in self.properties:
            setattr(self, name, kwargs.get(name, ""))

    def load(self, object_dict):
        """Overwrite declared properties from ``object_dict``; keys that
        are absent keep their current values. A falsy mapping is a no-op."""
        if not object_dict:
            return
        for name in self.properties:
            setattr(self, name, object_dict.get(name, getattr(self, name)))

    def __str__(self):
        """Return a JSON object mapping each property to its stringified
        value."""
        return json.dumps(
            {name: str(getattr(self, name)) for name in self.properties})

    def __eq__(self, other):
        """Report equality when every declared property matches."""
        for name in self.properties:
            mine = getattr(self, name)
            theirs = getattr(other, name)
            if not mine == theirs:
                # Only the first differing property is logged, mirroring the
                # short-circuit return below.
                self.log.debug("Property %s is different" % name)
                self.log.debug("%s != %s" % (mine, theirs))
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
| 29.358974 | 67 | 0.567686 | from abc import ABCMeta
import logging
import json
class BaseObject(object):
    """Abstract base class for simple value objects.

    Subclasses must define ``properties``, a list of attribute names that
    drives construction, loading, JSON rendering and equality below.
    """
    __metaclass__ = ABCMeta
    def __init__(self, **kwargs):
        """Set every declared property from ``kwargs`` (default: "")."""
        self.log = logging.getLogger("irchooky")
        for prop in self.properties:
            setattr(self, prop, kwargs.get(prop, ""))
    def load(self, object_dict):
        """Overwrite properties from ``object_dict``; missing keys keep
        their current values. A falsy mapping is a no-op."""
        if not object_dict:
            return
        for prop in self.properties:
            default = getattr(self, prop)
            setattr(self, prop, object_dict.get(prop, default))
    def __str__(self):
        """Render all declared properties as a JSON object of strings."""
        return_dict = {}
        for prop in self.properties:
            return_dict.update({prop: str(getattr(self, prop))})
        return json.dumps(return_dict)
    def __eq__(self, other):
        """Objects are equal when every declared property matches."""
        for prop in self.properties:
            if not getattr(self, prop) == getattr(other, prop):
                # Log only the first mismatch at debug level, then bail out.
                self.log.debug("Property %s is different" % prop)
                self.log.debug("%s != %s" % (getattr(self, prop),
                                             getattr(other, prop)))
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
| true | true |
f71d870777dd8e99b39f2797adf52d842534def1 | 13,524 | py | Python | pyts/transformation/boss.py | NicolasHug/pyts | 29659fb09f568df2e7f8190f2d5a1c383dc7e9fa | [
"BSD-3-Clause"
] | 1 | 2019-10-19T12:03:20.000Z | 2019-10-19T12:03:20.000Z | pyts/transformation/boss.py | arita37/pyts | 29659fb09f568df2e7f8190f2d5a1c383dc7e9fa | [
"BSD-3-Clause"
] | null | null | null | pyts/transformation/boss.py | arita37/pyts | 29659fb09f568df2e7f8190f2d5a1c383dc7e9fa | [
"BSD-3-Clause"
] | 1 | 2020-01-09T11:34:35.000Z | 2020-01-09T11:34:35.000Z | """Code for Bag-of-SFA Symbols."""
# Author: Johann Faouzi <johann.faouzi@gmail.com>
# License: BSD-3-Clause
import numpy as np
from math import ceil
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from ..approximation import SymbolicFourierApproximation
from ..utils import windowed_view
class BOSS(BaseEstimator, TransformerMixin):
    """Bag of Symbolic Fourier Approximation Symbols.

    For each time series, subseries are extracted using a sliding window.
    Then the subseries are transformed into a word using the Symbolic
    Fourier Approximation (SFA) algorithm. For each time series, the words
    are grouped together and a histogram counting the occurrences of each
    word is created.

    Parameters
    ----------
    word_size : int (default = 4)
        Size of each word.

    n_bins : int (default = 4)
        The number of bins to produce. It must be between 2 and 26.

    strategy : str (default = 'quantile')
        Strategy used to define the widths of the bins:

        - 'uniform': All bins in each sample have identical widths
        - 'quantile': All bins in each sample have the same number of points
        - 'normal': Bin edges are quantiles from a standard normal distribution
        - 'entropy': Bin edges are computed using information gain

    window_size : int or float (default = 10)
        Size of the sliding window. If float, it represents the percentage of
        the size of each time series and must be between 0 and 1. The window
        size will be computed as ``ceil(window_size * n_timestamps)``.

    window_step : int or float (default = 1)
        Step of the sliding window. If float, it represents the percentage of
        the size of each time series and must be between 0 and 1. The window
        size will be computed as ``ceil(window_step * n_timestamps)``.

    anova : bool (default = False)
        If True, the Fourier coefficient selection is done via a one-way
        ANOVA test. If False, the first Fourier coefficients are selected.

    drop_sum : bool (default = False)
        If True, the first Fourier coefficient (i.e. the sum of the subseries)
        is dropped. Otherwise, it is kept.

    norm_mean : bool (default = False)
        If True, center each subseries before scaling.

    norm_std : bool (default = False)
        If True, scale each subseries to unit variance.

    numerosity_reduction : bool (default = True)
        If True, delete sample-wise all but one occurence of back to back
        identical occurences of the same words.

    sparse : bool (default = True)
        Return a sparse matrix if True, else return an array.

    alphabet : None, 'ordinal' or array-like, shape = (n_bins,)
        Alphabet to use. If None, the first `n_bins` letters of the Latin
        alphabet are used.

    Attributes
    ----------
    vocabulary_ : dict
        A mapping of feature indices to terms.

    References
    ----------
    .. [1] P. Schäfer, "The BOSS is concerned with time series classification
           in the presence of noise". Data Mining and Knowledge Discovery,
           29(6), 1505-1530 (2015).

    Examples
    --------
    >>> from pyts.datasets import load_gunpoint
    >>> from pyts.transformation import BOSS
    >>> X_train, X_test, _, _ = load_gunpoint(return_X_y=True)
    >>> boss = BOSS(word_size=2, n_bins=2, sparse=False)
    >>> boss.fit(X_train) # doctest: +ELLIPSIS
    BOSS(...)
    >>> sorted(boss.vocabulary_.values())
    ['aa', 'ab', 'ba', 'bb']
    >>> boss.transform(X_test) # doctest: +ELLIPSIS
    array(...)

    """

    def __init__(self, word_size=4, n_bins=4, strategy='quantile',
                 window_size=10, window_step=1, anova=False, drop_sum=False,
                 norm_mean=False, norm_std=False, numerosity_reduction=True,
                 sparse=True, alphabet=None):
        self.word_size = word_size
        self.n_bins = n_bins
        self.strategy = strategy
        self.window_size = window_size
        self.window_step = window_step
        self.anova = anova
        self.drop_sum = drop_sum
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        self.numerosity_reduction = numerosity_reduction
        self.sparse = sparse
        self.alphabet = alphabet

    def fit(self, X, y=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Training vector.

        y : None or array-like, shape = (n_samples,)
            Class labels for each data sample.

        Returns
        -------
        self : object

        """
        X = check_array(X)
        X_bow = self._fit_bag_of_words(X, y)

        # Learn the vocabulary only; building document-term matrices is
        # left to `transform`.
        vectorizer = CountVectorizer()
        vectorizer.fit(X_bow)
        self.vocabulary_ = {value: key for key, value in
                            vectorizer.vocabulary_.items()}
        self._vectorizer = vectorizer
        return self

    def transform(self, X):
        """Transform the provided data.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Test samples.

        Returns
        -------
        X_new : sparse matrix, shape = (n_samples, n_words)
            Document-term matrix.

        """
        check_is_fitted(self, ['_sfa', '_vectorizer', 'vocabulary_'])
        X = check_array(X)
        n_samples = X.shape[0]

        # Extract subseries with the window parameters learned during fit
        # and turn each subseries into an SFA word.
        X_windowed = windowed_view(
            X, window_size=self._window_size, window_step=self._window_step
        )
        X_windowed = X_windowed.reshape(-1, self._window_size)
        X_sfa = self._sfa.transform(X_windowed)
        X_word = np.asarray([''.join(X_sfa[i]) for i in range(X_sfa.shape[0])])
        X_word = X_word.reshape(n_samples, self._n_windows)

        X_bow = self._join_words(X_word, n_samples)
        X_boss = self._vectorizer.transform(X_bow)
        if not self.sparse:
            return X_boss.A
        return csr_matrix(X_boss)

    def fit_transform(self, X, y=None):
        """Fit the data then transform it.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Training vector.

        y : None or array-like, shape = (n_samples,)
            Class labels for each data sample.

        Returns
        -------
        X_new : sparse matrix, shape = (n_samples, n_words)
            Document-term matrix.

        """
        X = check_array(X)
        X_bow = self._fit_bag_of_words(X, y)

        # Fit the vocabulary and build the document-term matrix in one pass.
        vectorizer = CountVectorizer()
        X_boss = vectorizer.fit_transform(X_bow)
        self.vocabulary_ = {value: key for key, value in
                            vectorizer.vocabulary_.items()}
        self._vectorizer = vectorizer
        if not self.sparse:
            return X_boss.A
        return csr_matrix(X_boss)

    def _fit_bag_of_words(self, X, y):
        """Fit the SFA discretizer on sliding windows of ``X`` and return the
        bag of words (one space-separated document per sample).

        Shared by ``fit`` and ``fit_transform``. Stores the fitted window
        parameters and SFA transformer on ``self`` (``_window_size``,
        ``_window_step``, ``_n_windows``, ``_sfa``).
        """
        n_samples, n_timestamps = X.shape
        if y is not None:
            check_classification_targets(y)
        window_size, window_step = self._check_params(n_timestamps)
        n_windows = (n_timestamps - window_size + window_step) // window_step

        # Extract the subseries: one row per (sample, window) pair.
        X_windowed = windowed_view(
            X, window_size=window_size, window_step=window_step
        )
        X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)

        sfa = SymbolicFourierApproximation(
            n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,
            norm_mean=self.norm_mean, norm_std=self.norm_std,
            n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet
        )
        if y is None:
            y_repeated = None
        else:
            # Each window inherits the label of the series it came from.
            y_repeated = np.repeat(y, n_windows)
        X_sfa = sfa.fit_transform(X_windowed, y_repeated)

        # Join the per-window symbols into words, one row of words per sample.
        X_word = np.asarray([''.join(X_sfa[i])
                             for i in range(n_samples * n_windows)])
        X_word = X_word.reshape(n_samples, n_windows)
        X_bow = self._join_words(X_word, n_samples)

        self._window_size = window_size
        self._window_step = window_step
        self._n_windows = n_windows
        self._sfa = sfa
        return X_bow

    def _join_words(self, X_word, n_samples):
        """Concatenate each sample's words into a single document, applying
        numerosity reduction (dropping back-to-back duplicate words) when
        ``self.numerosity_reduction`` is True.
        """
        if self.numerosity_reduction:
            # Keep a word only when it differs from its successor; the last
            # word of each sample is always kept.
            not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],
                              np.full(n_samples, True)]
            return np.asarray([' '.join(X_word[i, not_equal[i]])
                               for i in range(n_samples)])
        return np.asarray([' '.join(X_word[i]) for i in range(n_samples)])

    def _check_params(self, n_timestamps):
        """Validate the hyper-parameters and return the integer window size
        and window step derived from ``n_timestamps``."""
        if not isinstance(self.word_size, (int, np.integer)):
            raise TypeError("'word_size' must be an integer.")
        if not self.word_size >= 1:
            raise ValueError("'word_size' must be a positive integer.")

        if not isinstance(self.window_size,
                          (int, np.integer, float, np.floating)):
            raise TypeError("'window_size' must be an integer or a float.")
        if isinstance(self.window_size, (int, np.integer)):
            if self.drop_sum:
                if not 1 <= self.window_size <= (n_timestamps - 1):
                    raise ValueError(
                        "If 'window_size' is an integer, it must be greater "
                        "than or equal to 1 and lower than or equal to "
                        "(n_timestamps - 1) if 'drop_sum=True'."
                    )
            else:
                if not 1 <= self.window_size <= n_timestamps:
                    raise ValueError(
                        "If 'window_size' is an integer, it must be greater "
                        "than or equal to 1 and lower than or equal to "
                        "n_timestamps if 'drop_sum=False'."
                    )
            window_size = self.window_size
        else:
            if not 0 < self.window_size <= 1:
                raise ValueError(
                    "If 'window_size' is a float, it must be greater "
                    "than 0 and lower than or equal to 1."
                )
            window_size = ceil(self.window_size * n_timestamps)

        if not isinstance(self.window_step,
                          (int, np.integer, float, np.floating)):
            raise TypeError("'window_step' must be an integer or a float.")
        if isinstance(self.window_step, (int, np.integer)):
            if not 1 <= self.window_step <= n_timestamps:
                raise ValueError(
                    "If 'window_step' is an integer, it must be greater "
                    "than or equal to 1 and lower than or equal to "
                    "n_timestamps."
                )
            window_step = self.window_step
        else:
            if not 0 < self.window_step <= 1:
                raise ValueError(
                    "If 'window_step' is a float, it must be greater "
                    "than 0 and lower than or equal to 1."
                )
            window_step = ceil(self.window_step * n_timestamps)

        if self.drop_sum:
            if not self.word_size <= (window_size - 1):
                raise ValueError(
                    "'word_size' must be lower than or equal to "
                    "(window_size - 1) if 'drop_sum=True'."
                )
        else:
            if not self.word_size <= window_size:
                raise ValueError(
                    "'word_size' must be lower than or equal to "
                    "window_size if 'drop_sum=False'."
                )

        return window_size, window_step
| 38.311615 | 79 | 0.586957 |
import numpy as np
from math import ceil
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils.validation import check_array, check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from ..approximation import SymbolicFourierApproximation
from ..utils import windowed_view
class BOSS(BaseEstimator, TransformerMixin):
    """Bag of Symbolic Fourier Approximation Symbols.

    Subseries are extracted from each time series with a sliding window and
    discretized into SFA words; each series becomes a histogram (bag) of its
    words.

    Parameters mirror the pyts implementation: ``word_size``, ``n_bins``,
    ``strategy``, ``window_size``/``window_step`` (int, or float fraction of
    the series length), ``anova``, ``drop_sum``, ``norm_mean``, ``norm_std``,
    ``numerosity_reduction``, ``sparse`` and ``alphabet``.

    Attributes
    ----------
    vocabulary_ : dict
        A mapping of feature indices to terms.
    """

    def __init__(self, word_size=4, n_bins=4, strategy='quantile',
                 window_size=10, window_step=1, anova=False, drop_sum=False,
                 norm_mean=False, norm_std=False, numerosity_reduction=True,
                 sparse=True, alphabet=None):
        self.word_size = word_size
        self.n_bins = n_bins
        self.strategy = strategy
        self.window_size = window_size
        self.window_step = window_step
        self.anova = anova
        self.drop_sum = drop_sum
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        self.numerosity_reduction = numerosity_reduction
        self.sparse = sparse
        self.alphabet = alphabet

    def fit(self, X, y=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Training vector.

        y : None or array-like, shape = (n_samples,)
            Class labels for each data sample.

        Returns
        -------
        self : object
        """
        X = check_array(X)
        X_bow = self._fit_bag_of_words(X, y)

        # Learn the vocabulary only; building document-term matrices is
        # left to `transform`.
        vectorizer = CountVectorizer()
        vectorizer.fit(X_bow)
        self.vocabulary_ = {value: key for key, value in
                            vectorizer.vocabulary_.items()}
        self._vectorizer = vectorizer
        return self

    def transform(self, X):
        """Transform the provided data.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Test samples.

        Returns
        -------
        X_new : sparse matrix, shape = (n_samples, n_words)
            Document-term matrix.
        """
        check_is_fitted(self, ['_sfa', '_vectorizer', 'vocabulary_'])
        X = check_array(X)
        n_samples = X.shape[0]

        # Extract subseries with the window parameters learned during fit
        # and turn each subseries into an SFA word.
        X_windowed = windowed_view(
            X, window_size=self._window_size, window_step=self._window_step
        )
        X_windowed = X_windowed.reshape(-1, self._window_size)
        X_sfa = self._sfa.transform(X_windowed)
        X_word = np.asarray([''.join(X_sfa[i]) for i in range(X_sfa.shape[0])])
        X_word = X_word.reshape(n_samples, self._n_windows)

        X_bow = self._join_words(X_word, n_samples)
        X_boss = self._vectorizer.transform(X_bow)
        if not self.sparse:
            return X_boss.A
        return csr_matrix(X_boss)

    def fit_transform(self, X, y=None):
        """Fit the data then transform it.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_timestamps)
            Training vector.

        y : None or array-like, shape = (n_samples,)
            Class labels for each data sample.

        Returns
        -------
        X_new : sparse matrix, shape = (n_samples, n_words)
            Document-term matrix.
        """
        X = check_array(X)
        X_bow = self._fit_bag_of_words(X, y)

        # Fit the vocabulary and build the document-term matrix in one pass.
        vectorizer = CountVectorizer()
        X_boss = vectorizer.fit_transform(X_bow)
        self.vocabulary_ = {value: key for key, value in
                            vectorizer.vocabulary_.items()}
        self._vectorizer = vectorizer
        if not self.sparse:
            return X_boss.A
        return csr_matrix(X_boss)

    def _fit_bag_of_words(self, X, y):
        """Fit the SFA discretizer on sliding windows of ``X`` and return the
        bag of words (one space-separated document per sample).

        Shared by ``fit`` and ``fit_transform``. Stores the fitted window
        parameters and SFA transformer on ``self`` (``_window_size``,
        ``_window_step``, ``_n_windows``, ``_sfa``).
        """
        n_samples, n_timestamps = X.shape
        if y is not None:
            check_classification_targets(y)
        window_size, window_step = self._check_params(n_timestamps)
        n_windows = (n_timestamps - window_size + window_step) // window_step

        # Extract the subseries: one row per (sample, window) pair.
        X_windowed = windowed_view(
            X, window_size=window_size, window_step=window_step
        )
        X_windowed = X_windowed.reshape(n_samples * n_windows, window_size)

        sfa = SymbolicFourierApproximation(
            n_coefs=self.word_size, drop_sum=self.drop_sum, anova=self.anova,
            norm_mean=self.norm_mean, norm_std=self.norm_std,
            n_bins=self.n_bins, strategy=self.strategy, alphabet=self.alphabet
        )
        if y is None:
            y_repeated = None
        else:
            # Each window inherits the label of the series it came from.
            y_repeated = np.repeat(y, n_windows)
        X_sfa = sfa.fit_transform(X_windowed, y_repeated)

        # Join the per-window symbols into words, one row of words per sample.
        X_word = np.asarray([''.join(X_sfa[i])
                             for i in range(n_samples * n_windows)])
        X_word = X_word.reshape(n_samples, n_windows)
        X_bow = self._join_words(X_word, n_samples)

        self._window_size = window_size
        self._window_step = window_step
        self._n_windows = n_windows
        self._sfa = sfa
        return X_bow

    def _join_words(self, X_word, n_samples):
        """Concatenate each sample's words into a single document, applying
        numerosity reduction (dropping back-to-back duplicate words) when
        ``self.numerosity_reduction`` is True.
        """
        if self.numerosity_reduction:
            # Keep a word only when it differs from its successor; the last
            # word of each sample is always kept.
            not_equal = np.c_[X_word[:, 1:] != X_word[:, :-1],
                              np.full(n_samples, True)]
            return np.asarray([' '.join(X_word[i, not_equal[i]])
                               for i in range(n_samples)])
        return np.asarray([' '.join(X_word[i]) for i in range(n_samples)])

    def _check_params(self, n_timestamps):
        """Validate the hyper-parameters and return the integer window size
        and window step derived from ``n_timestamps``."""
        if not isinstance(self.word_size, (int, np.integer)):
            raise TypeError("'word_size' must be an integer.")
        if not self.word_size >= 1:
            raise ValueError("'word_size' must be a positive integer.")

        if not isinstance(self.window_size,
                          (int, np.integer, float, np.floating)):
            raise TypeError("'window_size' must be an integer or a float.")
        if isinstance(self.window_size, (int, np.integer)):
            if self.drop_sum:
                if not 1 <= self.window_size <= (n_timestamps - 1):
                    raise ValueError(
                        "If 'window_size' is an integer, it must be greater "
                        "than or equal to 1 and lower than or equal to "
                        "(n_timestamps - 1) if 'drop_sum=True'."
                    )
            else:
                if not 1 <= self.window_size <= n_timestamps:
                    raise ValueError(
                        "If 'window_size' is an integer, it must be greater "
                        "than or equal to 1 and lower than or equal to "
                        "n_timestamps if 'drop_sum=False'."
                    )
            window_size = self.window_size
        else:
            if not 0 < self.window_size <= 1:
                raise ValueError(
                    "If 'window_size' is a float, it must be greater "
                    "than 0 and lower than or equal to 1."
                )
            window_size = ceil(self.window_size * n_timestamps)

        if not isinstance(self.window_step,
                          (int, np.integer, float, np.floating)):
            raise TypeError("'window_step' must be an integer or a float.")
        if isinstance(self.window_step, (int, np.integer)):
            if not 1 <= self.window_step <= n_timestamps:
                raise ValueError(
                    "If 'window_step' is an integer, it must be greater "
                    "than or equal to 1 and lower than or equal to "
                    "n_timestamps."
                )
            window_step = self.window_step
        else:
            if not 0 < self.window_step <= 1:
                raise ValueError(
                    "If 'window_step' is a float, it must be greater "
                    "than 0 and lower than or equal to 1."
                )
            window_step = ceil(self.window_step * n_timestamps)

        if self.drop_sum:
            if not self.word_size <= (window_size - 1):
                raise ValueError(
                    "'word_size' must be lower than or equal to "
                    "(window_size - 1) if 'drop_sum=True'."
                )
        else:
            if not self.word_size <= window_size:
                raise ValueError(
                    "'word_size' must be lower than or equal to "
                    "window_size if 'drop_sum=False'."
                )

        return window_size, window_step
| true | true |
f71d87a97f28b6912c291299e2155b00941ed654 | 1,615 | py | Python | imcsdk/__init__.py | kenrusse/imcsdk | c35ec5d41072c3ea82c64b1b66e0650d1d873657 | [
"Apache-2.0"
] | null | null | null | imcsdk/__init__.py | kenrusse/imcsdk | c35ec5d41072c3ea82c64b1b66e0650d1d873657 | [
"Apache-2.0"
] | null | null | null | imcsdk/__init__.py | kenrusse/imcsdk | c35ec5d41072c3ea82c64b1b66e0650d1d873657 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import logging.handlers
# Package-wide logger shared by every imcsdk module.
log = logging.getLogger('imc')
# Console handler with a timestamped record format; attached to `log`
# further below, after the default level is set.
console = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
    """Attach a rotating file handler to the SDK logger.

    Args:
        filename: path of the log file; rotated at 10 MB with up to five
            backup files kept.

    Returns:
        None
    """
    rotating = logging.handlers.RotatingFileHandler(
        filename, maxBytes=10 * 1024 * 1024, backupCount=5)
    log.addHandler(rotating)
def set_log_level(level=logging.DEBUG):
    """Set the verbosity of the SDK logger and its console handler.

    Args:
        level: a ``logging`` level constant, e.g. ``logging.INFO`` or
            ``logging.ERROR``.

    Returns:
        None

    Example:
        from imcsdk import set_log_level
        import logging
        set_log_level(logging.INFO)
    """
    for sink in (log, console):
        sink.setLevel(level)
# Default to DEBUG verbosity and start emitting records to the console.
set_log_level(logging.DEBUG)
log.addHandler(console)
# Opt-in file logging: create /tmp/imcsdk_debug to also capture imcsdk.log.
if os.path.exists('/tmp/imcsdk_debug'):
    enable_file_logging()
# Package metadata.
__author__ = 'Cisco Systems'
__email__ = 'ucs-python@cisco.com'
__version__ = '0.9.11'
| 26.47541 | 92 | 0.721981 |
import os
import logging
import logging.handlers
# Package-wide logger shared by every imcsdk module.
log = logging.getLogger('imc')
# Console handler with a timestamped record format; attached to `log`
# further below, after the default level is set.
console = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
    """Attach a rotating file handler (10 MB per file, five backups kept)
    to the SDK logger."""
    file_handler = logging.handlers.RotatingFileHandler(
        filename, maxBytes=10*1024*1024, backupCount=5)
    log.addHandler(file_handler)
def set_log_level(level=logging.DEBUG):
    """Set the verbosity of both the package logger and its console
    handler."""
    log.setLevel(level)
    console.setLevel(level)
# Default to DEBUG verbosity and start emitting records to the console.
set_log_level(logging.DEBUG)
log.addHandler(console)
# Opt-in file logging: create /tmp/imcsdk_debug to also capture imcsdk.log.
if os.path.exists('/tmp/imcsdk_debug'):
    enable_file_logging()
# Package metadata.
__author__ = 'Cisco Systems'
__email__ = 'ucs-python@cisco.com'
__version__ = '0.9.11'
| true | true |
f71d8838f9ddabbae871e9cf39c1127d06d5da3b | 7,514 | py | Python | pysnmp/ROOMALERT3E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/ROOMALERT3E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/ROOMALERT3E-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module ROOMALERT3E-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ROOMALERT3E-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:50:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Symbol bindings generated by pysmi: pull the base ASN.1/SMI types and
# helpers out of the already-loaded core MIB modules. `mibBuilder` is
# injected into the namespace by pysnmp when this module is loaded.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter64, enterprises, Integer32, Counter32, Unsigned32, NotificationType, MibIdentifier, ObjectIdentity, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter64", "enterprises", "Integer32", "Counter32", "Unsigned32", "NotificationType", "MibIdentifier", "ObjectIdentity", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "ModuleIdentity", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# OID registration tree: 1.3.6.1.4.1.20916 is the vendor's private
# enterprise arc; the ROOMALERT3E product node (…20916.1.9) groups the
# sensors(1), signaltower(2) and traps(3) subtrees defined below.
avtech = MibIdentifier((1, 3, 6, 1, 4, 1, 20916))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1))
ROOMALERT3E = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9))
sensors = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1))
signaltower = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2))
traps = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 3))
digital = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1))
digital_sen1 = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1)).setLabel("digital-sen1")
digital_sen2 = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2)).setLabel("digital-sen2")
switch = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2))
# Digital sensor 1: two read-only 16-bit values (range 0..65535) plus a
# free-form label string.
digital_sen1_1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen1-1").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_1.setStatus('mandatory')
digital_sen1_2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen1-2").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_2.setStatus('mandatory')
digital_sen1_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 3), OctetString()).setLabel("digital-sen1-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_label.setStatus('mandatory')
# Digital sensor 2: five read-only 16-bit values plus a label.
digital_sen2_1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-1").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_1.setStatus('mandatory')
digital_sen2_2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-2").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_2.setStatus('mandatory')
digital_sen2_3 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-3").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_3.setStatus('mandatory')
digital_sen2_4 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-4").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_4.setStatus('mandatory')
digital_sen2_5 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-5").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_5.setStatus('mandatory')
digital_sen2_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 6), OctetString()).setLabel("digital-sen2-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_label.setStatus('mandatory')
# Switch sensor: a read-only 0/1 state plus a label.
switch_sen1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("switch-sen1").setMaxAccess("readonly")
if mibBuilder.loadTexts: switch_sen1.setStatus('mandatory')
switch_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2, 2), OctetString()).setLabel("switch-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: switch_label.setStatus('mandatory')
# Signal-tower lights: writable 0/1 scalars, one per lamp color.
red_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("red-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: red_led.setStatus('current')
amber_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("amber-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: amber_led.setStatus('current')
green_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("green-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: green_led.setStatus('current')
blue_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("blue-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: blue_led.setStatus('current')
white_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("white-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: white_led.setStatus('current')
# Alarm relays: writable 0/1 scalars.
alarm1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarm1.setStatus('current')
alarm2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarm2.setStatus('current')
# Trap payload (free-text alarm message) and the notification that
# carries it.
alarmmessage = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmmessage.setStatus('mandatory')
room_alert_3e_snmp_trap = NotificationType((1, 3, 6, 1, 4, 1, 20916, 1, 9) + (0,2)).setLabel("room-alert-3e-snmp-trap").setObjects(("ROOMALERT3E-MIB", "alarmmessage"))
# Export every symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("ROOMALERT3E-MIB", signaltower=signaltower, digital_sen2_2=digital_sen2_2, traps=traps, digital_sen2_label=digital_sen2_label, alarm1=alarm1, switch_sen1=switch_sen1, ROOMALERT3E=ROOMALERT3E, digital_sen1_1=digital_sen1_1, alarmmessage=alarmmessage, avtech=avtech, digital=digital, digital_sen1_2=digital_sen1_2, sensors=sensors, amber_led=amber_led, digital_sen2_3=digital_sen2_3, green_led=green_led, digital_sen2_4=digital_sen2_4, digital_sen2=digital_sen2, digital_sen1_label=digital_sen1_label, alarm2=alarm2, blue_led=blue_led, room_alert_3e_snmp_trap=room_alert_3e_snmp_trap, switch_label=switch_label, digital_sen1=digital_sen1, products=products, digital_sen2_5=digital_sen2_5, digital_sen2_1=digital_sen2_1, white_led=white_led, switch=switch, red_led=red_led)
| 117.40625 | 795 | 0.746207 |
# Appears to be an auto-generated pysnmp MIB module (e.g. produced by pysmi/mibdump)
# for the AVTECH Room Alert 3E device -- prefer regenerating from the MIB source
# over hand-editing these definitions.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Counter64, enterprises, Integer32, Counter32, Unsigned32, NotificationType, MibIdentifier, ObjectIdentity, Gauge32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Counter64", "enterprises", "Integer32", "Counter32", "Unsigned32", "NotificationType", "MibIdentifier", "ObjectIdentity", "Gauge32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "ModuleIdentity", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Object-identifier tree under the AVTECH enterprise arc 1.3.6.1.4.1.20916.
avtech = MibIdentifier((1, 3, 6, 1, 4, 1, 20916))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1))
ROOMALERT3E = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9))
sensors = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1))
signaltower = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2))
traps = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 3))
digital = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1))
digital_sen1 = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1)).setLabel("digital-sen1")
digital_sen2 = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2)).setLabel("digital-sen2")
switch = MibIdentifier((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2))
# Read-only sensor scalars: two digital sensors plus one switch sensor.
digital_sen1_1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen1-1").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_1.setStatus('mandatory')
digital_sen1_2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen1-2").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_2.setStatus('mandatory')
digital_sen1_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 1, 3), OctetString()).setLabel("digital-sen1-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen1_label.setStatus('mandatory')
digital_sen2_1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-1").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_1.setStatus('mandatory')
digital_sen2_2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-2").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_2.setStatus('mandatory')
digital_sen2_3 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-3").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_3.setStatus('mandatory')
digital_sen2_4 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-4").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_4.setStatus('mandatory')
digital_sen2_5 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setLabel("digital-sen2-5").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_5.setStatus('mandatory')
digital_sen2_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 1, 2, 6), OctetString()).setLabel("digital-sen2-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: digital_sen2_label.setStatus('mandatory')
switch_sen1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("switch-sen1").setMaxAccess("readonly")
if mibBuilder.loadTexts: switch_sen1.setStatus('mandatory')
switch_label = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 1, 2, 2), OctetString()).setLabel("switch-label").setMaxAccess("readonly")
if mibBuilder.loadTexts: switch_label.setStatus('mandatory')
# Read/write signal-tower outputs: five LEDs and two alarms, each a 0/1 flag.
red_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("red-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: red_led.setStatus('current')
amber_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("amber-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: amber_led.setStatus('current')
green_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("green-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: green_led.setStatus('current')
blue_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("blue-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: blue_led.setStatus('current')
white_led = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setLabel("white-led").setMaxAccess("readwrite")
if mibBuilder.loadTexts: white_led.setStatus('current')
alarm1 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarm1.setStatus('current')
alarm2 = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarm2.setStatus('current')
# Read-only alarm message text, carried by the notification below.
alarmmessage = MibScalar((1, 3, 6, 1, 4, 1, 20916, 1, 9, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: alarmmessage.setStatus('mandatory')
room_alert_3e_snmp_trap = NotificationType((1, 3, 6, 1, 4, 1, 20916, 1, 9) + (0,2)).setLabel("room-alert-3e-snmp-trap").setObjects(("ROOMALERT3E-MIB", "alarmmessage"))
mibBuilder.exportSymbols("ROOMALERT3E-MIB", signaltower=signaltower, digital_sen2_2=digital_sen2_2, traps=traps, digital_sen2_label=digital_sen2_label, alarm1=alarm1, switch_sen1=switch_sen1, ROOMALERT3E=ROOMALERT3E, digital_sen1_1=digital_sen1_1, alarmmessage=alarmmessage, avtech=avtech, digital=digital, digital_sen1_2=digital_sen1_2, sensors=sensors, amber_led=amber_led, digital_sen2_3=digital_sen2_3, green_led=green_led, digital_sen2_4=digital_sen2_4, digital_sen2=digital_sen2, digital_sen1_label=digital_sen1_label, alarm2=alarm2, blue_led=blue_led, room_alert_3e_snmp_trap=room_alert_3e_snmp_trap, switch_label=switch_label, digital_sen1=digital_sen1, products=products, digital_sen2_5=digital_sen2_5, digital_sen2_1=digital_sen2_1, white_led=white_led, switch=switch, red_led=red_led)
| true | true |
f71d889ea9aa7b8f833a526c4ae748e8ab7f962a | 708 | py | Python | rh/apps/meta/migrations/0005_metaattributes_image.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | null | null | null | rh/apps/meta/migrations/0005_metaattributes_image.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | 108 | 2018-01-30T15:26:18.000Z | 2021-06-10T17:29:57.000Z | rh/apps/meta/migrations/0005_metaattributes_image.py | rapidpro/chpro-microsite | 4e1d1210b49ec60ab0711d78235bf45eeb5c0275 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-04 05:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import filer.fields.image
class Migration(migrations.Migration):
    """Add a nullable ``image`` filer field to the ``metaattributes`` model.

    Auto-generated by Django 1.11; avoid hand-editing applied migrations.
    """
    dependencies = [
        # The FILER_IMAGE_MODEL is swappable, so depend on whatever the project configured.
        migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
        ('meta', '0004_auto_20180322_1026'),
    ]
    operations = [
        migrations.AddField(
            model_name='metaattributes',
            name='image',
            # blank/null make the image optional; CASCADE removes it with the image record.
            field=filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL),
        ),
    ]
| 28.32 | 152 | 0.696328 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import filer.fields.image
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
('meta', '0004_auto_20180322_1026'),
]
operations = [
migrations.AddField(
model_name='metaattributes',
name='image',
field=filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL),
),
]
| true | true |
f71d8b088ea126f1b60b55d2095453b1d56c7fd9 | 207 | py | Python | src/lm/datasets/seq2seq.py | NeuroArchitect/lm | 036dcd9a49aa4f5b8b872f4e3a3b8821cbb0b060 | [
"Apache-2.0"
] | 1 | 2020-10-14T18:37:46.000Z | 2020-10-14T18:37:46.000Z | src/lm/datasets/seq2seq.py | shawwn/lm | 8f82f7e5933e8ec7498cbc147b801a729c80c7d3 | [
"Apache-2.0"
] | 2 | 2020-09-06T01:19:50.000Z | 2020-09-06T20:17:38.000Z | src/lm/datasets/seq2seq.py | shawwn/lm | 8f82f7e5933e8ec7498cbc147b801a729c80c7d3 | [
"Apache-2.0"
] | 1 | 2020-09-05T20:58:47.000Z | 2020-09-05T20:58:47.000Z | from typing import Optional
from pydantic import BaseModel
class Seq2SeqFormat(BaseModel):
    """Pydantic schema describing the record format of a seq2seq dataset."""
    vocab_size: int  # size of the token vocabulary
    context_length: int  # tokens per example window -- presumably fixed-size; confirm with producer
    has_eos: Optional[bool] = False  # NOTE(review): looks like "examples end with an EOS token" -- confirm
    keys = ["content", "target"]  # record field names; un-annotated, so a class constant rather than a pydantic field
from pydantic import BaseModel
class Seq2SeqFormat(BaseModel):
vocab_size: int
context_length: int
has_eos: Optional[bool] = False
keys = ["content", "target"]
| true | true |
f71d8b6368f70117f7b32ad062fc7597ae7b66dc | 3,726 | py | Python | cassette-calculator.py | BandW2011/cassette-calculator | b3a116a40a8c7a4872678ec4da6da5d1605584f8 | [
"MIT"
] | null | null | null | cassette-calculator.py | BandW2011/cassette-calculator | b3a116a40a8c7a4872678ec4da6da5d1605584f8 | [
"MIT"
] | null | null | null | cassette-calculator.py | BandW2011/cassette-calculator | b3a116a40a8c7a4872678ec4da6da5d1605584f8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import time
def isTime(str):
    """Return True when *str* parses as a "min:sec" timestamp, else False."""
    try:
        time.strptime(str, '%M:%S')
    except ValueError:
        return False
    return True
def timeToSec(input):
    """Convert a "min:sec" string to a total number of seconds."""
    parts = input.split(":")
    return int(parts[0]) * 60 + int(parts[1])
def secToTime(input):
    """Format a seconds count as "min:sec" with two-digit, zero-padded seconds."""
    minutes, seconds = divmod(input, 60)
    return "{}:{:02d}".format(minutes, seconds)
def _read_track_lengths():
    """Prompt for track lengths ("min:sec") until a non-time entry; return a list of seconds."""
    print("(Enter \'q\' to calculate)")
    tracks = []
    i = 0
    while True:
        i += 1
        print("Track " + str(i) + " length (min:sec): ")
        choice = input("> ")
        if not isTime(choice):
            return tracks
        tracks.append(timeToSec(choice))


def _best_split(tracks):
    """Return the track count k (1..len-1) that minimizes |Side A - Side B|.

    Fixes the original off-by-one: the search loop included the boundary
    track in its running sum but the report then excluded it.
    """
    total = sum(tracks)
    running = 0
    best_k = 1
    best_diff = None
    for k in range(1, len(tracks)):
        running += tracks[k - 1]
        diff = abs(total - 2 * running)  # |sideA - sideB| for prefix of length k
        if best_diff is None or diff < best_diff:
            best_k, best_diff = k, diff
    return best_k


def _report_even_split(tracks):
    """Print the Side A / Side B assignment that most evenly splits the album."""
    if len(tracks) == 0:
        print("It'll be really easy to divide the tracks in your album when there are none.")
    elif len(tracks) == 1:
        print("It'll be really easy to divide the tracks in your album when there\'s only one.")
    elif len(tracks) == 2:
        print("The total album length is " + secToTime(sum(tracks)) + ".")
        print("The album will most evenly be split if track 1 is on Side A (" + secToTime(tracks[0]) + "),")
        print("And track 2 is on Side B (" + secToTime(tracks[1]) + ").")
    else:
        k = _best_split(tracks)
        print("The total album length is " + secToTime(sum(tracks)) + ".")
        print("The album will most evenly be split if tracks 1-" + str(k) + " are on Side A (" + secToTime(sum(tracks[:k])) + "),")
        print("And tracks " + str(k + 1) + "-" + str(len(tracks)) + " are on Side B (" + secToTime(sum(tracks[k:])) + ").")


def main():
    """Show the calculator menu and dispatch choices until the user quits.

    Returns 0 when the user enters "q" (or "Q").  Replaces the original
    tail-recursive menu loop (which grew the call stack on every action)
    with a plain while loop, and adds the option-3 handler that was listed
    in the menu but never implemented.
    """
    while True:
        print("Select an option:")
        print("1) Enter tracks to find album length")
        print("2) Enter tracks to split album evenly")
        print("3) Enter tracks to find ideal side length")
        print("4) Convert min:sec to seconds")
        print("5) Convert seconds to min:sec")
        print("6) Find average track length")
        print("Q) Exit calculator")
        choice = input("> ")
        if choice == "1":
            tracks = _read_track_lengths()
            print("Total album length is " + secToTime(sum(tracks)) + ".")
        elif choice == "2":
            tracks = _read_track_lengths()
            _report_even_split(tracks)
        elif choice == "3":
            # The ideal side length is half the album, rounded up so that an
            # even split fits on both sides of the cassette.
            tracks = _read_track_lengths()
            print("The ideal side length is " + secToTime((sum(tracks) + 1) // 2) + ".")
        elif choice == "4":
            print("Input time in format (min:sec): ")
            choice = input("> ")
            print(timeToSec(choice))
        elif choice == "5":
            print("Input time in seconds: ")
            choice = input("> ")
            print(secToTime(int(choice)))
        elif choice == "6":
            tracks = _read_track_lengths()
            if not tracks:
                print("It doesn\'t seem as if you've input any tracks!")
            else:
                total_time = sum(tracks)
                print("The average track length (" + secToTime(total_time) + " / " + str(len(tracks)) + " tracks) is " + secToTime(total_time // len(tracks)) + ".")
        elif choice.lower() == "q":  # accept both "q" and "Q", as the menu promises
            return 0
        print("")
# Entry point: greet the user, then start the interactive menu.
print("* Cassette Calculator *")
main()
| 32.973451 | 150 | 0.496511 |
import time
def isTime(str):
try:
time.strptime(str, '%M:%S')
return True
except ValueError:
return False
def timeToSec(input):
return int(input.split(":")[0]) * 60 + int(input.split(":")[1])
def secToTime(input):
return str(input // 60) + ":" + '{:02d}'.format(input % 60)
def main():
print("Select an option:")
print("1) Enter tracks to find album length")
print("2) Enter tracks to split album evenly")
print(" ) Enter tracks to find ideal side length")
print("4) Convert min:sec to seconds")
print("5) Convert seconds to min:sec")
print("6) Find average track length")
print("Q) Exit calculator")
choice = input("> ")
if choice == "1":
print("(Enter \'q\' to calculate)")
total_time = 0
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
total_time += timeToSec(choice)
else:
break
print("Total album length is " + secToTime(total_time) + ".")
elif choice == "2":
print("(Enter \'q\' to calculate)")
tracks = []
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
tracks.append(timeToSec(choice))
else:
break
if len(tracks) == 0:
print("It'll be really easy to divide the tracks in your album when there are none.")
elif len(tracks) == 1:
print("It'll be really easy to divide the tracks in your album when there\'s only one.")
elif len(tracks) == 2:
print("The total album length is " + secToTime(sum(tracks)) + ".")
print("The album will most evenly be split if track 1 is on Side A (" + secToTime(tracks[0]) + "),")
print("And track 2 is on Side B (" + secToTime(tracks[1]) + ").")
else:
halfway_time = sum(tracks) // 2
halfway_track = -1
k = 0
while (k != len(tracks)):
if sum(tracks[:k + 1]) >= halfway_time:
break
k += 1
print(str(halfway_time))
print("The total album length is " + secToTime(sum(tracks)) + ".")
print("The album will most evenly be split if tracks 1-" + str(k) + " are on Side A (" + secToTime(sum(tracks[:k])) + "),")
print("And tracks " + str(k+1) + "-" + str(len(tracks)) + " are on Side B (" + secToTime(sum(tracks[k:])) + ").")
elif choice == "4":
print("Input time in format (min:sec): ")
choice = input("> ")
print(timeToSec(choice))
elif choice == "5":
print("Input time in seconds: ")
choice = input("> ")
print(secToTime(int(choice)))
elif choice == "6":
print("(Enter \'q\' to calculate)")
total_time = 0
i = 0
while (True):
i += 1
print("Track " + str(i) + " length (min:sec): ")
choice = input("> ")
if isTime(choice):
total_time += timeToSec(choice)
else:
break
if total_time == 0:
print("It doesn\'t seem as if you've input any tracks!")
else:
print("The average track length (" + secToTime(total_time) + " / " + str(i - 1) + " tracks) is " + secToTime(total_time // (i - 1)) + ".")
elif choice == str.lower("q"):
return 0
print("")
main()
print("* Cassette Calculator *")
main()
| true | true |
f71d8cd840396855122bd25a3c882b5f3a776d40 | 1,908 | py | Python | 12.py | mramendi/advent-of-code-2021 | 7cd98e6006659f42654b8fc126f06ebfba5774c0 | [
"CC0-1.0"
] | null | null | null | 12.py | mramendi/advent-of-code-2021 | 7cd98e6006659f42654b8fc126f06ebfba5774c0 | [
"CC0-1.0"
] | null | null | null | 12.py | mramendi/advent-of-code-2021 | 7cd98e6006659f42654b8fc126f06ebfba5774c0 | [
"CC0-1.0"
] | null | null | null | def how_many_paths_to_end(start_node,paths,no_go,exception_used,path_string):
'''
recursive walk
Parameters:
start_node (string) - the node from which we walk
paths (dict) - a dictionary where every node has a set of connected nodes
no_go (set) - a set of nodes that can not be visited as they are "small caves" and were visited; an exception can be used to visit one small cave twice
exception_used (boolean): if True, the exception for visiting a small cave
twice was already used; for part 1 answer set to True from the start
'''
new_path_string=path_string+" "+start_node
if start_node == "end":
# print(new_path_string) # UNCOMMENT TO PRINT PATHS
return 1 # there is exactly one path from end to end
new_no_go = no_go.copy()
if start_node[0].islower():
new_no_go.add(start_node) # small caves can only be visited once
count = 0
for connected_node in paths[start_node]:
new_exception_used=exception_used
if connected_node=="start":
continue
if connected_node in no_go:
if exception_used:
continue
new_exception_used=True
if start_node[0].isupper() and connected_node[0].isupper():
print("WARNING: path between two large caves, infinite loop likely: "+start_node+","+connected_node)
count += how_many_paths_to_end(connected_node,paths,new_no_go,new_exception_used,new_path_string)
return count
paths = {}
for s in open("input12.txt"):
if s.strip()=="": continue
p1,p2=s.split("-")
p1=p1.strip()
p2=p2.strip()
if not p1 in paths:
paths[p1]=set()
if not p2 in paths:
paths[p2]=set()
paths[p1].add(p2)
paths[p2].add(p1)
print("Part 1: ",how_many_paths_to_end("start",paths,set(),True,""))
print("Part 2: ",how_many_paths_to_end("start",paths,set(),False,""))
| 36.692308 | 155 | 0.664046 | def how_many_paths_to_end(start_node,paths,no_go,exception_used,path_string):
new_path_string=path_string+" "+start_node
if start_node == "end":
w_no_go = no_go.copy()
if start_node[0].islower():
new_no_go.add(start_node)
count = 0
for connected_node in paths[start_node]:
new_exception_used=exception_used
if connected_node=="start":
continue
if connected_node in no_go:
if exception_used:
continue
new_exception_used=True
if start_node[0].isupper() and connected_node[0].isupper():
print("WARNING: path between two large caves, infinite loop likely: "+start_node+","+connected_node)
count += how_many_paths_to_end(connected_node,paths,new_no_go,new_exception_used,new_path_string)
return count
paths = {}
for s in open("input12.txt"):
if s.strip()=="": continue
p1,p2=s.split("-")
p1=p1.strip()
p2=p2.strip()
if not p1 in paths:
paths[p1]=set()
if not p2 in paths:
paths[p2]=set()
paths[p1].add(p2)
paths[p2].add(p1)
print("Part 1: ",how_many_paths_to_end("start",paths,set(),True,""))
print("Part 2: ",how_many_paths_to_end("start",paths,set(),False,""))
| true | true |
f71d8cdeec3da4e234767b37f0ad7f8ee3dad966 | 1,502 | py | Python | CODES/a_star_03.py | YashwanthYarala/MICROMOUSE | 69ba518ee81e1e6b70a13f7480844459d240ed11 | [
"MIT"
] | null | null | null | CODES/a_star_03.py | YashwanthYarala/MICROMOUSE | 69ba518ee81e1e6b70a13f7480844459d240ed11 | [
"MIT"
] | null | null | null | CODES/a_star_03.py | YashwanthYarala/MICROMOUSE | 69ba518ee81e1e6b70a13f7480844459d240ed11 | [
"MIT"
] | null | null | null | maze = [[0,0,0,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,0,0,0,0,0]]
# we start at (1,1)
a = 1
b = 1
h = k = 4
def cost(p,q):
t = 1+abs(p-h)+abs(q-k)
return t
while(a!=4 and b!=4):
# checking the posibility of motion
maze[a][b] = '*'
if(maze[a+1][b]==1): #checking for right
t_r=cost(a+1,b)
else:
t_r = cost(a,b)
if(maze[a][b+1]==1):
t_d = cost(a,b+1)
else:
t_d = cost(a,b)
if(maze[a-1][b]==1):
t_l = cost(a-1,b)
else:
t_l = cost(a,b)
if(maze[a][b-1]==1):
t_u = cost(a,b-1)
else:
t_u = cost(a,b)
# comparison between costs
if (t_d > t_l or t_d > t_r or t_d > t_u):
a = a
b = b
# then t_d is not the least values of cost
else:
# then t_d is the least value
b = b + 1
if (t_r > t_l or t_r > t_u or t_r > t_d):
a = a
b = b
else:
a += 1
if (t_l > t_u or t_l > t_r or t_l > t_d):
a = a
b = b
else:
a = a - 1
if (t_u > t_l or t_u > t_r or t_r > t_d):
a = a
b = b
else:
b = b - 1
if (a == 4 and b == 4):
maze[a][b] = "*"
print(maze)
| 20.861111 | 54 | 0.388815 | maze = [[0,0,0,0,0,0,0,0,0,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,1,1,1,1,1,1,1,1,0],
[0,0,0,0,0,0,0,0,0,0]]
a = 1
b = 1
h = k = 4
def cost(p,q):
t = 1+abs(p-h)+abs(q-k)
return t
while(a!=4 and b!=4):
maze[a][b] = '*'
if(maze[a+1][b]==1):
t_r=cost(a+1,b)
else:
t_r = cost(a,b)
if(maze[a][b+1]==1):
t_d = cost(a,b+1)
else:
t_d = cost(a,b)
if(maze[a-1][b]==1):
t_l = cost(a-1,b)
else:
t_l = cost(a,b)
if(maze[a][b-1]==1):
t_u = cost(a,b-1)
else:
t_u = cost(a,b)
if (t_d > t_l or t_d > t_r or t_d > t_u):
a = a
b = b
else:
b = b + 1
if (t_r > t_l or t_r > t_u or t_r > t_d):
a = a
b = b
else:
a += 1
if (t_l > t_u or t_l > t_r or t_l > t_d):
a = a
b = b
else:
a = a - 1
if (t_u > t_l or t_u > t_r or t_r > t_d):
a = a
b = b
else:
b = b - 1
if (a == 4 and b == 4):
maze[a][b] = "*"
print(maze)
| true | true |
f71d8eaac6d3bc9bc701b395bc1f1700b57e561f | 770 | py | Python | examples/research/proj/C3H6.py | fishjojo/pydmfe | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | 3 | 2021-02-26T06:26:00.000Z | 2022-02-20T08:58:20.000Z | examples/research/proj/C3H6.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | examples/research/proj/C3H6.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | from pydmfet import proj_ao
from pydmfet.qcwrap.pyscf_rks_ao import rks_ao
from pyscf import gto,scf
import numpy as np
from pyscf.tools import molden
from pyscf import lo
from pyscf.lo import iao,orth
from functools import reduce
import math
# Example script: projection-based embedding for propene (C3H6) via pydmfet.
bas ='ccpvdz'  # basis set name passed to pyscf
temp = 0.01  # Fermi smearing width (smear_sigma) -- units presumably Hartree; confirm
# Build the molecule from the geometry file in the working directory.
mol = gto.Mole()
mol.atom = open('C3H6.xyz').read()
mol.basis = bas
mol.charge = 0
mol.build(max_memory = 4000, verbose=4)
#mf = scf.RKS(mol)
# Smeared restricted Kohn-Sham with the PBE exchange-correlation functional.
mf = rks_ao(mol,smear_sigma = temp)
mf.xc = "pbe,pbe"
mf.max_cycle = 50
DMguess = None  # no initial density-matrix guess; let pyscf pick its default
mf.scf(dm0=DMguess)
# Mark the first five atoms as the impurity (embedded) region.
natoms = mol.natm
impAtom = np.zeros([natoms], dtype=int)
for i in range(5):
    impAtom[i] = 1
# Ne_env = 8: electron count assigned to the environment -- TODO confirm against the paper/setup.
embed = proj_ao.proj_embed(mf,impAtom, Ne_env = 8)
embed.pop_method = 'meta_lowdin'
embed.make_frozen_orbs(norb = 11)
#embed.embedding_potential()
| 19.25 | 50 | 0.733766 | from pydmfet import proj_ao
from pydmfet.qcwrap.pyscf_rks_ao import rks_ao
from pyscf import gto,scf
import numpy as np
from pyscf.tools import molden
from pyscf import lo
from pyscf.lo import iao,orth
from functools import reduce
import math
bas ='ccpvdz'
temp = 0.01
mol = gto.Mole()
mol.atom = open('C3H6.xyz').read()
mol.basis = bas
mol.charge = 0
mol.build(max_memory = 4000, verbose=4)
mf = rks_ao(mol,smear_sigma = temp)
mf.xc = "pbe,pbe"
mf.max_cycle = 50
DMguess = None
mf.scf(dm0=DMguess)
natoms = mol.natm
impAtom = np.zeros([natoms], dtype=int)
for i in range(5):
impAtom[i] = 1
embed = proj_ao.proj_embed(mf,impAtom, Ne_env = 8)
embed.pop_method = 'meta_lowdin'
embed.make_frozen_orbs(norb = 11)
| true | true |
f71d8ef2d5e34813e862d6041a815735fe0485fa | 2,893 | py | Python | src/python/pants/backend/python/pants_requirement.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/pants_requirement.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/pants_requirement.py | StephanErb/pants | a368267b6b4cf50138ba567f582409ed31bf5db9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import object
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.build_environment import pants_version
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.address import Address
from pants.util.meta import classproperty
class PantsRequirement(object):
  """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.
  This requirement is useful for custom plugin authors who want to build and test their plugin with
  pants itself. Using the resulting target as a dependency of their plugin target ensures the
  dependency stays true to the surrounding repo's version of pants.
  NB: The requirement generated is for official pants releases on pypi; so may not be appropriate
  for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.
  :API: public
  """
  @classproperty
  def alias(self):
    # BUILD-file alias under which this macro is registered.
    return 'pants_requirement'
  def __init__(self, parse_context):
    self._parse_context = parse_context
  def __call__(self, name=None, dist=None):
    """
    :param string name: The name to use for the target, defaults to the dist name if specified and
                        otherwise the parent dir name.
    :param string dist: The pants dist to create a requirement for. This must be a
                        'pantsbuild.pants*' distribution; eg:
                        'pantsbuild.pants.contrib.python.checks'.
    """
    # Default the target name to the dist, else to the BUILD file's directory name.
    name = name or dist or os.path.basename(self._parse_context.rel_path)
    dist = dist or 'pantsbuild.pants'
    # Only official pantsbuild.pants distributions are pinnable to pants_version().
    if not (dist == 'pantsbuild.pants' or dist.startswith('pantsbuild.pants.')):
      target = Address(spec_path=self._parse_context.rel_path, target_name=name)
      raise TargetDefinitionException(target=target,
                                      msg='The {} target only works for pantsbuild.pants '
                                          'distributions, given {}'.format(self.alias, dist))
    # Update the environment marker in lockstep with other changes as described in
    # https://github.com/pantsbuild/pants/issues/6450
    env_marker = "python_version>='2.7' and python_version<'3'"
    requirement = PythonRequirement(requirement="{key}=={version} ; {env_marker}"
                                    .format(key=dist,
                                            version=pants_version(),
                                            env_marker=env_marker))
    # Materialize the requirement as a python_requirement_library target in this BUILD file.
    self._parse_context.create_object('python_requirement_library',
                                      name=name,
                                      requirements=[requirement])
| 44.507692 | 99 | 0.676115 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import object
from pants.backend.python.python_requirement import PythonRequirement
from pants.base.build_environment import pants_version
from pants.base.exceptions import TargetDefinitionException
from pants.build_graph.address import Address
from pants.util.meta import classproperty
class PantsRequirement(object):
@classproperty
def alias(self):
return 'pants_requirement'
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(self, name=None, dist=None):
name = name or dist or os.path.basename(self._parse_context.rel_path)
dist = dist or 'pantsbuild.pants'
if not (dist == 'pantsbuild.pants' or dist.startswith('pantsbuild.pants.')):
target = Address(spec_path=self._parse_context.rel_path, target_name=name)
raise TargetDefinitionException(target=target,
msg='The {} target only works for pantsbuild.pants '
'distributions, given {}'.format(self.alias, dist))
env_marker = "python_version>='2.7' and python_version<'3'"
requirement = PythonRequirement(requirement="{key}=={version} ; {env_marker}"
.format(key=dist,
version=pants_version(),
env_marker=env_marker))
self._parse_context.create_object('python_requirement_library',
name=name,
requirements=[requirement])
| true | true |
f71d9092e5371d5d2c143058ded24cb5f4ca1958 | 6,692 | py | Python | youtube_dl/extractor/gdcvault.py | aalvarito68/https-github.com-rg3-youtube-dl | dfc80bdd2e4ef3d30f161a93f99f3050537944ab | [
"Unlicense"
] | 24 | 2017-03-17T10:27:12.000Z | 2022-02-16T05:55:50.000Z | youtube_dl/extractor/gdcvault.py | travis-south/youtube-dl | dc89f968330fe9b2f0e56b07febc8cd57005f2c0 | [
"Unlicense"
] | 8 | 2017-12-05T23:45:54.000Z | 2022-02-09T23:28:51.000Z | youtube_dl/extractor/gdcvault.py | travis-south/youtube-dl | dc89f968330fe9b2f0e56b07febc8cd57005f2c0 | [
"Unlicense"
] | 6 | 2017-07-15T07:17:29.000Z | 2018-03-13T07:31:18.000Z | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
HEADRequest,
sanitized_Request,
urlencode_postdata,
)
class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)?'
_NETRC_MACHINE = 'gdcvault'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
'id': '1019721',
'display_id': 'Doki-Doki-Universe-Sweet-Simple',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
}
},
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
'id': '1015683',
'display_id': 'Embracing-the-Dark-Art-of',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
},
'params': {
'skip_download': True, # Requires rtmpdump
}
},
{
'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
'md5': 'a5eb77996ef82118afbbe8e48731b98e',
'info_dict': {
'id': '1015301',
'display_id': 'Thexder-Meets-Windows-95-or',
'ext': 'flv',
'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
},
'skip': 'Requires login',
},
{
'url': 'http://gdcvault.com/play/1020791/',
'only_matching': True,
},
{
# Hard-coded hostname
'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface',
'md5': 'a8efb6c31ed06ca8739294960b2dbabd',
'info_dict': {
'id': '1023460',
'ext': 'mp4',
'display_id': 'Tenacious-Design-and-The-Interface',
'title': 'Tenacious Design and The Interface of \'Destiny\'',
},
},
{
# Multiple audios
'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC',
'info_dict': {
'id': '1014631',
'ext': 'flv',
'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man',
},
'params': {
'skip_download': True, # Requires rtmpdump
'format': 'jp', # The japanese audio
}
},
{
# gdc-player.html
'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo',
'info_dict': {
'id': '1435',
'display_id': 'An-American-engine-in-Tokyo',
'ext': 'flv',
'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT',
},
'params': {
'skip_download': True, # Requires rtmpdump
},
},
]
    def _login(self, webpage_url, display_id):
        """Log in to gdcvault.com and return the authenticated video page HTML.

        Returns None (after a warning) when no username/password is configured.
        """
        (username, password) = self._get_login_info()
        if username is None or password is None:
            self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
            return None
        # Derive the site root from the page URL and build the login/logout endpoints.
        mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
        login_url = mobj.group('root_url') + 'api/login.php'
        logout_url = mobj.group('root_url') + 'logout'
        login_form = {
            'email': username,
            'password': password,
        }
        request = sanitized_Request(login_url, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(request, display_id, 'Logging in')
        start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
        # Log out right away -- the session is presumably only needed to fetch
        # the page above (TODO confirm the downstream media URLs are unauthenticated).
        self._download_webpage(logout_url, display_id, 'Logging out')
        return start_page
    def _real_extract(self, url):
        """Extract video info, trying a direct flash URL first, then the XML player.

        Two page layouts are handled: (1) pages embedding a direct file URL via
        ``s1.addVariable("file", ...)``; (2) pages embedding an iframe player
        whose XML config is handed off to the DigitallySpeaking extractor.
        """
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('name') or video_id
        webpage_url = 'http://www.gdcvault.com/play/' + video_id
        start_page = self._download_webpage(webpage_url, display_id)
        # Layout (1): direct file URL embedded in the flash player setup.
        direct_url = self._search_regex(
            r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
            start_page, 'url', default=None)
        if direct_url:
            title = self._html_search_regex(
                r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
                start_page, 'title')
            video_url = 'http://www.gdcvault.com' + direct_url
            # resolve the url so that we can detect the correct extension
            head = self._request_webpage(HEADRequest(video_url), video_id)
            video_url = head.geturl()
            return {
                'id': video_id,
                'display_id': display_id,
                'url': video_url,
                'title': title,
            }
        # Layout (2): iframe player; 'gdc-' prefix is optional in the player filename.
        PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>'
        xml_root = self._html_search_regex(
            PLAYER_REGEX, start_page, 'xml root', default=None)
        if xml_root is None:
            # Probably need to authenticate
            login_res = self._login(webpage_url, display_id)
            if login_res is None:
                self.report_warning('Could not login.')
            else:
                start_page = login_res
                # Grab the url from the authenticated page
                xml_root = self._html_search_regex(
                    PLAYER_REGEX, start_page, 'xml root')
        # Newer pages pass the config as ?xml=...; older ones as ?xmlURL=xml/...
        xml_name = self._html_search_regex(
            r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
            start_page, 'xml filename', default=None)
        if xml_name is None:
            # Fallback to the older format
            xml_name = self._html_search_regex(
                r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>',
                start_page, 'xml filename')
        # Delegate actual media extraction to the DigitallySpeaking extractor.
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'display_id': display_id,
            'url': '%s/xml/%s' % (xml_root, xml_name),
            'ie_key': 'DigitallySpeaking',
        }
| 38.24 | 140 | 0.527794 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
HEADRequest,
sanitized_Request,
urlencode_postdata,
)
class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)?'
_NETRC_MACHINE = 'gdcvault'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
'md5': '7ce8388f544c88b7ac11c7ab1b593704',
'info_dict': {
'id': '1019721',
'display_id': 'Doki-Doki-Universe-Sweet-Simple',
'ext': 'mp4',
'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
}
},
{
'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
'info_dict': {
'id': '1015683',
'display_id': 'Embracing-the-Dark-Art-of',
'ext': 'flv',
'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
},
'params': {
'skip_download': True,
}
},
{
'url': 'http://www.gdcvault.com/play/1015301/Thexder-Meets-Windows-95-or',
'md5': 'a5eb77996ef82118afbbe8e48731b98e',
'info_dict': {
'id': '1015301',
'display_id': 'Thexder-Meets-Windows-95-or',
'ext': 'flv',
'title': 'Thexder Meets Windows 95, or Writing Great Games in the Windows 95 Environment',
},
'skip': 'Requires login',
},
{
'url': 'http://gdcvault.com/play/1020791/',
'only_matching': True,
},
{
'url': 'http://gdcvault.com/play/1023460/Tenacious-Design-and-The-Interface',
'md5': 'a8efb6c31ed06ca8739294960b2dbabd',
'info_dict': {
'id': '1023460',
'ext': 'mp4',
'display_id': 'Tenacious-Design-and-The-Interface',
'title': 'Tenacious Design and The Interface of \'Destiny\'',
},
},
{
'url': 'http://www.gdcvault.com/play/1014631/Classic-Game-Postmortem-PAC',
'info_dict': {
'id': '1014631',
'ext': 'flv',
'title': 'How to Create a Good Game - From My Experience of Designing Pac-Man',
},
'params': {
'skip_download': True,
'format': 'jp',
}
},
{
'url': 'http://www.gdcvault.com/play/1435/An-American-engine-in-Tokyo',
'info_dict': {
'id': '1435',
'display_id': 'An-American-engine-in-Tokyo',
'ext': 'flv',
'title': 'An American Engine in Tokyo:/nThe collaboration of Epic Games and Square Enix/nFor THE LAST REMINANT',
},
'params': {
'skip_download': True,
},
},
]
def _login(self, webpage_url, display_id):
(username, password) = self._get_login_info()
if username is None or password is None:
self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
return None
mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
login_url = mobj.group('root_url') + 'api/login.php'
logout_url = mobj.group('root_url') + 'logout'
login_form = {
'email': username,
'password': password,
}
request = sanitized_Request(login_url, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, display_id, 'Logging in')
start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
self._download_webpage(logout_url, display_id, 'Logging out')
return start_page
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('name') or video_id
webpage_url = 'http://www.gdcvault.com/play/' + video_id
start_page = self._download_webpage(webpage_url, display_id)
direct_url = self._search_regex(
r's1\.addVariable\("file",\s*encodeURIComponent\("(/[^"]+)"\)\);',
start_page, 'url', default=None)
if direct_url:
title = self._html_search_regex(
r'<td><strong>Session Name</strong></td>\s*<td>(.*?)</td>',
start_page, 'title')
video_url = 'http://www.gdcvault.com' + direct_url
# resolve the url so that we can detect the correct extension
head = self._request_webpage(HEADRequest(video_url), video_id)
video_url = head.geturl()
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
}
PLAYER_REGEX = r'<iframe src="(?P<xml_root>.+?)/(?:gdc-)?player.*?\.html.*?".*?</iframe>'
xml_root = self._html_search_regex(
PLAYER_REGEX, start_page, 'xml root', default=None)
if xml_root is None:
# Probably need to authenticate
login_res = self._login(webpage_url, display_id)
if login_res is None:
self.report_warning('Could not login.')
else:
start_page = login_res
# Grab the url from the authenticated page
xml_root = self._html_search_regex(
PLAYER_REGEX, start_page, 'xml root')
xml_name = self._html_search_regex(
r'<iframe src=".*?\?xml=(.+?\.xml).*?".*?</iframe>',
start_page, 'xml filename', default=None)
if xml_name is None:
# Fallback to the older format
xml_name = self._html_search_regex(
r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>',
start_page, 'xml filename')
return {
'_type': 'url_transparent',
'id': video_id,
'display_id': display_id,
'url': '%s/xml/%s' % (xml_root, xml_name),
'ie_key': 'DigitallySpeaking',
}
| true | true |
f71d90b544b5058b7fccd9644f2bf263c8823742 | 2,339 | py | Python | Quantum Key Distribution/Mutation Testing/QKD Mutation Testing Cirq/Remove_mutant_2.py | Lilgabz/Quantum-Algorithm-Implementations | 2bb5df522d76e94b300275dfefff2869ff31bc2c | [
"MIT"
] | 1 | 2022-03-20T17:20:09.000Z | 2022-03-20T17:20:09.000Z | Quantum Key Distribution/Mutation Testing/QKD Mutation Testing Cirq/Remove_mutant_2.py | Lilgabz/Quantum-Algorithm-Implementations | 2bb5df522d76e94b300275dfefff2869ff31bc2c | [
"MIT"
] | null | null | null | Quantum Key Distribution/Mutation Testing/QKD Mutation Testing Cirq/Remove_mutant_2.py | Lilgabz/Quantum-Algorithm-Implementations | 2bb5df522d76e94b300275dfefff2869ff31bc2c | [
"MIT"
] | 2 | 2021-12-30T22:23:20.000Z | 2022-03-20T17:20:22.000Z | import unittest
import cirq
from cirq.ops import H, X, I
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randint
import hypothesis.strategies as st
from hypothesis import given, settings
def generate_binary(len):
    """Return a NumPy array of `len` uniformly random bits (0 or 1)."""
    bits = randint(2, size=len)
    return bits
def encode_message(bits, bases, messageLen):
    """Encode `bits` into one single-qubit cirq.Circuit per bit (BB84 send).

    bases[i] == 0 prepares qubit i in the Z-basis (I for 0, X for 1);
    bases[i] == 1 prepares it in the X-basis (gate then Hadamard).

    NOTE: this is a *deliberately mutated* variant for mutation testing
    (see the "mutant - remove" marker): in the X-basis 0-bit branch no
    Hadamard is appended, unlike the 1-bit branch, so a 0 bit in the
    X-basis is left in |0> instead of |+> on purpose.
    """
    message = []
    for i in range(messageLen):
        # Fresh one-qubit circuit per transmitted bit.
        qubits = cirq.LineQubit.range(1)
        qc = cirq.Circuit()
        if bases[i] == 0: # Prepare qubit in Z-basis
            if bits[i] == 0:
                qc.append(cirq.I(qubits[0]))
            else:
                qc.append(cirq.X(qubits[0]))
        else: # Prepare qubit in X-basis
            if bits[i] == 0:
                ### mutant - remove ###
                qc.append(cirq.I(qubits[0]))
            else:
                qc.append(cirq.X(qubits[0]))
                qc.append(cirq.H(qubits[0]))
        message.append(qc)
    return message
def measure_message(message, bases, messageLen):
    """Measure each circuit of `message` in the basis chosen by `bases`.

    bases[q] == 0 measures in the Z-basis; bases[q] == 1 applies a
    Hadamard to every qubit first (X-basis). Circuits are mutated in
    place (measurement gates are appended once), then simulated one at
    a time; the list of sampled bits is returned.
    """
    measurements = []
    for idx in range(messageLen):
        circuit = message[idx]
        if bases[idx] == 0 and not circuit.has_measurements():
            # Z-basis: measure each qubit directly.
            for qubit in circuit.all_qubits():
                circuit.append(cirq.measure(qubit))
        if bases[idx] == 1 and not circuit.has_measurements():
            # X-basis: rotate with H, then measure.
            for qubit in circuit.all_qubits():
                circuit.append(cirq.H(qubit))
                circuit.append(cirq.measure(qubit))
        result = cirq.Simulator().run(circuit)
        measurements.append(result.data.iat[0, 0])
    return measurements
def remove_garbage(a_bases, b_bases, bits, messageLen):
    """BB84 sifting step: keep only the bits where both parties happened
    to pick the same measurement basis."""
    return [bits[q] for q in range(messageLen) if a_bases[q] == b_bases[q]]
def sample_bits(bits, selection):
    """Pop and return one element of `bits` per index in `selection`.

    Mutates `bits`: each sampled element is removed. Every index is
    reduced modulo the *current* list length before the pop, so it is
    always in range even as the list shrinks.
    """
    sample = []
    for raw_index in selection:
        index = np.mod(raw_index, len(bits))
        sample.append(bits.pop(index))
    return sample
import cirq
from cirq.ops import H, X, I
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import randint
import hypothesis.strategies as st
from hypothesis import given, settings
def generate_binary(len):
return randint(2, size=len)
def encode_message(bits, bases, messageLen):
message = []
for i in range(messageLen):
qubits = cirq.LineQubit.range(1)
qc = cirq.Circuit()
if bases[i] == 0:
if bits[i] == 0:
qc.append(cirq.I(qubits[0]))
else:
qc.append(cirq.X(qubits[0]))
else:
if bits[i] == 0:
else:
qc.append(cirq.X(qubits[0]))
qc.append(cirq.H(qubits[0]))
message.append(qc)
return message
def measure_message(message, bases, messageLen):
measurements = []
for q in range(messageLen):
if bases[q] == 0:
if (not message[q].has_measurements()):
for qubit in message[q].all_qubits():
message[q].append(cirq.measure(qubit))
if bases[q] == 1:
if (not message[q].has_measurements()):
for qubit in message[q].all_qubits():
message[q].append(cirq.H(qubit))
message[q].append(cirq.measure(qubit))
simulator = cirq.Simulator()
measured_bit = simulator.run(message[q])
measurements.append((measured_bit.data.iat[0,0]))
return measurements
def remove_garbage(a_bases, b_bases, bits, messageLen):
good_bits = []
for q in range(messageLen):
if a_bases[q] == b_bases[q]:
good_bits.append(bits[q])
return good_bits
def sample_bits(bits, selection):
sample = []
for i in selection:
i = np.mod(i, len(bits))
sample.append(bits.pop(i))
return sample | true | true |
f71d90e180f56bda6ddb0db33bef3d8021a809dc | 1,803 | py | Python | jotdx/parser/get_minute_time_data.py | jojoquant/jotdx | bc1cf478ddb262883d779d3c494877a767ebedd5 | [
"MIT"
] | null | null | null | jotdx/parser/get_minute_time_data.py | jojoquant/jotdx | bc1cf478ddb262883d779d3c494877a767ebedd5 | [
"MIT"
] | null | null | null | jotdx/parser/get_minute_time_data.py | jojoquant/jotdx | bc1cf478ddb262883d779d3c494877a767ebedd5 | [
"MIT"
] | null | null | null | # coding=utf-8
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
import six
class GetMinuteTimeData(BaseParser):
    # Builds the TDX "minute time data" request (command id 0x051d) and
    # decodes the response into per-minute (price, volume) records.
    def setParams(self, market, code):
        # struct packing below needs raw bytes, not text.
        if type(code) is six.text_type:
            code = code.encode("utf-8")
        # Fixed 12-byte request header, then market id (u16), 6-byte
        # security code, and a trailing u32 of zero (little-endian).
        pkg = bytearray.fromhex(u'0c 1b 08 00 01 01 0e 00 0e 00 1d 05')
        pkg.extend(struct.pack("<H6sI", market, code, 0))
        self.send_pkg = pkg
    """
    Sample response body and get_price() decoding trace kept for
    reference when debugging the wire format:

    b1cb74000c1b080001b61d05be03be03f0000000a208ce038d2c028302972f4124b11a00219821011183180014891c0009be0b4207b11000429c2041....
    In [26]: get_price(b, 0)
    Out[26]: (0, 1)
    In [27]: get_price(b, 1)
    Out[27]: (0, 2)
    In [28]: get_price(b, 2)
    Out[28]: (546, 4)
    In [29]: get_price(b, 4)
    Out[29]: (-206, 6)
    In [30]: get_price(b, 6)
    Out[30]: (2829, 8)
    In [31]: get_price(b, 8)
    Out[31]: (2, 9)
    In [32]: get_price(b, 9)
    Out[32]: (131, 11)
    In [36]: get_price(b, 11)
    Out[36]: (3031, 13)
    In [37]: get_price(b, 13)
    Out[37]: (-1, 14)
    In [38]: get_price(b, 14)
    Out[38]: (36, 15)
    In [39]: get_price(b, 15)
    Out[39]: (1713, 17)
    In [40]: get_price(b, 17)
    Out[40]: (0, 18)
    """
    def parseResponse(self, body_buf):
        # First u16 is the record count; pos then jumps to 4, skipping
        # two more bytes (presumably reserved -- TODO confirm).
        pos = 0
        (num, ) = struct.unpack("<H", body_buf[:2])
        last_price = 0
        pos += 4
        prices = []
        for i in range(num):
            # Each record is three variable-length values decoded by
            # get_price(); prices are delta-encoded, so accumulate.
            price_raw, pos = get_price(body_buf, pos)
            # Decoded only to advance pos; meaning unknown (unused).
            reversed1, pos = get_price(body_buf, pos)
            vol, pos = get_price(body_buf, pos)
            last_price = last_price + price_raw
            # Accumulated price is in hundredths of the quote unit.
            price = OrderedDict(
                [
                    ("price", float(last_price)/100),
                    ("vol", vol)
                ]
            )
            prices.append(price)
        return prices
| 23.723684 | 128 | 0.576262 |
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
import struct
import six
class GetMinuteTimeData(BaseParser):
def setParams(self, market, code):
if type(code) is six.text_type:
code = code.encode("utf-8")
pkg = bytearray.fromhex(u'0c 1b 08 00 01 01 0e 00 0e 00 1d 05')
pkg.extend(struct.pack("<H6sI", market, code, 0))
self.send_pkg = pkg
def parseResponse(self, body_buf):
pos = 0
(num, ) = struct.unpack("<H", body_buf[:2])
last_price = 0
pos += 4
prices = []
for i in range(num):
price_raw, pos = get_price(body_buf, pos)
reversed1, pos = get_price(body_buf, pos)
vol, pos = get_price(body_buf, pos)
last_price = last_price + price_raw
price = OrderedDict(
[
("price", float(last_price)/100),
("vol", vol)
]
)
prices.append(price)
return prices
| true | true |
f71d913ea75fe27913d288c4dcaf44d98ac472ac | 46 | py | Python | .history/ClassFiles/DataTypes/StringData_20201230222853.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/DataTypes/StringData_20201230222853.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | .history/ClassFiles/DataTypes/StringData_20201230222853.py | minefarmer/Comprehensive-Python | f97b9b83ec328fc4e4815607e6a65de90bb8de66 | [
"Unlicense"
] | null | null | null | """ ThisString Data type(str)
""" This String Data type (str)
"""
f71d91b6162a24bdd995d0f0381a0643a85de953 | 4,535 | py | Python | Chapter03/Chapter_3/musegen/musegen.py | YMandCL/Hands-On-Deep-Learning-for-Games | 0225661409c3bf59ae6b7996c254bb485ebd10cb | [
"MIT"
] | 33 | 2018-12-29T15:39:20.000Z | 2022-03-18T14:36:11.000Z | Chapter03/Chapter_3/musegen/musegen.py | YMandCL/Hands-On-Deep-Learning-for-Games | 0225661409c3bf59ae6b7996c254bb485ebd10cb | [
"MIT"
] | 4 | 2019-05-01T08:30:47.000Z | 2020-08-14T21:13:53.000Z | Chapter03/Chapter_3/musegen/musegen.py | YMandCL/Hands-On-Deep-Learning-for-Games | 0225661409c3bf59ae6b7996c254bb485ebd10cb | [
"MIT"
] | 14 | 2019-01-13T15:52:08.000Z | 2021-10-10T06:14:39.000Z | # Currently this script is configured to use the note-generator model.
from config import sequence_length, output_dir, note_generator_dir
from helper import loadChorales, loadModelAndWeights, createPitchSpecificVocabularies, createDurationVocabularySpecific
from music21 import note, instrument, stream, duration
import numpy as np
import os
# disable GPU processing
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# ----------------------------------------------
from keras.utils import to_categorical
# select the epoch to use when loading the weights of the model generator
generator_epoch = 43
# how many notes to generate ('end' marks are created along the way and the result is split into pieces)
number_of_notes = 200
# load chorales to create the vocabularies
print('loading chorales...')
notes = loadChorales()
# create the vocabulary
note_vocab, note_names_vocab, note_vocab_categorical = createPitchSpecificVocabularies([x[0] for (x, _) in notes])
duration_vocab = createDurationVocabularySpecific([d for (_, d) in notes])
duration_vocab_categorical = to_categorical(range(len(duration_vocab)))
note_to_int = dict((note, number) for number, note in enumerate(note_vocab))
int_to_note = dict((number, note) for number, note in enumerate(note_vocab))
duration_to_int = dict((dur, number) for number, dur in enumerate(duration_vocab))
duration_dim = duration_vocab.shape[0]
pitch_dim = np.array(note_vocab).shape[0]
print('loading networks...')
dir_path = os.path.dirname(os.path.realpath(__file__))
generator = loadModelAndWeights(os.path.join(dir_path, note_generator_dir, 'model.json'),
os.path.join(dir_path, note_generator_dir, 'weights-{:02d}.hdf5'.format(generator_epoch)))
# make a melody!!!
pitch_input = np.eye(pitch_dim)[np.random.choice(pitch_dim, size=sequence_length)]
duration_input = np.eye(duration_dim)[np.random.choice(duration_dim, size=sequence_length)]
print('generating output...')
# generate notes
# Autoregressive sampling: run the current window through the network,
# record the prediction, then slide the window forward one step.
generator_output = []
for _ in range(number_of_notes):
    # reshape inputs to (batch=1, sequence_length, vocab_dim)
    pi = np.reshape(pitch_input, (1, sequence_length, pitch_dim))
    di = np.reshape(duration_input, (1, sequence_length, duration_dim))
    # make prediction
    pitch_pred, dur_pred = generator.predict({'pitches_input': pi, 'durations_input': di}, verbose=0)
    generator_output.append((pitch_pred, dur_pred))
    # Slide each input window: append the prediction, drop the oldest row.
    pitch_input = np.vstack([pitch_input, pitch_pred])
    pitch_input = pitch_input[1:len(pitch_input)]
    duration_input = np.vstack([duration_input, dur_pred])
    duration_input = duration_input[1:len(duration_input)]
# Decode one-hot predictions (argmax) back to (pitch, duration) symbols.
output_notes = [(int_to_note[np.argmax(n)], duration_vocab[np.argmax(d)]) for (n, d) in generator_output]
output_notes = np.array(output_notes)
output_notes = np.reshape(output_notes, (-1, 2))
# output_notes contains: pitch values in midi format (integers), 'rest' marks, 'end' marks
# split the generated notes into pieces based on 'end' marks
indices = []
for (ind, (n, _)) in enumerate(output_notes):
    if n == 'end':
        indices.append(ind)
# Prepend a 0 sentinel so each slice starts just after the previous 'end'.
indices = np.insert(np.reshape(indices, (-1)), 0, 0)
pieces = [output_notes]
if len(indices) > 1:
    # NOTE(review): slices start at indices[j] + 1, so the very first note
    # (index 0) and anything after the last 'end' mark are dropped --
    # confirm this is intentional.
    pieces = ([ output_notes[(indices[j] + 1):indices[j + 1] ] for j in range(len(indices) - 1)])
print('writing output to disk...')
os.makedirs(os.path.join(dir_path, output_dir, 'note-generator'), exist_ok=True)
# Write each generated piece to its own MIDI file under note-generator/.
for index, notes in enumerate(pieces):
    midi_notes = []
    offset = 0
    for n, d in notes:
        # since a duration of 0 is included in the vocabulary (for the 'end' marks), the network may generate a 0 duration for other notes
        # naively correct and report this erroneous behaviour
        if abs(float(d)) < 0.001:
            print('found zero duration')
            d = '1.0'
        # A 'rest' symbol becomes a Rest, anything else a pitched Note;
        # both share the same duration/offset/instrument setup.
        new_note = note.Rest() if n == 'rest' else note.Note(int(n))
        new_note.duration = duration.Duration(float(d))
        new_note.offset = offset
        new_note.storedInstrument = instrument.Piano()
        midi_notes.append(new_note)
        # Notes are laid out sequentially: each starts where the last ended.
        offset += float(d)
    midi_stream = stream.Stream(midi_notes)
    midi_stream.write('midi', fp=os.path.join(dir_path, output_dir, 'note-generator', 'sample-{}.mid'.format(index)))
from config import sequence_length, output_dir, note_generator_dir
from helper import loadChorales, loadModelAndWeights, createPitchSpecificVocabularies, createDurationVocabularySpecific
from music21 import note, instrument, stream, duration
import numpy as np
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
from keras.utils import to_categorical
generator_epoch = 43
number_of_notes = 200
print('loading chorales...')
notes = loadChorales()
note_vocab, note_names_vocab, note_vocab_categorical = createPitchSpecificVocabularies([x[0] for (x, _) in notes])
duration_vocab = createDurationVocabularySpecific([d for (_, d) in notes])
duration_vocab_categorical = to_categorical(range(len(duration_vocab)))
note_to_int = dict((note, number) for number, note in enumerate(note_vocab))
int_to_note = dict((number, note) for number, note in enumerate(note_vocab))
duration_to_int = dict((dur, number) for number, dur in enumerate(duration_vocab))
duration_dim = duration_vocab.shape[0]
pitch_dim = np.array(note_vocab).shape[0]
print('loading networks...')
dir_path = os.path.dirname(os.path.realpath(__file__))
generator = loadModelAndWeights(os.path.join(dir_path, note_generator_dir, 'model.json'),
os.path.join(dir_path, note_generator_dir, 'weights-{:02d}.hdf5'.format(generator_epoch)))
pitch_input = np.eye(pitch_dim)[np.random.choice(pitch_dim, size=sequence_length)]
duration_input = np.eye(duration_dim)[np.random.choice(duration_dim, size=sequence_length)]
print('generating output...')
generator_output = []
for _ in range(number_of_notes):
pi = np.reshape(pitch_input, (1, sequence_length, pitch_dim))
di = np.reshape(duration_input, (1, sequence_length, duration_dim))
pitch_pred, dur_pred = generator.predict({'pitches_input': pi, 'durations_input': di}, verbose=0)
generator_output.append((pitch_pred, dur_pred))
pitch_input = np.vstack([pitch_input, pitch_pred])
pitch_input = pitch_input[1:len(pitch_input)]
duration_input = np.vstack([duration_input, dur_pred])
duration_input = duration_input[1:len(duration_input)]
output_notes = [(int_to_note[np.argmax(n)], duration_vocab[np.argmax(d)]) for (n, d) in generator_output]
output_notes = np.array(output_notes)
output_notes = np.reshape(output_notes, (-1, 2))
indices = []
for (ind, (n, _)) in enumerate(output_notes):
if n == 'end':
indices.append(ind)
indices = np.insert(np.reshape(indices, (-1)), 0, 0)
pieces = [output_notes]
if len(indices) > 1:
pieces = ([ output_notes[(indices[j] + 1):indices[j + 1] ] for j in range(len(indices) - 1)])
print('writing output to disk...')
os.makedirs(os.path.join(dir_path, output_dir, 'note-generator'), exist_ok=True)
for index, notes in enumerate(pieces):
midi_notes = []
offset = 0
for n, d in notes:
if abs(float(d)) < 0.001:
print('found zero duration')
d = '1.0'
if n == 'rest':
new_note = note.Rest()
new_note.duration = duration.Duration(float(d))
new_note.offset = offset
new_note.storedInstrument = instrument.Piano()
midi_notes.append(new_note)
else:
new_note = note.Note(int(n))
new_note.duration = duration.Duration(float(d))
new_note.offset = offset
new_note.storedInstrument = instrument.Piano()
midi_notes.append(new_note)
offset += float(d)
midi_stream = stream.Stream(midi_notes)
midi_stream.write('midi', fp=os.path.join(dir_path, output_dir, 'note-generator', 'sample-{}.mid'.format(index))) | true | true |
f71d92446ac29000ec218291728624b2d6598670 | 295 | py | Python | feature_extraction.py | LFRusso/kinnect-keyboard | 52c4a205528d524ae07eb15c15f3f08300110635 | [
"MIT"
] | null | null | null | feature_extraction.py | LFRusso/kinnect-keyboard | 52c4a205528d524ae07eb15c15f3f08300110635 | [
"MIT"
] | null | null | null | feature_extraction.py | LFRusso/kinnect-keyboard | 52c4a205528d524ae07eb15c15f3f08300110635 | [
"MIT"
] | null | null | null | import mahotas as mt
from skimage.feature import hog
def extract_features(img_gray, img_mask):
    """Concatenate Zernike moments of the grayscale image with the HOG
    descriptor of the binary mask into a single flat feature list."""
    zernike_feats = mt.features.zernike_moments(img_gray, 3)
    hog_feats = hog(img_mask, orientations=9,
                    pixels_per_cell=(8, 8), cells_per_block=(2, 2))
    return [*zernike_feats, *hog_feats]
from skimage.feature import hog
def extract_features(img_gray, img_mask):
zernike = mt.features.zernike_moments(img_gray, 3)
fd = hog(img_mask, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(2, 2))
return list(zernike)+list(fd) | true | true |
f71d939ead9c998360910e553be21fdb201e464c | 341 | py | Python | setup.py | Edward-FitzSimons/TetrisAgent_ML | 191a8c198941444d6dda8374eac796d8455ba003 | [
"MIT"
] | null | null | null | setup.py | Edward-FitzSimons/TetrisAgent_ML | 191a8c198941444d6dda8374eac796d8455ba003 | [
"MIT"
] | null | null | null | setup.py | Edward-FitzSimons/TetrisAgent_ML | 191a8c198941444d6dda8374eac796d8455ba003 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name="tetrisRL",
version="0.5",
author="Jay Butera",
author_email="buterajay@gmail.com",
license="MIT",
url="https://github.com/jaybutera/tetrisRL",
packages=find_packages(),
install_requires=[
'numpy>=1.13',
'torch'
],
)
| 21.3125 | 49 | 0.592375 | from setuptools import setup, find_packages
setup(
name="tetrisRL",
version="0.5",
author="Jay Butera",
author_email="buterajay@gmail.com",
license="MIT",
url="https://github.com/jaybutera/tetrisRL",
packages=find_packages(),
install_requires=[
'numpy>=1.13',
'torch'
],
)
| true | true |
f71d93e8560f437371b035f0098f804c534076a4 | 524 | py | Python | hw1/connect_cisco_dict.py | rshetty07/pyneta | bb82d516ae3727161a5f77c3a6a5f4294f6e0ba1 | [
"Apache-2.0"
] | null | null | null | hw1/connect_cisco_dict.py | rshetty07/pyneta | bb82d516ae3727161a5f77c3a6a5f4294f6e0ba1 | [
"Apache-2.0"
] | null | null | null | hw1/connect_cisco_dict.py | rshetty07/pyneta | bb82d516ae3727161a5f77c3a6a5f4294f6e0ba1 | [
"Apache-2.0"
] | null | null | null | from netmiko import ConnectHandler
from getpass import getpass
# Netmiko connection parameters for the two lab routers.
# NOTE: getpass() runs while each dict literal is built, so the script
# prompts for a password twice (once per device) before connecting.
device1 = {
    "host": 'cisco3.lasthop.io',
    "username": 'pyclass',
    "password": getpass(),
    "device_type": 'cisco_ios',
    # "session_log": 'my_session.txt'
}
device2 = {
    "host": 'cisco4.lasthop.io',
    "username": 'pyclass',
    "password": getpass(),
    "device_type": 'cisco_ios',
    # "session_log": 'my_session.txt'
}
# Connect to each device in turn and print its CLI prompt as a
# connectivity check.
for device in (device1, device2):
    net_connect = ConnectHandler(**device)
    print(net_connect.find_prompt())
| 22.782609 | 42 | 0.645038 | from netmiko import ConnectHandler
from getpass import getpass
device1 = {
"host": 'cisco3.lasthop.io',
"username": 'pyclass',
"password": getpass(),
"device_type": 'cisco_ios',
}
device2 = {
"host": 'cisco4.lasthop.io',
"username": 'pyclass',
"password": getpass(),
"device_type": 'cisco_ios',
}
for device in (device1, device2):
net_connect = ConnectHandler(**device)
print(net_connect.find_prompt())
| true | true |
f71d94a79dd3fc9185f2556c95a6d1391c0ca76d | 4,023 | py | Python | bin/GetUSGSNLCDForDEMExtent.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | [
"Unlicense"
] | 12 | 2015-03-03T05:08:55.000Z | 2021-01-27T12:38:33.000Z | bin/GetUSGSNLCDForDEMExtent.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | [
"Unlicense"
] | 3 | 2016-01-04T15:05:43.000Z | 2019-02-01T02:19:45.000Z | bin/GetUSGSNLCDForDEMExtent.py | selimnairb/EcohydroLib | 38aa4020c88a57c9d2f1fb66acd393b6e989e897 | [
"Unlicense"
] | 5 | 2015-02-15T18:20:38.000Z | 2017-05-21T13:14:32.000Z | #!/usr/bin/env python
"""@package GetUSGSNLCDForBoundingbox
@brief Download NLCD 2006 or 2011 data hosted by U.S. Geological Survey Web
Coverage Service (WCS) interface.
This software is provided free of charge under the New BSD License. Please see
the following license information:
Copyright (c) 2015, University of North Carolina at Chapel Hill
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of North Carolina at Chapel Hill nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author Brian Miles <brian_miles@unc.edu>
"""
import sys
import os
import argparse
import traceback
from ecohydrolib.command.exceptions import *
from ecohydrolib.command.landcover import USGSWCSNLCD, KNOWN_LC_TYPES, DEFAULT_LC_TYPE
if __name__ == "__main__":
    # Handle command line options
    parser = argparse.ArgumentParser(description='Download NLCD data via USGS-hosted WCS web service')
    parser.add_argument('-i', '--configfile', dest='configfile', required=False,
                        help='The configuration file.')
    parser.add_argument('-p', '--projectDir', dest='projectDir', required=True,
                        help='The directory to which metadata, intermediate, and final files should be saved')
    parser.add_argument('-f', '--outfile', dest='outfile', required=False,
                        help='The name of the DEM file to be written. File extension ".tif" will be added.')
    parser.add_argument('-t', '--lctype', dest='lctype', required=False, default=DEFAULT_LC_TYPE,
                        choices=KNOWN_LC_TYPES,
                        help='Type of NLCD landcover data to download. ' +
                             "Defaults to '{0}'.".format(DEFAULT_LC_TYPE))
    parser.add_argument('--overwrite', dest='overwrite', action='store_true', required=False,
                        help='Overwrite existing data in project directory. If not specified, program will halt if a dataset already exists.')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                        help='Print detailed information about what the program is doing')
    args = parser.parse_args()
    # The config file is optional; the command falls back to defaults.
    configFile = None
    if args.configfile:
        configFile = args.configfile
    command = USGSWCSNLCD(args.projectDir, configFile)
    # Map command failures to a data-error exit status instead of a
    # traceback-only crash; the traceback is still printed to stderr.
    exitCode = os.EX_OK
    try:
        command.run(lctype=args.lctype,
                    outfile=args.outfile,
                    verbose=args.verbose,
                    overwrite=args.overwrite)
    except CommandException as e:
        traceback.print_exc(file=sys.stderr)
        exitCode = os.EX_DATAERR
    sys.exit(exitCode)
import sys
import os
import argparse
import traceback
from ecohydrolib.command.exceptions import *
from ecohydrolib.command.landcover import USGSWCSNLCD, KNOWN_LC_TYPES, DEFAULT_LC_TYPE
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Download NLCD data via USGS-hosted WCS web service')
parser.add_argument('-i', '--configfile', dest='configfile', required=False,
help='The configuration file.')
parser.add_argument('-p', '--projectDir', dest='projectDir', required=True,
help='The directory to which metadata, intermediate, and final files should be saved')
parser.add_argument('-f', '--outfile', dest='outfile', required=False,
help='The name of the DEM file to be written. File extension ".tif" will be added.')
parser.add_argument('-t', '--lctype', dest='lctype', required=False, default=DEFAULT_LC_TYPE,
choices=KNOWN_LC_TYPES,
help='Type of NLCD landcover data to download. ' +
"Defaults to '{0}'.".format(DEFAULT_LC_TYPE))
parser.add_argument('--overwrite', dest='overwrite', action='store_true', required=False,
help='Overwrite existing data in project directory. If not specified, program will halt if a dataset already exists.')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Print detailed information about what the program is doing')
args = parser.parse_args()
configFile = None
if args.configfile:
configFile = args.configfile
command = USGSWCSNLCD(args.projectDir, configFile)
exitCode = os.EX_OK
try:
command.run(lctype=args.lctype,
outfile=args.outfile,
verbose=args.verbose,
overwrite=args.overwrite)
except CommandException as e:
traceback.print_exc(file=sys.stderr)
exitCode = os.EX_DATAERR
sys.exit(exitCode) | true | true |
f71d9562c12f81854e60e618863958ef2c14c867 | 2,016 | py | Python | tests/pytests/functional/cli/test_salt_cloud.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/pytests/functional/cli/test_salt_cloud.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/pytests/functional/cli/test_salt_cloud.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
tests.pytests.integration.cli.test_salt_cloud
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import pytest
pytest.importorskip("libcloud", reason="salt-cloud requires >= libcloud 0.11.4")
def test_function_arguments(salt_cloud_cli):
    """--function must be given both a function name and a provider."""
    result = salt_cloud_cli.run("--function", "show_image", "-h")
    assert result.exitcode != 0
    expected = "error: --function expects two arguments: <function-name> <provider>"
    assert expected in result.stderr
def test_list_providers_accepts_no_arguments(salt_cloud_cli):
    """--list-providers must reject any positional argument."""
    result = salt_cloud_cli.run("--list-providers", "ec2")
    assert result.exitcode != 0
    assert "error: '--list-providers' does not accept any arguments" in result.stderr
@pytest.mark.parametrize(
    "query_option", ["--query", "--full-query", "--select-query", "--list-providers"]
)
def test_mutually_exclusive_query_options(salt_cloud_cli, query_option):
    """Passing two query-style options together must be rejected."""
    # Pair the option under test with any *other* query option.
    conflicting_option = "--full-query" if query_option == "--query" else "--query"
    ret = salt_cloud_cli.run(query_option, conflicting_option)
    assert ret.exitcode != 0
    assert "are mutually exclusive. Please only choose one of them" in ret.stderr
@pytest.mark.parametrize(
    "list_option", ["--list-locations", "--list-images", "--list-sizes"]
)
def test_mutually_exclusive_list_options(salt_cloud_cli, list_option):
    """Passing two list-style options together must be rejected."""
    # Pair the option under test with any *other* list option.
    conflicting_option = (
        "--list-images" if list_option == "--list-locations" else "--list-locations"
    )
    ret = salt_cloud_cli.run(list_option, "ec2", conflicting_option, "ec2")
    assert ret.exitcode != 0
    assert "are mutually exclusive. Please only choose one of them" in ret.stderr
| 35.368421 | 85 | 0.672619 | import pytest
pytest.importorskip("libcloud", reason="salt-cloud requires >= libcloud 0.11.4")
def test_function_arguments(salt_cloud_cli):
ret = salt_cloud_cli.run("--function", "show_image", "-h")
assert ret.exitcode != 0
assert (
"error: --function expects two arguments: <function-name> <provider>"
in ret.stderr
)
def test_list_providers_accepts_no_arguments(salt_cloud_cli):
ret = salt_cloud_cli.run("--list-providers", "ec2")
assert ret.exitcode != 0
assert "error: '--list-providers' does not accept any arguments" in ret.stderr
@pytest.mark.parametrize(
"query_option", ["--query", "--full-query", "--select-query", "--list-providers"]
)
def test_mutually_exclusive_query_options(salt_cloud_cli, query_option):
if query_option != "--query":
conflicting_option = "--query"
elif query_option != "--full-query":
conflicting_option = "--full-query"
elif query_option != "--select-query":
conflicting_option = "--select-query"
elif query_option != "--list-providers":
conflicting_option = "--list-providers"
ret = salt_cloud_cli.run(query_option, conflicting_option)
assert ret.exitcode != 0
assert "are mutually exclusive. Please only choose one of them" in ret.stderr
@pytest.mark.parametrize(
"list_option", ["--list-locations", "--list-images", "--list-sizes"]
)
def test_mutually_exclusive_list_options(salt_cloud_cli, list_option):
if list_option != "--list-locations":
conflicting__option = "--list-locations"
elif list_option != "--list-images":
conflicting__option = "--list-images"
elif list_option != "--list-sizes":
conflicting__option = "--list-sizes"
ret = salt_cloud_cli.run(list_option, "ec2", conflicting__option, "ec2")
assert ret.exitcode != 0
assert "are mutually exclusive. Please only choose one of them" in ret.stderr
| true | true |
f71d9631429f556fd55add33baaf1526744d5702 | 22,864 | py | Python | nipype/interfaces/slicer/generate_classes.py | koenhelwegen/nipype | 87d04aeb4cb78c95345506a0e5e3571d380ed1d1 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/slicer/generate_classes.py | koenhelwegen/nipype | 87d04aeb4cb78c95345506a0e5e3571d380ed1d1 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/slicer/generate_classes.py | koenhelwegen/nipype | 87d04aeb4cb78c95345506a0e5e3571d380ed1d1 | [
"Apache-2.0"
] | 1 | 2020-10-22T12:50:23.000Z | 2020-10-22T12:50:23.000Z | # -*- coding: utf-8 -*-
"""This script generates Slicer Interfaces based on the CLI modules XML. CLI
modules are selected from the hardcoded list below and generated code is placed
in the cli_modules.py file (and imported in __init__.py). For this to work
correctly you must have your CLI executables in $PATH"""
import xml.dom.minidom
import subprocess
import os
from shutil import rmtree
import keyword
# Python reserved words; CLI parameter names that collide with one of these
# are rewritten by force_to_valid_python_variable_name() below.
python_keywords = (
    keyword.kwlist
)  # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable
def force_to_valid_python_variable_name(old_name):
    """Return *old_name* adjusted so it can be used as a Python identifier.

    Surrounding whitespace is removed; names colliding with a Python
    reserved word get an ``opt_`` prefix.

    >>> force_to_valid_python_variable_name('lambda')
    'opt_lambda'
    >>> force_to_valid_python_variable_name('inputVolume')
    'inputVolume'
    """
    # keyword.kwlist is exactly the module-level ``python_keywords`` sequence.
    if old_name in keyword.kwlist:
        return "opt_" + old_name
    return old_name.strip()
def add_class_to_package(class_codes, class_names, module_name, package_dir):
    """Write ``class_codes`` into ``<package_dir>/<module_name>.py``.

    The module file gets an autogenerated-file header plus the shared nipype
    imports; the package ``__init__.py`` is appended with an import of
    ``class_names`` from the new module.
    """
    module_python_filename = os.path.join(package_dir, "%s.py" % module_name)
    # Context managers guarantee the handles are flushed and closed even if
    # a write fails (the original left them open on error).
    with open(module_python_filename, "w") as f_m, open(
        os.path.join(package_dir, "__init__.py"), "a+"
    ) as f_i:
        f_m.write(
            """# -*- coding: utf-8 -*-
\"\"\"Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n"""
        )
        imports = """\
from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec,
                    File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath)
import os\n\n\n"""
        f_m.write(imports)
        f_m.write("\n\n".join(class_codes))
        f_i.write("from %s import %s\n" % (module_name, ", ".join(class_names)))
def crawl_code_struct(code_struct, package_dir):
    """Recursively write the nested ``code_struct`` mapping as a package tree.

    Values that are strings are generated class code and become modules;
    dict values become sub-packages (each with its own ``__init__.py`` and a
    numpy.distutils ``setup.py``).
    """
    subpackages = []
    for k, v in code_struct.items():
        if isinstance(v, str) or isinstance(v, (str, bytes)):
            # Leaf: one generated class -> one module named after it.
            module_name = k.lower()
            class_name = k
            class_code = v
            add_class_to_package([class_code], [class_name], module_name, package_dir)
        else:
            # Partition the children into string leaves (l1) and nested
            # dicts (l2); nested dicts force creation of a sub-package.
            l1 = {}
            l2 = {}
            for key in list(v.keys()):
                if isinstance(v[key], str) or isinstance(v[key], (str, bytes)):
                    l1[key] = v[key]
                else:
                    l2[key] = v[key]
            if l2:
                v = l2
                subpackages.append(k.lower())
                f_i = open(os.path.join(package_dir, "__init__.py"), "a+")
                f_i.write("from %s import *\n" % k.lower())
                f_i.close()
                new_pkg_dir = os.path.join(package_dir, k.lower())
                # Start from a clean directory so stale modules disappear.
                if os.path.exists(new_pkg_dir):
                    rmtree(new_pkg_dir)
                os.mkdir(new_pkg_dir)
                crawl_code_struct(v, new_pkg_dir)
                if l1:
                    # String leaves alongside sub-packages are wrapped one
                    # level deeper so they become modules of the new package.
                    for ik, iv in l1.items():
                        crawl_code_struct({ik: {ik: iv}}, new_pkg_dir)
            elif l1:
                v = l1
                module_name = k.lower()
                add_class_to_package(
                    list(v.values()), list(v.keys()), module_name, package_dir
                )
    if subpackages:
        # Emit a numpy.distutils setup.py that registers the sub-packages.
        f = open(os.path.join(package_dir, "setup.py"), "w")
        f.write(
            """# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('{pkg_name}', parent_package, top_path)
    {sub_pks}
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
""".format(
                pkg_name=package_dir.split("/")[-1],
                sub_pks="\n    ".join(
                    [
                        "config.add_data_dir('%s')" % sub_pkg
                        for sub_pkg in subpackages
                    ]
                ),
            )
        )
        f.close()
def generate_all_classes(
    modules_list=None, launcher=None, redirect_x=False, mipav_hacks=False
):
    """Generate nipype wrapper classes for every tool in ``modules_list``.

    ``modules_list`` contains all the SEM compliant tools that should have
    wrappers created for them.  ``launcher`` contains the command line prefix
    wrapper arguments needed to prepare a proper environment for each of the
    modules.  The resulting package tree is written under the current
    working directory.
    """
    # None sentinels instead of mutable default arguments ([]), which would
    # be shared between calls; passing explicit lists still works unchanged.
    if modules_list is None:
        modules_list = []
    if launcher is None:
        launcher = []
    all_code = {}
    for module in modules_list:
        print("=" * 80)
        print("Generating Definition for module {0}".format(module))
        print("^" * 80)
        package, code, module = generate_class(
            module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks
        )
        # Walk/extend the nested dict following the dotted category path.
        cur_package = all_code
        module_name = package.strip().split(" ")[0].split(".")[-1]
        for package in package.strip().split(" ")[0].split(".")[:-1]:
            if package not in cur_package:
                cur_package[package] = {}
            cur_package = cur_package[package]
        if module_name not in cur_package:
            cur_package[module_name] = {}
        cur_package[module_name][module] = code
    if os.path.exists("__init__.py"):
        os.unlink("__init__.py")
    crawl_code_struct(all_code, os.getcwd())
def generate_class(
    module, launcher, strip_module_name_prefix=True, redirect_x=False, mipav_hacks=False
):
    """Build the nipype interface source code for one SEM/CLI *module*.

    Fetches the tool's self-description XML (``<module> --xml`` via
    ``grab_xml``), translates each XML parameter into traits declarations,
    and returns ``(category, source_code, module_name)`` where
    ``source_code`` holds the InputSpec, OutputSpec and the
    SEMLikeCommandLine subclass.
    """
    dom = grab_xml(module, launcher, mipav_hacks=mipav_hacks)
    if strip_module_name_prefix:
        module_name = module.split(".")[-1]
    else:
        module_name = module
    inputTraits = []
    outputTraits = []
    outputs_filenames = {}
    # self._outputs_nodes = []
    # Collect the tool's metadata fields into the generated class docstring.
    # NOTE: assumes the XML always provides a 'category' element; otherwise
    # the final return statement raises NameError on ``category``.
    class_string = '"""'
    for desc_str in [
        "title",
        "category",
        "description",
        "version",
        "documentation-url",
        "license",
        "contributor",
        "acknowledgements",
    ]:
        el = dom.getElementsByTagName(desc_str)
        if el and el[0].firstChild and el[0].firstChild.nodeValue.strip():
            class_string += (
                desc_str + ": " + el[0].firstChild.nodeValue.strip() + "\n\n"
            )
        if desc_str == "category":
            category = el[0].firstChild.nodeValue.strip()
    class_string += '"""'
    # Translate every XML parameter into an input and/or output trait.
    for paramGroup in dom.getElementsByTagName("parameters"):
        indices = paramGroup.getElementsByTagName("index")
        max_index = 0
        for index in indices:
            if int(index.firstChild.nodeValue) > max_index:
                max_index = int(index.firstChild.nodeValue)
        for param in paramGroup.childNodes:
            if param.nodeName in ["label", "description", "#text", "#comment"]:
                continue
            traitsParams = {}
            longFlagNode = param.getElementsByTagName("longflag")
            if longFlagNode:
                # Prefer to use longFlag as name if it is given, rather than the parameter name
                longFlagName = longFlagNode[0].firstChild.nodeValue
                # SEM automatically strips prefixed "--" or "-" from from xml before processing
                # we need to replicate that behavior here The following
                # two nodes in xml have the same behavior in the program
                # <longflag>--test</longflag>
                # <longflag>test</longflag>
                longFlagName = longFlagName.lstrip(" -").rstrip(" ")
                name = longFlagName
                name = force_to_valid_python_variable_name(name)
                traitsParams["argstr"] = "--" + longFlagName + " "
            else:
                name = param.getElementsByTagName("name")[0].firstChild.nodeValue
                name = force_to_valid_python_variable_name(name)
                if param.getElementsByTagName("index"):
                    traitsParams["argstr"] = ""
                else:
                    traitsParams["argstr"] = "--" + name + " "
            if (
                param.getElementsByTagName("description")
                and param.getElementsByTagName("description")[0].firstChild
            ):
                traitsParams["desc"] = (
                    param.getElementsByTagName("description")[0]
                    .firstChild.nodeValue.replace('"', '\\"')
                    .replace("\n", ", ")
                )
            # printf-style conversion per XML node type for the argstr.
            argsDict = {
                "directory": "%s",
                "file": "%s",
                "integer": "%d",
                "double": "%f",
                "float": "%f",
                "image": "%s",
                "transform": "%s",
                "boolean": "",
                "string-enumeration": "%s",
                "string": "%s",
                "integer-enumeration": "%s",
                "table": "%s",
                "point": "%s",
                "region": "%s",
                "geometry": "%s",
            }
            if param.nodeName.endswith("-vector"):
                traitsParams["argstr"] += "%s"
            else:
                traitsParams["argstr"] += argsDict[param.nodeName]
            # Positional parameters: positions are negative, counted from
            # the end (index - (max_index + 1)).
            index = param.getElementsByTagName("index")
            if index:
                traitsParams["position"] = int(index[0].firstChild.nodeValue) - (
                    max_index + 1
                )
            desc = param.getElementsByTagName("description")
            if index:
                traitsParams["desc"] = desc[0].firstChild.nodeValue
            # XML node type -> traits type name for the generated source.
            typesDict = {
                "integer": "traits.Int",
                "double": "traits.Float",
                "float": "traits.Float",
                "image": "File",
                "transform": "File",
                "boolean": "traits.Bool",
                "string": "traits.Str",
                "file": "File",
                "geometry": "File",
                "directory": "Directory",
                "table": "File",
                "point": "traits.List",
                "region": "traits.List",
            }
            if param.nodeName.endswith("-enumeration"):
                type = "traits.Enum"
                values = [
                    '"%s"' % str(el.firstChild.nodeValue).replace('"', "")
                    for el in param.getElementsByTagName("element")
                ]
            elif param.nodeName.endswith("-vector"):
                type = "InputMultiPath"
                if param.nodeName in [
                    "file",
                    "directory",
                    "image",
                    "geometry",
                    "transform",
                    "table",
                ]:
                    values = [
                        "%s(exists=True)"
                        % typesDict[param.nodeName.replace("-vector", "")]
                    ]
                else:
                    values = [typesDict[param.nodeName.replace("-vector", "")]]
                if mipav_hacks is True:
                    traitsParams["sep"] = ";"
                else:
                    traitsParams["sep"] = ","
            elif param.getAttribute("multiple") == "true":
                type = "InputMultiPath"
                if param.nodeName in [
                    "file",
                    "directory",
                    "image",
                    "geometry",
                    "transform",
                    "table",
                ]:
                    values = ["%s(exists=True)" % typesDict[param.nodeName]]
                elif param.nodeName in ["point", "region"]:
                    values = [
                        "%s(traits.Float(), minlen=3, maxlen=3)"
                        % typesDict[param.nodeName]
                    ]
                else:
                    values = [typesDict[param.nodeName]]
                traitsParams["argstr"] += "..."
            else:
                values = []
                type = typesDict[param.nodeName]
            # Path-like parameters must declare a channel (input/output).
            if param.nodeName in [
                "file",
                "directory",
                "image",
                "geometry",
                "transform",
                "table",
            ]:
                if not param.getElementsByTagName("channel"):
                    raise RuntimeError(
                        "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}".format(
                            traitsParams
                        )
                    )
                elif (
                    param.getElementsByTagName("channel")[0].firstChild.nodeValue
                    == "output"
                ):
                    # Output files get a Bool-or-File input trait (auto-name
                    # vs explicit path) plus a matching output trait.
                    traitsParams["hash_files"] = False
                    inputTraits.append(
                        "%s = traits.Either(traits.Bool, %s(%s), %s)"
                        % (
                            name,
                            type,
                            parse_values(values).replace("exists=True", ""),
                            parse_params(traitsParams),
                        )
                    )
                    traitsParams["exists"] = True
                    traitsParams.pop("argstr")
                    traitsParams.pop("hash_files")
                    outputTraits.append(
                        "%s = %s(%s%s)"
                        % (
                            name,
                            type.replace("Input", "Output"),
                            parse_values(values),
                            parse_params(traitsParams),
                        )
                    )
                    outputs_filenames[name] = gen_filename_from_param(param, name)
                elif (
                    param.getElementsByTagName("channel")[0].firstChild.nodeValue
                    == "input"
                ):
                    if param.nodeName in [
                        "file",
                        "directory",
                        "image",
                        "geometry",
                        "transform",
                        "table",
                    ] and type not in ["InputMultiPath", "traits.List"]:
                        traitsParams["exists"] = True
                    inputTraits.append(
                        "%s = %s(%s%s)"
                        % (name, type, parse_values(values), parse_params(traitsParams))
                    )
                else:
                    raise RuntimeError(
                        "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}".format(
                            traitsParams
                        )
                    )
            else:  # For all other parameter types, they are implicitly only input types
                inputTraits.append(
                    "%s = %s(%s%s)"
                    % (name, type, parse_values(values), parse_params(traitsParams))
                )
    if mipav_hacks:
        # MIPAV/JIST tools: drop unusable parameters and force JVM options.
        blacklisted_inputs = ["maxMemoryUsage"]
        inputTraits = [
            trait for trait in inputTraits if trait.split()[0] not in blacklisted_inputs
        ]
        compulsory_inputs = [
            'xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d")',
            'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)',
        ]
        inputTraits += compulsory_inputs
    # Assemble the generated source text: InputSpec, OutputSpec, main class.
    input_spec_code = "class " + module_name + "InputSpec(CommandLineInputSpec):\n"
    for trait in inputTraits:
        input_spec_code += "    " + trait + "\n"
    output_spec_code = "class " + module_name + "OutputSpec(TraitedSpec):\n"
    if not outputTraits:
        output_spec_code += "    pass\n"
    else:
        for trait in outputTraits:
            output_spec_code += "    " + trait + "\n"
    output_filenames_code = "_outputs_filenames = {"
    output_filenames_code += ",".join(
        ["'%s':'%s'" % (key, value) for key, value in outputs_filenames.items()]
    )
    output_filenames_code += "}"
    input_spec_code += "\n\n"
    output_spec_code += "\n\n"
    template = """class %module_name%(SEMLikeCommandLine):
    %class_str%
    input_spec = %module_name%InputSpec
    output_spec = %module_name%OutputSpec
    _cmd = "%launcher% %name% "
    %output_filenames_code%\n"""
    template += "    _redirect_x = {0}\n".format(str(redirect_x))
    main_class = (
        template.replace("%class_str%", class_string)
        .replace("%module_name%", module_name)
        .replace("%name%", module)
        .replace("%output_filenames_code%", output_filenames_code)
        .replace("%launcher%", " ".join(launcher))
    )
    return category, input_spec_code + output_spec_code + main_class, module_name
def grab_xml(module, launcher, mipav_hacks=False):
    """Run ``<launcher> <module> --xml`` and return the parsed XML DOM.

    NOTE(review): on Python 3, ``Popen.communicate()`` returns ``bytes``
    while some of the string operations below assume text — confirm the
    call environment (or a text-mode wrapper) before relying on the
    ``mipav_hacks`` path.
    """
    # cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module)
    # ret = cmd.run()
    command_list = launcher[:]  # force copy to preserve original
    command_list.extend([module, "--xml"])
    final_command = " ".join(command_list)
    xmlReturnValue = subprocess.Popen(
        final_command, stdout=subprocess.PIPE, shell=True
    ).communicate()[0]
    if mipav_hacks:
        # workaround for a jist bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7234&group_id=228&atid=942
        new_xml = ""
        replace_closing_tag = False
        for line in xmlReturnValue.splitlines():
            if line.strip() == "<file collection: semi-colon delimited list>":
                new_xml += "<file-vector>\n"
                replace_closing_tag = True
            elif replace_closing_tag and line.strip() == "</file>":
                new_xml += "</file-vector>\n"
                replace_closing_tag = False
            else:
                new_xml += line + "\n"
        xmlReturnValue = new_xml
        # workaround for a JIST bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7233&group_id=228&atid=942
        if xmlReturnValue.strip().endswith("XML"):
            xmlReturnValue = xmlReturnValue.strip()[:-3]
        if xmlReturnValue.strip().startswith("Error: Unable to set default atlas"):
            xmlReturnValue = xmlReturnValue.strip()[
                len("Error: Unable to set default atlas") :
            ]
    try:
        dom = xml.dom.minidom.parseString(xmlReturnValue.strip())
    except Exception as e:
        # Dump the raw output to help diagnose malformed tool descriptions.
        print(xmlReturnValue.strip())
        raise e
    return dom
    # if ret.runtime.returncode == 0:
    #     return xml.dom.minidom.parseString(ret.runtime.stdout)
    # else:
    #     raise Exception(cmd.cmdline + " failed:\n%s"%ret.runtime.stderr)
def parse_params(params):
    """Render a dict of trait parameters as a ``key=value`` argument string.

    String values are double-quoted (embedded double quotes become single
    quotes so the emitted source stays valid); other values are rendered
    with ``%s``.  Entries are joined with ", ".
    """
    # Don't shadow the builtin ``list`` (the original bound a local named
    # ``list``, hiding the builtin for the rest of the function).
    rendered = []
    for key, value in params.items():
        if isinstance(value, (str, bytes)):
            rendered.append('%s="%s"' % (key, value.replace('"', "'")))
        else:
            rendered.append("%s=%s" % (key, value))
    return ", ".join(rendered)
def parse_values(values):
    """Join *values* with ", ", appending a trailing ", " when non-empty."""
    if not values:
        return ""
    return ", ".join("%s" % item for item in values) + ", "
def gen_filename_from_param(param, base):
    """Build a default output filename for *param* starting from *base*.

    Uses the first extension from the XML ``fileExtensions`` attribute when
    present; otherwise falls back to a per-node-type default (an unknown
    node type raises KeyError, as before).
    """
    _DEFAULT_EXT = {
        "image": ".nii",
        "transform": ".mat",
        "file": "",
        "directory": "",
        "geometry": ".vtk",
    }
    extensions_attr = param.getAttribute("fileExtensions")
    if extensions_attr:
        # A comma-separated list may be given; only the first entry is used.
        suffix = extensions_attr.partition(",")[0]
    else:
        suffix = _DEFAULT_EXT[param.nodeName]
    return base + suffix
if __name__ == "__main__":
    # NOTE: For now either the launcher needs to be found on the default path, or
    # every tool in the modules list must be found on the default path
    # AND calling the module with --xml must be supported and compliant.
    # Hard-coded list of Slicer CLI modules to wrap; edit here to add tools.
    modules_list = [
        "MedianImageFilter",
        "CheckerBoardFilter",
        "EMSegmentCommandLine",
        "GrayscaleFillHoleImageFilter",
        # 'CreateDICOMSeries', #missing channel
        "TractographyLabelMapSeeding",
        "IntensityDifferenceMetric",
        "DWIToDTIEstimation",
        "MaskScalarVolume",
        "ImageLabelCombine",
        "DTIimport",
        "OtsuThresholdImageFilter",
        "ExpertAutomatedRegistration",
        "ThresholdScalarVolume",
        "DWIUnbiasedNonLocalMeansFilter",
        "BRAINSFit",
        "MergeModels",
        "ResampleDTIVolume",
        "MultiplyScalarVolumes",
        "LabelMapSmoothing",
        "RigidRegistration",
        "VotingBinaryHoleFillingImageFilter",
        "BRAINSROIAuto",
        "RobustStatisticsSegmenter",
        "GradientAnisotropicDiffusion",
        "ProbeVolumeWithModel",
        "ModelMaker",
        "ExtractSkeleton",
        "GrayscaleGrindPeakImageFilter",
        "N4ITKBiasFieldCorrection",
        "BRAINSResample",
        "DTIexport",
        "VBRAINSDemonWarp",
        "ResampleScalarVectorDWIVolume",
        "ResampleScalarVolume",
        "OtsuThresholdSegmentation",
        # 'ExecutionModelTour',
        "HistogramMatching",
        "BRAINSDemonWarp",
        "ModelToLabelMap",
        "GaussianBlurImageFilter",
        "DiffusionWeightedVolumeMasking",
        "GrayscaleModelMaker",
        "CastScalarVolume",
        "DicomToNrrdConverter",
        "AffineRegistration",
        "AddScalarVolumes",
        "LinearRegistration",
        "SimpleRegionGrowingSegmentation",
        "DWIJointRicianLMMSEFilter",
        "MultiResolutionAffineRegistration",
        "SubtractScalarVolumes",
        "DWIRicianLMMSEFilter",
        "OrientScalarVolume",
        "FiducialRegistration",
        "BSplineDeformableRegistration",
        "CurvatureAnisotropicDiffusion",
        "PETStandardUptakeValueComputation",
        "DiffusionTensorScalarMeasurements",
        "ACPCTransform",
        "EMSegmentTransformToNewFormat",
        "BSplineToDeformationField",
    ]
    # SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher
    generate_all_classes(modules_list=modules_list, launcher=[])
    # Tools compliant with SlicerExecutionModel called from the Slicer environment (for shared lib compatibility)
    # launcher = ['/home/raid3/gorgolewski/software/slicer/Slicer', '--launch']
    # generate_all_classes(modules_list=modules_list, launcher=launcher)
    # generate_all_classes(modules_list=['BRAINSABC'], launcher=[] )
| 37.729373 | 212 | 0.532497 |
import xml.dom.minidom
import subprocess
import os
from shutil import rmtree
import keyword
# Python reserved words; CLI parameter names that collide with one of these
# are rewritten by force_to_valid_python_variable_name() below.
python_keywords = (
    keyword.kwlist
)
def force_to_valid_python_variable_name(old_name):
    """Return *old_name* adjusted so it can be used as a Python identifier.

    Surrounding whitespace is removed; names colliding with a Python
    reserved word get an ``opt_`` prefix.
    """
    # keyword.kwlist is exactly the module-level ``python_keywords`` sequence.
    if old_name in keyword.kwlist:
        return "opt_" + old_name
    return old_name.strip()
def add_class_to_package(class_codes, class_names, module_name, package_dir):
    """Write ``class_codes`` into ``<package_dir>/<module_name>.py``.

    The module file gets an autogenerated-file header plus the shared nipype
    imports; the package ``__init__.py`` is appended with an import of
    ``class_names`` from the new module.
    """
    module_python_filename = os.path.join(package_dir, "%s.py" % module_name)
    # Context managers guarantee the handles are flushed and closed even if
    # a write fails (the original left them open on error).
    with open(module_python_filename, "w") as f_m, open(
        os.path.join(package_dir, "__init__.py"), "a+"
    ) as f_i:
        f_m.write(
            """# -*- coding: utf-8 -*-
\"\"\"Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n"""
        )
        imports = """\
from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec,
                    File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath)
import os\n\n\n"""
        f_m.write(imports)
        f_m.write("\n\n".join(class_codes))
        f_i.write("from %s import %s\n" % (module_name, ", ".join(class_names)))
def crawl_code_struct(code_struct, package_dir):
    """Recursively write the nested ``code_struct`` mapping as a package tree.

    Values that are strings are generated class code and become modules;
    dict values become sub-packages (each with its own ``__init__.py`` and a
    numpy.distutils ``setup.py``).
    """
    subpackages = []
    for k, v in code_struct.items():
        if isinstance(v, str) or isinstance(v, (str, bytes)):
            # Leaf: one generated class -> one module named after it.
            module_name = k.lower()
            class_name = k
            class_code = v
            add_class_to_package([class_code], [class_name], module_name, package_dir)
        else:
            # Partition the children into string leaves (l1) and nested
            # dicts (l2); nested dicts force creation of a sub-package.
            l1 = {}
            l2 = {}
            for key in list(v.keys()):
                if isinstance(v[key], str) or isinstance(v[key], (str, bytes)):
                    l1[key] = v[key]
                else:
                    l2[key] = v[key]
            if l2:
                v = l2
                subpackages.append(k.lower())
                f_i = open(os.path.join(package_dir, "__init__.py"), "a+")
                f_i.write("from %s import *\n" % k.lower())
                f_i.close()
                new_pkg_dir = os.path.join(package_dir, k.lower())
                # Start from a clean directory so stale modules disappear.
                if os.path.exists(new_pkg_dir):
                    rmtree(new_pkg_dir)
                os.mkdir(new_pkg_dir)
                crawl_code_struct(v, new_pkg_dir)
                if l1:
                    # String leaves alongside sub-packages are wrapped one
                    # level deeper so they become modules of the new package.
                    for ik, iv in l1.items():
                        crawl_code_struct({ik: {ik: iv}}, new_pkg_dir)
            elif l1:
                v = l1
                module_name = k.lower()
                add_class_to_package(
                    list(v.values()), list(v.keys()), module_name, package_dir
                )
    if subpackages:
        # Emit a numpy.distutils setup.py that registers the sub-packages.
        f = open(os.path.join(package_dir, "setup.py"), "w")
        f.write(
            """# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='',top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('{pkg_name}', parent_package, top_path)
    {sub_pks}
    return config
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
""".format(
                pkg_name=package_dir.split("/")[-1],
                sub_pks="\n    ".join(
                    [
                        "config.add_data_dir('%s')" % sub_pkg
                        for sub_pkg in subpackages
                    ]
                ),
            )
        )
        f.close()
def generate_all_classes(
    modules_list=None, launcher=None, redirect_x=False, mipav_hacks=False
):
    """Generate nipype wrapper classes for every tool in ``modules_list``.

    ``modules_list`` contains all the SEM compliant tools that should have
    wrappers created for them.  ``launcher`` contains the command line prefix
    wrapper arguments needed to prepare a proper environment for each of the
    modules.  The resulting package tree is written under the current
    working directory.
    """
    # None sentinels instead of mutable default arguments ([]), which would
    # be shared between calls; passing explicit lists still works unchanged.
    if modules_list is None:
        modules_list = []
    if launcher is None:
        launcher = []
    all_code = {}
    for module in modules_list:
        print("=" * 80)
        print("Generating Definition for module {0}".format(module))
        print("^" * 80)
        package, code, module = generate_class(
            module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks
        )
        # Walk/extend the nested dict following the dotted category path.
        cur_package = all_code
        module_name = package.strip().split(" ")[0].split(".")[-1]
        for package in package.strip().split(" ")[0].split(".")[:-1]:
            if package not in cur_package:
                cur_package[package] = {}
            cur_package = cur_package[package]
        if module_name not in cur_package:
            cur_package[module_name] = {}
        cur_package[module_name][module] = code
    if os.path.exists("__init__.py"):
        os.unlink("__init__.py")
    crawl_code_struct(all_code, os.getcwd())
def generate_class(
    module, launcher, strip_module_name_prefix=True, redirect_x=False, mipav_hacks=False
):
    """Build the nipype interface source code for one SEM/CLI *module*.

    Fetches the tool's self-description XML (``<module> --xml`` via
    ``grab_xml``), translates each XML parameter into traits declarations,
    and returns ``(category, source_code, module_name)`` where
    ``source_code`` holds the InputSpec, OutputSpec and the
    SEMLikeCommandLine subclass.
    """
    dom = grab_xml(module, launcher, mipav_hacks=mipav_hacks)
    if strip_module_name_prefix:
        module_name = module.split(".")[-1]
    else:
        module_name = module
    inputTraits = []
    outputTraits = []
    outputs_filenames = {}
    # Collect the tool's metadata fields into the generated class docstring.
    # NOTE: assumes the XML always provides a 'category' element; otherwise
    # the final return statement raises NameError on ``category``.
    class_string = '"""'
    for desc_str in [
        "title",
        "category",
        "description",
        "version",
        "documentation-url",
        "license",
        "contributor",
        "acknowledgements",
    ]:
        el = dom.getElementsByTagName(desc_str)
        if el and el[0].firstChild and el[0].firstChild.nodeValue.strip():
            class_string += (
                desc_str + ": " + el[0].firstChild.nodeValue.strip() + "\n\n"
            )
        if desc_str == "category":
            category = el[0].firstChild.nodeValue.strip()
    class_string += '"""'
    # Translate every XML parameter into an input and/or output trait.
    for paramGroup in dom.getElementsByTagName("parameters"):
        indices = paramGroup.getElementsByTagName("index")
        max_index = 0
        for index in indices:
            if int(index.firstChild.nodeValue) > max_index:
                max_index = int(index.firstChild.nodeValue)
        for param in paramGroup.childNodes:
            if param.nodeName in ["label", "description", "#text", "#comment"]:
                continue
            traitsParams = {}
            longFlagNode = param.getElementsByTagName("longflag")
            if longFlagNode:
                # Prefer the longflag (minus any leading dashes, matching
                # SEM's own stripping) over the parameter name.
                longFlagName = longFlagNode[0].firstChild.nodeValue
                longFlagName = longFlagName.lstrip(" -").rstrip(" ")
                name = longFlagName
                name = force_to_valid_python_variable_name(name)
                traitsParams["argstr"] = "--" + longFlagName + " "
            else:
                name = param.getElementsByTagName("name")[0].firstChild.nodeValue
                name = force_to_valid_python_variable_name(name)
                if param.getElementsByTagName("index"):
                    traitsParams["argstr"] = ""
                else:
                    traitsParams["argstr"] = "--" + name + " "
            if (
                param.getElementsByTagName("description")
                and param.getElementsByTagName("description")[0].firstChild
            ):
                traitsParams["desc"] = (
                    param.getElementsByTagName("description")[0]
                    .firstChild.nodeValue.replace('"', '\\"')
                    .replace("\n", ", ")
                )
            # printf-style conversion per XML node type for the argstr.
            argsDict = {
                "directory": "%s",
                "file": "%s",
                "integer": "%d",
                "double": "%f",
                "float": "%f",
                "image": "%s",
                "transform": "%s",
                "boolean": "",
                "string-enumeration": "%s",
                "string": "%s",
                "integer-enumeration": "%s",
                "table": "%s",
                "point": "%s",
                "region": "%s",
                "geometry": "%s",
            }
            if param.nodeName.endswith("-vector"):
                traitsParams["argstr"] += "%s"
            else:
                traitsParams["argstr"] += argsDict[param.nodeName]
            # Positional parameters: positions are negative, counted from
            # the end (index - (max_index + 1)).
            index = param.getElementsByTagName("index")
            if index:
                traitsParams["position"] = int(index[0].firstChild.nodeValue) - (
                    max_index + 1
                )
            desc = param.getElementsByTagName("description")
            if index:
                traitsParams["desc"] = desc[0].firstChild.nodeValue
            # XML node type -> traits type name for the generated source.
            typesDict = {
                "integer": "traits.Int",
                "double": "traits.Float",
                "float": "traits.Float",
                "image": "File",
                "transform": "File",
                "boolean": "traits.Bool",
                "string": "traits.Str",
                "file": "File",
                "geometry": "File",
                "directory": "Directory",
                "table": "File",
                "point": "traits.List",
                "region": "traits.List",
            }
            if param.nodeName.endswith("-enumeration"):
                type = "traits.Enum"
                values = [
                    '"%s"' % str(el.firstChild.nodeValue).replace('"', "")
                    for el in param.getElementsByTagName("element")
                ]
            elif param.nodeName.endswith("-vector"):
                type = "InputMultiPath"
                if param.nodeName in [
                    "file",
                    "directory",
                    "image",
                    "geometry",
                    "transform",
                    "table",
                ]:
                    values = [
                        "%s(exists=True)"
                        % typesDict[param.nodeName.replace("-vector", "")]
                    ]
                else:
                    values = [typesDict[param.nodeName.replace("-vector", "")]]
                if mipav_hacks is True:
                    traitsParams["sep"] = ";"
                else:
                    traitsParams["sep"] = ","
            elif param.getAttribute("multiple") == "true":
                type = "InputMultiPath"
                if param.nodeName in [
                    "file",
                    "directory",
                    "image",
                    "geometry",
                    "transform",
                    "table",
                ]:
                    values = ["%s(exists=True)" % typesDict[param.nodeName]]
                elif param.nodeName in ["point", "region"]:
                    values = [
                        "%s(traits.Float(), minlen=3, maxlen=3)"
                        % typesDict[param.nodeName]
                    ]
                else:
                    values = [typesDict[param.nodeName]]
                traitsParams["argstr"] += "..."
            else:
                values = []
                type = typesDict[param.nodeName]
            # Path-like parameters must declare a channel (input/output).
            if param.nodeName in [
                "file",
                "directory",
                "image",
                "geometry",
                "transform",
                "table",
            ]:
                if not param.getElementsByTagName("channel"):
                    raise RuntimeError(
                        "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}".format(
                            traitsParams
                        )
                    )
                elif (
                    param.getElementsByTagName("channel")[0].firstChild.nodeValue
                    == "output"
                ):
                    # Output files get a Bool-or-File input trait (auto-name
                    # vs explicit path) plus a matching output trait.
                    traitsParams["hash_files"] = False
                    inputTraits.append(
                        "%s = traits.Either(traits.Bool, %s(%s), %s)"
                        % (
                            name,
                            type,
                            parse_values(values).replace("exists=True", ""),
                            parse_params(traitsParams),
                        )
                    )
                    traitsParams["exists"] = True
                    traitsParams.pop("argstr")
                    traitsParams.pop("hash_files")
                    outputTraits.append(
                        "%s = %s(%s%s)"
                        % (
                            name,
                            type.replace("Input", "Output"),
                            parse_values(values),
                            parse_params(traitsParams),
                        )
                    )
                    outputs_filenames[name] = gen_filename_from_param(param, name)
                elif (
                    param.getElementsByTagName("channel")[0].firstChild.nodeValue
                    == "input"
                ):
                    if param.nodeName in [
                        "file",
                        "directory",
                        "image",
                        "geometry",
                        "transform",
                        "table",
                    ] and type not in ["InputMultiPath", "traits.List"]:
                        traitsParams["exists"] = True
                    inputTraits.append(
                        "%s = %s(%s%s)"
                        % (name, type, parse_values(values), parse_params(traitsParams))
                    )
                else:
                    raise RuntimeError(
                        "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}".format(
                            traitsParams
                        )
                    )
            else:  # For all other parameter types, they are implicitly only input types
                inputTraits.append(
                    "%s = %s(%s%s)"
                    % (name, type, parse_values(values), parse_params(traitsParams))
                )
    if mipav_hacks:
        # MIPAV/JIST tools: drop unusable parameters and force JVM options.
        blacklisted_inputs = ["maxMemoryUsage"]
        inputTraits = [
            trait for trait in inputTraits if trait.split()[0] not in blacklisted_inputs
        ]
        compulsory_inputs = [
            'xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d")',
            'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)',
        ]
        inputTraits += compulsory_inputs
    # Assemble the generated source text: InputSpec, OutputSpec, main class.
    input_spec_code = "class " + module_name + "InputSpec(CommandLineInputSpec):\n"
    for trait in inputTraits:
        input_spec_code += "    " + trait + "\n"
    output_spec_code = "class " + module_name + "OutputSpec(TraitedSpec):\n"
    if not outputTraits:
        output_spec_code += "    pass\n"
    else:
        for trait in outputTraits:
            output_spec_code += "    " + trait + "\n"
    output_filenames_code = "_outputs_filenames = {"
    output_filenames_code += ",".join(
        ["'%s':'%s'" % (key, value) for key, value in outputs_filenames.items()]
    )
    output_filenames_code += "}"
    input_spec_code += "\n\n"
    output_spec_code += "\n\n"
    template = """class %module_name%(SEMLikeCommandLine):
    %class_str%
    input_spec = %module_name%InputSpec
    output_spec = %module_name%OutputSpec
    _cmd = "%launcher% %name% "
    %output_filenames_code%\n"""
    template += "    _redirect_x = {0}\n".format(str(redirect_x))
    main_class = (
        template.replace("%class_str%", class_string)
        .replace("%module_name%", module_name)
        .replace("%name%", module)
        .replace("%output_filenames_code%", output_filenames_code)
        .replace("%launcher%", " ".join(launcher))
    )
    return category, input_spec_code + output_spec_code + main_class, module_name
def grab_xml(module, launcher, mipav_hacks=False):
    """Run ``<launcher> <module> --xml`` and return the parsed XML DOM.

    NOTE(review): on Python 3, ``Popen.communicate()`` returns ``bytes``
    while some of the string operations below assume text — confirm the
    call environment (or a text-mode wrapper) before relying on the
    ``mipav_hacks`` path.
    """
    # cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module)
    # ret = cmd.run()
    command_list = launcher[:]  # force copy to preserve original
    command_list.extend([module, "--xml"])
    final_command = " ".join(command_list)
    xmlReturnValue = subprocess.Popen(
        final_command, stdout=subprocess.PIPE, shell=True
    ).communicate()[0]
    if mipav_hacks:
        # workaround for a jist bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7234&group_id=228&atid=942
        new_xml = ""
        replace_closing_tag = False
        for line in xmlReturnValue.splitlines():
            if line.strip() == "<file collection: semi-colon delimited list>":
                new_xml += "<file-vector>\n"
                replace_closing_tag = True
            elif replace_closing_tag and line.strip() == "</file>":
                new_xml += "</file-vector>\n"
                replace_closing_tag = False
            else:
                new_xml += line + "\n"
        xmlReturnValue = new_xml
        # workaround for a JIST bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7233&group_id=228&atid=942
        if xmlReturnValue.strip().endswith("XML"):
            xmlReturnValue = xmlReturnValue.strip()[:-3]
        if xmlReturnValue.strip().startswith("Error: Unable to set default atlas"):
            xmlReturnValue = xmlReturnValue.strip()[
                len("Error: Unable to set default atlas") :
            ]
    try:
        dom = xml.dom.minidom.parseString(xmlReturnValue.strip())
    except Exception as e:
        # Dump the raw output to help diagnose malformed tool descriptions.
        print(xmlReturnValue.strip())
        raise e
    return dom
    # if ret.runtime.returncode == 0:
    #     return xml.dom.minidom.parseString(ret.runtime.stdout)
    # else:
    #     raise Exception(cmd.cmdline + " failed:\n%s"%ret.runtime.stderr)
def parse_params(params):
    """Render a dict of trait parameters as a ``key=value`` argument string.

    String values are double-quoted (embedded double quotes become single
    quotes so the emitted source stays valid); other values are rendered
    with ``%s``.  Entries are joined with ", ".
    """
    # Don't shadow the builtin ``list`` (the original bound a local named
    # ``list``, hiding the builtin for the rest of the function).
    rendered = []
    for key, value in params.items():
        if isinstance(value, (str, bytes)):
            rendered.append('%s="%s"' % (key, value.replace('"', "'")))
        else:
            rendered.append("%s=%s" % (key, value))
    return ", ".join(rendered)
def parse_values(values):
    """Join *values* with ", ", appending a trailing ", " when non-empty."""
    if not values:
        return ""
    return ", ".join("%s" % item for item in values) + ", "
def gen_filename_from_param(param, base):
    """Derive an output filename for *param* by appending a type extension.

    If the XML node declares ``fileExtensions`` (possibly a comma-separated
    list), the first declared extension wins; otherwise a default extension
    is chosen from the node's tag name.
    """
    declared = param.getAttribute("fileExtensions")
    if declared:
        # Multiple comma-separated extensions may be declared; keep the first.
        extension = declared.split(",")[0]
    else:
        defaults = {
            "image": ".nii",
            "transform": ".mat",
            "file": "",
            "directory": "",
            "geometry": ".vtk",
        }
        extension = defaults[param.nodeName]
    return base + extension
if __name__ == "__main__":
    # NOTE: For now either the launcher needs to be found on the default path, or
    # every tool in the modules list must be found on the default path
    # AND calling the module with --xml must be supported and compliant.
    # Each entry names a Slicer CLI tool for which a wrapper class is generated.
    modules_list = [
        "MedianImageFilter",
        "CheckerBoardFilter",
        "EMSegmentCommandLine",
        "GrayscaleFillHoleImageFilter",
        # 'CreateDICOMSeries', #missing channel
        "TractographyLabelMapSeeding",
        "IntensityDifferenceMetric",
        "DWIToDTIEstimation",
        "MaskScalarVolume",
        "ImageLabelCombine",
        "DTIimport",
        "OtsuThresholdImageFilter",
        "ExpertAutomatedRegistration",
        "ThresholdScalarVolume",
        "DWIUnbiasedNonLocalMeansFilter",
        "BRAINSFit",
        "MergeModels",
        "ResampleDTIVolume",
        "MultiplyScalarVolumes",
        "LabelMapSmoothing",
        "RigidRegistration",
        "VotingBinaryHoleFillingImageFilter",
        "BRAINSROIAuto",
        "RobustStatisticsSegmenter",
        "GradientAnisotropicDiffusion",
        "ProbeVolumeWithModel",
        "ModelMaker",
        "ExtractSkeleton",
        "GrayscaleGrindPeakImageFilter",
        "N4ITKBiasFieldCorrection",
        "BRAINSResample",
        "DTIexport",
        "VBRAINSDemonWarp",
        "ResampleScalarVectorDWIVolume",
        "ResampleScalarVolume",
        "OtsuThresholdSegmentation",
        # 'ExecutionModelTour',
        "HistogramMatching",
        "BRAINSDemonWarp",
        "ModelToLabelMap",
        "GaussianBlurImageFilter",
        "DiffusionWeightedVolumeMasking",
        "GrayscaleModelMaker",
        "CastScalarVolume",
        "DicomToNrrdConverter",
        "AffineRegistration",
        "AddScalarVolumes",
        "LinearRegistration",
        "SimpleRegionGrowingSegmentation",
        "DWIJointRicianLMMSEFilter",
        "MultiResolutionAffineRegistration",
        "SubtractScalarVolumes",
        "DWIRicianLMMSEFilter",
        "OrientScalarVolume",
        "FiducialRegistration",
        "BSplineDeformableRegistration",
        "CurvatureAnisotropicDiffusion",
        "PETStandardUptakeValueComputation",
        "DiffusionTensorScalarMeasurements",
        "ACPCTransform",
        "EMSegmentTransformToNewFormat",
        "BSplineToDeformationField",
    ]
    # SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher
    generate_all_classes(modules_list=modules_list, launcher=[])
| true | true |
f71d993fda551358cdcb47a5d1ebc884dfe12422 | 1,056 | py | Python | trains_gtfs.py | rome2rio/TokyoGTFS | c641529cd4e1d84692e85060dacb710270bc235f | [
"MIT"
] | 10 | 2018-10-24T13:49:18.000Z | 2022-02-06T22:58:34.000Z | trains_gtfs.py | rome2rio/TokyoGTFS | c641529cd4e1d84692e85060dacb710270bc235f | [
"MIT"
] | null | null | null | trains_gtfs.py | rome2rio/TokyoGTFS | c641529cd4e1d84692e85060dacb710270bc235f | [
"MIT"
] | 6 | 2019-04-17T03:57:29.000Z | 2021-01-26T11:44:53.000Z | import argparse
import time
import sys
import os
from src.static_trains import TrainParser
from src.const import HEADER
if __name__ == "__main__":
    # CLI entry point for building the Tokyo trains GTFS feed.
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument("-a", "--apikey", metavar="YOUR_APIKEY",
                             help="apikey from developer-tokyochallenge.odpt.org")
    args = args_parser.parse_args()
    # Apikey checks: the CLI flag wins; otherwise fall back to an apikey.txt
    # file in the working directory; bail out if neither is available.
    if args.apikey:
        apikey = args.apikey
    elif os.path.exists("apikey.txt"):
        with open("apikey.txt", mode="r", encoding="utf8") as f:
            apikey = f.read().strip()
    else:
        sys.exit(
            "No apikey!\n"
            "Provide it inside command line argument '--apikey',\n"
            "Or put it inside a file named 'apikey.txt'."
        )
    start_time = time.time()
    print(HEADER)
    print("=== Trains GTFS: Starting! ===")
    print("Warming up")
    # All the heavy lifting (fetching ODPT data, writing GTFS) happens here.
    TrainParser.parse(apikey)
    total_time = time.time() - start_time
    print("=== TokyoGTFS: Finished in {} s ===".format(round(total_time, 2)))
| 27.076923 | 82 | 0.609848 | import argparse
import time
import sys
import os
from src.static_trains import TrainParser
from src.const import HEADER
if __name__ == "__main__":
args_parser = argparse.ArgumentParser()
args_parser.add_argument("-a", "--apikey", metavar="YOUR_APIKEY",
help="apikey from developer-tokyochallenge.odpt.org")
args = args_parser.parse_args()
if args.apikey:
apikey = args.apikey
elif os.path.exists("apikey.txt"):
with open("apikey.txt", mode="r", encoding="utf8") as f:
apikey = f.read().strip()
else:
sys.exit(
"No apikey!\n"
"Provide it inside command line argument '--apikey',\n"
"Or put it inside a file named 'apikey.txt'."
)
start_time = time.time()
print(HEADER)
print("=== Trains GTFS: Starting! ===")
print("Warming up")
TrainParser.parse(apikey)
total_time = time.time() - start_time
print("=== TokyoGTFS: Finished in {} s ===".format(round(total_time, 2)))
| true | true |
f71d9afb951ff91df5b86f95e1126ec3220aa334 | 2,020 | py | Python | treetensor/torch/__init__.py | opendilab/DI-treetensor | fe5f681123c3d6e8d7507fba38586d2edf12e693 | [
"Apache-2.0"
] | 45 | 2021-09-04T15:57:44.000Z | 2022-03-11T19:28:56.000Z | treetensor/torch/__init__.py | opendilab/DI-treetensor | fe5f681123c3d6e8d7507fba38586d2edf12e693 | [
"Apache-2.0"
] | 7 | 2021-09-06T13:06:12.000Z | 2022-03-03T13:38:05.000Z | treetensor/torch/__init__.py | opendilab/DI-treetensor | fe5f681123c3d6e8d7507fba38586d2edf12e693 | [
"Apache-2.0"
] | 1 | 2021-09-30T15:18:06.000Z | 2021-09-30T15:18:06.000Z | import builtins
from functools import lru_cache
from types import ModuleType, FunctionType, BuiltinFunctionType
from typing import Iterable
import torch
from .funcs import *
from .funcs import __all__ as _funcs_all
from .funcs.base import get_func_from_torch
from .size import *
from .size import __all__ as _size_all
from .tensor import *
from .tensor import __all__ as _tensor_all
from ..config.meta import __VERSION__
# Public API: the union of the funcs/size/tensor submodule exports.
__all__ = [
    *_funcs_all,
    *_size_all,
    *_tensor_all,
]
# Plain builtin value types that may be handed through unchanged when an
# attribute is proxied from ``torch`` (see _Module.__getattr__).
_basic_types = (
    builtins.bool, builtins.bytearray, builtins.bytes, builtins.complex, builtins.dict,
    builtins.float, builtins.frozenset, builtins.int, builtins.list, builtins.range, builtins.set,
    builtins.slice, builtins.str, builtins.tuple,
)
# Names torch itself re-exports; used to vet plain-value passthroughs.
_torch_all = set(torch.__all__)
class _Module(ModuleType):
    """Module proxy that lazily mirrors ``torch`` attributes.

    An instance of this class replaces the real module in ``sys.modules``
    (see the bottom of this file).  Attribute lookups resolve against the
    wrapped original module first and then fall back to ``torch`` itself,
    wrapping public torch functions on first access.
    """

    def __init__(self, module):
        ModuleType.__init__(self, module.__name__)
        # Copy dunder metadata (__doc__, __spec__, ...) from the real module.
        for name in filter(lambda x: x.startswith('__') and x.endswith('__'), dir(module)):
            setattr(self, name, getattr(module, name))
        self.__origin__ = module
        self.__torch_version__ = torch.__version__
        self.__version__ = __VERSION__

    # NOTE: lru_cache on a method keys on ``self``; acceptable here because a
    # single proxy instance lives for the whole process.
    @lru_cache()
    def __getattr__(self, name):
        if (name in self.__all__) or \
                (hasattr(self.__origin__, name) and isinstance(getattr(self.__origin__, name), ModuleType)):
            # Names exported by this package (or its submodules) resolve normally.
            return getattr(self.__origin__, name)
        else:
            item = getattr(torch, name)
            if isinstance(item, (FunctionType, BuiltinFunctionType)) and not name.startswith('_'):
                # Public torch functions get a tree-aware wrapper.
                return get_func_from_torch(name)
            elif (isinstance(item, torch.dtype)) or \
                    isinstance(item, _basic_types) and name in _torch_all:
                # dtypes and plain builtin values pass through untouched.
                return item
            else:
                raise AttributeError(f'Attribute {repr(name)} not found in {repr(__name__)}.')

    def __dir__(self) -> Iterable[str]:
        return self.__all__
import sys
# Replace this module object in sys.modules with the lazy torch-mirroring
# proxy defined above, so attribute access on the package goes through it.
sys.modules[__name__] = _Module(sys.modules[__name__])
| 32.063492 | 108 | 0.681683 | import builtins
from functools import lru_cache
from types import ModuleType, FunctionType, BuiltinFunctionType
from typing import Iterable
import torch
from .funcs import *
from .funcs import __all__ as _funcs_all
from .funcs.base import get_func_from_torch
from .size import *
from .size import __all__ as _size_all
from .tensor import *
from .tensor import __all__ as _tensor_all
from ..config.meta import __VERSION__
__all__ = [
*_funcs_all,
*_size_all,
*_tensor_all,
]
_basic_types = (
builtins.bool, builtins.bytearray, builtins.bytes, builtins.complex, builtins.dict,
builtins.float, builtins.frozenset, builtins.int, builtins.list, builtins.range, builtins.set,
builtins.slice, builtins.str, builtins.tuple,
)
_torch_all = set(torch.__all__)
class _Module(ModuleType):
def __init__(self, module):
ModuleType.__init__(self, module.__name__)
for name in filter(lambda x: x.startswith('__') and x.endswith('__'), dir(module)):
setattr(self, name, getattr(module, name))
self.__origin__ = module
self.__torch_version__ = torch.__version__
self.__version__ = __VERSION__
@lru_cache()
def __getattr__(self, name):
if (name in self.__all__) or \
(hasattr(self.__origin__, name) and isinstance(getattr(self.__origin__, name), ModuleType)):
return getattr(self.__origin__, name)
else:
item = getattr(torch, name)
if isinstance(item, (FunctionType, BuiltinFunctionType)) and not name.startswith('_'):
return get_func_from_torch(name)
elif (isinstance(item, torch.dtype)) or \
isinstance(item, _basic_types) and name in _torch_all:
return item
else:
raise AttributeError(f'Attribute {repr(name)} not found in {repr(__name__)}.')
def __dir__(self) -> Iterable[str]:
return self.__all__
import sys
sys.modules[__name__] = _Module(sys.modules[__name__])
| true | true |
f71d9c7d4d1edc3d1f3f51bfea2e872d5c549f44 | 1,047 | py | Python | 64-minimum-path-sum/64-minimum-path-sum.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 64-minimum-path-sum/64-minimum-path-sum.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | 64-minimum-path-sum/64-minimum-path-sum.py | jurayev/data-structures-algorithms-solutions | 7103294bafb60117fc77efe4913edcffbeb1ac7a | [
"MIT"
] | null | null | null | class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
"""
[1,3,1]
[1,5,1]
[4,2,1]
time O (nm)
space O(nm)
state -> sums[r][c] = min path sum till r, c position
initial state -> sums[0][0…cols] = inf
-> sums[0…cols][0] = inf
transition function -> sum[r][c] = min(sum[r-1][c], sum[r][c-1]) + sum[r][c]
calculation order: 1….rows-1; 1….cols-1
"""
return self.find_min_path_sum(grid)
def find_min_path_sum(self, grid):
rows, cols = len(grid), len(grid[0])
if not rows or not cols:
return -1
sums = [[float("inf") for _ in range(cols+1)] for _ in range(rows+1)]
for r in range(1, rows+1):
for c in range(1, cols+1):
if r == 1 and c == 1:
sums[r][c] = grid[r-1][c-1]
else:
sums[r][c] = min(sums[r-1][c], sums[r][c-1]) + grid[r-1][c-1]
return sums[rows][cols]
| 31.727273 | 84 | 0.455587 | class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
return self.find_min_path_sum(grid)
def find_min_path_sum(self, grid):
rows, cols = len(grid), len(grid[0])
if not rows or not cols:
return -1
sums = [[float("inf") for _ in range(cols+1)] for _ in range(rows+1)]
for r in range(1, rows+1):
for c in range(1, cols+1):
if r == 1 and c == 1:
sums[r][c] = grid[r-1][c-1]
else:
sums[r][c] = min(sums[r-1][c], sums[r][c-1]) + grid[r-1][c-1]
return sums[rows][cols]
| true | true |
f71d9d0a976dd2d6fab7c86d11a2c85c733baff5 | 11,274 | py | Python | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | 2 | 2019-07-08T20:56:13.000Z | 2020-08-04T17:07:26.000Z | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for TFX executors running as part of a Kubeflow pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import argparse
import json
import os
import re
from future import utils
import six
import tensorflow as tf
from typing import Any, Dict, List, Text
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx import version
from tfx.components.base import base_executor
from tfx.utils import import_utils
from tfx.utils import types
def parse_tfx_type(json_str: Text):
  """Deserialize a JSON list of artifact dicts into TfxArtifact objects."""
  return [
      types.TfxArtifact.parse_from_json_dict(artifact_dict)
      for artifact_dict in json.loads(json_str)
  ]
def to_snake_case(name: Text):
  """Convert a CamelCase name to snake_case (acronym-aware)."""
  # First break "Xy" words off whatever precedes them, then split any
  # remaining lower/digit-to-upper boundaries, and lowercase the result.
  with_word_breaks = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
  return re.sub('([a-z0-9])([A-Z])', r'\1_\2', with_word_breaks).lower()
class KubeflowExecutorWrapper(utils.with_metaclass(abc.ABCMeta), object):
  """Abstract base class for all Kubeflow Pipelines-based TFX components.

  Subclasses supply the executor class path, component name, input artifacts
  and exec properties; this base class builds the executor, assigns output
  URIs and runs the executor, writing output-artifact metadata as JSON.
  """

  def __init__(
      self,
      executor_class_path: Text,
      name: Text,
      input_dict: Dict[Text, List[types.TfxArtifact]],
      outputs: Text,
      exec_properties: Dict[Text, Any],
  ):
    self._input_dict = input_dict
    self._output_dict = types.parse_tfx_type_dict(outputs)
    self._component_name = to_snake_case(name)
    self._exec_properties = exec_properties
    self._output_dir = self._exec_properties['output_dir']
    # Workflow id injected into the container environment by the pipeline.
    self._workflow_id = os.environ['WORKFLOW_ID']
    raw_args = self._exec_properties.get('beam_pipeline_args', [])
    # Beam expects str types for it's pipeline args. Ensure unicode type is
    # converted to str if required.
    beam_pipeline_args = []
    for arg in raw_args:
      # In order to support both Py2 and Py3: Py3 doesn't have `unicode` type.
      if six.PY2 and isinstance(arg, unicode):
        arg = arg.encode('ascii', 'ignore')
      beam_pipeline_args.append(arg)
    # TODO(zhitaoli): Revisit usage of setup_file here.
    # Ship the TFX package itself to Beam workers via setup.py.
    module_dir = os.path.dirname(os.path.dirname(version.__file__))
    setup_file = os.path.join(module_dir, 'setup.py')
    tf.logging.info('Using setup_file \'%s\' to capture TFX dependencies',
                    setup_file)
    beam_pipeline_args.append('--setup_file={}'.format(setup_file))
    executor_cls = import_utils.import_class_by_path(executor_class_path)
    # TODO(swoonna): Switch to execution_id when available
    unique_id = '{}_{}'.format(self._component_name, self._workflow_id)
    # TODO(swoonna): Add tmp_dir to additional_pipeline_args
    executor_context = base_executor.BaseExecutor.Context(
        beam_pipeline_args=beam_pipeline_args,
        tmp_dir=os.path.join(self._output_dir, '.temp', ''),
        unique_id=unique_id)
    self._executor = executor_cls(executor_context)

  def _set_outputs(self):
    """Assign per-workflow URIs and spans to every output artifact.

    URIs follow <output_dir>/<component>/<output name>/<workflow id>/<split>/;
    each output artifact inherits the largest span seen among the inputs.
    """
    tf.logging.info('Using workflow id {}'.format(self._workflow_id))
    max_input_span = 0
    for input_list in self._input_dict.values():
      for single_input in input_list:
        max_input_span = max(max_input_span, single_input.span)
    for output_name, output_artifact_list in self._output_dict.items():
      for output_artifact in output_artifact_list:
        output_artifact.uri = os.path.join(self._output_dir,
                                           self._component_name, output_name,
                                           self._workflow_id,
                                           output_artifact.split, '')
        output_artifact.span = max_input_span

  def run(self, output_basedir: Text = '/'):
    """Runs the wrapped Executor, and writes metadata of output artifacts.

    Args:
      output_basedir: Base directory to which output artifacts metadata
        is written. Useful for unit tests.
    """
    self._executor.Do(self._input_dict, self._output_dict,
                      self._exec_properties)
    # One JSON file per output name so downstream steps can read the
    # produced artifact metadata.
    output_dir = os.path.join(output_basedir, 'output/ml_metadata')
    tf.gfile.MakeDirs(output_dir)
    for output_name, output_artifact_list in self._output_dict.items():
      filename = os.path.join(output_dir, output_name)
      with file_io.FileIO(filename, 'w') as f:
        output_list = [x.json_dict() for x in output_artifact_list]
        f.write(json.dumps(output_list))
# TODO(b/132197968): Get rid of all the individual wrapper classes below and
# combine them into a single generic one that constructs the input dict from
# the individual named arguments instead. In the future, the generic wrapper
# can call into TFX drivers to handle component-specific logic as well.
class CsvExampleGenWrapper(KubeflowExecutorWrapper):
  """Wrapper for CSVExampleGen component."""

  def __init__(self, args: argparse.Namespace):
    super(CsvExampleGenWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='CSVExampleGen',
        input_dict={
            'input-base': parse_tfx_type(args.input_base),
        },
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_input_artifact_span()
    self._set_outputs()

  def _set_input_artifact_span(self):
    """Derive each input artifact's span from a leading 'span_<N>' URI prefix.

    Fix: the span is stored as an int (the original kept the matched string),
    so the max(0, span) comparison in _set_outputs stays valid on Python 3,
    where ordering an int against a str raises TypeError.
    """
    for input_artifact in self._input_dict['input-base']:
      # NOTE(review): re.match anchors at the start of the URI, so absolute
      # URIs (e.g. 'gs://...') never match and default to span 1 — confirm
      # whether matching a 'span_<N>' path component was intended.
      matched = re.match(r'span_([0-9]+)', input_artifact.uri)
      input_artifact.span = int(matched.group(1)) if matched else 1
class BigQueryExampleGenWrapper(KubeflowExecutorWrapper):
  """Wrapper for BigQueryExampleGen component."""

  def __init__(self, args: argparse.Namespace):
    # This component reads straight from BigQuery, so it has no upstream
    # artifact inputs — input_dict is intentionally empty.
    super(BigQueryExampleGenWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='BigQueryExampleGen',
        input_dict={},
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class StatisticsGenWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the StatisticsGen component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact spec into executor inputs.
    inputs = {
        'input_data': parse_tfx_type(args.input_data),
    }
    super(StatisticsGenWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='StatisticsGen',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class SchemaGenWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the SchemaGen component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact spec into executor inputs.
    inputs = {
        'stats': parse_tfx_type(args.stats),
    }
    super(SchemaGenWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='SchemaGen',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class ExampleValidatorWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the ExampleValidator component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'stats': parse_tfx_type(args.stats),
        'schema': parse_tfx_type(args.schema),
    }
    super(ExampleValidatorWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='ExampleValidator',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class TransformWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the Transform component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'input_data': parse_tfx_type(args.input_data),
        'schema': parse_tfx_type(args.schema),
    }
    super(TransformWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='Transform',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class TrainerWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the Trainer component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'transformed_examples': parse_tfx_type(args.transformed_examples),
        'transform_output': parse_tfx_type(args.transform_output),
        'schema': parse_tfx_type(args.schema),
    }
    super(TrainerWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='Trainer',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
    # TODO(ajaygopinathan): Implement warm starting.
    self._exec_properties['warm_starting'] = False
    self._exec_properties['warm_start_from'] = None
class EvaluatorWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the Evaluator component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'examples': parse_tfx_type(args.examples),
        'model_exports': parse_tfx_type(args.model_exports),
    }
    super(EvaluatorWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='Evaluator',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
class ModelValidatorWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the ModelValidator component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'examples': parse_tfx_type(args.examples),
        'model': parse_tfx_type(args.model),
    }
    super(ModelValidatorWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='ModelValidator',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
    # TODO(ajaygopinathan): Implement latest blessed model determination.
    self._exec_properties['latest_blessed_model'] = None
    self._exec_properties['latest_blessed_model_id'] = None
class PusherWrapper(KubeflowExecutorWrapper):
  """Kubeflow wrapper for the Pusher component."""

  def __init__(self, args: argparse.Namespace):
    # Deserialize the CLI-passed artifact specs into executor inputs.
    inputs = {
        'model_export': parse_tfx_type(args.model_export),
        'model_blessing': parse_tfx_type(args.model_blessing),
    }
    super(PusherWrapper, self).__init__(
        executor_class_path=args.executor_class_path,
        name='Pusher',
        input_dict=inputs,
        outputs=args.outputs,
        exec_properties=json.loads(args.exec_properties),
    )
    self._set_outputs()
    # TODO(ajaygopinathan): Implement latest pushed model
    self._exec_properties['latest_pushed_model'] = None
| 35.23125 | 90 | 0.699219 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import argparse
import json
import os
import re
from future import utils
import six
import tensorflow as tf
from typing import Any, Dict, List, Text
from tensorflow.python.lib.io import file_io
from tfx import version
from tfx.components.base import base_executor
from tfx.utils import import_utils
from tfx.utils import types
def parse_tfx_type(json_str: Text):
json_artifact_list = json.loads(json_str)
tfx_types = []
for json_artifact in json_artifact_list:
tfx_type = types.TfxArtifact.parse_from_json_dict(json_artifact)
tfx_types.append(tfx_type)
return tfx_types
def to_snake_case(name: Text):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
class KubeflowExecutorWrapper(utils.with_metaclass(abc.ABCMeta), object):
def __init__(
self,
executor_class_path: Text,
name: Text,
input_dict: Dict[Text, List[types.TfxArtifact]],
outputs: Text,
exec_properties: Dict[Text, Any],
):
self._input_dict = input_dict
self._output_dict = types.parse_tfx_type_dict(outputs)
self._component_name = to_snake_case(name)
self._exec_properties = exec_properties
self._output_dir = self._exec_properties['output_dir']
self._workflow_id = os.environ['WORKFLOW_ID']
raw_args = self._exec_properties.get('beam_pipeline_args', [])
# converted to str if required.
beam_pipeline_args = []
for arg in raw_args:
# In order to support both Py2 and Py3: Py3 doesn't have `unicode` type.
if six.PY2 and isinstance(arg, unicode):
arg = arg.encode('ascii', 'ignore')
beam_pipeline_args.append(arg)
module_dir = os.path.dirname(os.path.dirname(version.__file__))
setup_file = os.path.join(module_dir, 'setup.py')
tf.logging.info('Using setup_file \'%s\' to capture TFX dependencies',
setup_file)
beam_pipeline_args.append('--setup_file={}'.format(setup_file))
executor_cls = import_utils.import_class_by_path(executor_class_path)
unique_id = '{}_{}'.format(self._component_name, self._workflow_id)
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=os.path.join(self._output_dir, '.temp', ''),
unique_id=unique_id)
self._executor = executor_cls(executor_context)
def _set_outputs(self):
tf.logging.info('Using workflow id {}'.format(self._workflow_id))
max_input_span = 0
for input_list in self._input_dict.values():
for single_input in input_list:
max_input_span = max(max_input_span, single_input.span)
for output_name, output_artifact_list in self._output_dict.items():
for output_artifact in output_artifact_list:
output_artifact.uri = os.path.join(self._output_dir,
self._component_name, output_name,
self._workflow_id,
output_artifact.split, '')
output_artifact.span = max_input_span
def run(self, output_basedir: Text = '/'):
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
output_dir = os.path.join(output_basedir, 'output/ml_metadata')
tf.gfile.MakeDirs(output_dir)
for output_name, output_artifact_list in self._output_dict.items():
filename = os.path.join(output_dir, output_name)
with file_io.FileIO(filename, 'w') as f:
output_list = [x.json_dict() for x in output_artifact_list]
f.write(json.dumps(output_list))
class CsvExampleGenWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(CsvExampleGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='CSVExampleGen',
input_dict={
'input-base': parse_tfx_type(args.input_base),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_input_artifact_span()
self._set_outputs()
def _set_input_artifact_span(self):
for input_artifact in self._input_dict['input-base']:
matched = re.match(r'span_([0-9]+)', input_artifact.uri)
span = matched.group(1) if matched else 1
input_artifact.span = span
class BigQueryExampleGenWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(BigQueryExampleGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='BigQueryExampleGen',
input_dict={},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class StatisticsGenWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(StatisticsGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='StatisticsGen',
input_dict={
'input_data': parse_tfx_type(args.input_data),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class SchemaGenWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(SchemaGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='SchemaGen',
input_dict={
'stats': parse_tfx_type(args.stats),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class ExampleValidatorWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(ExampleValidatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='ExampleValidator',
input_dict={
'stats': parse_tfx_type(args.stats),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class TransformWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(TransformWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Transform',
input_dict={
'input_data': parse_tfx_type(args.input_data),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class TrainerWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(TrainerWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Trainer',
input_dict={
'transformed_examples': parse_tfx_type(args.transformed_examples),
'transform_output': parse_tfx_type(args.transform_output),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
self._exec_properties['warm_starting'] = False
self._exec_properties['warm_start_from'] = None
class EvaluatorWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(EvaluatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Evaluator',
input_dict={
'examples': parse_tfx_type(args.examples),
'model_exports': parse_tfx_type(args.model_exports),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class ModelValidatorWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(ModelValidatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='ModelValidator',
input_dict={
'examples': parse_tfx_type(args.examples),
'model': parse_tfx_type(args.model),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
self._exec_properties['latest_blessed_model'] = None
self._exec_properties['latest_blessed_model_id'] = None
class PusherWrapper(KubeflowExecutorWrapper):
def __init__(self, args: argparse.Namespace):
super(PusherWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Pusher',
input_dict={
'model_export': parse_tfx_type(args.model_export),
'model_blessing': parse_tfx_type(args.model_blessing),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
self._exec_properties['latest_pushed_model'] = None
| true | true |
f71d9d8f6b820ccbcff9a86dd6bbf0941792f053 | 2,110 | py | Python | src/djask/admin/views.py | greyli/Djask | b886610c792aaa40d6a3975fd26c44fc7825c8b6 | [
"MIT"
] | null | null | null | src/djask/admin/views.py | greyli/Djask | b886610c792aaa40d6a3975fd26c44fc7825c8b6 | [
"MIT"
] | null | null | null | src/djask/admin/views.py | greyli/Djask | b886610c792aaa40d6a3975fd26c44fc7825c8b6 | [
"MIT"
] | null | null | null | from typing import Optional
from flask import render_template, abort, flash, redirect, url_for
from flask_login.utils import login_user, logout_user
from .forms import LoginForm
from .decorators import admin_required
from ..auth.models import User
from ..blueprints import Blueprint
from ..globals import current_app, request
admin_bp = Blueprint("admin", __name__, url_prefix="/admin")
@admin_bp.route("/")
@admin_required
def index():
blueprints = current_app.blueprint_objects
return render_template(
"admin/dashboard.html",
User=User,
models=current_app.models,
blueprints=blueprints,
)
@admin_bp.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
# identify the user
if not user:
flash("User not found.", "danger")
elif not user.is_admin:
flash("User not administrative.", "danger")
elif not user.check_password(form.password.data):
flash("Wrong password.", "danger")
else:
login_user(user, form.remember_me.data)
next: Optional[str] = request.args.get("next")
return redirect(next or url_for("admin.index"))
return render_template("admin/login.html", form=form)
@admin_bp.route("/logout")
def logout():
logout_user()
return redirect(url_for("admin.login"))
@admin_bp.route("/<model_name>")
@admin_required
def specific_model(model_name: str):
model_name = model_name.lower()
models = current_app.models
registered_models = [model.__name__.lower() for model in models]
if model_name not in registered_models:
abort(404, "Data model not defined or registered.")
model = models[registered_models.index(model_name)]
return render_template("admin/model.html", model=model, model_name=model.__name__)
@admin_bp.route("/<model_name>/add")
@admin_required
def add_model(model_name: str):
# TODO: Write add_model view
return "Hello World" # pragma: no cover
| 30.57971 | 86 | 0.690521 | from typing import Optional
from flask import render_template, abort, flash, redirect, url_for
from flask_login.utils import login_user, logout_user
from .forms import LoginForm
from .decorators import admin_required
from ..auth.models import User
from ..blueprints import Blueprint
from ..globals import current_app, request
admin_bp = Blueprint("admin", __name__, url_prefix="/admin")
@admin_bp.route("/")
@admin_required
def index():
blueprints = current_app.blueprint_objects
return render_template(
"admin/dashboard.html",
User=User,
models=current_app.models,
blueprints=blueprints,
)
@admin_bp.route("/login", methods=["GET", "POST"])
def login():
    """Render the admin login form and authenticate administrators.

    Checks, in order, that the user exists, is an admin, and supplied
    the correct password; the first failing check is flashed.  On
    success, redirects to the ``next`` query parameter or the dashboard.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if not user:
            flash("User not found.", "danger")
        elif not user.is_admin:
            flash("User not administrative.", "danger")
        elif not user.check_password(form.password.data):
            flash("Wrong password.", "danger")
        else:
            login_user(user, form.remember_me.data)
            # Renamed from `next` so the builtin is not shadowed.
            # NOTE(review): redirect target is unvalidated query input —
            # possible open redirect; consider restricting to same-origin URLs.
            next_url: Optional[str] = request.args.get("next")
            return redirect(next_url or url_for("admin.index"))
    return render_template("admin/login.html", form=form)
@admin_bp.route("/logout")
def logout():
    """Log the user out, then return them to the admin login page."""
    logout_user()
    destination = url_for("admin.login")
    return redirect(destination)
@admin_bp.route("/<model_name>")
@admin_required
def specific_model(model_name: str):
    """Show the admin page for one registered data model (404 if unknown)."""
    wanted = model_name.lower()
    models = current_app.models
    # First registered model whose class name matches, case-insensitively.
    matched = next(
        (model for model in models if model.__name__.lower() == wanted), None
    )
    if matched is None:
        abort(404, "Data model not defined or registered.")
    return render_template("admin/model.html", model=matched, model_name=matched.__name__)
@admin_bp.route("/<model_name>/add")
@admin_required
def add_model(model_name: str):
    """Placeholder for the 'add a new model instance' admin view (unimplemented)."""
    return "Hello World"
| true | true |
f71d9ef0e4be00ca05ee0dfe3d9ea824198afb50 | 57,881 | py | Python | kafka/admin/client.py | jtribble/kafka-python | bbb8c90391e446de81833bc38dba4233a781fa5b | [
"Apache-2.0"
] | null | null | null | kafka/admin/client.py | jtribble/kafka-python | bbb8c90391e446de81833bc38dba4233a781fa5b | [
"Apache-2.0"
] | null | null | null | kafka/admin/client.py | jtribble/kafka-python | bbb8c90391e446de81833bc38dba4233a781fa5b | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from collections import defaultdict
import copy
import logging
import socket
from . import ConfigResourceType
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
import kafka.errors as Errors
from kafka.errors import (
IncompatibleBrokerVersion, KafkaConfigurationError, NotControllerError,
UnrecognizedBrokerVersion, IllegalArgumentError)
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.admin import (
CreateTopicsRequest, DeleteTopicsRequest, DescribeConfigsRequest, AlterConfigsRequest, CreatePartitionsRequest,
ListGroupsRequest, DescribeGroupsRequest, DescribeAclsRequest, CreateAclsRequest, DeleteAclsRequest)
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetFetchRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.structs import TopicPartition, OffsetAndMetadata
from kafka.admin.acl_resource import ACLOperation, ACLPermissionType, ACLFilter, ACL, ResourcePattern, ResourceType, \
ACLResourcePatternType
from kafka.version import __version__
log = logging.getLogger(__name__)
class KafkaAdminClient(object):
"""A class for administering the Kafka cluster.
Warning:
This is an unstable interface that was recently added and is subject to
change without warning. In particular, many methods currently return
raw protocol tuples. In future releases, we plan to make these into
nicer, more pythonic objects. Unfortunately, this will likely break
those interfaces.
The KafkaAdminClient class will negotiate for the latest version of each message
protocol format supported by both the kafka-python client library and the
Kafka broker. Usage of optional fields from protocol versions that are not
supported by the broker will result in IncompatibleBrokerVersion exceptions.
Use of this class requires a minimum broker version >= 0.10.0.0.
Keyword Arguments:
bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default port is 9092. If no servers are
specified, will default to localhost:9092.
client_id (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: 'kafka-python-{version}'
reconnect_backoff_ms (int): The amount of time in milliseconds to
wait before attempting to reconnect to a given host.
Default: 50.
reconnect_backoff_max_ms (int): The maximum amount of time in
milliseconds to backoff/wait when reconnecting to a broker that has
repeatedly failed to connect. If provided, the backoff per host
will increase exponentially for each consecutive connection
failure, up to this maximum. Once the maximum is reached,
reconnection attempts will continue periodically with this fixed
rate. To avoid connection storms, a randomization factor of 0.2
will be applied to the backoff resulting in a random range between
20% below and 20% above the computed value. Default: 1000.
request_timeout_ms (int): Client request timeout in milliseconds.
Default: 30000.
connections_max_idle_ms: Close idle connections after the number of
milliseconds specified by this config. The broker closes idle
connections after connections.max.idle.ms, so this avoids hitting
unexpected socket disconnected errors on the client.
Default: 540000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
max_in_flight_requests_per_connection (int): Requests are pipelined
to kafka brokers up to this number of maximum requests per
broker connection. Default: 5.
receive_buffer_bytes (int): The size of the TCP receive buffer
(SO_RCVBUF) to use when reading data. Default: None (relies on
system defaults). Java client defaults to 32768.
send_buffer_bytes (int): The size of the TCP send buffer
(SO_SNDBUF) to use when sending data. Default: None (relies on
system defaults). Java client defaults to 131072.
socket_options (list): List of tuple-arguments to socket.setsockopt
to apply to broker connection sockets. Default:
[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
metadata_max_age_ms (int): The period of time in milliseconds after
which we force a refresh of metadata even if we haven't seen any
partition leadership changes to proactively discover any new
brokers or partitions. Default: 300000
security_protocol (str): Protocol used to communicate with brokers.
Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
Default: PLAINTEXT.
ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
socket connections. If provided, all other ssl_* configurations
will be ignored. Default: None.
ssl_check_hostname (bool): Flag to configure whether SSL handshake
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
ssl_keyfile (str): Optional filename containing the client private key.
Default: None.
ssl_password (str): Optional password to be used when loading the
certificate chain. Default: None.
ssl_crlfile (str): Optional filename containing the CRL to check for
certificate expiration. By default, no CRL check is done. When
providing a file, only the leaf certificate will be checked against
this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
Default: None.
api_version (tuple): Specify which Kafka API version to use. If set
to None, KafkaClient will attempt to infer the broker version by
probing various APIs. Example: (0, 10, 2). Default: None
api_version_auto_timeout_ms (int): number of milliseconds to throw a
timeout exception from the constructor when checking the broker
api version. Only applies if api_version is None
selector (selectors.BaseSelector): Provide a specific selector
implementation to use for I/O multiplexing.
Default: selectors.DefaultSelector
metrics (kafka.metrics.Metrics): Optionally provide a metrics
instance for capturing network IO stats. Default: None.
metric_group_prefix (str): Prefix for metric names. Default: ''
sasl_mechanism (str): Authentication mechanism when security_protocol
is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
sasl_kerberos_service_name (str): Service name to include in GSSAPI
sasl mechanism handshake. Default: 'kafka'
sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
sasl mechanism handshake. Default: one of bootstrap servers
sasl_oauth_token_provider (AbstractTokenProvider): OAuthBearer token provider
instance. (See kafka.oauth.abstract). Default: None
"""
DEFAULT_CONFIG = {
# client configs
'bootstrap_servers': 'localhost',
'client_id': 'kafka-python-' + __version__,
'request_timeout_ms': 30000,
'connections_max_idle_ms': 9 * 60 * 1000,
'reconnect_backoff_ms': 50,
'reconnect_backoff_max_ms': 1000,
'max_in_flight_requests_per_connection': 5,
'receive_buffer_bytes': None,
'send_buffer_bytes': None,
'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
'sock_chunk_bytes': 4096, # undocumented experimental option
'sock_chunk_buffer_count': 1000, # undocumented experimental option
'retry_backoff_ms': 100,
'metadata_max_age_ms': 300000,
'security_protocol': 'PLAINTEXT',
'ssl_context': None,
'ssl_check_hostname': True,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None,
'ssl_password': None,
'ssl_crlfile': None,
'api_version': None,
'api_version_auto_timeout_ms': 2000,
'selector': selectors.DefaultSelector,
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'sasl_kerberos_service_name': 'kafka',
'sasl_kerberos_domain_name': None,
'sasl_oauth_token_provider': None,
# metrics configs
'metric_reporters': [],
'metrics_num_samples': 2,
'metrics_sample_window_ms': 30000,
}
    def __init__(self, **configs):
        """Build the admin client and connect to the cluster.

        :param configs: keyword configuration; see DEFAULT_CONFIG / the class
            docstring for the accepted keys.
        :raises KafkaConfigurationError: if an unrecognized config key is passed.
        """
        log.debug("Starting KafkaAdminClient with configuration: %s", configs)
        extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
        if extra_configs:
            raise KafkaConfigurationError("Unrecognized configs: {}".format(extra_configs))
        self.config = copy.copy(self.DEFAULT_CONFIG)
        self.config.update(configs)
        # Configure metrics
        metrics_tags = {'client-id': self.config['client_id']}
        metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                     time_window_ms=self.config['metrics_sample_window_ms'],
                                     tags=metrics_tags)
        reporters = [reporter() for reporter in self.config['metric_reporters']]
        self._metrics = Metrics(metric_config, reporters)
        self._client = KafkaClient(metrics=self._metrics,
                                   metric_group_prefix='admin',
                                   **self.config)
        # Probes the broker for supported API versions (also enforces the
        # minimum broker version requirement documented on the class).
        self._client.check_version()
        # Get auto-discovered version from client if necessary
        if self.config['api_version'] is None:
            self.config['api_version'] = self._client.config['api_version']
        self._closed = False
        # Cache the controller node id so controller-only requests can be routed.
        self._refresh_controller_id()
        log.debug("KafkaAdminClient started.")
def close(self):
"""Close the KafkaAdminClient connection to the Kafka broker."""
if not hasattr(self, '_closed') or self._closed:
log.info("KafkaAdminClient already closed.")
return
self._metrics.close()
self._client.close()
self._closed = True
log.debug("KafkaAdminClient is now closed.")
def _matching_api_version(self, operation):
"""Find the latest version of the protocol operation supported by both
this library and the broker.
This resolves to the lesser of either the latest api version this
library supports, or the max version supported by the broker.
:param operation: A list of protocol operation versions from kafka.protocol.
:return: The max matching version number between client and broker.
"""
broker_api_versions = self._client.get_api_versions()
api_key = operation[0].API_KEY
if broker_api_versions is None or api_key not in broker_api_versions:
raise IncompatibleBrokerVersion(
"Kafka broker does not support the '{}' Kafka protocol."
.format(operation[0].__name__))
min_version, max_version = broker_api_versions[api_key]
version = min(len(operation) - 1, max_version)
if version < min_version:
# max library version is less than min broker version. Currently,
# no Kafka versions specify a min msg version. Maybe in the future?
raise IncompatibleBrokerVersion(
"No version of the '{}' Kafka protocol is supported by both the client and broker."
.format(operation[0].__name__))
return version
def _validate_timeout(self, timeout_ms):
"""Validate the timeout is set or use the configuration default.
:param timeout_ms: The timeout provided by api call, in milliseconds.
:return: The timeout to use for the operation.
"""
return timeout_ms or self.config['request_timeout_ms']
    def _refresh_controller_id(self):
        """Determine the cluster controller broker and cache its node id.

        Stores the result in ``self._controller_id``.

        :raises IncompatibleBrokerVersion: if the controller runs a Kafka
            version older than 0.10.0.0.
        :raises UnrecognizedBrokerVersion: if no MetadataRequest version in
            the supported 1..6 range is available.
        """
        version = self._matching_api_version(MetadataRequest)
        if 1 <= version <= 6:
            # v0 is excluded here — presumably because its response lacks the
            # controller_id field used below; confirm against the protocol spec.
            request = MetadataRequest[version]()
            future = self._send_request_to_node(self._client.least_loaded_node(), request)
            self._wait_for_futures([future])
            response = future.value
            controller_id = response.controller_id
            # verify the controller is new enough to support our requests
            controller_version = self._client.check_version(controller_id)
            if controller_version < (0, 10, 0):
                raise IncompatibleBrokerVersion(
                    "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                    .format(controller_version))
            self._controller_id = controller_id
        else:
            raise UnrecognizedBrokerVersion(
                "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
                .format(version))
    def _find_coordinator_id_send_request(self, group_id):
        """Send a FindCoordinatorRequest to a broker.

        The protocol version is pinned to 0 — see the TODO below for why
        dynamic version negotiation is not used here yet.

        :param group_id: The consumer group ID. This is typically the group
            name as a string.
        :return: A message future
        """
        # TODO add support for dynamically picking version of
        # GroupCoordinatorRequest which was renamed to FindCoordinatorRequest.
        # When I experimented with this, the coordinator value returned in
        # GroupCoordinatorResponse_v1 didn't match the value returned by
        # GroupCoordinatorResponse_v0 and I couldn't figure out why.
        version = 0
        # version = self._matching_api_version(GroupCoordinatorRequest)
        if version <= 0:
            request = GroupCoordinatorRequest[version](group_id)
        else:
            raise NotImplementedError(
                "Support for GroupCoordinatorRequest_v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # The request is sent to the least-loaded node, not to the
        # coordinator itself (which is what this call is trying to discover).
        return self._send_request_to_node(self._client.least_loaded_node(), request)
def _find_coordinator_id_process_response(self, response):
"""Process a FindCoordinatorResponse.
:param response: a FindCoordinatorResponse.
:return: The node_id of the broker that is the coordinator.
"""
if response.API_VERSION <= 0:
error_type = Errors.for_code(response.error_code)
if error_type is not Errors.NoError:
# Note: When error_type.retriable, Java will retry... see
# KafkaAdminClient's handleFindCoordinatorError method
raise error_type(
"FindCoordinatorRequest failed with response '{}'."
.format(response))
else:
raise NotImplementedError(
"Support for FindCoordinatorRequest_v{} has not yet been added to KafkaAdminClient."
.format(response.API_VERSION))
return response.coordinator_id
def _find_coordinator_id(self, group_id):
"""Find the broker node_id of the coordinator of the given group.
Sends a FindCoordinatorRequest message to the cluster. Will block until
the FindCoordinatorResponse is received. Any errors are immediately
raised.
:param group_id: The consumer group ID. This is typically the group
name as a string.
:return: The node_id of the broker that is the coordinator.
"""
# Note: Java may change how this is implemented in KAFKA-6791.
future = self._find_coordinator_id_send_request(group_id)
self._wait_for_futures([future])
response = future.value
return self._find_coordinator_id_process_response(response)
def _send_request_to_node(self, node_id, request):
"""Send a Kafka protocol message to a specific broker.
Returns a future that may be polled for status and results.
:param node_id: The broker id to which to send the message.
:param request: The message to send.
:return: A future object that may be polled for status and results.
:exception: The exception if the message could not be sent.
"""
while not self._client.ready(node_id):
# poll until the connection to broker is ready, otherwise send()
# will fail with NodeNotReadyError
self._client.poll()
return self._client.send(node_id, request)
    def _send_request_to_controller(self, request):
        """Send a Kafka protocol message to the cluster controller.

        Will block until the message result is received.  Retries once with
        a refreshed controller id if the cached one turned out to be stale.

        :param request: The message to send.
        :return: The Kafka protocol response for the message.
        :raises RuntimeError: if both attempts exit without returning
            (should be unreachable).
        """
        tries = 2  # in case our cached self._controller_id is outdated
        while tries:
            tries -= 1
            future = self._send_request_to_node(self._controller_id, request)
            self._wait_for_futures([future])
            response = future.value
            # In Java, the error fieldname is inconsistent:
            #  - CreateTopicsResponse / CreatePartitionsResponse uses topic_errors
            #  - DeleteTopicsResponse uses topic_error_codes
            # So this is a little brittle in that it assumes all responses have
            # one of these attributes and that they always unpack into
            # (topic, error_code) tuples.
            topic_error_tuples = (response.topic_errors if hasattr(response, 'topic_errors')
                                  else response.topic_error_codes)
            # Also small py2/py3 compatibility -- py3 can ignore extra values
            # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
            # So for now we have to map across the list and explicitly drop any
            # extra values (usually the error_message)
            for topic, error_code in map(lambda e: e[:2], topic_error_tuples):
                error_type = Errors.for_code(error_code)
                if tries and error_type is NotControllerError:
                    # No need to inspect the rest of the errors for
                    # non-retriable errors because NotControllerError should
                    # either be thrown for all errors or no errors.
                    self._refresh_controller_id()
                    break
                elif error_type is not Errors.NoError:
                    raise error_type(
                        "Request '{}' failed with response '{}'."
                        .format(request, response))
            else:
                # for/else: the loop completed without a NotControllerError
                # retry break, so the response is good.
                return response
        raise RuntimeError("This should never happen, please file a bug with full stacktrace if encountered")
@staticmethod
def _convert_new_topic_request(new_topic):
return (
new_topic.name,
new_topic.num_partitions,
new_topic.replication_factor,
[
(partition_id, replicas) for partition_id, replicas in new_topic.replica_assignments.items()
],
[
(config_key, config_value) for config_key, config_value in new_topic.topic_configs.items()
]
)
def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
"""Create new topics in the cluster.
:param new_topics: A list of NewTopic objects.
:param timeout_ms: Milliseconds to wait for new topics to be created
before the broker returns.
:param validate_only: If True, don't actually create new topics.
Not supported by all versions. Default: False
:return: Appropriate version of CreateTopicResponse class.
"""
version = self._matching_api_version(CreateTopicsRequest)
timeout_ms = self._validate_timeout(timeout_ms)
if version == 0:
if validate_only:
raise IncompatibleBrokerVersion(
"validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}."
.format(self.config['api_version']))
request = CreateTopicsRequest[version](
create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
timeout=timeout_ms
)
elif version <= 3:
request = CreateTopicsRequest[version](
create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
timeout=timeout_ms,
validate_only=validate_only
)
else:
raise NotImplementedError(
"Support for CreateTopics v{} has not yet been added to KafkaAdminClient."
.format(version))
# TODO convert structs to a more pythonic interface
# TODO raise exceptions if errors
return self._send_request_to_controller(request)
def delete_topics(self, topics, timeout_ms=None):
"""Delete topics from the cluster.
:param topics: A list of topic name strings.
:param timeout_ms: Milliseconds to wait for topics to be deleted
before the broker returns.
:return: Appropriate version of DeleteTopicsResponse class.
"""
version = self._matching_api_version(DeleteTopicsRequest)
timeout_ms = self._validate_timeout(timeout_ms)
if version <= 3:
request = DeleteTopicsRequest[version](
topics=topics,
timeout=timeout_ms
)
response = self._send_request_to_controller(request)
else:
raise NotImplementedError(
"Support for DeleteTopics v{} has not yet been added to KafkaAdminClient."
.format(version))
return response
def _get_cluster_metadata(self, topics=None, auto_topic_creation=False, use_controller=False):
"""
topics == None means "get all topics"
"""
version = self._matching_api_version(MetadataRequest)
if version <= 3:
if auto_topic_creation:
raise IncompatibleBrokerVersion(
"auto_topic_creation requires MetadataRequest >= v4, which"
" is not supported by Kafka {}"
.format(self.config['api_version']))
request = MetadataRequest[version](topics=topics)
elif version <= 5:
request = MetadataRequest[version](
topics=topics,
allow_auto_topic_creation=auto_topic_creation
)
if use_controller:
return self._send_request_to_controller(request)
future = self._send_request_to_node(
self._client.least_loaded_node(),
request
)
self._wait_for_futures([future])
return future.value
def list_topics(self):
metadata = self._get_cluster_metadata(topics=None)
obj = metadata.to_object()
return [t['topic'] for t in obj['topics']]
def describe_topics(self, topics=None):
metadata = self._get_cluster_metadata(topics=topics, use_controller=True)
obj = metadata.to_object()
return obj['topics']
def describe_cluster(self):
metadata = self._get_cluster_metadata()
obj = metadata.to_object()
obj.pop('topics') # We have 'describe_topics' for this
return obj
    @staticmethod
    def _convert_describe_acls_response_to_acls(describe_response):
        """Convert a DescribeAclsResponse into a list of ACL objects.

        :param describe_response: a DescribeAclsResponse (v0 or v1).
        :return: tuple of (list of ACL objects, response-level KafkaError).
        :raises NotImplementedError: for response versions newer than v1.
        """
        version = describe_response.API_VERSION
        error = Errors.for_code(describe_response.error_code)
        acl_list = []
        for resources in describe_response.resources:
            if version == 0:
                resource_type, resource_name, acls = resources
                # v0 resource structs carry no pattern type; default to LITERAL.
                resource_pattern_type = ACLResourcePatternType.LITERAL.value
            elif version <= 1:
                resource_type, resource_name, resource_pattern_type, acls = resources
            else:
                raise NotImplementedError(
                    "Support for DescribeAcls Response v{} has not yet been added to KafkaAdmin."
                    .format(version)
                )
            for acl in acls:
                principal, host, operation, permission_type = acl
                conv_acl = ACL(
                    principal=principal,
                    host=host,
                    operation=ACLOperation(operation),
                    permission_type=ACLPermissionType(permission_type),
                    resource_pattern=ResourcePattern(
                        ResourceType(resource_type),
                        resource_name,
                        ACLResourcePatternType(resource_pattern_type)
                    )
                )
                acl_list.append(conv_acl)
        return (acl_list, error,)
    def describe_acls(self, acl_filter):
        """Describe a set of ACLs

        Used to return a set of ACLs matching the supplied ACLFilter.
        The cluster must be configured with an authorizer for this to work, or
        you will get a SecurityDisabledError

        :param acl_filter: an ACLFilter object
        :return: tuple of a list of matching ACL objects and a KafkaError (NoError if successful)
        """
        version = self._matching_api_version(DescribeAclsRequest)
        if version == 0:
            request = DescribeAclsRequest[version](
                resource_type=acl_filter.resource_pattern.resource_type,
                resource_name=acl_filter.resource_pattern.resource_name,
                principal=acl_filter.principal,
                host=acl_filter.host,
                operation=acl_filter.operation,
                permission_type=acl_filter.permission_type
            )
        elif version <= 1:
            request = DescribeAclsRequest[version](
                resource_type=acl_filter.resource_pattern.resource_type,
                resource_name=acl_filter.resource_pattern.resource_name,
                # v1 additionally filters on the resource pattern type.
                resource_pattern_type_filter=acl_filter.resource_pattern.pattern_type,
                principal=acl_filter.principal,
                host=acl_filter.host,
                operation=acl_filter.operation,
                permission_type=acl_filter.permission_type
            )
        else:
            raise NotImplementedError(
                "Support for DescribeAcls v{} has not yet been added to KafkaAdmin."
                .format(version)
            )
        future = self._send_request_to_node(self._client.least_loaded_node(), request)
        self._wait_for_futures([future])
        response = future.value
        error_type = Errors.for_code(response.error_code)
        if error_type is not Errors.NoError:
            # optionally we could retry if error_type.retriable
            raise error_type(
                "Request '{}' failed with response '{}'."
                .format(request, response))
        return self._convert_describe_acls_response_to_acls(response)
@staticmethod
def _convert_create_acls_resource_request_v0(acl):
return (
acl.resource_pattern.resource_type,
acl.resource_pattern.resource_name,
acl.principal,
acl.host,
acl.operation,
acl.permission_type
)
@staticmethod
def _convert_create_acls_resource_request_v1(acl):
return (
acl.resource_pattern.resource_type,
acl.resource_pattern.resource_name,
acl.resource_pattern.pattern_type,
acl.principal,
acl.host,
acl.operation,
acl.permission_type
)
@staticmethod
def _convert_create_acls_response_to_acls(acls, create_response):
version = create_response.API_VERSION
creations_error = []
creations_success = []
for i, creations in enumerate(create_response.creation_responses):
if version <= 1:
error_code, error_message = creations
acl = acls[i]
error = Errors.for_code(error_code)
else:
raise NotImplementedError(
"Support for DescribeAcls Response v{} has not yet been added to KafkaAdmin."
.format(version)
)
if error is Errors.NoError:
creations_success.append(acl)
else:
creations_error.append((acl, error,))
return {"succeeded": creations_success, "failed": creations_error}
def create_acls(self, acls):
"""Create a list of ACLs
This endpoint only accepts a list of concrete ACL objects, no ACLFilters.
Throws TopicAlreadyExistsError if topic is already present.
:param acls: a list of ACL objects
:return: dict of successes and failures
"""
for acl in acls:
if not isinstance(acl, ACL):
raise IllegalArgumentError("acls must contain ACL objects")
version = self._matching_api_version(CreateAclsRequest)
if version == 0:
request = CreateAclsRequest[version](
creations=[self._convert_create_acls_resource_request_v0(acl) for acl in acls]
)
elif version <= 1:
request = CreateAclsRequest[version](
creations=[self._convert_create_acls_resource_request_v1(acl) for acl in acls]
)
else:
raise NotImplementedError(
"Support for CreateAcls v{} has not yet been added to KafkaAdmin."
.format(version)
)
future = self._send_request_to_node(self._client.least_loaded_node(), request)
self._wait_for_futures([future])
response = future.value
return self._convert_create_acls_response_to_acls(acls, response)
@staticmethod
def _convert_delete_acls_resource_request_v0(acl):
return (
acl.resource_pattern.resource_type,
acl.resource_pattern.resource_name,
acl.principal,
acl.host,
acl.operation,
acl.permission_type
)
@staticmethod
def _convert_delete_acls_resource_request_v1(acl):
return (
acl.resource_pattern.resource_type,
acl.resource_pattern.resource_name,
acl.resource_pattern.pattern_type,
acl.principal,
acl.host,
acl.operation,
acl.permission_type
)
@staticmethod
def _convert_delete_acls_response_to_matching_acls(acl_filters, delete_response):
version = delete_response.API_VERSION
filter_result_list = []
for i, filter_responses in enumerate(delete_response.filter_responses):
filter_error_code, filter_error_message, matching_acls = filter_responses
filter_error = Errors.for_code(filter_error_code)
acl_result_list = []
for acl in matching_acls:
if version == 0:
error_code, error_message, resource_type, resource_name, principal, host, operation, permission_type = acl
resource_pattern_type = ACLResourcePatternType.LITERAL.value
elif version == 1:
error_code, error_message, resource_type, resource_name, resource_pattern_type, principal, host, operation, permission_type = acl
else:
raise NotImplementedError(
"Support for DescribeAcls Response v{} has not yet been added to KafkaAdmin."
.format(version)
)
acl_error = Errors.for_code(error_code)
conv_acl = ACL(
principal=principal,
host=host,
operation=ACLOperation(operation),
permission_type=ACLPermissionType(permission_type),
resource_pattern=ResourcePattern(
ResourceType(resource_type),
resource_name,
ACLResourcePatternType(resource_pattern_type)
)
)
acl_result_list.append((conv_acl, acl_error,))
filter_result_list.append((acl_filters[i], acl_result_list, filter_error,))
return filter_result_list
def delete_acls(self, acl_filters):
"""Delete a set of ACLs
Deletes all ACLs matching the list of input ACLFilter
:param acl_filters: a list of ACLFilter
:return: a list of 3-tuples corresponding to the list of input filters.
The tuples hold (the input ACLFilter, list of affected ACLs, KafkaError instance)
"""
for acl in acl_filters:
if not isinstance(acl, ACLFilter):
raise IllegalArgumentError("acl_filters must contain ACLFilter type objects")
version = self._matching_api_version(DeleteAclsRequest)
if version == 0:
request = DeleteAclsRequest[version](
filters=[self._convert_delete_acls_resource_request_v0(acl) for acl in acl_filters]
)
elif version <= 1:
request = DeleteAclsRequest[version](
filters=[self._convert_delete_acls_resource_request_v1(acl) for acl in acl_filters]
)
else:
raise NotImplementedError(
"Support for DeleteAcls v{} has not yet been added to KafkaAdmin."
.format(version)
)
future = self._send_request_to_node(self._client.least_loaded_node(), request)
self._wait_for_futures([future])
response = future.value
return self._convert_delete_acls_response_to_matching_acls(acl_filters, response)
@staticmethod
def _convert_describe_config_resource_request(config_resource):
return (
config_resource.resource_type,
config_resource.name,
[
config_key for config_key, config_value in config_resource.configs.items()
] if config_resource.configs else None
)
def describe_configs(self, config_resources, include_synonyms=False):
"""Fetch configuration parameters for one or more Kafka resources.
:param config_resources: An list of ConfigResource objects.
Any keys in ConfigResource.configs dict will be used to filter the
result. Setting the configs dict to None will get all values. An
empty dict will get zero values (as per Kafka protocol).
:param include_synonyms: If True, return synonyms in response. Not
supported by all versions. Default: False.
:return: Appropriate version of DescribeConfigsResponse class.
"""
# Break up requests by type - a broker config request must be sent to the specific broker.
# All other (currently just topic resources) can be sent to any broker.
broker_resources = []
topic_resources = []
for config_resource in config_resources:
if config_resource.resource_type == ConfigResourceType.BROKER:
broker_resources.append(self._convert_describe_config_resource_request(config_resource))
else:
topic_resources.append(self._convert_describe_config_resource_request(config_resource))
futures = []
version = self._matching_api_version(DescribeConfigsRequest)
if version == 0:
if include_synonyms:
raise IncompatibleBrokerVersion(
"include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
.format(self.config['api_version']))
if len(broker_resources) > 0:
for broker_resource in broker_resources:
try:
broker_id = int(broker_resource[1])
except ValueError:
raise ValueError("Broker resource names must be an integer or a string represented integer")
futures.append(self._send_request_to_node(
broker_id,
DescribeConfigsRequest[version](resources=[broker_resource])
))
if len(topic_resources) > 0:
futures.append(self._send_request_to_node(
self._client.least_loaded_node(),
DescribeConfigsRequest[version](resources=topic_resources)
))
elif version <= 2:
if len(broker_resources) > 0:
for broker_resource in broker_resources:
try:
broker_id = int(broker_resource[1])
except ValueError:
raise ValueError("Broker resource names must be an integer or a string represented integer")
futures.append(self._send_request_to_node(
broker_id,
DescribeConfigsRequest[version](
resources=[broker_resource],
include_synonyms=include_synonyms)
))
if len(topic_resources) > 0:
futures.append(self._send_request_to_node(
self._client.least_loaded_node(),
DescribeConfigsRequest[version](resources=topic_resources, include_synonyms=include_synonyms)
))
else:
raise NotImplementedError(
"Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient.".format(version))
self._wait_for_futures(futures)
return [f.value for f in futures]
@staticmethod
def _convert_alter_config_resource_request(config_resource):
    """Build the wire-format tuple for one resource in an AlterConfigsRequest.

    :param config_resource: A ConfigResource with the configs to set.
    :return: ``(resource_type, name, [(config_key, config_value), ...])``
    """
    config_entries = list(config_resource.configs.items())
    return (
        config_resource.resource_type,
        config_resource.name,
        config_entries,
    )
def alter_configs(self, config_resources):
    """Alter configuration parameters of one or more Kafka resources.

    Warning:
        This is currently broken for BROKER resources because those must be
        sent to that specific broker, versus this always picks the
        least-loaded node. See the comment in the source code for details.
        We would happily accept a PR fixing this.

    :param config_resources: A list of ConfigResource objects.
    :return: Appropriate version of AlterConfigsResponse class.
    """
    version = self._matching_api_version(AlterConfigsRequest)
    if version <= 1:
        request = AlterConfigsRequest[version](
            resources=[self._convert_alter_config_resource_request(config_resource) for config_resource in config_resources]
        )
    else:
        raise NotImplementedError(
            "Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
            .format(version))
    # TODO the Java client has the note:
    # // We must make a separate AlterConfigs request for every BROKER resource we want to alter
    # // and send the request to that specific broker. Other resources are grouped together into
    # // a single request that may be sent to any broker.
    #
    # So this is currently broken as it always sends to the least_loaded_node()
    future = self._send_request_to_node(self._client.least_loaded_node(), request)
    self._wait_for_futures([future])
    response = future.value
    return response
# alter replica logs dir protocol not yet implemented
# Note: have to lookup the broker with the replica assignment and send the request to that broker
# describe log dirs protocol not yet implemented
# Note: have to lookup the broker with the replica assignment and send the request to that broker
@staticmethod
def _convert_create_partitions_request(topic_name, new_partitions):
    """Build the wire-format tuple for one topic in a CreatePartitionsRequest.

    :param topic_name: The topic to expand, as a string.
    :param new_partitions: A NewPartitions object with the target layout.
    :return: ``(topic_name, (total_count, new_assignments))``
    """
    partition_spec = (new_partitions.total_count, new_partitions.new_assignments)
    return (topic_name, partition_spec)
def create_partitions(self, topic_partitions, timeout_ms=None, validate_only=False):
    """Create additional partitions for an existing topic.

    :param topic_partitions: A map of topic name strings to NewPartition objects.
    :param timeout_ms: Milliseconds to wait for new partitions to be
        created before the broker returns.
    :param validate_only: If True, don't actually create new partitions.
        Default: False
    :return: Appropriate version of CreatePartitionsResponse class.
    """
    version = self._matching_api_version(CreatePartitionsRequest)
    timeout_ms = self._validate_timeout(timeout_ms)
    if version > 1:
        raise NotImplementedError(
            "Support for CreatePartitions v{} has not yet been added to KafkaAdminClient."
            .format(version))
    converted = [
        self._convert_create_partitions_request(name, parts)
        for name, parts in topic_partitions.items()
    ]
    request = CreatePartitionsRequest[version](
        topic_partitions=converted,
        timeout=timeout_ms,
        validate_only=validate_only,
    )
    # partition changes must go through the cluster controller
    return self._send_request_to_controller(request)
# delete records protocol not yet implemented
# Note: send the request to the partition leaders
# create delegation token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# renew delegation token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# expire delegation_token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# describe delegation_token protocol not yet implemented
# Note: send the request to the least_loaded_node()
def _describe_consumer_groups_send_request(self, group_id, group_coordinator_id, include_authorized_operations=False):
    """Send a DescribeGroupsRequest to the group's coordinator.

    :param group_id: The group name as a string
    :param group_coordinator_id: The node_id of the groups' coordinator
        broker.
    :param include_authorized_operations: Also request the operations the
        group is authorized to perform (requires DescribeGroupsRequest >= v3).
        Default: False.
    :return: A message future.
    :raise IncompatibleBrokerVersion: if include_authorized_operations is
        requested but the broker only supports DescribeGroupsRequest <= v2.
    """
    version = self._matching_api_version(DescribeGroupsRequest)
    if version <= 2:
        if include_authorized_operations:
            raise IncompatibleBrokerVersion(
                "include_authorized_operations requests "
                "DescribeGroupsRequest >= v3, which is not "
                "supported by Kafka {}".format(version)
            )
        # Note: KAFKA-6788 A potential optimization is to group the
        # request per coordinator and send one request with a list of
        # all consumer groups. Java still hasn't implemented this
        # because the error checking is hard to get right when some
        # groups error and others don't.
        request = DescribeGroupsRequest[version](groups=(group_id,))
    elif version <= 3:
        request = DescribeGroupsRequest[version](
            groups=(group_id,),
            include_authorized_operations=include_authorized_operations
        )
    else:
        raise NotImplementedError(
            "Support for DescribeGroupsRequest_v{} has not yet been added to KafkaAdminClient."
            .format(version))
    return self._send_request_to_node(group_coordinator_id, request)
def _describe_consumer_groups_process_response(self, response):
    """Process a DescribeGroupsResponse into a group description.

    :param response: a DescribeGroupsResponse (API version <= 3).
    :return: the raw group-description struct for the single requested group.
    :raise: the error type decoded from the response, if any.
    """
    if response.API_VERSION <= 3:
        # Each request was sent for exactly one group, so exactly one
        # description is expected back.
        assert len(response.groups) == 1
        # TODO need to implement converting the response tuple into
        # a more accessible interface like a namedtuple and then stop
        # hardcoding tuple indices here. Several Java examples,
        # including KafkaAdminClient.java
        group_description = response.groups[0]
        error_code = group_description[0]  # first field of the struct is the error code
        error_type = Errors.for_code(error_code)
        # Java has the note: KAFKA-6789, we can retry based on the error code
        if error_type is not Errors.NoError:
            raise error_type(
                "DescribeGroupsResponse failed with response '{}'."
                .format(response))
        # TODO Java checks the group protocol type, and if consumer
        # (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
        # the members' partition assignments... that hasn't yet been
        # implemented here so just return the raw struct results
    else:
        raise NotImplementedError(
            "Support for DescribeGroupsResponse_v{} has not yet been added to KafkaAdminClient."
            .format(response.API_VERSION))
    return group_description
def describe_consumer_groups(self, group_ids, group_coordinator_id=None, include_authorized_operations=False):
    """Describe a set of consumer groups.

    Any errors are immediately raised.

    :param group_ids: A list of consumer group IDs. These are typically the
        group names as strings.
    :param group_coordinator_id: The node_id of the groups' coordinator
        broker. If set to None, it will query the cluster for each group to
        find that group's coordinator. Explicitly specifying this can be
        useful for avoiding extra network round trips if you already know
        the group coordinator. This is only useful when all the group_ids
        have the same coordinator, otherwise it will error. Default: None.
    :param include_authorized_operations: Whether or not to include
        information about the operations a group is allowed to perform.
        Only supported on API version >= v3. Default: False.
    :return: A list of group descriptions. For now the group descriptions
        are the raw results from the DescribeGroupsResponse. Long-term, we
        plan to change this to return namedtuples as well as decoding the
        partition assignments.
    """
    # One request per group; the coordinator lookup happens lazily per
    # group unless an explicit coordinator was supplied.
    futures = [
        self._describe_consumer_groups_send_request(
            group_id,
            group_coordinator_id if group_coordinator_id is not None
            else self._find_coordinator_id(group_id),
            include_authorized_operations)
        for group_id in group_ids
    ]
    self._wait_for_futures(futures)
    return [
        self._describe_consumer_groups_process_response(future.value)
        for future in futures
    ]
def _list_consumer_groups_send_request(self, broker_id):
    """Send a ListGroupsRequest to a broker and return the message future.

    :param broker_id: The broker's node_id.
    :return: A message future
    """
    version = self._matching_api_version(ListGroupsRequest)
    if version > 2:
        raise NotImplementedError(
            "Support for ListGroupsRequest_v{} has not yet been added to KafkaAdminClient."
            .format(version))
    return self._send_request_to_node(broker_id, ListGroupsRequest[version]())
def _list_consumer_groups_process_response(self, response):
    """Validate a ListGroupsResponse and return its list of groups.

    :param response: a ListGroupsResponse (API version <= 2).
    :return: the response's group list.
    :raise: the error type decoded from the response, if any.
    """
    if response.API_VERSION > 2:
        raise NotImplementedError(
            "Support for ListGroupsResponse_v{} has not yet been added to KafkaAdminClient."
            .format(response.API_VERSION))
    error_type = Errors.for_code(response.error_code)
    if error_type is not Errors.NoError:
        raise error_type(
            "ListGroupsRequest failed with response '{}'."
            .format(response))
    return response.groups
def list_consumer_groups(self, broker_ids=None):
    """List all consumer groups known to the cluster.

    This returns a list of Consumer Group tuples. The tuples are
    composed of the consumer group name and the consumer group protocol
    type.

    Only consumer groups that store their offsets in Kafka are returned.
    The protocol type will be an empty string for groups created using
    Kafka < 0.9 APIs because, although they store their offsets in Kafka,
    they don't use Kafka for group coordination. For groups created using
    Kafka >= 0.9, the protocol type will typically be "consumer".

    As soon as any error is encountered, it is immediately raised.

    :param broker_ids: A list of broker node_ids to query for consumer
        groups. If set to None, will query all brokers in the cluster.
        Explicitly specifying broker(s) can be useful for determining which
        consumer groups are coordinated by those broker(s). Default: None
    :return list: List of tuples of Consumer Groups.
    :exception GroupCoordinatorNotAvailableError: The coordinator is not
        available, so cannot process requests.
    :exception GroupLoadInProgressError: The coordinator is loading and
        hence can't process requests.
    """
    if broker_ids is None:
        broker_ids = [broker.nodeId for broker in self._client.cluster.brokers()]
    futures = [self._list_consumer_groups_send_request(node) for node in broker_ids]
    self._wait_for_futures(futures)
    # Accumulate into a set: if a coordinator fails after being queried and
    # its groups move to an as-yet-unqueried broker, the same group could be
    # reported by more than one broker.
    found_groups = set()
    for future in futures:
        found_groups.update(self._list_consumer_groups_process_response(future.value))
    return list(found_groups)
def _list_consumer_group_offsets_send_request(self, group_id,
        group_coordinator_id, partitions=None):
    """Send an OffsetFetchRequest to a broker.

    :param group_id: The consumer group id name for which to fetch offsets.
    :param group_coordinator_id: The node_id of the group's coordinator
        broker.
    :param partitions: A list of TopicPartitions to fetch, or None for all
        (all requires OffsetFetchRequest >= v2 / broker >= 0.10.2).
    :return: A message future
    """
    version = self._matching_api_version(OffsetFetchRequest)
    if version <= 3:
        if partitions is None:
            if version <= 1:
                raise ValueError(
                    """OffsetFetchRequest_v{} requires specifying the
                    partitions for which to fetch offsets. Omitting the
                    partitions is only supported on brokers >= 0.10.2.
                    For details, see KIP-88.""".format(version))
            # None in the wire request means "all partitions"
            topics_partitions = None
        else:
            # transform from [TopicPartition("t1", 1), TopicPartition("t1", 2)] to [("t1", [1, 2])]
            topics_partitions_dict = defaultdict(set)
            for topic, partition in partitions:
                topics_partitions_dict[topic].add(partition)
            topics_partitions = list(six.iteritems(topics_partitions_dict))
        request = OffsetFetchRequest[version](group_id, topics_partitions)
    else:
        raise NotImplementedError(
            "Support for OffsetFetchRequest_v{} has not yet been added to KafkaAdminClient."
            .format(version))
    return self._send_request_to_node(group_coordinator_id, request)
def _list_consumer_group_offsets_process_response(self, response):
    """Process an OffsetFetchResponse.

    :param response: an OffsetFetchResponse.
    :return: A dictionary composed of TopicPartition keys and
        OffsetAndMetada values.
    :raise: the decoded error type, from either the top-level error code
        (v2+) or any per-partition error code.
    """
    if response.API_VERSION <= 3:
        # OffsetFetchResponse_v1 lacks a top-level error_code
        if response.API_VERSION > 1:
            error_type = Errors.for_code(response.error_code)
            if error_type is not Errors.NoError:
                # optionally we could retry if error_type.retriable
                raise error_type(
                    "OffsetFetchResponse failed with response '{}'."
                    .format(response))
        # transform response into a dictionary with TopicPartition keys and
        # OffsetAndMetada values--this is what the Java AdminClient returns
        offsets = {}
        for topic, partitions in response.topics:
            for partition, offset, metadata, error_code in partitions:
                error_type = Errors.for_code(error_code)
                if error_type is not Errors.NoError:
                    raise error_type(
                        "Unable to fetch consumer group offsets for topic {}, partition {}"
                        .format(topic, partition))
                offsets[TopicPartition(topic, partition)] = OffsetAndMetadata(offset, metadata)
    else:
        raise NotImplementedError(
            "Support for OffsetFetchResponse_v{} has not yet been added to KafkaAdminClient."
            .format(response.API_VERSION))
    return offsets
def list_consumer_group_offsets(self, group_id, group_coordinator_id=None,
        partitions=None):
    """Fetch Consumer Offsets for a single consumer group.

    Note:
        This does not verify that the group_id or partitions actually exist
        in the cluster.

    As soon as any error is encountered, it is immediately raised.

    :param group_id: The consumer group id name for which to fetch offsets.
    :param group_coordinator_id: The node_id of the group's coordinator
        broker. If set to None, will query the cluster to find the group
        coordinator. Explicitly specifying this can be useful to prevent
        that extra network round trip if you already know the group
        coordinator. Default: None.
    :param partitions: A list of TopicPartitions for which to fetch
        offsets. On brokers >= 0.10.2, this can be set to None to fetch all
        known offsets for the consumer group. Default: None.
    :return dictionary: A dictionary with TopicPartition keys and
        OffsetAndMetada values. Partitions that are not specified and for
        which the group_id does not have a recorded offset are omitted. An
        offset value of `-1` indicates the group_id has no offset for that
        TopicPartition. A `-1` can only happen for partitions that are
        explicitly specified.
    """
    coordinator = (group_coordinator_id
                   if group_coordinator_id is not None
                   else self._find_coordinator_id(group_id))
    future = self._list_consumer_group_offsets_send_request(
        group_id, coordinator, partitions)
    self._wait_for_futures([future])
    return self._list_consumer_group_offsets_process_response(future.value)
# delete groups protocol not yet implemented
# Note: send the request to the group's coordinator.
def _wait_for_futures(self, futures):
    """Block until every future in *futures* succeeds, raising on failure.

    :param futures: An iterable of message futures.
    :raise: the first future's exception encountered after a poll.
    """
    while True:
        if all(f.succeeded() for f in futures):
            return
        for f in futures:
            self._client.poll(future=f)
            if f.failed():
                raise f.exception  # pylint: disable-msg=raising-bad-type
| 46.3048 | 162 | 0.640988 | from __future__ import absolute_import
from collections import defaultdict
import copy
import logging
import socket
from . import ConfigResourceType
from kafka.vendor import six
from kafka.client_async import KafkaClient, selectors
import kafka.errors as Errors
from kafka.errors import (
IncompatibleBrokerVersion, KafkaConfigurationError, NotControllerError,
UnrecognizedBrokerVersion, IllegalArgumentError)
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.admin import (
CreateTopicsRequest, DeleteTopicsRequest, DescribeConfigsRequest, AlterConfigsRequest, CreatePartitionsRequest,
ListGroupsRequest, DescribeGroupsRequest, DescribeAclsRequest, CreateAclsRequest, DeleteAclsRequest)
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetFetchRequest
from kafka.protocol.metadata import MetadataRequest
from kafka.structs import TopicPartition, OffsetAndMetadata
from kafka.admin.acl_resource import ACLOperation, ACLPermissionType, ACLFilter, ACL, ResourcePattern, ResourceType, \
ACLResourcePatternType
from kafka.version import __version__
# Module-level logger, named for this module per the logging convention.
log = logging.getLogger(__name__)
class KafkaAdminClient(object):
# Default client configuration. Each key may be overridden via a keyword
# argument to __init__; unrecognized keys raise KafkaConfigurationError.
DEFAULT_CONFIG = {
    # connection / networking
    'bootstrap_servers': 'localhost',
    'client_id': 'kafka-python-' + __version__,
    'request_timeout_ms': 30000,
    'connections_max_idle_ms': 9 * 60 * 1000,
    'reconnect_backoff_ms': 50,
    'reconnect_backoff_max_ms': 1000,
    'max_in_flight_requests_per_connection': 5,
    'receive_buffer_bytes': None,
    'send_buffer_bytes': None,
    'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
    'sock_chunk_bytes': 4096,
    'sock_chunk_buffer_count': 1000,
    'retry_backoff_ms': 100,
    'metadata_max_age_ms': 300000,
    # security / SSL
    'security_protocol': 'PLAINTEXT',
    'ssl_context': None,
    'ssl_check_hostname': True,
    'ssl_cafile': None,
    'ssl_certfile': None,
    'ssl_keyfile': None,
    'ssl_password': None,
    'ssl_crlfile': None,
    # protocol version handling (None => probe the broker)
    'api_version': None,
    'api_version_auto_timeout_ms': 2000,
    'selector': selectors.DefaultSelector,
    # SASL authentication
    'sasl_mechanism': None,
    'sasl_plain_username': None,
    'sasl_plain_password': None,
    'sasl_kerberos_service_name': 'kafka',
    'sasl_kerberos_domain_name': None,
    'sasl_oauth_token_provider': None,
    # metrics
    'metric_reporters': [],
    'metrics_num_samples': 2,
    'metrics_sample_window_ms': 30000,
}
def __init__(self, **configs):
    """Create a new KafkaAdminClient.

    :param configs: keyword overrides for DEFAULT_CONFIG. Any key not in
        DEFAULT_CONFIG raises KafkaConfigurationError.
    """
    log.debug("Starting KafkaAdminClient with configuration: %s", configs)
    extra_configs = set(configs).difference(self.DEFAULT_CONFIG)
    if extra_configs:
        raise KafkaConfigurationError("Unrecognized configs: {}".format(extra_configs))

    self.config = copy.copy(self.DEFAULT_CONFIG)
    self.config.update(configs)

    # Configure metrics from the merged config.
    metrics_tags = {'client-id': self.config['client_id']}
    metric_config = MetricConfig(samples=self.config['metrics_num_samples'],
                                 time_window_ms=self.config['metrics_sample_window_ms'],
                                 tags=metrics_tags)
    reporters = [reporter() for reporter in self.config['metric_reporters']]
    self._metrics = Metrics(metric_config, reporters)

    self._client = KafkaClient(metrics=self._metrics,
                               metric_group_prefix='admin',
                               **self.config)
    self._client.check_version()

    # If the user did not pin api_version, adopt the version the client
    # negotiated with the broker.
    if self.config['api_version'] is None:
        self.config['api_version'] = self._client.config['api_version']

    self._closed = False
    # Cache the controller's node id; admin requests like topic
    # creation/deletion must be routed to the controller.
    self._refresh_controller_id()
    log.debug("KafkaAdminClient started.")
def close(self):
    """Close the KafkaAdminClient connection to the Kafka broker."""
    # _closed may be missing if __init__ raised before setting it.
    already_closed = (not hasattr(self, '_closed')) or self._closed
    if already_closed:
        log.info("KafkaAdminClient already closed.")
        return
    self._metrics.close()
    self._client.close()
    self._closed = True
    log.debug("KafkaAdminClient is now closed.")
def _matching_api_version(self, operation):
    """Find the latest version of *operation* supported by both client and broker.

    :param operation: A list of protocol request classes indexed by version
        (e.g. ``CreateTopicsRequest``).
    :return: The highest mutually supported version number.
    :raise IncompatibleBrokerVersion: if the broker doesn't speak the
        protocol at all, or no version overlaps.
    """
    broker_api_versions = self._client.get_api_versions()
    api_key = operation[0].API_KEY
    if broker_api_versions is None or api_key not in broker_api_versions:
        raise IncompatibleBrokerVersion(
            "Kafka broker does not support the '{}' Kafka protocol."
            .format(operation[0].__name__))
    min_version, max_version = broker_api_versions[api_key]
    # len(operation) - 1 is the newest version this library implements
    version = min(len(operation) - 1, max_version)
    if version < min_version:
        # max library version is less than the broker's minimum
        raise IncompatibleBrokerVersion(
            "No version of the '{}' Kafka protocol is supported by both the client and broker."
            .format(operation[0].__name__))
    return version
def _validate_timeout(self, timeout_ms):
    """Return *timeout_ms* if truthy, else the configured request timeout."""
    if timeout_ms:
        return timeout_ms
    return self.config['request_timeout_ms']
def _refresh_controller_id(self):
    """Determine the cluster controller and cache its node id in
    ``self._controller_id``.

    :raise IncompatibleBrokerVersion: if the controller runs Kafka < 0.10.
    :raise UnrecognizedBrokerVersion: if MetadataRequest version is outside
        the range this method understands.
    """
    version = self._matching_api_version(MetadataRequest)
    # MetadataResponse_v0 has no controller_id field, so require >= v1.
    if 1 <= version <= 6:
        request = MetadataRequest[version]()
        future = self._send_request_to_node(self._client.least_loaded_node(), request)
        self._wait_for_futures([future])
        response = future.value
        controller_id = response.controller_id
        # verify the controller is new enough to support the admin protocols
        controller_version = self._client.check_version(controller_id)
        if controller_version < (0, 10, 0):
            raise IncompatibleBrokerVersion(
                "The controller appears to be running Kafka {}. KafkaAdminClient requires brokers >= 0.10.0.0."
                .format(controller_version))
        self._controller_id = controller_id
    else:
        raise UnrecognizedBrokerVersion(
            "Kafka Admin interface cannot determine the controller using MetadataRequest_v{}."
            .format(version))
def _find_coordinator_id_send_request(self, group_id):
    """Send a GroupCoordinatorRequest for *group_id* to the least-loaded node.

    :param group_id: The consumer group name as a string.
    :return: A message future.
    """
    # Version is pinned to 0. NOTE(review): the original comment here was
    # truncated; it appears later versions' responses did not match
    # GroupCoordinatorResponse_v0 and the author couldn't figure out why —
    # confirm before bumping the version.
    version = 0
    if version <= 0:
        request = GroupCoordinatorRequest[version](group_id)
    else:
        # unreachable while version is hard-coded to 0, kept as a guard
        raise NotImplementedError(
            "Support for GroupCoordinatorRequest_v{} has not yet been added to KafkaAdminClient."
            .format(version))
    return self._send_request_to_node(self._client.least_loaded_node(), request)
def _find_coordinator_id_process_response(self, response):
    """Extract the coordinator node id from a GroupCoordinatorResponse.

    :param response: a GroupCoordinatorResponse (API version 0).
    :return: the coordinator's node id.
    :raise: the error type decoded from the response, if any.
    """
    if response.API_VERSION > 0:
        raise NotImplementedError(
            "Support for FindCoordinatorRequest_v{} has not yet been added to KafkaAdminClient."
            .format(response.API_VERSION))
    error_type = Errors.for_code(response.error_code)
    if error_type is not Errors.NoError:
        raise error_type(
            "FindCoordinatorRequest failed with response '{}'."
            .format(response))
    return response.coordinator_id
def _find_coordinator_id(self, group_id):
    """Look up the node id of the broker coordinating *group_id*.

    :param group_id: The consumer group name as a string.
    :return: the coordinator's node id.
    """
    # Note: Java may change how this is implemented in KAFKA-6791.
    future = self._find_coordinator_id_send_request(group_id)
    self._wait_for_futures([future])
    return self._find_coordinator_id_process_response(future.value)
def _send_request_to_node(self, node_id, request):
    """Send a Kafka protocol request to one specific broker.

    :param node_id: The broker's node id.
    :param request: The protocol request object to send.
    :return: A message future for the response.
    """
    while not self._client.ready(node_id):
        # poll until the connection to broker is ready, otherwise send()
        # will fail with NodeNotReadyError
        self._client.poll()
    return self._client.send(node_id, request)
def _send_request_to_controller(self, request):
    """Send *request* to the cluster controller, retrying once on a stale
    cached controller id.

    :param request: a topic-level admin request (Create/Delete Topics or
        CreatePartitions).
    :return: the response, once it contains no errors.
    :raise: the first non-retriable error decoded from the response.
    """
    tries = 2  # in case our cached self._controller_id is outdated
    while tries:
        tries -= 1
        future = self._send_request_to_node(self._controller_id, request)
        self._wait_for_futures([future])
        response = future.value
        # In Java, the error fieldname is inconsistent:
        #  - CreateTopicsResponse / CreatePartitionsResponse uses topic_errors
        #  - DeleteTopicsResponse uses topic_error_codes
        # So this is a little brittle in that it assumes all responses have
        # one of these attributes and that they always unpack into
        # (topic, error_code) tuples.
        topic_error_tuples = (response.topic_errors if hasattr(response, 'topic_errors')
                              else response.topic_error_codes)
        # Also small py2/py3 compatibility -- py3 can ignore extra values
        # during unpack via: for x, y, *rest in list_of_values. py2 cannot.
        # So for now we have to map across the list and explicitly drop any
        # extra values (usually the error_message)
        for topic, error_code in map(lambda e: e[:2], topic_error_tuples):
            error_type = Errors.for_code(error_code)
            if tries and error_type is NotControllerError:
                # No need to inspect the rest of the errors for
                # non-retriable errors because NotControllerError should
                # either be thrown for all errors or no errors.
                self._refresh_controller_id()
                break
            elif error_type is not Errors.NoError:
                raise error_type(
                    "Request '{}' failed with response '{}'."
                    .format(request, response))
        else:
            # for/else: no NotControllerError break occurred, so all topics
            # succeeded on this attempt
            return response
    raise RuntimeError("This should never happen, please file a bug with full stacktrace if encountered")
@staticmethod
def _convert_new_topic_request(new_topic):
    """Build the wire-format tuple for one NewTopic in a CreateTopicsRequest.

    :param new_topic: A NewTopic object describing the topic to create.
    :return: ``(name, num_partitions, replication_factor,
        [(partition_id, replicas), ...], [(config_key, config_value), ...])``
    """
    replica_assignments = list(new_topic.replica_assignments.items())
    topic_configs = list(new_topic.topic_configs.items())
    return (
        new_topic.name,
        new_topic.num_partitions,
        new_topic.replication_factor,
        replica_assignments,
        topic_configs,
    )
def create_topics(self, new_topics, timeout_ms=None, validate_only=False):
    """Create new topics in the cluster.

    :param new_topics: A list of NewTopic objects.
    :param timeout_ms: Milliseconds to wait for new topics to be created
        before the broker returns.
    :param validate_only: If True, don't actually create new topics.
        Requires CreateTopicsRequest >= v1. Default: False
    :return: Appropriate version of CreateTopicResponse class.
    """
    version = self._matching_api_version(CreateTopicsRequest)
    timeout_ms = self._validate_timeout(timeout_ms)
    if version == 0:
        if validate_only:
            raise IncompatibleBrokerVersion(
                "validate_only requires CreateTopicsRequest >= v1, which is not supported by Kafka {}."
                .format(self.config['api_version']))
        request = CreateTopicsRequest[version](
            create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
            timeout=timeout_ms
        )
    elif version <= 3:
        request = CreateTopicsRequest[version](
            create_topic_requests=[self._convert_new_topic_request(new_topic) for new_topic in new_topics],
            timeout=timeout_ms,
            validate_only=validate_only
        )
    else:
        raise NotImplementedError(
            "Support for CreateTopics v{} has not yet been added to KafkaAdminClient."
            .format(version))
    # TODO convert structs to a more pythonic interface
    # TODO raise exceptions if errors
    return self._send_request_to_controller(request)
def delete_topics(self, topics, timeout_ms=None):
    """Delete topics from the cluster.

    :param topics: A list of topic name strings.
    :param timeout_ms: Milliseconds to wait for topics to be deleted
        before the broker returns.
    :return: Appropriate version of DeleteTopicsResponse class.
    """
    version = self._matching_api_version(DeleteTopicsRequest)
    timeout_ms = self._validate_timeout(timeout_ms)
    if version > 3:
        raise NotImplementedError(
            "Support for DeleteTopics v{} has not yet been added to KafkaAdminClient."
            .format(version))
    request = DeleteTopicsRequest[version](
        topics=topics,
        timeout=timeout_ms
    )
    # topic deletion must be handled by the controller
    return self._send_request_to_controller(request)
def _get_cluster_metadata(self, topics=None, auto_topic_creation=False, use_controller=False):
    """Fetch cluster metadata, optionally for a subset of topics.

    :param topics: A list of topic names, or None for all topics.
    :param auto_topic_creation: Allow the broker to auto-create requested
        topics (requires MetadataRequest >= v4). Default: False.
    :param use_controller: Route the request through the controller instead
        of the least-loaded node. Default: False.
    :return: A MetadataResponse.
    :raise IncompatibleBrokerVersion: if auto_topic_creation is requested
        but the broker only supports MetadataRequest <= v3.
    """
    version = self._matching_api_version(MetadataRequest)
    if version <= 3:
        if auto_topic_creation:
            raise IncompatibleBrokerVersion(
                "auto_topic_creation requires MetadataRequest >= v4, which"
                " is not supported by Kafka {}"
                .format(self.config['api_version']))
        request = MetadataRequest[version](topics=topics)
    elif version <= 5:
        request = MetadataRequest[version](
            topics=topics,
            allow_auto_topic_creation=auto_topic_creation
        )
    else:
        # BUGFIX: previously no branch assigned `request` for v6+, so the
        # send below raised UnboundLocalError. Fail with a clear error.
        raise NotImplementedError(
            "Support for MetadataRequest_v{} has not yet been added to KafkaAdminClient."
            .format(version))

    if use_controller:
        return self._send_request_to_controller(request)

    future = self._send_request_to_node(
        self._client.least_loaded_node(),
        request
    )
    self._wait_for_futures([future])
    return future.value
def list_topics(self):
    """Return the names of all topics in the cluster as a list of strings."""
    obj = self._get_cluster_metadata(topics=None).to_object()
    return [topic['topic'] for topic in obj['topics']]
def describe_topics(self, topics=None):
    """Return metadata dicts for *topics* (all topics if None).

    :param topics: A list of topic name strings, or None for all topics.
    :return: A list of topic metadata dicts.
    """
    obj = self._get_cluster_metadata(topics=topics, use_controller=True).to_object()
    return obj['topics']
def describe_cluster(self):
    """Return cluster-level metadata as a dict, excluding per-topic details."""
    cluster = self._get_cluster_metadata().to_object()
    cluster.pop('topics')  # We have 'describe_topics' for this
    return cluster
@staticmethod
def _convert_describe_acls_response_to_acls(describe_response):
    """Convert a DescribeAclsResponse into ACL objects.

    :param describe_response: a DescribeAclsResponse (v0 or v1).
    :return: ``(acl_list, error)`` where acl_list is a list of ACL objects
        and error is the decoded top-level error type.
    """
    version = describe_response.API_VERSION

    error = Errors.for_code(describe_response.error_code)
    acl_list = []
    for resources in describe_response.resources:
        if version == 0:
            resource_type, resource_name, acls = resources
            # v0 predates resource pattern types; all patterns were literal
            resource_pattern_type = ACLResourcePatternType.LITERAL.value
        elif version <= 1:
            resource_type, resource_name, resource_pattern_type, acls = resources
        else:
            raise NotImplementedError(
                "Support for DescribeAcls Response v{} has not yet been added to KafkaAdmin."
                .format(version)
            )
        for acl in acls:
            principal, host, operation, permission_type = acl
            conv_acl = ACL(
                principal=principal,
                host=host,
                operation=ACLOperation(operation),
                permission_type=ACLPermissionType(permission_type),
                resource_pattern=ResourcePattern(
                    ResourceType(resource_type),
                    resource_name,
                    ACLResourcePatternType(resource_pattern_type)
                )
            )
            acl_list.append(conv_acl)

    return (acl_list, error,)
def describe_acls(self, acl_filter):
    """Describe a set of ACLs matching *acl_filter*.

    :param acl_filter: an ACLFilter selecting which ACLs to return.
    :return: ``(acl_list, error)`` as produced by
        ``_convert_describe_acls_response_to_acls``.
    :raise: the error type decoded from the response, if any.
    """
    version = self._matching_api_version(DescribeAclsRequest)
    if version == 0:
        request = DescribeAclsRequest[version](
            resource_type=acl_filter.resource_pattern.resource_type,
            resource_name=acl_filter.resource_pattern.resource_name,
            principal=acl_filter.principal,
            host=acl_filter.host,
            operation=acl_filter.operation,
            permission_type=acl_filter.permission_type
        )
    elif version <= 1:
        # v1 adds pattern-type filtering (literal/prefixed/...)
        request = DescribeAclsRequest[version](
            resource_type=acl_filter.resource_pattern.resource_type,
            resource_name=acl_filter.resource_pattern.resource_name,
            resource_pattern_type_filter=acl_filter.resource_pattern.pattern_type,
            principal=acl_filter.principal,
            host=acl_filter.host,
            operation=acl_filter.operation,
            permission_type=acl_filter.permission_type
        )
    else:
        raise NotImplementedError(
            "Support for DescribeAcls v{} has not yet been added to KafkaAdmin."
            .format(version)
        )

    future = self._send_request_to_node(self._client.least_loaded_node(), request)
    self._wait_for_futures([future])
    response = future.value

    error_type = Errors.for_code(response.error_code)
    if error_type is not Errors.NoError:
        # optionally we could retry if error_type.retriable
        raise error_type(
            "Request '{}' failed with response '{}'."
            .format(request, response))

    return self._convert_describe_acls_response_to_acls(response)
@staticmethod
def _convert_create_acls_resource_request_v0(acl):
    """Wire-format tuple for one ACL in a CreateAclsRequest_v0 (no pattern type)."""
    pattern = acl.resource_pattern
    return (
        pattern.resource_type,
        pattern.resource_name,
        acl.principal,
        acl.host,
        acl.operation,
        acl.permission_type
    )
@staticmethod
def _convert_create_acls_resource_request_v1(acl):
    """Wire-format tuple for one ACL in a CreateAclsRequest_v1 (with pattern type)."""
    pattern = acl.resource_pattern
    return (
        pattern.resource_type,
        pattern.resource_name,
        pattern.pattern_type,
        acl.principal,
        acl.host,
        acl.operation,
        acl.permission_type
    )
@staticmethod
def _convert_create_acls_response_to_acls(acls, create_response):
    """Split the requested ACLs into succeeded/failed based on the response.

    :param acls: The ACL objects sent in the request, in request order.
    :param create_response: A CreateAclsResponse (v0 or v1).
    :return: dict with ``"succeeded"`` -> list of ACL and
        ``"failed"`` -> list of (ACL, error) tuples.
    """
    version = create_response.API_VERSION

    creations_error = []
    creations_success = []
    for i, creations in enumerate(create_response.creation_responses):
        if version <= 1:
            error_code, error_message = creations
            # creation_responses are positional: i-th result <-> i-th request
            acl = acls[i]
            error = Errors.for_code(error_code)
        else:
            # BUGFIX: this message previously said "DescribeAcls" — a
            # copy/paste error from the describe converter.
            raise NotImplementedError(
                "Support for CreateAcls Response v{} has not yet been added to KafkaAdmin."
                .format(version)
            )

        if error is Errors.NoError:
            creations_success.append(acl)
        else:
            creations_error.append((acl, error,))

    return {"succeeded": creations_success, "failed": creations_error}
def create_acls(self, acls):
    """Create the given ACLs on the cluster.

    :param acls: a list of ACL objects.
    :return: dict of succeeded/failed ACLs, per
        ``_convert_create_acls_response_to_acls``.
    """
    for acl in acls:
        if not isinstance(acl, ACL):
            raise IllegalArgumentError("acls must contain ACL objects")

    version = self._matching_api_version(CreateAclsRequest)
    # pick the wire-format converter matching the negotiated version
    if version == 0:
        converter = self._convert_create_acls_resource_request_v0
    elif version <= 1:
        converter = self._convert_create_acls_resource_request_v1
    else:
        raise NotImplementedError(
            "Support for CreateAcls v{} has not yet been added to KafkaAdmin."
            .format(version)
        )
    request = CreateAclsRequest[version](
        creations=[converter(acl) for acl in acls]
    )

    future = self._send_request_to_node(self._client.least_loaded_node(), request)
    self._wait_for_futures([future])
    return self._convert_create_acls_response_to_acls(acls, future.value)
@staticmethod
def _convert_delete_acls_resource_request_v0(acl):
    """Wire-format tuple for one filter in a DeleteAclsRequest_v0 (no pattern type)."""
    pattern = acl.resource_pattern
    return (
        pattern.resource_type,
        pattern.resource_name,
        acl.principal,
        acl.host,
        acl.operation,
        acl.permission_type
    )
@staticmethod
def _convert_delete_acls_resource_request_v1(acl):
    """Wire-format tuple for one filter in a DeleteAclsRequest_v1 (with pattern type)."""
    pattern = acl.resource_pattern
    return (
        pattern.resource_type,
        pattern.resource_name,
        pattern.pattern_type,
        acl.principal,
        acl.host,
        acl.operation,
        acl.permission_type
    )
@staticmethod
def _convert_delete_acls_response_to_matching_acls(acl_filters, delete_response):
    """Pair each ACLFilter with the ACLs the broker matched for deletion.

    :param acl_filters: The ACLFilter objects sent in the request, in order.
    :param delete_response: A DeleteAclsResponse (v0 or v1).
    :return: A list of ``(ACLFilter, [(ACL, acl_error), ...], filter_error)``
        tuples, one per filter.
    """
    version = delete_response.API_VERSION
    filter_result_list = []
    for i, filter_responses in enumerate(delete_response.filter_responses):
        filter_error_code, filter_error_message, matching_acls = filter_responses
        filter_error = Errors.for_code(filter_error_code)
        acl_result_list = []
        for acl in matching_acls:
            if version == 0:
                error_code, error_message, resource_type, resource_name, principal, host, operation, permission_type = acl
                # v0 predates resource pattern types; all patterns were literal
                resource_pattern_type = ACLResourcePatternType.LITERAL.value
            elif version == 1:
                error_code, error_message, resource_type, resource_name, resource_pattern_type, principal, host, operation, permission_type = acl
            else:
                # BUGFIX: this message previously said "DescribeAcls" — a
                # copy/paste error from the describe converter.
                raise NotImplementedError(
                    "Support for DeleteAcls Response v{} has not yet been added to KafkaAdmin."
                    .format(version)
                )
            acl_error = Errors.for_code(error_code)
            conv_acl = ACL(
                principal=principal,
                host=host,
                operation=ACLOperation(operation),
                permission_type=ACLPermissionType(permission_type),
                resource_pattern=ResourcePattern(
                    ResourceType(resource_type),
                    resource_name,
                    ACLResourcePatternType(resource_pattern_type)
                )
            )
            acl_result_list.append((conv_acl, acl_error,))
        filter_result_list.append((acl_filters[i], acl_result_list, filter_error,))
    return filter_result_list
def delete_acls(self, acl_filters):
    """Delete all ACLs matching any of the given filters.

    :param acl_filters: a list of ACLFilter objects.
    :return: matching-ACL results per filter, per
        ``_convert_delete_acls_response_to_matching_acls``.
    """
    for acl in acl_filters:
        if not isinstance(acl, ACLFilter):
            raise IllegalArgumentError("acl_filters must contain ACLFilter type objects")

    version = self._matching_api_version(DeleteAclsRequest)
    # pick the wire-format converter matching the negotiated version
    if version == 0:
        converter = self._convert_delete_acls_resource_request_v0
    elif version <= 1:
        converter = self._convert_delete_acls_resource_request_v1
    else:
        raise NotImplementedError(
            "Support for DeleteAcls v{} has not yet been added to KafkaAdmin."
            .format(version)
        )
    request = DeleteAclsRequest[version](
        filters=[converter(acl) for acl in acl_filters]
    )

    future = self._send_request_to_node(self._client.least_loaded_node(), request)
    self._wait_for_futures([future])
    return self._convert_delete_acls_response_to_matching_acls(acl_filters, future.value)
@staticmethod
def _convert_describe_config_resource_request(config_resource):
return (
config_resource.resource_type,
config_resource.name,
[
config_key for config_key, config_value in config_resource.configs.items()
] if config_resource.configs else None
)
def describe_configs(self, config_resources, include_synonyms=False):
# Break up requests by type - a broker config request must be sent to the specific broker.
# All other (currently just topic resources) can be sent to any broker.
broker_resources = []
topic_resources = []
for config_resource in config_resources:
if config_resource.resource_type == ConfigResourceType.BROKER:
broker_resources.append(self._convert_describe_config_resource_request(config_resource))
else:
topic_resources.append(self._convert_describe_config_resource_request(config_resource))
futures = []
version = self._matching_api_version(DescribeConfigsRequest)
if version == 0:
if include_synonyms:
raise IncompatibleBrokerVersion(
"include_synonyms requires DescribeConfigsRequest >= v1, which is not supported by Kafka {}."
.format(self.config['api_version']))
if len(broker_resources) > 0:
for broker_resource in broker_resources:
try:
broker_id = int(broker_resource[1])
except ValueError:
raise ValueError("Broker resource names must be an integer or a string represented integer")
futures.append(self._send_request_to_node(
broker_id,
DescribeConfigsRequest[version](resources=[broker_resource])
))
if len(topic_resources) > 0:
futures.append(self._send_request_to_node(
self._client.least_loaded_node(),
DescribeConfigsRequest[version](resources=topic_resources)
))
elif version <= 2:
if len(broker_resources) > 0:
for broker_resource in broker_resources:
try:
broker_id = int(broker_resource[1])
except ValueError:
raise ValueError("Broker resource names must be an integer or a string represented integer")
futures.append(self._send_request_to_node(
broker_id,
DescribeConfigsRequest[version](
resources=[broker_resource],
include_synonyms=include_synonyms)
))
if len(topic_resources) > 0:
futures.append(self._send_request_to_node(
self._client.least_loaded_node(),
DescribeConfigsRequest[version](resources=topic_resources, include_synonyms=include_synonyms)
))
else:
raise NotImplementedError(
"Support for DescribeConfigs v{} has not yet been added to KafkaAdminClient.".format(version))
self._wait_for_futures(futures)
return [f.value for f in futures]
@staticmethod
def _convert_alter_config_resource_request(config_resource):
return (
config_resource.resource_type,
config_resource.name,
[
(config_key, config_value) for config_key, config_value in config_resource.configs.items()
]
)
    def alter_configs(self, config_resources):
        """Alter configuration parameters of one or more Kafka resources.

        NOTE(review): as flagged in the TODO below, broker resources should
        each be sent to their own broker; everything is currently sent to the
        least loaded node - verify before relying on broker-level alters.

        :param config_resources: a list of ConfigResource objects to alter.
        :return: the AlterConfigsResponse received from the broker.
        """
        version = self._matching_api_version(AlterConfigsRequest)
        if version <= 1:
            request = AlterConfigsRequest[version](
                resources=[self._convert_alter_config_resource_request(config_resource) for config_resource in config_resources]
            )
        else:
            raise NotImplementedError(
                "Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # TODO the Java client has the note:
        # // We must make a separate AlterConfigs request for every BROKER resource we want to alter
        # // and send the request to that specific broker. Other resources are grouped together into
        # // a single request that may be sent to any broker.
        #
        # So this is currently broken as it always sends to the least_loaded_node()
        future = self._send_request_to_node(self._client.least_loaded_node(), request)
        self._wait_for_futures([future])
        response = future.value
        return response
# alter replica logs dir protocol not yet implemented
# Note: have to lookup the broker with the replica assignment and send the request to that broker
# describe log dirs protocol not yet implemented
# Note: have to lookup the broker with the replica assignment and send the request to that broker
@staticmethod
def _convert_create_partitions_request(topic_name, new_partitions):
return (
topic_name,
(
new_partitions.total_count,
new_partitions.new_assignments
)
)
    def create_partitions(self, topic_partitions, timeout_ms=None, validate_only=False):
        """Create additional partitions for existing topics.

        :param topic_partitions: dict mapping topic name -> NewPartitions.
        :param timeout_ms: broker-side wait in milliseconds; None applies the
            client's default (via _validate_timeout).
        :param validate_only: if True, the broker only validates the request
            without creating the partitions.
        :return: the CreatePartitionsResponse from the controller.
        """
        version = self._matching_api_version(CreatePartitionsRequest)
        timeout_ms = self._validate_timeout(timeout_ms)
        if version <= 1:
            request = CreatePartitionsRequest[version](
                topic_partitions=[self._convert_create_partitions_request(topic_name, new_partitions) for topic_name, new_partitions in topic_partitions.items()],
                timeout=timeout_ms,
                validate_only=validate_only
            )
        else:
            raise NotImplementedError(
                "Support for CreatePartitions v{} has not yet been added to KafkaAdminClient."
                .format(version))
        # Partition creation must go through the cluster controller.
        return self._send_request_to_controller(request)
# delete records protocol not yet implemented
# Note: send the request to the partition leaders
# create delegation token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# renew delegation token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# expire delegation_token protocol not yet implemented
# Note: send the request to the least_loaded_node()
# describe delegation_token protocol not yet implemented
# Note: send the request to the least_loaded_node()
    def _describe_consumer_groups_send_request(self, group_id, group_coordinator_id, include_authorized_operations=False):
        """Send a DescribeGroups request for a single group to its
        coordinator and return the response future.

        :raises IncompatibleBrokerVersion: if include_authorized_operations
            is requested but the negotiated API version is below v3.
        """
        version = self._matching_api_version(DescribeGroupsRequest)
        if version <= 2:
            if include_authorized_operations:
                raise IncompatibleBrokerVersion(
                    "include_authorized_operations requests "
                    "DescribeGroupsRequest >= v3, which is not "
                    "supported by Kafka {}".format(version)
                )
            # Note: KAFKA-6788 A potential optimization is to group the
            # request per coordinator and send one request with a list of
            # all consumer groups. Java still hasn't implemented this
            request = DescribeGroupsRequest[version](groups=(group_id,))
        elif version <= 3:
            request = DescribeGroupsRequest[version](
                groups=(group_id,),
                include_authorized_operations=include_authorized_operations
            )
        else:
            raise NotImplementedError(
                "Support for DescribeGroupsRequest_v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_node(group_coordinator_id, request)
    def _describe_consumer_groups_process_response(self, response):
        """Extract the single group description from a DescribeGroups
        response, raising the mapped Kafka error if one was returned.

        Returns the raw protocol tuple for the group (see TODO below).
        """
        if response.API_VERSION <= 3:
            # Each request is sent for exactly one group, so exactly one
            # description is expected back.
            assert len(response.groups) == 1
            # TODO need to implement converting the response tuple into
            # a more accessible interface like a namedtuple and then stop
            # hardcoding tuple indices here. Several Java examples,
            # including KafkaAdminClient.java
            group_description = response.groups[0]
            error_code = group_description[0]
            error_type = Errors.for_code(error_code)
            # Java has the note: KAFKA-6789, we can retry based on the error code
            if error_type is not Errors.NoError:
                raise error_type(
                    "DescribeGroupsResponse failed with response '{}'."
                    .format(response))
            # TODO Java checks the group protocol type, and if consumer
            # (ConsumerProtocol.PROTOCOL_TYPE) or empty string, it decodes
            # the members' partition assignments... that hasn't yet been
            # implemented here so just return the raw struct results
        else:
            raise NotImplementedError(
                "Support for DescribeGroupsResponse_v{} has not yet been added to KafkaAdminClient."
                .format(response.API_VERSION))
        return group_description
def describe_consumer_groups(self, group_ids, group_coordinator_id=None, include_authorized_operations=False):
group_descriptions = []
futures = []
for group_id in group_ids:
if group_coordinator_id is not None:
this_groups_coordinator_id = group_coordinator_id
else:
this_groups_coordinator_id = self._find_coordinator_id(group_id)
f = self._describe_consumer_groups_send_request(
group_id,
this_groups_coordinator_id,
include_authorized_operations)
futures.append(f)
self._wait_for_futures(futures)
for future in futures:
response = future.value
group_description = self._describe_consumer_groups_process_response(response)
group_descriptions.append(group_description)
return group_descriptions
    def _list_consumer_groups_send_request(self, broker_id):
        """Send a ListGroups request to a single broker and return the
        response future."""
        version = self._matching_api_version(ListGroupsRequest)
        if version <= 2:
            request = ListGroupsRequest[version]()
        else:
            raise NotImplementedError(
                "Support for ListGroupsRequest_v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_node(broker_id, request)
    def _list_consumer_groups_process_response(self, response):
        """Return the groups listed in a ListGroups response, raising the
        mapped Kafka error if the response carries one."""
        if response.API_VERSION <= 2:
            error_type = Errors.for_code(response.error_code)
            if error_type is not Errors.NoError:
                raise error_type(
                    "ListGroupsRequest failed with response '{}'."
                    .format(response))
        else:
            raise NotImplementedError(
                "Support for ListGroupsResponse_v{} has not yet been added to KafkaAdminClient."
                .format(response.API_VERSION))
        return response.groups
def list_consumer_groups(self, broker_ids=None):
# While we return a list, internally use a set to prevent duplicates
# because if a group coordinator fails after being queried, and its
# consumer groups move to new brokers that haven't yet been queried,
consumer_groups = set()
if broker_ids is None:
broker_ids = [broker.nodeId for broker in self._client.cluster.brokers()]
futures = [self._list_consumer_groups_send_request(b) for b in broker_ids]
self._wait_for_futures(futures)
for f in futures:
response = f.value
consumer_groups.update(self._list_consumer_groups_process_response(response))
return list(consumer_groups)
    def _list_consumer_group_offsets_send_request(self, group_id,
            group_coordinator_id, partitions=None):
        """Send an OffsetFetch request for one group to its coordinator and
        return the response future.

        :param partitions: iterable of TopicPartition-like (topic, partition)
            pairs to fetch, or None for all partitions (brokers >= 0.10.2).
        """
        version = self._matching_api_version(OffsetFetchRequest)
        if version <= 3:
            if partitions is None:
                if version <= 1:
                    raise ValueError(
                        """OffsetFetchRequest_v{} requires specifying the
                        partitions for which to fetch offsets. Omitting the
                        partitions is only supported on brokers >= 0.10.2.
                        For details, see KIP-88.""".format(version))
                topics_partitions = None
            else:
                # Group the requested partitions by topic for the wire format.
                topics_partitions_dict = defaultdict(set)
                for topic, partition in partitions:
                    topics_partitions_dict[topic].add(partition)
                topics_partitions = list(six.iteritems(topics_partitions_dict))
            request = OffsetFetchRequest[version](group_id, topics_partitions)
        else:
            raise NotImplementedError(
                "Support for OffsetFetchRequest_v{} has not yet been added to KafkaAdminClient."
                .format(version))
        return self._send_request_to_node(group_coordinator_id, request)
    def _list_consumer_group_offsets_process_response(self, response):
        """Decode an OffsetFetch response into a dict of
        {TopicPartition: OffsetAndMetadata}, raising any group-level or
        partition-level Kafka error."""
        if response.API_VERSION <= 3:
            # The group-level error_code field only exists from v2 onwards.
            if response.API_VERSION > 1:
                error_type = Errors.for_code(response.error_code)
                if error_type is not Errors.NoError:
                    raise error_type(
                        "OffsetFetchResponse failed with response '{}'."
                        .format(response))
            offsets = {}
            for topic, partitions in response.topics:
                for partition, offset, metadata, error_code in partitions:
                    error_type = Errors.for_code(error_code)
                    if error_type is not Errors.NoError:
                        raise error_type(
                            "Unable to fetch consumer group offsets for topic {}, partition {}"
                            .format(topic, partition))
                    offsets[TopicPartition(topic, partition)] = OffsetAndMetadata(offset, metadata)
        else:
            raise NotImplementedError(
                "Support for OffsetFetchResponse_v{} has not yet been added to KafkaAdminClient."
                .format(response.API_VERSION))
        return offsets
def list_consumer_group_offsets(self, group_id, group_coordinator_id=None,
partitions=None):
if group_coordinator_id is None:
group_coordinator_id = self._find_coordinator_id(group_id)
future = self._list_consumer_group_offsets_send_request(
group_id, group_coordinator_id, partitions)
self._wait_for_futures([future])
response = future.value
return self._list_consumer_group_offsets_process_response(response)
def _wait_for_futures(self, futures):
while not all(future.succeeded() for future in futures):
for future in futures:
self._client.poll(future=future)
if future.failed():
raise future.exception # pylint: disable-msg=raising-bad-type
| true | true |
f71d9f5f3241fcead0f224c1faf1af11464dc292 | 1,236 | py | Python | feincms/module/extensions/seo.py | barseghyanartur/feincms | 7a319f78c5a5cd0c60e6141d1221f3c7a0d4a7f0 | [
"BSD-3-Clause"
] | 1 | 2016-05-07T11:50:28.000Z | 2016-05-07T11:50:28.000Z | feincms/module/extensions/seo.py | barseghyanartur/feincms | 7a319f78c5a5cd0c60e6141d1221f3c7a0d4a7f0 | [
"BSD-3-Clause"
] | null | null | null | feincms/module/extensions/seo.py | barseghyanartur/feincms | 7a319f78c5a5cd0c60e6141d1221f3c7a0d4a7f0 | [
"BSD-3-Clause"
] | 1 | 2019-03-15T19:36:34.000Z | 2019-03-15T19:36:34.000Z | """
Add a keyword and a description field which are helpful for SEO optimization.
"""
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
class Extension(extensions.Extension):
    """FeinCMS extension adding SEO meta keyword/description fields to the
    model and exposing them in the model admin."""

    def handle_model(self):
        """Attach the two free-text SEO fields to the extended model."""
        self.model.add_to_class('meta_keywords', models.TextField(
            _('meta keywords'),
            blank=True,
            help_text=_('Keywords are ignored by most search engines.')))
        self.model.add_to_class('meta_description', models.TextField(
            _('meta description'),
            blank=True,
            help_text=_('This text is displayed on the search results page. '
                'It is however not used for the SEO ranking. '
                'Text longer than 140 characters is truncated.')))

    def handle_modeladmin(self, modeladmin):
        """Make the SEO fields searchable and group them in a collapsed
        admin fieldset."""
        modeladmin.extend_list(
            'search_fields',
            ['meta_keywords', 'meta_description'],
        )
        modeladmin.add_extension_options(_('Search engine optimization'), {
            'fields': ('meta_keywords', 'meta_description'),
            'classes': ('collapse',),
        })
| 34.333333 | 77 | 0.636731 |
from __future__ import absolute_import, unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from feincms import extensions
class Extension(extensions.Extension):
    """SEO extension: adds meta keyword/description fields and admin support."""

    def handle_model(self):
        # Add the two optional SEO text fields to the extended model.
        self.model.add_to_class('meta_keywords', models.TextField(
            _('meta keywords'),
            blank=True,
            help_text=_('Keywords are ignored by most search engines.')))
        self.model.add_to_class('meta_description', models.TextField(
            _('meta description'),
            blank=True,
            help_text=_('This text is displayed on the search results page. '
                'It is however not used for the SEO ranking. '
                'Text longer than 140 characters is truncated.')))

    def handle_modeladmin(self, modeladmin):
        # Expose the SEO fields in admin search and a collapsed fieldset.
        modeladmin.extend_list(
            'search_fields',
            ['meta_keywords', 'meta_description'],
        )
        modeladmin.add_extension_options(_('Search engine optimization'), {
            'fields': ('meta_keywords', 'meta_description'),
            'classes': ('collapse',),
        })
| true | true |
f71da02dc1dd8dc3b72ea75a423abc1e89aa3bf5 | 293 | py | Python | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/bx/pwm/_position_weight_matrix.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/bx/pwm/_position_weight_matrix.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs4.egg/bx/pwm/_position_weight_matrix.py | bopopescu/phyG | 023f505b705ab953f502cbc55e90612047867583 | [
"CC-BY-3.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_position_weight_matrix.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| 36.625 | 84 | 0.795222 | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_position_weight_matrix.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| true | true |
f71da18676c625d83bf9f11496ad2774d4ad9b06 | 348 | py | Python | main.py | filiptronicek/github-rank-action | d3f675233ccc26162cf0b70e825cd717f3b2b1d3 | [
"MIT"
] | 1 | 2020-07-22T21:46:10.000Z | 2020-07-22T21:46:10.000Z | main.py | filiptronicek/github-rank-action | d3f675233ccc26162cf0b70e825cd717f3b2b1d3 | [
"MIT"
] | null | null | null | main.py | filiptronicek/github-rank-action | d3f675233ccc26162cf0b70e825cd717f3b2b1d3 | [
"MIT"
] | null | null | null | import requests, json
from sys import argv
from modify import write
# Country leaderboard slug queried on the top-ghusers API.
country = "czech_republic"
url = f"https://top-ghusers.vercel.app/api?c={country}"
# Fetch the ranking JSON for the configured country.
responce = requests.get(url)
resJson = json.loads(responce.text)
# argv[1] is the GitHub username to look up; persist and echo its rank.
for user in resJson['users']:
    if user['user']['username'] == argv[1]:
        write(user['rank'])
print(user['rank']) | 24.857143 | 55 | 0.678161 | import requests, json
from sys import argv
from modify import write
country = "czech_republic"
url = f"https://top-ghusers.vercel.app/api?c={country}"
responce = requests.get(url)
resJson = json.loads(responce.text)
for user in resJson['users']:
if user['user']['username'] == argv[1]:
write(user['rank'])
print(user['rank']) | true | true |
f71da18e1e0bbfc1e0daede493d7642be9502b6b | 18,868 | py | Python | src/msc/datagroups/__init__.py | magicbadger/python-dabmsc | fe4f2e195ffe0b0d75f4766893df687563e857a1 | [
"Apache-2.0"
] | null | null | null | src/msc/datagroups/__init__.py | magicbadger/python-dabmsc | fe4f2e195ffe0b0d75f4766893df687563e857a1 | [
"Apache-2.0"
] | null | null | null | src/msc/datagroups/__init__.py | magicbadger/python-dabmsc | fe4f2e195ffe0b0d75f4766893df687563e857a1 | [
"Apache-2.0"
] | 2 | 2021-10-15T01:29:38.000Z | 2022-03-22T06:05:35.000Z | from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError, generate_transport_id
from mot import DirectoryEncoder, SortedHeaderInformation
from bitarray import bitarray
import logging
import types
import itertools
logger = logging.getLogger('msc.datagroups')
MAX_SEGMENT_SIZE=8189 # maximum data segment size in bytes
# datagroup types
HEADER = 3
BODY = 4
DIRECTORY_UNCOMPRESSED = 6
DIRECTORY_COMPRESSED = 7
class SegmentingStrategy:
    """Abstract strategy deciding how large each successive segment should be
    when a byte string is partitioned by _segment()."""
    def get_next_segment_size(self, data, position, segments):
        """Returns the suggested maximum size of the next segment"""
        raise NotImplementedError('strategy has not been implemented properly - expected method: get_next_segment_size(self, data, position, segments)')
class ConstantSegmentSize(SegmentingStrategy):
    """Strategy to ensure that each segment is the same size, apart
    from the last one, which may be smaller"""
    def __init__(self, maximum_segment_size=MAX_SEGMENT_SIZE):
        # fixed size in bytes suggested for every segment
        self.maximum_segment_size = maximum_segment_size
    def get_next_segment_size(self, data, position, segments):
        """Always suggest the configured size; _segment() clips the final
        segment to the remaining data length."""
        return self.maximum_segment_size
class CompletionTriggerSegmentingStrategy(SegmentingStrategy):
    """Strategy to ensure the last datagroup is small enough to be held within a single packet
    for triggering via the completion of the total set of datagroups.
    This is to enable synchronised imagery"""
    def __init__(self, target_final_segment_size, maximum_segment_size=MAX_SEGMENT_SIZE, ):
        if target_final_segment_size > maximum_segment_size: raise ValueError('target final segment size must be less than the maximum segment size')
        self.maximum_segment_size = maximum_segment_size
        # # calculate the estimated final segment size from parameters
        # estimated_final_segment_size = target_final_packet_size
        # estimated_final_segment_size -= 2 # packet CRC
        # estimated_final_segment_size -= 3 # packet header
        # estimated_final_segment_size -= 2 # datagroup CRC
        # estimated_final_segment_size -= 7 # datagroup header (typical minimal config)
        self.target_final_segment_size = target_final_segment_size
    def calculate_segment_sizes(self, length):
        # need to try for the exact target final segment size, or less
        # with equal sizes of the preceding segments - therefore they
        # will need to be exactly fitting
        # NOTE(review): X is not reset at the top of each Y iteration, so
        # after the first pass the inner loop never runs again; the method
        # can also fall off the end and implicitly return None, which would
        # break the tuple unpacking in get_next_segment_size - confirm.
        X = self.maximum_segment_size
        Y = self.target_final_segment_size
        while Y > 0:
            while X > 0:
                if (length - Y + 2) % X == 0:
                    return X, Y
                X -= 1
            Y -= 1
    def get_next_segment_size(self, data, position, segments):
        if not len(segments): # no calculation done yet
            X, Y = self.calculate_segment_sizes(len(data))
        else:
            X = len(segments[0]) - 2
            n = 1
            # NOTE(review): 'n' is incremented below but never re-enters the
            # computation of Y, so this while loop cannot terminate once
            # entered; under Python 3, '/' also makes Y a float - verify
            # the intended formula before relying on this branch.
            Y = (len(data) / X) % n - 2
            while Y > self.target_final_segment_size:
                n += 1
        if len(data) - position > Y: return X
        else: return Y
def _segment(data, strategy):
    """Partition *data* (a byte string) into MSC segments.

    Each returned element is a 2-byte segment header (repetition count plus
    13-bit segment size) followed by the segment payload. Segment sizes are
    chosen by the supplied SegmentingStrategy. Returns an empty list for
    falsy input.
    """
    segments = []
    # partition the segments up using the maximum segment size
    i = 0
    if not data: return segments
    while i < len(data):
        segment_size = strategy.get_next_segment_size(data, i, segments)
        # get segment data (the conditional clamps the slice end to len(data))
        segment_data = data[i:i+segment_size if i+segment_size < len(data) else len(data)]
        # segment header
        bits = bitarray()
        bits += int_to_bitarray(0, 3) # (0-2): Repetition Count remaining (0 = only broadcast)
        bits += int_to_bitarray(len(segment_data), 13) # (3-16): SegmentSize
        segments.append((bits.tobytes()) + segment_data)
        i += segment_size
    return segments;
def encode_headermode(objects, segmenting_strategy=None):
    """
    Encode a set of MOT Objects into header mode segments

    :param objects: one MOT object or a list of them; each contributes a
        set of header datagroups followed by a set of body datagroups
    :param segmenting_strategy: optional SegmentingStrategy, defaults to
        ConstantSegmentSize()
    :return: list of Datagroup objects
    """
    datagroups = []
    if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize()
    # backward compatibility
    if not isinstance(objects, list): objects = [objects]
    logger.debug('encoding %d MOT objects to header mode datagroups', len(objects))
    for object in objects:
        if not object: raise ValueError('object returned is null')
        # split body data into segments
        body_data = object.get_body()
        body_segments = _segment(body_data, segmenting_strategy)
        # encode header extension parameters
        extension_bits = bitarray()
        for parameter in object.get_parameters():
            extension_bits += parameter.encode()
        # insert the core parameters into the header
        bits = bitarray()
        bits += int_to_bitarray(len(body_data) if body_data else 0, 28) # (0-27): BodySize in bytes
        # NOTE(review): '/' is floor division under Python 2 only; under
        # Python 3 this yields a float - confirm the target runtime.
        bits += int_to_bitarray(len(extension_bits) / 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension)
        bits += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType
        bits += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType
        bits += extension_bits # (56-n): Header extension data
        header_segments = _segment(bits.tobytes(), segmenting_strategy)
        # add header datagroups
        for i, segment in enumerate(header_segments):
            header_group = Datagroup(object.get_transport_id(), HEADER, segment, i, i%16, last=True if i == len(header_segments) - 1 else False)
            datagroups.append(header_group)
        # add body datagroups
        for i, segment in enumerate(body_segments):
            body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(body_segments) - 1 else False)
            datagroups.append(body_group)
    return datagroups;
def encode_directorymode(objects, directory_parameters=None, segmenting_strategy=None):
    """
    Encode a set of MOT objects into directory mode segments, along with a
    segmented directory object.

    :param objects: MOT objects; each contributes one directory entry and a
        set of body datagroups
    :param directory_parameters: optional list of directory-level parameters
        encoded into the directory extension field
    :param segmenting_strategy: optional SegmentingStrategy, defaults to
        ConstantSegmentSize()
    :return: list of Datagroup objects - directory datagroups (under a fresh
        transport id) first, then the body datagroups of each object
    """
    datagroups = []
    if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize()
    # build the directory entries
    entries = bitarray()
    for object in objects:
        # encode header extension parameters
        extension_bits = bitarray()
        for parameter in object.get_parameters():
            extension_bits += parameter.encode()
        # transport ID in first 2 bytes
        entries += int_to_bitarray(object.get_transport_id(), 16)
        # add the core parameters into the header
        # ('//' keeps the division integral under both Python 2 and 3)
        entries += int_to_bitarray(len(object.get_body()), 28) # (0-27): BodySize in bytes
        entries += int_to_bitarray(len(extension_bits) // 8 + 7, 13) # (28-40): HeaderSize in bytes (core=7 + extension)
        entries += int_to_bitarray(object.get_type().type, 6) # (41-46): ContentType
        entries += int_to_bitarray(object.get_type().subtype, 9) # (47-55): ContentSubType
        entries += extension_bits # (56-n): Header extension data
    # build directory parameters
    directory_params = bitarray()
    if directory_parameters is not None:
        for parameter in directory_parameters:
            directory_params += parameter.encode()
    # build directory header
    bits = bitarray()
    bits += bitarray('0') # (0): CompressionFlag: This bit shall be set to 0
    bits += bitarray('0') # (1): RFU
    bits += int_to_bitarray(len(entries.tobytes()) + 13 + len(directory_params.tobytes()), 30) # (2-31): DirectorySize: total size of the MOT directory in bytes, including the 13 header bytes and length of the directory parameter bytes
    bits += int_to_bitarray(len(objects), 16) # (32-47): NumberOfObjects: Total number of objects described by the directory
    bits += int_to_bitarray(0, 24) # (48-71): DataCarouselPeriod: Max time in tenths of seconds for the data carousel to complete a cycle. Value of zero for undefined
    bits += bitarray('000') # (72-74): RFU
    bits += int_to_bitarray(0, 13) # (75-87): SegmentSize: Size in bytes that will be used for the segmentation of objects within the MOT carousel. Value of zero indicates that objects can have different segmentation sizes. The last segment of an obect may be smaller than this size.
    bits += int_to_bitarray(len(directory_params.tobytes()), 16) # (88-103): DirectoryExtensionLength: Length of following directory extension bytes
    # add directory parameters
    bits += directory_params
    # add directory entries
    bits += entries
    # segment and add directory datagroups with a new transport ID
    directory_transport_id = generate_transport_id()
    segments = _segment(bits.tobytes(), segmenting_strategy)
    for i, segment in enumerate(segments):
        # (dead double-serialisation of each datagroup removed here)
        header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False)
        datagroups.append(header_group)
    # add body datagroups
    for object in objects:
        segments = _segment(object.get_body(), segmenting_strategy)
        for i, segment in enumerate(segments):
            body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(segments) - 1 else False)
            datagroups.append(body_group)
    return datagroups
import select
def read(fd, n = 1):
    """Block until *fd* is readable, then read up to *n* bytes from it.

    Returns None if the poll result carries no readable event.
    """
    poll = select.poll()
    # Event masks are bit flags and must be combined with bitwise OR;
    # the previous boolean 'or' registered for POLLIN only.
    poll.register(fd.fileno(), select.POLLIN | select.POLLPRI)
    p = poll.poll()
    if len(p):
        f = p[0]
        # f is a (fd, eventmask) pair; a non-zero mask means data is ready.
        if f[1] > 0:
            return fd.read(n)
def decode_datagroups(data, error_callback=None, check_crc=True, resync=True):
    """
    Generator function to decode datagroups from a bitstream

    The bitstream may be presented as either a bitarray, a file object or a
    generator of packets.

    :param data: bitarray, file object or generator yielding packet objects
    :param error_callback: optional callable invoked with any
        IncompleteDatagroupError/InvalidCrcError hit while decoding
    :param check_crc: verify each datagroup CRC when True
    :param resync: currently unused; after a CRC failure the buffer is
        always advanced by one byte to attempt resynchronisation
    """
    if isinstance(data, bitarray):
        i = 0
        while i < len(data):
            datagroup = Datagroup.frombits(data, i=i, check_crc=check_crc)
            yield datagroup
            i += (datagroup.size * 8)
    # NOTE(review): 'file' is the Python 2 builtin; this branch is
    # unreachable under Python 3 - confirm the target runtime.
    elif isinstance(data, file):
        logger.debug('decoding datagroups from file: %s', data)
        buf = bitarray()
        reading = True
        while reading:
            try:
                r = data.read(8)
                buf.frombytes(r)
            except Exception:  # narrowed from bare except: keep Ctrl-C working
                reading = False
                logger.exception("error")
            if not len(buf):
                logger.debug('buffer is at zero length')
                return
            i = 0
            length = len(buf) // 8
            if length < 9:
                continue
            size = int(buf[59:72].to01(), 2)
            if length < size:
                continue
            while i < len(buf):
                try:
                    datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc)
                    yield datagroup
                    i = (datagroup.size * 8)
                    buf = buf[i:]
                except IncompleteDatagroupError:
                    break
                # fixed from 'except (InvalidCrcError, ice):', which would
                # raise NameError instead of binding the exception
                except InvalidCrcError as ice:
                    if error_callback: error_callback(ice)
                    buf = buf[8:] # attempt to resync by skipping one byte
    elif isinstance(data, types.GeneratorType):
        logger.debug('decoding datagroups from generator: %s', data)
        buf = bitarray()
        i = 0
        in_packet = False
        for p in data:
            # accumulate packet payloads from a 'first' marker to a 'last'
            if not in_packet and p.first:
                in_packet = True
            elif not in_packet: continue
            buf.frombytes(p.data)
            if p.last:
                logger.debug('got packet %s - buffer now %d bytes', p, len(buf) // 8)
                try:
                    datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc)
                    logger.debug('yielding datagroup: %s', datagroup)
                    yield datagroup
                # fixed from the broken 'except (Error, name):' 2-tuple form
                except IncompleteDatagroupError as ide:
                    if error_callback: error_callback(ide)
                except InvalidCrcError as ice:
                    if error_callback: error_callback(ice)
                del buf
                buf = bitarray()
                in_packet = False
class IncompleteDatagroupError(Exception):
    """Raised when a buffer ends before a complete datagroup can be parsed."""
    pass
class PaddingDatagroup:
    """Placeholder datagroup carrying only a delay value.

    NOTE(review): no consumer of this class is visible in this file -
    presumably used by a packet scheduler elsewhere; confirm before removal.
    """
    def __init__(self, delay=0):
        self.delay = delay
class Datagroup:
    """A single MSC datagroup: one segment of an MOT object (or directory)
    together with its datagroup/session headers and optional CRC."""

    def __init__(self, transport_id, type, data, segment_index, continuity, crc_enabled=True, repetition=0, last=False):
        self._transport_id = transport_id
        self._type = type
        self._data = data
        self.crc_enabled = crc_enabled
        self.continuity = continuity
        self.repetition = repetition
        self.segment_index = segment_index
        self.last = last
        # encoded datagroup size for chunking = [dg header] + [segment header] + [data] + [crc]
        self.size = 7 + 2 + len(self._data) + 2

    def __eq__(self, other):
        if not isinstance(other, Datagroup): return False
        return self.get_transport_id() == other.get_transport_id() and self.get_type() == other.get_type() and self.segment_index == other.segment_index

    def __hash__(self):
        # Consistent with __eq__; also restores hashability under Python 3,
        # where defining __eq__ alone would disable it.
        return hash((self._transport_id, self._type, self.segment_index))

    def get_transport_id(self):
        return self._transport_id

    def get_type(self):
        return self._type

    def get_data(self):
        return self._data

    def tobytes(self):
        """Serialise the datagroup: header, session header, data, CRC."""
        bits = bitarray()
        # datagroup header
        bits += bitarray('0') # (0): ExtensionFlag - 0=no extension
        bits += bitarray('1' if self.crc_enabled else '0') # (1): CrcFlag - true if there is a CRC at the end of the datagroup
        bits += bitarray('1') # (2): SegmentFlag - 1=segment header included
        bits += bitarray('1') # (3): UserAccessFlag - true
        bits += int_to_bitarray(self._type, 4) # (4-7): DataGroupType
        bits += int_to_bitarray(self.continuity % 16, 4) # (8-11): ContinuityIndex
        bits += int_to_bitarray(self.repetition, 4) # (12-15): RepetitionIndex - remaining = 0 (only this once)
        # session header
        # segment field
        bits += bitarray('1' if self.last else '0') # (16): Last - true if the last segment
        bits += int_to_bitarray(self.segment_index, 15) # (17-32): SegmentNumber
        # user access field
        bits += bitarray('000') # (33-35): RFA
        bits += bitarray('1') # (36): TransportId - true to include Transport ID
        bits += int_to_bitarray(2, 4) # (37-40): LengthIndicator - length of transport Id and End user address fields (will be 2 bytes as only transport ID defined)
        bits += int_to_bitarray(self._transport_id, 16) # (41-56) transport ID
        # data field
        tmp = bitarray()
        tmp.frombytes(self._data)
        bits += tmp
        # CRC
        crc = 0
        if self.crc_enabled: crc = calculate_crc(bits.tobytes())
        bits += int_to_bitarray(crc, 16)
        return bits.tobytes()

    @staticmethod
    def frombits(bits, i=0, check_crc=True):
        """Parse a datagroup from a bitarray, with an optional bit offset.

        Raises IncompleteDatagroupError when the buffer is too short and
        InvalidCrcError when check_crc is set and the CRC does not match.
        """
        # check we have enough header first
        if (len(bits) - i) < ((9 + 2) * 8): raise IncompleteDatagroupError
        # Apply the offset once up front: the field offsets below are
        # absolute, so previously any i > 0 would re-parse the datagroup
        # at the start of the buffer instead of the one at the offset.
        if i: bits = bits[i:]
        # datagroup header
        type = int(bits[4:8].to01(), 2)
        continuity = int(bits[8:12].to01(), 2)
        repetition = int(bits[12:16].to01(), 2)
        # session header
        # segment field
        last = bits[16]
        segment_index = int(bits[17:32].to01(), 2)
        # user access field
        transport_id = int(bits[40:56].to01(), 2)
        # data segment header
        size = int(bits[59:72].to01(), 2) # get size to check we have a complete datagroup
        if len(bits) < 72 + size * 8 + 16: raise IncompleteDatagroupError
        data = bits[72 : 72 + (size*8)]
        if check_crc:
            crc = int(bits[72 + len(data) : 72 + len(data) + 16].to01(), 2)
            calculated = calculate_crc(bits[:72+len(data)].tobytes())
            if crc != calculated: raise InvalidCrcError(crc, bits[:72+len(data) + 16].tobytes())
        datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last)
        logger.debug('parsed datagroup: %s', datagroup)
        return datagroup

    def __str__(self):
        if self._type == 3: type_description = 'MOT Header'
        elif self._type == 4: type_description = 'MOT Body'
        elif self._type == 6: type_description = 'MOT Directory (uncompressed)'
        elif self._type == 7: type_description = 'MOT Directory (compressed)'
        else: type_description = 'unknown'
        return '[segment=%d bytes], type=%d [%s], transportid=%d, segmentindex=%d, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last)

    def __repr__(self):
        return '<DataGroup: %s>' % str(self)
class DirectoryDatagroupEncoder(DirectoryEncoder):
    """Maintains a set of MOT objects and exposes their directory-mode
    datagroups as an iterator (one-shot when ``single`` is True, otherwise
    cycling forever)."""
    def __init__(self, segmenting_strategy=None, single=False):
        DirectoryEncoder.__init__(self)
        self.segmenting_strategy = segmenting_strategy
        # single=True: iterate the datagroups once; False: cycle endlessly
        self.single = single
        self.datagroups = []
        self.regenerate()
    def add(self, object):
        """Add an object to the carousel; returns False if already present."""
        if object in self.objects: return False
        self.objects.append(object)
        self.regenerate()
        return True
    def remove(self, object):
        """Remove an object from the carousel; returns False if absent."""
        if object not in self.objects: return False
        self.objects.remove(object)
        self.regenerate()
        return True
    def clear(self):
        """Remove all objects and rebuild an empty directory."""
        self.objects = []
        self.regenerate()
        return True
    def set(self, objects):
        """Replace the object set wholesale; returns False if unchanged."""
        if objects == self.objects: return False
        self.objects = objects
        self.regenerate()
        return True
    def regenerate(self):
        """called when the directory needs to regenerate"""
        self.datagroups = encode_directorymode(self.objects, directory_parameters=[SortedHeaderInformation()], segmenting_strategy=self.segmenting_strategy)
        if self.single: self.iterator = iter(self.datagroups)
        else: self.iterator = itertools.cycle(self.datagroups)
    def __iter__(self):
        # NOTE(review): returns the shared underlying iterator rather than
        # self, so iteration state is shared across callers and __next__ on
        # the encoder object is bypassed - confirm this is intended.
        return self.iterator
    def __next__(self):
        return next(self.iterator)
| 40.928416 | 283 | 0.621741 | from msc import bitarray_to_hex, int_to_bitarray, calculate_crc, InvalidCrcError, generate_transport_id
from mot import DirectoryEncoder, SortedHeaderInformation
from bitarray import bitarray
import logging
import types
import itertools
logger = logging.getLogger('msc.datagroups')
MAX_SEGMENT_SIZE=8189
HEADER = 3
BODY = 4
DIRECTORY_UNCOMPRESSED = 6
DIRECTORY_COMPRESSED = 7
class SegmentingStrategy:
def get_next_segment_size(self, data, position, segments):
raise NotImplementedError('strategy has not been implemented properly - expected method: get_next_segment_size(self, data, position, segments)')
class ConstantSegmentSize(SegmentingStrategy):
def __init__(self, maximum_segment_size=MAX_SEGMENT_SIZE):
self.maximum_segment_size = maximum_segment_size
def get_next_segment_size(self, data, position, segments):
return self.maximum_segment_size
class CompletionTriggerSegmentingStrategy(SegmentingStrategy):
def __init__(self, target_final_segment_size, maximum_segment_size=MAX_SEGMENT_SIZE, ):
if target_final_segment_size > maximum_segment_size: raise ValueError('target final segment size must be less than the maximum segment size')
self.maximum_segment_size = maximum_segment_size
X = self.maximum_segment_size
Y = self.target_final_segment_size
while Y > 0:
while X > 0:
if (length - Y + 2) % X == 0:
return X, Y
X -= 1
Y -= 1
def get_next_segment_size(self, data, position, segments):
if not len(segments):
X, Y = self.calculate_segment_sizes(len(data))
else:
X = len(segments[0]) - 2
n = 1
Y = (len(data) / X) % n - 2
while Y > self.target_final_segment_size:
n += 1
if len(data) - position > Y: return X
else: return Y
def _segment(data, strategy):
segments = []
i = 0
if not data: return segments
while i < len(data):
segment_size = strategy.get_next_segment_size(data, i, segments)
segment_data = data[i:i+segment_size if i+segment_size < len(data) else len(data)]
bits = bitarray()
bits += int_to_bitarray(0, 3)
bits += int_to_bitarray(len(segment_data), 13)
segments.append((bits.tobytes()) + segment_data)
i += segment_size
return segments;
def encode_headermode(objects, segmenting_strategy=None):
datagroups = []
if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize()
if not isinstance(objects, list): objects = [objects]
logger.debug('encoding %d MOT objects to header mode datagroups', len(objects))
for object in objects:
if not object: raise ValueError('object returned is null')
body_data = object.get_body()
body_segments = _segment(body_data, segmenting_strategy)
extension_bits = bitarray()
for parameter in object.get_parameters():
extension_bits += parameter.encode()
bits = bitarray()
bits += int_to_bitarray(len(body_data) if body_data else 0, 28)
bits += int_to_bitarray(len(extension_bits) / 8 + 7, 13)
bits += int_to_bitarray(object.get_type().type, 6)
bits += int_to_bitarray(object.get_type().subtype, 9)
bits += extension_bits
header_segments = _segment(bits.tobytes(), segmenting_strategy)
for i, segment in enumerate(header_segments):
header_group = Datagroup(object.get_transport_id(), HEADER, segment, i, i%16, last=True if i == len(header_segments) - 1 else False)
datagroups.append(header_group)
for i, segment in enumerate(body_segments):
body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(body_segments) - 1 else False)
datagroups.append(body_group)
return datagroups;
def encode_directorymode(objects, directory_parameters=None, segmenting_strategy=None):
datagroups = []
if not segmenting_strategy: segmenting_strategy=ConstantSegmentSize()
entries = bitarray()
for object in objects:
extension_bits = bitarray()
for parameter in object.get_parameters():
extension_bits += parameter.encode()
entries += int_to_bitarray(object.get_transport_id(), 16)
entries += int_to_bitarray(len(object.get_body()), 28)
entries += int_to_bitarray(len(extension_bits) / 8 + 7, 13)
entries += int_to_bitarray(object.get_type().type, 6)
entries += int_to_bitarray(object.get_type().subtype, 9)
entries += extension_bits
directory_params = bitarray()
if directory_parameters is not None:
for parameter in directory_parameters:
directory_params += parameter.encode()
bits = bitarray()
bits += bitarray('0')
bits += bitarray('0')
bits += int_to_bitarray(len(entries.tobytes()) + 13 + len(directory_params.tobytes()), 30)
bits += int_to_bitarray(len(objects), 16)
bits += int_to_bitarray(0, 24)
bits += bitarray('000')
bits += int_to_bitarray(0, 13)
bits += int_to_bitarray(len(directory_params.tobytes()), 16)
bits += directory_params
bits += entries
directory_transport_id = generate_transport_id()
segments = _segment(bits.tobytes(), segmenting_strategy)
for i, segment in enumerate(segments):
header_group = Datagroup(directory_transport_id, DIRECTORY_UNCOMPRESSED, segment, i, i%16, last=True if i == len(segments) - 1 else False)
tmp = bitarray()
tmp.frombytes(header_group.tobytes())
tmp.frombytes(header_group.tobytes())
datagroups.append(header_group)
for object in objects:
segments = _segment(object.get_body(), segmenting_strategy)
for i, segment in enumerate(segments):
body_group = Datagroup(object.get_transport_id(), BODY, segment, i, i%16, last=True if i == len(segments) - 1 else False)
datagroups.append(body_group)
return datagroups
import select
def read(fd, n = 1):
poll = select.poll()
poll.register(fd.fileno(), select.POLLIN or select.POLLPRI)
p = poll.poll()
if len(p):
f = p[0]
if f[1] > 0:
return fd.read(n)
def decode_datagroups(data, error_callback=None, check_crc=True, resync=True):
if isinstance(data, bitarray):
i = 0
while i < len(data):
datagroup = Datagroup.frombits(data, i=i, check_crc=check_crc)
yield datagroup
i += (datagroup.size * 8)
elif isinstance(data, file):
logger.debug('decoding datagroups from file: %s', data)
buf = bitarray()
reading = True
while reading:
try:
r = data.read(8)
buf.frombytes(r)
except:
reading = False
logger.exception("error")
if not len(buf):
logger.debug('buffer is at zero length')
return
i = 0
length = len(buf)/8
if length < 9:
continue
size = int(buf[59:72].to01(), 2)
if length < size:
continue
while i < len(buf):
try:
datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc)
yield datagroup
i = (datagroup.size * 8)
buf = buf[i:]
except IncompleteDatagroupError:
break
except (InvalidCrcError, ice):
if error_callback: error_callback(ice)
buf = buf[8:]
elif isinstance(data, types.GeneratorType):
logger.debug('decoding datagroups from generator: %s', data)
buf = bitarray()
i = 0
in_packet = False
for p in data:
if not in_packet and p.first:
in_packet = True
elif not in_packet: continue
buf.frombytes(p.data)
if p.last:
logger.debug('got packet %s - buffer now %d bytes', p, len(buf)/8)
try:
datagroup = Datagroup.frombits(buf, i=i, check_crc=check_crc)
logger.debug('yielding datagroup: %s', datagroup)
yield datagroup
except (IncompleteDatagroupError, ide):
if error_callback: error_callback(ide)
except (InvalidCrcError, ice):
if error_callback: error_callback(ice)
del buf
buf = bitarray()
in_packet = False
class IncompleteDatagroupError(Exception):
pass
class PaddingDatagroup:
def __init__(self, delay=0):
self.delay = delay
class Datagroup:
def __init__(self, transport_id, type, data, segment_index, continuity, crc_enabled=True, repetition=0, last=False):
self._transport_id = transport_id
self._type = type
self._data = data
self.crc_enabled = crc_enabled
self.continuity = continuity
self.repetition = repetition
self.segment_index = segment_index
self.last = last
self.size = 7 + 2 + len(self._data) + 2
def __eq__(self, other):
if not isinstance(other, Datagroup): return False
return self.get_transport_id() == other.get_transport_id() and self.get_type() == other.get_type() and self.segment_index == other.segment_index
def get_transport_id(self):
return self._transport_id
def get_type(self):
return self._type
def get_data(self):
return self._data
def tobytes(self):
bits = bitarray()
bits += bitarray('0')
bits += bitarray('1' if self.crc_enabled else '0')
bits += bitarray('1')
bits += bitarray('1')
bits += int_to_bitarray(self._type, 4)
bits += int_to_bitarray(self.continuity % 16, 4)
bits += int_to_bitarray(self.repetition, 4)
bits += bitarray('1' if self.last else '0')
bits += int_to_bitarray(self.segment_index, 15)
bits += bitarray('000')
bits += bitarray('1')
bits += int_to_bitarray(2, 4)
bits += int_to_bitarray(self._transport_id, 16)
tmp = bitarray()
tmp.frombytes(self._data)
bits += tmp
crc = 0;
if self.crc_enabled: crc = calculate_crc(bits.tobytes())
bits += int_to_bitarray(crc, 16)
return bits.tobytes()
@staticmethod
def frombits(bits, i=0, check_crc=True):
if (len(bits) - i) < ((9 + 2) * 8): raise IncompleteDatagroupError
type = int(bits[4:8].to01(), 2)
continuity = int(bits[8:12].to01(), 2)
repetition = int(bits[12:16].to01(), 2)
last = bits[16]
segment_index = int(bits[17:32].to01(), 2)
transport_id = int(bits[40:56].to01(), 2)
size = int(bits[59:72].to01(), 2)
if len(bits) < 72 + size * 8 + 16: raise IncompleteDatagroupError
data = bits[72 : 72 + (size*8)]
if check_crc:
crc = int(bits[72 + len(data) : 72 + len(data) + 16].to01(), 2)
calculated = calculate_crc(bits[:72+len(data)].tobytes())
if crc != calculated: raise InvalidCrcError(crc, bits[:72+len(data) + 16].tobytes())
datagroup = Datagroup(transport_id, type, data.tobytes(), segment_index, continuity, True, repetition, last)
logger.debug('parsed datagroup: %s', datagroup)
return datagroup
def __str__(self):
if self._type == 3: type_description = 'MOT Header'
elif self._type == 4: type_description = 'MOT Body'
elif self._type == 6: type_description = 'MOT Directory (uncompressed)'
elif self._type == 7: type_description = 'MOT Directory (compressed)'
else: type_description = 'unknown'
return '[segment=%d bytes], type=%d [%s], transportid=%d, segmentindex=%d, continuity=%d, last=%s' % (len(self._data), self._type, type_description, self._transport_id, self.segment_index, self.continuity, self.last)
def __repr__(self):
return '<DataGroup: %s>' % str(self)
class DirectoryDatagroupEncoder(DirectoryEncoder):
def __init__(self, segmenting_strategy=None, single=False):
DirectoryEncoder.__init__(self)
self.segmenting_strategy = segmenting_strategy
self.single = single
self.datagroups = []
self.regenerate()
def add(self, object):
if object in self.objects: return False
self.objects.append(object)
self.regenerate()
return True
def remove(self, object):
if object not in self.objects: return False
self.objects.remove(object)
self.regenerate()
return True
def clear(self):
self.objects = []
self.regenerate()
return True
def set(self, objects):
if objects == self.objects: return False
self.objects = objects
self.regenerate()
return True
def regenerate(self):
self.datagroups = encode_directorymode(self.objects, directory_parameters=[SortedHeaderInformation()], segmenting_strategy=self.segmenting_strategy)
if self.single: self.iterator = iter(self.datagroups)
else: self.iterator = itertools.cycle(self.datagroups)
def __iter__(self):
return self.iterator
def __next__(self):
return next(self.iterator)
| true | true |
f71da1cb4f5c3c2a1b177350a4e50eb0d6673de5 | 3,568 | py | Python | benchmark/benchmark_SingleOrigin.py | maptube/UMaaS | 0758d8352213f332546d728f3eb02411c16c97ac | [
"MIT"
] | null | null | null | benchmark/benchmark_SingleOrigin.py | maptube/UMaaS | 0758d8352213f332546d728f3eb02411c16c97ac | [
"MIT"
] | null | null | null | benchmark/benchmark_SingleOrigin.py | maptube/UMaaS | 0758d8352213f332546d728f3eb02411c16c97ac | [
"MIT"
] | null | null | null | import timeit
import os.path
import numpy as np
from math import exp, fabs
from sys import float_info
from globals import *
from utils import loadMatrix, resizeMatrix
from models.SingleOrigin import SingleOrigin
"""
Benchmarks for the Single Origin Constrained model (models/SingleOrigin.py)
All code here is lifted from the original model code and changed to be
self-contained (no setup) so that timings of various optimisations are easy.
Code here is designed to be a test of timings, NOT necessarily a test of
return values, although real data has been used wherever possible i.e. instead
of an NxN matrix containing random values, I try to load in a real matrix
instead.
"""
#modelRunsDir = '../model-runs'
#TObsFilename = 'TObs.bin' #1 mode
#CijRoadMinFilename = 'Cij_road_min.bin'
#load and init
Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
#end load and init
###############################################################################
"""
calculateCBar_slow
Mean trips calculation, straight conversion from original C# code, no python optimisation
@returns float
"""
def benchmark_calculateCBar_slow():
#main code
(M, N) = np.shape(Tij)
CNumerator = 0.0
CDenominator = 0.0
for i in range(0,N):
for j in range(0,N):
CNumerator += Tij[i, j] * cij[i, j]
CDenominator += Tij[i, j]
CBar = CNumerator / CDenominator
print("CBar=",CBar)
return CBar
###############################################################################
"""
calculateCBar_fast
Mean trips calculation, python optimised version of "_slow"
@returns float (NOTE: the return value MUST be identical to the _slow version, to prove they're functionally identical)
"""
def benchmark_calculateCBar_fast():
#load and init
Tij=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
#end load and init
#main code
CNumerator2 = np.sum(Tij*cij)
CDenominator2 = np.sum(Tij)
CBar2=CNumerator2/CDenominator2
print("CBar2=",CBar2)
return CBar2
###############################################################################
"""
This is a benchmark of the simple Python code for SingleOrigin using different matrix sizes.
It is a test for how long a single execution of the main loop takes. Timings are printed
to the console based on 1000 runs of the model code i.e. the timing you see in seconds
must be divided by 1000.
NOTE: this could take a VERY long time to run if you pass in a high number for Nfinish
"""
def benchmarkSingleOriginMatrixSizes(Nstart,Nfinish,Nstep):
print("benchmark_SingleDest running matrix Nstart=",Nstart," Nfinish=",Nfinish, " Nstep=",Nstep)
#load testing matrices
TObs1 = loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
Cij1 = loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
for N in range(Nstart,Nfinish,Nstep):
#print("TPred runModel N=",N)
#set up the model
testModel = SingleOrigin()
(TPred, secs)=testModel.benchmarkRun(1000,resizeMatrix(TObs1,N),resizeMatrix(Cij1,N),1.0)
#NOTE: timing printed to console based on 1000 iterations of the main loop in the above code
#Should not contain any setup timings - only the actual algorithm run time.
print(N,",1000,",secs) #all console logging from here - makes it nice and easy to import into excel
###############################################################################
| 35.68 | 119 | 0.668442 | import timeit
import os.path
import numpy as np
from math import exp, fabs
from sys import float_info
from globals import *
from utils import loadMatrix, resizeMatrix
from models.SingleOrigin import SingleOrigin
=loadMatrix(os.path.join(modelRunsDir,TObs31Filename))
cij=loadMatrix(os.path.join(modelRunsDir,CijRoadMinFilename))
| true | true |
f71da3dcf19d3269f76e23ee85ee43c5f96e9665 | 1,047 | py | Python | lib/surface/healthcare/consent_stores/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/healthcare/consent_stores/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/healthcare/consent_stores/__init__.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The consent stores command group for the Cloud Healthcare API CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class ConsentStores(base.Group):
"""Manage Cloud Healthcare API consent stores."""
| 36.103448 | 74 | 0.759312 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class ConsentStores(base.Group):
| true | true |
f71da4622f2c54b555de3eae3160f218b25dc007 | 4,261 | py | Python | io_scene_niftools/properties/collision.py | reddraconi/blender_nif_plugin | a4cd7545ccff952fa99bdf66dc043e00372de2ee | [
"BSD-3-Clause"
] | 1 | 2021-10-18T00:35:25.000Z | 2021-10-18T00:35:25.000Z | io_scene_niftools/properties/collision.py | luxaritas/blender_niftools_addon | 704c40d7edabea292ddb2da875fd3ff1bf354d4a | [
"BSD-3-Clause"
] | 2 | 2020-11-06T21:37:09.000Z | 2020-12-06T20:33:59.000Z | io_scene_niftools/properties/collision.py | luxaritas/blender_niftools_addon | 704c40d7edabea292ddb2da875fd3ff1bf354d4a | [
"BSD-3-Clause"
] | null | null | null | """Nif User Interface, custom nif properties store for collisions settings"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2014, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from bpy.props import (PointerProperty,
IntProperty,
BoolProperty,
EnumProperty,
FloatProperty,
)
from bpy.types import PropertyGroup
from pyffi.formats.nif import NifFormat
class CollisionProperty(PropertyGroup):
"""Group of Havok related properties, which gets attached to objects through a property pointer."""
motion_system: EnumProperty(
name='Motion System',
description='Havok Motion System settings for bhkRigidBody(t)',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionSystem._enumkeys)],
# default = 'MO_SYS_FIXED',
)
oblivion_layer: EnumProperty(
name='Oblivion Layer',
description='Mesh color, used in Editor',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.OblivionLayer._enumkeys)],
# default = 'OL_STATIC',
)
deactivator_type: EnumProperty(
name='Deactivator Type',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.DeactivatorType._enumkeys)],
)
solver_deactivation: EnumProperty(
name='Solver Deactivation',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.SolverDeactivation._enumkeys)],
)
quality_type: EnumProperty(
name='Quality Type',
description='Determines quality of motion',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionQuality._enumkeys)],
# default = 'MO_QUAL_FIXED',
)
col_filter: IntProperty(
name='Col Filter',
description='Flags for bhkRigidBody(t)',
default=0
)
max_linear_velocity: FloatProperty(
name='Max Linear Velocity',
description='Linear velocity limit for bhkRigidBody(t)',
default=0
)
max_angular_velocity: FloatProperty(
name='Max Angular Velocity',
description='Angular velocity limit for bhkRigidBody(t)',
default=0
)
export_bhklist: BoolProperty(
name='Export BHKList',
description='None',
default=False
)
use_blender_properties: BoolProperty(
name='Use Blender Properties',
description='Whether or not to export collision settings via blender properties',
default=False,
)
| 36.732759 | 103 | 0.681295 |
from bpy.props import (PointerProperty,
IntProperty,
BoolProperty,
EnumProperty,
FloatProperty,
)
from bpy.types import PropertyGroup
from pyffi.formats.nif import NifFormat
class CollisionProperty(PropertyGroup):
motion_system: EnumProperty(
name='Motion System',
description='Havok Motion System settings for bhkRigidBody(t)',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionSystem._enumkeys)],
)
oblivion_layer: EnumProperty(
name='Oblivion Layer',
description='Mesh color, used in Editor',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.OblivionLayer._enumkeys)],
)
deactivator_type: EnumProperty(
name='Deactivator Type',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.DeactivatorType._enumkeys)],
)
solver_deactivation: EnumProperty(
name='Solver Deactivation',
description='Motion deactivation setting',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.SolverDeactivation._enumkeys)],
)
quality_type: EnumProperty(
name='Quality Type',
description='Determines quality of motion',
items=[(item, item, "", i) for i, item in enumerate(NifFormat.MotionQuality._enumkeys)],
)
col_filter: IntProperty(
name='Col Filter',
description='Flags for bhkRigidBody(t)',
default=0
)
max_linear_velocity: FloatProperty(
name='Max Linear Velocity',
description='Linear velocity limit for bhkRigidBody(t)',
default=0
)
max_angular_velocity: FloatProperty(
name='Max Angular Velocity',
description='Angular velocity limit for bhkRigidBody(t)',
default=0
)
export_bhklist: BoolProperty(
name='Export BHKList',
description='None',
default=False
)
use_blender_properties: BoolProperty(
name='Use Blender Properties',
description='Whether or not to export collision settings via blender properties',
default=False,
)
| true | true |
f71da48478c4aa6e386c6265a0ace72a8f997f99 | 2,394 | py | Python | month05/Spider/day07_course/day07_code/Tencent-demo/Tencent/spiders/tencent.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | 4 | 2021-01-07T14:25:15.000Z | 2021-02-01T10:36:10.000Z | month05/Spider/day07_course/day07_code/Tencent-demo/Tencent/spiders/tencent.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | null | null | null | month05/Spider/day07_course/day07_code/Tencent-demo/Tencent/spiders/tencent.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import requests
from ..items import TencentItem
import json
class TencentSpider(scrapy.Spider):
name = 'tencent'
allowed_domains = ['careers.tencent.com']
one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1608216394591&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword={}&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1608216394591&postId={}&language=zh-cn'
keyword = input('请输入关键字:')
# 某个类别下第一页的url地址
start_urls = [one_url.format(keyword, 1)]
def get_total(self):
"""获取某个类别下的总页数"""
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}
html = requests.get(url=self.one_url.format(self.keyword, 1), headers=headers).json()
count = html['Data']['Count']
total = count // 10 if count % 10 == 0 else count // 10 + 1
return total
def parse(self, response):
"""生成所有页的一级页面的url地址,交给调度器入队列"""
total = self.get_total()
for index in range(1, total + 1):
page_url = self.one_url.format(self.keyword, index)
# dont_filter: 让交给调度器的此请求不参与去重
yield scrapy.Request(url=page_url, dont_filter=True, callback=self.parse_one_page)
def parse_one_page(self, response):
"""一级页面解析: 提取postId的值,用于拼接职位详情页的地址"""
one_html = json.loads(response.text)
for one_job_dict in one_html['Data']['Posts']:
post_id = one_job_dict['PostId']
job_info_url = self.two_url.format(post_id)
# 把详情页的Url地址交给调度器入队列
yield scrapy.Request(url=job_info_url, callback=self.parse_two_page)
def parse_two_page(self, response):
# 获取响应内容,并转为python数据类型
html = json.loads(response.text)
# 提取具体数据
item = TencentItem()
item['job_name'] = html['Data']['RecruitPostName']
item['job_type'] = html['Data']['CategoryName']
item['job_duty'] = html['Data']['Responsibility']
item['job_require'] = html['Data']['Requirement']
item['job_add'] = html['Data']['LocationName']
item['job_time'] = html['Data']['LastUpdateTime']
# 至此,一条完整数据提取完成,交给项目管道去处理
yield item
| 39.9 | 230 | 0.645781 |
import scrapy
import requests
from ..items import TencentItem
import json
class TencentSpider(scrapy.Spider):
name = 'tencent'
allowed_domains = ['careers.tencent.com']
one_url = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1608216394591&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword={}&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
two_url = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1608216394591&postId={}&language=zh-cn'
keyword = input('请输入关键字:')
start_urls = [one_url.format(keyword, 1)]
def get_total(self):
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36'}
html = requests.get(url=self.one_url.format(self.keyword, 1), headers=headers).json()
count = html['Data']['Count']
total = count // 10 if count % 10 == 0 else count // 10 + 1
return total
def parse(self, response):
total = self.get_total()
for index in range(1, total + 1):
page_url = self.one_url.format(self.keyword, index)
yield scrapy.Request(url=page_url, dont_filter=True, callback=self.parse_one_page)
def parse_one_page(self, response):
one_html = json.loads(response.text)
for one_job_dict in one_html['Data']['Posts']:
post_id = one_job_dict['PostId']
job_info_url = self.two_url.format(post_id)
yield scrapy.Request(url=job_info_url, callback=self.parse_two_page)
def parse_two_page(self, response):
html = json.loads(response.text)
item = TencentItem()
item['job_name'] = html['Data']['RecruitPostName']
item['job_type'] = html['Data']['CategoryName']
item['job_duty'] = html['Data']['Responsibility']
item['job_require'] = html['Data']['Requirement']
item['job_add'] = html['Data']['LocationName']
item['job_time'] = html['Data']['LastUpdateTime']
yield item
| true | true |
f71da4be567bb4884b3e8f840b280ee3f5884d41 | 35,504 | py | Python | sympy/solvers/pde.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/pde.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/pde.py | harsh-98/sympy | 53fc684467088cdf0acccb6ad770cbde97e32268 | [
"BSD-3-Clause"
] | null | null | null | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
    - pdsolve() - Solves PDEs
    - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import (reduce, is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
# Names of all solving hints understood by this module.  pdsolve() accepts
# any of these as its ``hint`` argument, and classify_pde() returns the
# subset applicable to a given PDE.  Hints ending in "_Integral" leave the
# result in unevaluated Integral form (see the "Hints" section of the
# pdsolve docstring).
allhints = (
    "1st_linear_constant_coeff_homogeneous",
    "1st_linear_constant_coeff",
    "1st_linear_constant_coeff_Integral",
    "1st_linear_variable_coeff"
    )
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
    """
    Solves any (supported) kind of partial differential equation.

    **Usage**

        pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
        eq for function f(x,y), using method hint.

    **Details**

        ``eq`` can be any supported partial differential equation (see the
        pde docstring for supported methods), given either as an Equality
        or as an expression that is assumed to equal 0.

        ``f(x,y)`` is a function of two variables whose derivatives make up
        the partial differential equation. It is usually autodetected (an
        error is raised if it cannot be).

        ``hint`` is the solving method. Use classify_pde(eq, f(x,y)) to get
        every applicable hint. Besides the concrete hints there are the
        meta-hints:

            "default"      - use the first hint returned by classify_pde();
                             this is the default behaviour.
            "all"          - apply every relevant hint and return a
                             dictionary of hint:solution pairs. Hints that
                             raise NotImplementedError map to the raised
                             exception instead. The special keys "order"
                             (the PDE's order) and "default" (the solution
                             the default hint would give) are included.
            "all_Integral" - like "all", but when a hint has an "_Integral"
                             variant only that variant is tried, avoiding
                             expensive integrate() calls.

        ``solvefun`` is the symbol used for the arbitrary function in the
        returned solution; it defaults to F.

    pdsolve always returns an Equality (except for the "all" /
    "all_Integral" meta-hints). Note that it is not possible to get an
    explicit solution for f(x, y) as in the case of ODEs.

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
    >>> pdsolve(eq)
    Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
    """
    if not solvefun:
        solvefun = Function('F')

    # _desolve performs the classification and preprocessing; see its
    # docstring for the structure of the dictionary it returns.
    hints = _desolve(eq, func=func, hint=hint, simplify=True,
                     type='pde', **kwargs)
    eq = hints.pop('eq', False)

    if not hints.pop('all', False):
        # Single-hint mode: the match data for the selected hint is
        # already in ``hints``.
        chosen = hints['hint']
        return _helper_simplify(eq, chosen, hints['func'],
                                hints['order'], hints[chosen], solvefun)

    # Meta-hint "all": attempt every matched hint, collecting solutions
    # and recording any NotImplementedError under the failing hint's key.
    # TODO : a 'best' meta-hint should be implemented once an adequate
    # number of hints are added.
    gethints = classify_pde(eq, dict=True)
    pdedict = {'order': gethints['order'],
               'default': gethints['default']}
    failed_hints = {}
    for name in hints:
        data = hints[name]
        try:
            solution = _helper_simplify(eq, name, data['func'],
                                        data['order'], data[name], solvefun)
        except NotImplementedError as detail:
            failed_hints[name] = detail
        else:
            pdedict[name] = solution
    pdedict.update(failed_hints)
    return pdedict
def _helper_simplify(eq, hint, func, order, match, solvefun):
    """
    Dispatch to the ``pde_<hint>`` solver and post-process its result.

    An "_Integral" suffix selects the same solver function as the plain
    hint; _handle_Integral then decides whether the integral in the
    answer is evaluated. Centralising this here avoids calling _desolve
    more than once per hint.
    """
    base = hint[:-len("_Integral")] if hint.endswith("_Integral") else hint
    solvefunc = globals()["pde_" + base]
    raw_solution = solvefunc(eq, func, order, match, solvefun)
    return _handle_Integral(raw_solution, func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
    """
    Returns a tuple of possible pdsolve() classifications for a PDE.

    The tuple is ordered so that first item is the classification that
    pdsolve() uses to solve the PDE by default. In general,
    classifications near the beginning of the list will produce
    better solutions faster than those near the end, though there are
    always exceptions. To make pdsolve use a different classification,
    use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
    docstring for different meta-hints you can use.

    If ``dict`` is true, classify_pde() will return a dictionary of
    hint:match expression terms. This is intended for internal use by
    pdsolve(). Note that because dictionaries are ordered arbitrarily,
    this will most likely not be in the same order as the tuple.

    You can get help on different hints by doing help(pde.pde_hintname),
    where hintname is the name of the hint without "_Integral".

    See sympy.pde.allhints or the sympy.pde docstring for a list of all
    supported hints that can be returned from classify_pde.

    Examples
    ========

    >>> from sympy.solvers.pde import classify_pde
    >>> from sympy import Function, diff, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)))
    >>> classify_pde(eq)
    ('1st_linear_constant_coeff_homogeneous',)
    """
    # 'prep' (internal) signals whether eq still needs preprocessing by
    # _preprocess (derivative evaluation / function detection).
    prep = kwargs.pop('prep', True)

    if func and len(func.args) != 2:
        raise NotImplementedError("Right now only partial "
            "differential equations of two variables are supported")

    if prep or func is None:
        prep, func_ = _preprocess(eq, func)
        if func is None:
            func = func_

    # Normalise an Equality to an expression equal to zero.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return classify_pde(eq.lhs - eq.rhs, func)
        eq = eq.lhs

    f = func.func
    x = func.args[0]
    y = func.args[1]
    fx = f(x,y).diff(x)
    fy = f(x,y).diff(y)

    # TODO : For now pde.py uses support offered by the ode_order function
    # to find the order with respect to a multi-variable function. An
    # improvement could be to classify the order of the PDE on the basis of
    # individual variables.
    order = ode_order(eq, f(x,y))

    # hint:matchdict or hint:(tuple of matchdicts)
    # Also will contain "default":<default hint> and "order":order items.
    matching_hints = {'order': order}

    # Order 0 means no derivative of f was found: nothing to classify.
    if not order:
        if dict:
            matching_hints["default"] = None
            return matching_hints
        else:
            return ()

    eq = expand(eq)

    # Wild patterns: b, c, d are the constant coefficients of fx, fy and f;
    # e is the inhomogeneous part; a and n match a common power of f(x,y).
    a = Wild('a', exclude = [f(x,y)])
    b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
    c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
    d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
    e = Wild('e', exclude = [f(x,y), fx, fy])
    n = Wild('n', exclude = [x, y])

    # Try removing the smallest power of f(x,y)
    # from the highest partial derivatives of f(x,y)
    reduced_eq = None
    if eq.is_Add:
        # All order-th derivative multi-indices in (x, y).
        var = set(combinations_with_replacement((x,y), order))
        dummyvar = var.copy()
        power = None
        # First loop: find some derivative whose coefficient carries a
        # power of f(x,y); remember that power.
        for i in var:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a]:
                    power = match[n]
                    dummyvar.remove(i)
                    break
            dummyvar.remove(i)
        # Second loop: lower 'power' to the smallest exponent seen among
        # the remaining derivative coefficients.
        for i in dummyvar:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a] and match[n] < power:
                    power = match[n]
        if power:
            den = f(x,y)**power
            reduced_eq = Add(*[arg/den for arg in eq.args])
    if not reduced_eq:
        reduced_eq = eq

    if order == 1:
        reduced_eq = collect(reduced_eq, f(x, y))
        # First try constant coefficients (x and y are excluded from
        # the b/c/d Wilds above).
        r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
        if r:
            if not r[e]:
                ## Linear first-order homogeneous partial-differential
                ## equation with constant coefficients
                r.update({'b': b, 'c': c, 'd': d})
                matching_hints["1st_linear_constant_coeff_homogeneous"] = r
            else:
                if r[b]**2 + r[c]**2 != 0:
                    ## Linear first-order general partial-differential
                    ## equation with constant coefficients
                    r.update({'b': b, 'c': c, 'd': d, 'e': e})
                    matching_hints["1st_linear_constant_coeff"] = r
                    matching_hints[
                        "1st_linear_constant_coeff_Integral"] = r
        else:
            # Retry with Wilds that may contain x and y: variable
            # coefficients.
            b = Wild('b', exclude=[f(x, y), fx, fy])
            c = Wild('c', exclude=[f(x, y), fx, fy])
            d = Wild('d', exclude=[f(x, y), fx, fy])
            r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
            if r:
                r.update({'b': b, 'c': c, 'd': d, 'e': e})
                matching_hints["1st_linear_variable_coeff"] = r

    # Order keys based on allhints.
    retlist = []
    for i in allhints:
        if i in matching_hints:
            retlist.append(i)

    if dict:
        # Dictionaries are ordered arbitrarily, so make note of which
        # hint would come first for pdsolve(). Use an ordered dict in Py 3.
        matching_hints["default"] = None
        matching_hints["ordered_hints"] = tuple(retlist)
        for i in allhints:
            if i in matching_hints:
                matching_hints["default"] = i
                break
        return matching_hints
    else:
        return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
    """
    Checks if the given solution satisfies the partial differential
    equation.

    pde is the partial differential equation which can be given in the
    form of an equation or an expression. sol is the solution for which
    the pde is to be checked. This can also be given in an equation or
    an expression form. If the function is not provided, the helper
    function _preprocess from deutils is used to identify the function.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    The following methods are currently being implemented to check if the
    solution satisfies the PDE:

        1. Directly substitute the solution in the PDE and check. If the
           solution hasn't been solved for f, then it will solve for f
           provided solve_for_func hasn't been set to False.

    If the solution satisfies the PDE, then a tuple (True, 0) is returned.
    Otherwise a tuple (False, expr) where expr is the value obtained
    after substituting the solution in the PDE. However if a known solution
    returns False, it may be due to the inability of doit() to simplify it
    to zero.

    Examples
    ========

    >>> from sympy import Function, symbols, diff
    >>> from sympy.solvers.pde import checkpdesol, pdsolve
    >>> x, y = symbols('x y')
    >>> f = Function('f')
    >>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
    >>> sol = pdsolve(eq)
    >>> assert checkpdesol(eq, sol)[0]
    >>> eq = x*f(x,y) + f(x,y).diff(x)
    >>> checkpdesol(eq, sol)
    (False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), (_xi_1,), (4*x - 3*y,)))*exp(-6*x/25 - 8*y/25))
    """
    # Converting the pde into an equation
    if not isinstance(pde, Equality):
        pde = Eq(pde, 0)

    # If no function is given, try finding the function present.
    if func is None:
        try:
            _, func = _preprocess(pde.lhs)
        except ValueError:
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            # BUG FIX: the per-solution sets must be unpacked into
            # union(); passing the list itself (``set().union(funcs)``)
            # treats each *set* as an element to add and raises
            # TypeError because sets are unhashable.
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkpdesol for this case.')
            func = funcs.pop()

    # If the given solution is in the form of a list or a set
    # then return a list or set of tuples.
    if is_sequence(sol, set):
        return type(sol)([checkpdesol(
            pde, i, func=func,
            solve_for_func=solve_for_func) for i in sol])

    # Convert solution into an equation with the function on the lhs.
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed

    # Try solving for the function when it is not already isolated.
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        solved = solve(sol, func)
        if solved:
            if len(solved) == 1:
                return checkpdesol(pde, Eq(func, solved[0]),
                    func=func, solve_for_func=False)
            else:
                return checkpdesol(pde, [Eq(func, t) for t in solved],
                    func=func, solve_for_func=False)

    # try direct substitution of the solution into the PDE and simplify
    if sol.lhs == func:
        pde = pde.lhs - pde.rhs
        s = simplify(pde.subs(func, sol.rhs).doit())
        return s is S.Zero, s

    raise NotImplementedError(filldedent('''
        Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear homogeneous partial differential equation
    with constant coefficients.

    The general form of this partial differential equation is

    .. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy} + c f(x,y) = 0

    where `a`, `b` and `c` are constants. The general solution is of the
    form

    .. math:: f(x, y) = F(b x - a y)
                e^{- c (a x + b y) / (a^2 + b^2)}

    where `F` is an arbitrary function (``solvefun``) of the
    characteristic variable `b x - a y`.

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, pprint
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
    Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7
    """
    # TODO : Only PDEs in two independent variables are handled here;
    # extending to n variables needs support for systems of ODEs.
    f = func.func
    x, y = func.args
    coeff_fx = match[match['b']]
    coeff_fy = match[match['c']]
    coeff_f = match[match['d']]

    # Characteristic variable and exponential damping factor.
    norm = coeff_fx**2 + coeff_fy**2
    characteristic = coeff_fy*x - coeff_fx*y
    damping = exp(-S(coeff_f)/norm*(coeff_fx*x + coeff_fy*y))
    return Eq(f(x, y), damping*solvefun(characteristic))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation with
    constant coefficients.

    The general form of this partial differential equation is

    .. math:: a \frac{df(x,y)}{dx} + b \frac{df(x,y)}{dy}
                + c f(x,y) = G(x,y)

    where `a`, `b` and `c` are constants and `G(x, y)` can be an
    arbitrary function in `x` and `y`.

    The PDE is solved by changing to the characteristic coordinates
    `\xi = a x + b y` and `\eta = b x - a y`, in which it becomes a
    linear first order ODE in `\xi`. That ODE is solved with an
    integrating factor; the resulting integral is left unevaluated here
    and is computed later by ``_handle_Integral`` (unless the
    "1st_linear_constant_coeff_Integral" hint was requested, in which
    case the unevaluated form is the answer).

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, exp
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
    >>> pdsolve(eq)
    Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7
    """
    # TODO : Only PDEs in two independent variables are handled here;
    # extending to n variables needs support for systems of ODEs.
    xi, eta = symbols("xi eta")
    f = func.func
    x, y = func.args
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]

    norm = b**2 + c**2
    # Invert the characteristic change of variables to express (x, y)
    # in terms of (xi, eta).
    coords = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)

    integrating_factor = exp(-S(d)/norm*xi)
    homogeneous = solvefun(eta)
    # The particular-solution integral must remain in terms of xi;
    # doit() is deferred to _handle_Integral.
    particular = (1/S(norm))*Integral(
        (1/integrating_factor*e).subs(coords), (xi, b*x + c*y))

    return Eq(f(x, y), Subs(integrating_factor*(homogeneous + particular),
        (eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation
    with variable coefficients. The general form of this partial
    differential equation is

    .. math:: a(x, y) f(x, y) + b(x, y) \frac{df(x, y)}{dx}
                + c(x, y) \frac{df(x, y)}{dy} - G(x, y) = 0

    where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
    functions in `x` and `y`. This PDE is converted into an ODE by making
    the following transformation:

    1. `\xi` as `x`

    2. `\eta` as the constant in the solution to the differential
       equation `\frac{dy}{dx} = \frac{c}{b}` (the characteristic curves)

    Making the previous substitutions reduces it to the linear ODE

    .. math:: b(\xi, \eta) \frac{du}{d\xi} + a(\xi, \eta) u
                - G(\xi, \eta) = 0

    which can be solved using dsolve.

    The general form of this PDE is::

        >>> from sympy.solvers.pde import pdsolve
        >>> from sympy.abc import x, y
        >>> from sympy import Function, pprint
        >>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
        >>> u = f(x,y)
        >>> ux = u.diff(x)
        >>> uy = u.diff(y)
        >>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
        >>> pprint(genform)
                                             d                     d
        -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
                                             dx                    dy

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, diff, pprint, exp
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
    >>> pdsolve(eq)
    Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7
    """
    from sympy.integrals.integrals import integrate
    from sympy.solvers.ode import dsolve

    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    # In the match dict: b, c multiply fx, fy; d multiplies f; e is the
    # inhomogeneous term (negated here so e plays the role of G).
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]

    if not d:
        # To deal with cases like b*ux = e or c*uy = e
        if not (b and c):
            if c:
                try:
                    tsol = integrate(e/c, y)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(x) + tsol)
            if b:
                try:
                    tsol = integrate(e/b, x)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(y) + tsol)

    if not c:
        # To deal with cases when c is 0, a simpler method is used.
        # The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
        plode = f(x).diff(x)*b + d*f(x) - e
        sol = dsolve(plode, f(x))
        # The integration constants introduced by dsolve become arbitrary
        # functions of the other variable.
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
        return Eq(f(x, y), rhs)

    if not b:
        # To deal with cases when b is 0, a simpler method is used.
        # The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
        plode = f(y).diff(y)*c + d*f(y) - e
        sol = dsolve(plode, f(y))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
        return Eq(f(x, y), rhs)

    # General case: solve the characteristic ODE dy/dx = c/b for y(x);
    # the constant of integration gives the characteristic variable eta.
    dummy = Function('d')
    h = (c/b).subs(y, dummy(x))
    sol = dsolve(dummy(x).diff(x) - h, dummy(x))
    if isinstance(sol, list):
        sol = sol[0]
    solsym = sol.free_symbols - h.free_symbols - {x, y}
    if len(solsym) == 1:
        solsym = solsym.pop()
        # etat: the characteristic expression in x and y (value of eta
        # along each characteristic curve).
        etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
        ysub = solve(eta - etat, y)[0]
        # Restrict the PDE to a characteristic, giving a linear ODE in x.
        deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
        final = (dsolve(deq, f(x), hint='1st_linear')).rhs
        if isinstance(final, list):
            final = final[0]
        finsyms = final.free_symbols - deq.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
        return Eq(f(x, y), rhs)
    else:
        raise NotImplementedError("Cannot solve the partial differential equation due"
            " to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
    r"""
    Helper function to replace constants by functions in
    1st_linear_variable_coeff.

    Parameters
    ==========

    sol : Expr
        Right-hand side of the ODE solution whose integration constants
        are to be replaced.
    syms : set
        The integration constants introduced by dsolve (free symbols of
        the solution not present in the input equation).
    func : arbitrary function convention (usually ``F``) used in the
        final PDE solution.
    funcarg : Expr
        Argument of the arbitrary function (the characteristic variable);
        also substituted for the placeholder Symbol("eta").
    """
    eta = Symbol("eta")
    final = sol  # also covers the (unexpected) empty-syms case gracefully
    if len(syms) == 1:
        final = sol.subs(syms.pop(), func(funcarg))
    else:
        # Give each remaining constant its own arbitrary function
        # F0, F1, ... and substitute cumulatively. The previous code
        # built ``tempfun`` but substituted ``func`` into the original
        # ``sol`` each iteration, so only the last constant was
        # actually replaced.
        fname = func.__name__
        for key, sym in enumerate(syms):
            tempfun = Function(fname + str(key))
            final = final.subs(sym, tempfun(funcarg))
    return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
    """Separate variables in partial differential equation either by additive
    or multiplicative separation approach. It tries to rewrite an equation so
    that one of the specified variables occurs on a different side of the
    equation than the others.

    :param eq: Partial differential equation

    :param fun: Original function F(x, y, z)

    :param sep: List of separated functions [X(x), u(y, z)]

    :param strategy: Separation strategy. You can choose between additive
        separation ('add') and multiplicative separation ('mul') which is
        default.

    Examples
    ========

    >>> from sympy import E, Eq, Function, pde_separate, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')

    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]

    >>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
    [Derivative(X(x), x, x)/X(x), Derivative(T(t), t, t)/T(t)]

    See Also
    ========
    pde_separate_add, pde_separate_mul
    """
    do_add = False
    if strategy == 'add':
        do_add = True
    elif strategy == 'mul':
        do_add = False
    else:
        raise ValueError('Unknown strategy: %s' % strategy)

    # Normalise the input to an Equality whose rhs is 0.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return pde_separate(Eq(eq.lhs - eq.rhs), fun, sep, strategy)
    else:
        return pde_separate(Eq(eq, 0), fun, sep, strategy)

    if eq.rhs != 0:
        raise ValueError("Value should be 0")

    # Handle arguments: collect the variables of the separated functions.
    orig_args = list(fun.args)
    subs_args = []
    for s in sep:
        for j in range(0, len(s.args)):
            subs_args.append(s.args[j])

    # Build the separable ansatz: sum or product of the given functions.
    if do_add:
        functions = reduce(operator.add, sep)
    else:
        functions = reduce(operator.mul, sep)

    # Check whether variables match
    if len(subs_args) != len(orig_args):
        raise ValueError("Variable counts do not match")
    # Check for duplicate arguments like  [X(x), u(x, y)]
    if has_dups(subs_args):
        raise ValueError("Duplicate substitution arguments detected")
    # Check whether the variables match
    if set(orig_args) != set(subs_args):
        raise ValueError("Arguments do not match")

    # Substitute original function with separated...
    result = eq.lhs.subs(fun, functions).doit()

    # Divide each term by the full product when doing multiplicative
    # separation, so derivatives of one factor divide out its own factor.
    if not do_add:
        eq = 0
        for i in result.args:
            eq += i/functions
        result = eq

    # Separate on the first substitution variable versus all the others.
    svar = subs_args[0]
    dvar = subs_args[1:]
    return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
    """
    Helper function for searching additive separable solutions.

    Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the sum of two functions depending on different
    arguments:

    `w(x, y, z) = X(x) + u(y, z)`

    Returns a two-element list ``[lhs, rhs]`` of the separated parts, or
    None if the separation fails (see _separate).

    Examples
    ========

    >>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')

    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]

    """
    return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
    """
    Helper function for searching multiplicative separable solutions.

    Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the product of two functions depending on different
    arguments:

    `w(x, y, z) = X(x)*u(y, z)`

    Returns a two-element list ``[lhs, rhs]`` of the separated parts, or
    None if the separation fails (see _separate).

    Examples
    ========

    >>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
    >>> from sympy.abc import x, y
    >>> u, X, Y = map(Function, 'uXY')

    >>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
    >>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
    [Derivative(X(x), x, x)/X(x), Derivative(Y(y), y, y)/Y(y)]

    """
    return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
    """Separate expression into two parts based on dependencies of variables.

    Returns ``[lhs, rhs]`` where lhs depends only on ``dep`` and rhs only
    on ``others``, or None when no such split can be found.
    """

    # FIRST PASS
    # Extract derivative terms that depend on our separable variable
    # (and not on any of the others)...
    terms = set()
    for term in eq.args:
        if term.is_Mul:
            for i in term.args:
                if i.is_Derivative and not i.has(*others):
                    terms.add(term)
                    # NOTE(review): this `continue` only moves to the next
                    # factor of the Mul; a `break` after recording the term
                    # may have been intended -- behaviour is the same since
                    # terms is a set, just with redundant iterations.
                    continue
        elif term.is_Derivative and not term.has(*others):
            terms.add(term)
    # Find the factor that we need to divide by
    div = set()
    for term in terms:
        ext, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        div.add(ext)
    # FIXME: Find lcm() of all the divisors and divide with it, instead of
    # current hack :(
    # https://github.com/sympy/sympy/issues/4597
    if len(div) > 0:
        final = 0
        for term in eq.args:
            eqn = 0
            for i in div:
                eqn += term / i
            final += simplify(eqn)
        eq = final

    # SECOND PASS - separate the derivatives
    div = set()
    lhs = rhs = 0
    for term in eq.args:
        # Check, whether we have already term with independent variable...
        if not term.has(*others):
            lhs += term
            continue
        # ...otherwise, try to separate
        temp, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        # Extract the divisors
        div.add(sep)
        rhs -= term.expand()

    # Do the division
    # NOTE(review): reduce() with an empty `div` would raise TypeError
    # (every term independent of `others`); presumably callers never hit
    # this -- confirm before relying on it.
    fulldiv = reduce(operator.add, div)
    lhs = simplify(lhs/fulldiv).expand()
    rhs = simplify(rhs/fulldiv).expand()

    # ...and check whether we were successful :)
    if lhs.has(*others) or rhs.has(dep):
        return None
    return [lhs, rhs]
| 35.2572 | 133 | 0.550614 | from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import (reduce, is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
given_hint = hint
if not solvefun:
solvefun = Function('F')
hints = _desolve(eq, func=func,
hint=hint, simplify=True, type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'],
hints['func'], hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
prep = kwargs.pop('prep', True)
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
order = ode_order(eq, f(x,y))
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
ing_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
funcs = set().union(funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
    """
    Solve a first order linear homogeneous PDE with constant
    coefficients, b*f_x + c*f_y + d*f = 0, whose general solution is

        f(x, y) = F(c*x - b*y) * exp(-d*(b*x + c*y)/(b**2 + c**2))

    where F is the arbitrary function ``solvefun``.  Only the
    two-variable case is implemented; extending to n variables would
    require support for solving systems of ODEs.
    """
    f = func.func
    x, y = func.args[0], func.args[1]
    # Matched coefficients of f_x, f_y and f, respectively.
    b, c, d = (match[match[k]] for k in ('b', 'c', 'd'))
    xi = b*x + c*y    # coordinate along the coefficient gradient
    eta = c*x - b*y   # coordinate along the characteristics
    return Eq(f(x, y), exp(-S(d)/(b**2 + c**2)*xi)*solvefun(eta))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
    """Solve a first order linear non-homogeneous PDE with constant
    coefficients, b*f_x + c*f_y + d*f = -e, using the change of
    variables xi = b*x + c*y (along the coefficient gradient) and
    eta = c*x - b*y (along the characteristics).

    ``match`` maps the Wild symbols stored under keys 'b', 'c', 'd',
    'e' to the matched coefficients; ``solvefun`` is the arbitrary
    function of the general solution.  Returns an Equality whose rhs is
    an unevaluated ``Subs`` containing an ``Integral``.
    """
    # two variables are implemented. Once there is support for
    # solving systems of ODE's, this can be extended to n variables.
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    # The pattern matched ... + e, so the forcing term is -e.
    e = -match[match['e']]
    # Integrating factor in the xi direction.
    expterm = exp(-S(d)/(b**2 + c**2)*xi)
    functerm = solvefun(eta)
    # Invert the change of variables: express x, y in terms of xi, eta.
    solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
    # Particular solution: integrate the forcing term against the
    # inverse integrating factor along xi.
    genterm = (1/S(b**2 + c**2))*Integral(
        (1/expterm*e).subs(solvedict), (xi, b*x + c*y))
    return Eq(f(x,y), Subs(expterm*(functerm + genterm),
        (eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
    """Solve a first order linear PDE with variable coefficients,
    b(x, y)*f_x + c(x, y)*f_y + d(x, y)*f = -e(x, y).

    The matched coefficients are looked up through ``match`` (which
    maps the Wild symbols stored under 'b', 'c', 'd', 'e');
    ``solvefun`` is the arbitrary function of the general solution.
    Degenerate cases (d == 0 with a single derivative present, or
    b == 0 / c == 0) reduce to direct integration or to an ODE in one
    variable; the general case changes variables along the
    characteristics obtained from dy/dx = c/b.

    Raises NotImplementedError when the required integral or the
    characteristic equation cannot be solved.
    """
    from sympy.integrals.integrals import integrate
    from sympy.solvers.ode import dsolve
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    # The pattern matched ... + e, so the forcing term is -e.
    e = -match[match['e']]
    if not d:
        # No undifferentiated f term: with only one derivative present
        # the PDE is solved by direct integration over that variable.
        if not (b and c):
            if c:
                try:
                    tsol = integrate(e/c, y)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(x) + tsol)
            if b:
                try:
                    tsol = integrate(e/b, x)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(y) + tsol)
    if not c:
        # No f_y term: treat y as a parameter and solve the ODE in x.
        plode = f(x).diff(x)*b + d*f(x) - e
        sol = dsolve(plode, f(x))
        # Integration constants introduced by dsolve, to be replaced by
        # arbitrary functions of the other variable.
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
        return Eq(f(x, y), rhs)
    if not b:
        # No f_x term: symmetric case, ODE in y with x as a parameter.
        plode = f(y).diff(y)*c + d*f(y) - e
        sol = dsolve(plode, f(y))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
        return Eq(f(x, y), rhs)
    # General case: find the characteristic curves y(x) from dy/dx = c/b.
    dummy = Function('d')
    h = (c/b).subs(y, dummy(x))
    sol = dsolve(dummy(x).diff(x) - h, dummy(x))
    if isinstance(sol, list):
        sol = sol[0]
    solsym = sol.free_symbols - h.free_symbols - {x, y}
    if len(solsym) == 1:
        solsym = solsym.pop()
        # eta is the integration constant of the characteristic
        # equation, expressed in terms of x and y.
        etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
        ysub = solve(eta - etat, y)[0]
        # Along a characteristic the PDE reduces to a first order
        # linear ODE in x.
        deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
        final = (dsolve(deq, f(x), hint='1st_linear')).rhs
        if isinstance(final, list):
            final = final[0]
        finsyms = final.free_symbols - deq.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
        return Eq(f(x, y), rhs)
    else:
        raise NotImplementedError("Cannot solve the partial differential equation due"
            " to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
    """Replace the dsolve integration constant(s) in ``sol`` with the
    arbitrary function ``func(funcarg)``, then substitute ``funcarg``
    for the internal ``eta`` symbol.

    ``syms`` is the set of constants introduced by dsolve.  The
    previously dead locals (``fname``/``tempfun``, created but never
    used) have been removed; behavior is unchanged.
    """
    eta = Symbol("eta")
    if len(syms) == 1:
        sym = syms.pop()
        final = sol.subs(sym, func(funcarg))
    else:
        # NOTE(review): each iteration substitutes into the *original*
        # ``sol``, so when there are several constants only the last
        # substitution survives.  This mirrors the pre-existing
        # behavior; confirm before changing it to accumulate.
        for sym in syms:
            final = sol.subs(sym, func(funcarg))
    return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
    """Separate variables in the PDE ``eq`` for the unknown ``fun``
    using the ansatz fun = sep[0]*sep[1]*... (strategy='mul') or
    fun = sep[0]+sep[1]+... (strategy='add').

    ``eq`` must be an Equality with zero right-hand side (a nonzero rhs
    is first moved to the left).  Returns ``[lhs, rhs]`` of the
    separated equation, or None when separation fails.

    Raises ValueError for an unknown strategy, a nonzero rhs, or when
    the variables of ``sep`` do not exactly cover the arguments of
    ``fun``.
    """
    do_add = False
    if strategy == 'add':
        do_add = True
    elif strategy == 'mul':
        do_add = False
    else:
        raise ValueError('Unknown strategy: %s' % strategy)
    # Normalize to Eq(expr, 0).  NOTE(review): the one-argument
    # ``Eq(expr)`` form is the legacy spelling of ``Eq(expr, 0)`` --
    # confirm against the SymPy version in use.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return pde_separate(Eq(eq.lhs - eq.rhs), fun, sep, strategy)
    else:
        return pde_separate(Eq(eq, 0), fun, sep, strategy)
    if eq.rhs != 0:
        raise ValueError("Value should be 0")
    # Collect the variables of the separated factors and verify they
    # exactly match the arguments of the original function.
    orig_args = list(fun.args)
    subs_args = []
    for s in sep:
        for j in range(0, len(s.args)):
            subs_args.append(s.args[j])
    if do_add:
        functions = reduce(operator.add, sep)
    else:
        functions = reduce(operator.mul, sep)
    if len(subs_args) != len(orig_args):
        raise ValueError("Variable counts do not match")
    if has_dups(subs_args):
        raise ValueError("Duplicate substitution arguments detected")
    if set(orig_args) != set(subs_args):
        raise ValueError("Arguments do not match")
    # Substitute the ansatz into the equation.
    result = eq.lhs.subs(fun, functions).doit()
    if not do_add:
        # For the multiplicative ansatz, divide each term by the full
        # product so every term depends on fewer variables.
        eq = 0
        for i in result.args:
            eq += i/functions
        result = eq
    # Separate the first variable from the remaining ones.
    svar = subs_args[0]
    dvar = subs_args[1:]
    return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
    """Separate variables with the additive ansatz
    fun = sep[0] + sep[1] + ...; thin wrapper around ``pde_separate``.
    """
    return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
    """Separate variables with the multiplicative ansatz
    fun = sep[0] * sep[1] * ...; thin wrapper around ``pde_separate``.
    """
    return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
    """Helper for ``pde_separate``: isolate the terms of ``eq`` that
    depend only on ``dep`` on the left-hand side and everything
    involving ``others`` on the right.  Returns ``[lhs, rhs]`` or None
    when the equation does not separate.
    """
    # First pass: collect terms containing a derivative in dep alone,
    # so we can divide through by their dep-independent prefactors.
    terms = set()
    for term in eq.args:
        if term.is_Mul:
            for i in term.args:
                if i.is_Derivative and not i.has(*others):
                    terms.add(term)
                    continue
        elif term.is_Derivative and not term.has(*others):
            terms.add(term)
    # Extract the dep-independent prefactors of those terms.
    div = set()
    for term in terms:
        ext, sep = term.expand().as_independent(dep)
        # Separation fails if a dep-dependent factor still mixes with
        # the other variables.
        if sep.has(*others):
            return None
        div.add(ext)
    if len(div) > 0:
        # Divide each term by every collected prefactor so the dep
        # derivatives end up with unit coefficient.
        final = 0
        for term in eq.args:
            eqn = 0
            for i in div:
                eqn += term / i
            final += simplify(eqn)
        eq = final
    # Second pass: split the terms between the two sides.
    div = set()
    lhs = rhs = 0
    for term in eq.args:
        # Terms free of the other variables stay on the left.
        if not term.has(*others):
            lhs += term
            continue
        temp, sep = term.expand().as_independent(dep)
        if sep.has(*others):
            return None
        div.add(sep)
        rhs -= term.expand()
    fulldiv = reduce(operator.add, div)
    lhs = simplify(lhs/fulldiv).expand()
    rhs = simplify(rhs/fulldiv).expand()
    # Sanity check: each side must depend only on its own variables.
    if lhs.has(*others) or rhs.has(dep):
        return None
    return [lhs, rhs]
| true | true |
f71da5ee1e04a350be35e6e930db6f799a4b2850 | 2,221 | py | Python | tests/fixtures/transaction.py | nickderobertis/flexlate | 81d6dbc2d87219a2a89266d6e8fb03310a24a3a1 | [
"MIT"
] | null | null | null | tests/fixtures/transaction.py | nickderobertis/flexlate | 81d6dbc2d87219a2a89266d6e8fb03310a24a3a1 | [
"MIT"
] | 25 | 2021-12-05T18:57:53.000Z | 2022-03-29T13:45:47.000Z | tests/fixtures/transaction.py | nickderobertis/flexlate | 81d6dbc2d87219a2a89266d6e8fb03310a24a3a1 | [
"MIT"
] | null | null | null | from uuid import UUID
import pytest
from flexlate.transactions.transaction import FlexlateTransaction, TransactionType
# Hard-coded UUIDs keep transaction ids stable across test runs so the
# fixtures below (and anything comparing serialized transactions) stay
# deterministic.
ADD_SOURCE_ID = UUID("93f984ca-6e8f-45e9-b9b0-aebebfe798c1")
ADD_OUTPUT_ID = UUID("86465f4d-9752-4ae5-aaa7-791b4c814e8d")
ADD_SOURCE_AND_OUTPUT_ID = UUID("bf4cd42c-10b1-4bf9-a15f-294f5be738b0")
REMOVE_SOURCE_ID = UUID("c034ec63-d2b5-4d8c-aef1-f96e29a6f5d1")
REMOVE_OUTPUT_ID = UUID("79715a11-a3c4-40b1-a49b-9d8388e5c28d")
UPDATE_TRANSACTION_ID = UUID("347711b7-3bf9-484e-be52-df488f3cf598")
SYNC_TRANSACTION_ID = UUID("4825ce35-1a03-43de-ad8a-1ecc0ed68b62")
BOOTSTRAP_TRANSACTION_ID = UUID("37c61224-2b8d-4ee5-8846-49d5474a40bd")
UPDATE_TARGET_VERSION_ID = UUID("a5632854-48b4-4f82-904b-bff81dc40b02")
@pytest.fixture
def add_source_transaction() -> FlexlateTransaction:
    """Transaction for an ADD_SOURCE operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.ADD_SOURCE, id=ADD_SOURCE_ID)
@pytest.fixture
def add_output_transaction() -> FlexlateTransaction:
    """Transaction for an ADD_OUTPUT operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.ADD_OUTPUT, id=ADD_OUTPUT_ID)
@pytest.fixture
def add_source_and_output_transaction() -> FlexlateTransaction:
    """Transaction for a combined ADD_SOURCE_AND_OUTPUT operation with a fixed id."""
    yield FlexlateTransaction(
        type=TransactionType.ADD_SOURCE_AND_OUTPUT, id=ADD_SOURCE_AND_OUTPUT_ID
    )
@pytest.fixture
def remove_source_transaction() -> FlexlateTransaction:
    """Transaction for a REMOVE_SOURCE operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.REMOVE_SOURCE, id=REMOVE_SOURCE_ID)
@pytest.fixture
def remove_output_transaction() -> FlexlateTransaction:
    """Transaction for a REMOVE_OUTPUT operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.REMOVE_OUTPUT, id=REMOVE_OUTPUT_ID)
@pytest.fixture
def update_transaction() -> FlexlateTransaction:
    """Transaction for an UPDATE operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.UPDATE, id=UPDATE_TRANSACTION_ID)
@pytest.fixture
def sync_transaction() -> FlexlateTransaction:
    """Transaction for a SYNC operation with a fixed, reproducible id."""
    yield FlexlateTransaction(type=TransactionType.SYNC, id=SYNC_TRANSACTION_ID)
@pytest.fixture
def bootstrap_transaction() -> FlexlateTransaction:
    """Transaction for a BOOTSTRAP operation with a fixed, reproducible id."""
    yield FlexlateTransaction(
        type=TransactionType.BOOTSTRAP, id=BOOTSTRAP_TRANSACTION_ID
    )
@pytest.fixture
def update_target_version_transaction() -> FlexlateTransaction:
    """Transaction for an UPDATE_TARGET_VERSION operation with a fixed id."""
    yield FlexlateTransaction(
        type=TransactionType.UPDATE_TARGET_VERSION, id=UPDATE_TARGET_VERSION_ID
    )
| 33.149254 | 86 | 0.815398 | from uuid import UUID
import pytest
from flexlate.transactions.transaction import FlexlateTransaction, TransactionType
ADD_SOURCE_ID = UUID("93f984ca-6e8f-45e9-b9b0-aebebfe798c1")
ADD_OUTPUT_ID = UUID("86465f4d-9752-4ae5-aaa7-791b4c814e8d")
ADD_SOURCE_AND_OUTPUT_ID = UUID("bf4cd42c-10b1-4bf9-a15f-294f5be738b0")
REMOVE_SOURCE_ID = UUID("c034ec63-d2b5-4d8c-aef1-f96e29a6f5d1")
REMOVE_OUTPUT_ID = UUID("79715a11-a3c4-40b1-a49b-9d8388e5c28d")
UPDATE_TRANSACTION_ID = UUID("347711b7-3bf9-484e-be52-df488f3cf598")
SYNC_TRANSACTION_ID = UUID("4825ce35-1a03-43de-ad8a-1ecc0ed68b62")
BOOTSTRAP_TRANSACTION_ID = UUID("37c61224-2b8d-4ee5-8846-49d5474a40bd")
UPDATE_TARGET_VERSION_ID = UUID("a5632854-48b4-4f82-904b-bff81dc40b02")
@pytest.fixture
def add_source_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.ADD_SOURCE, id=ADD_SOURCE_ID)
@pytest.fixture
def add_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.ADD_OUTPUT, id=ADD_OUTPUT_ID)
@pytest.fixture
def add_source_and_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.ADD_SOURCE_AND_OUTPUT, id=ADD_SOURCE_AND_OUTPUT_ID
)
@pytest.fixture
def remove_source_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.REMOVE_SOURCE, id=REMOVE_SOURCE_ID)
@pytest.fixture
def remove_output_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.REMOVE_OUTPUT, id=REMOVE_OUTPUT_ID)
@pytest.fixture
def update_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.UPDATE, id=UPDATE_TRANSACTION_ID)
@pytest.fixture
def sync_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(type=TransactionType.SYNC, id=SYNC_TRANSACTION_ID)
@pytest.fixture
def bootstrap_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.BOOTSTRAP, id=BOOTSTRAP_TRANSACTION_ID
)
@pytest.fixture
def update_target_version_transaction() -> FlexlateTransaction:
yield FlexlateTransaction(
type=TransactionType.UPDATE_TARGET_VERSION, id=UPDATE_TARGET_VERSION_ID
)
| true | true |
f71da789b8896079130d361bbef9e9e5d5278187 | 55 | py | Python | aox/boilerplate/boilerplates/__init__.py | costas-basdekis/aox | 63a90fb722f29d9b2d26041f9035f99b6b21615e | [
"MIT"
] | 2 | 2021-11-10T22:38:49.000Z | 2021-12-03T08:09:01.000Z | aox/boilerplate/boilerplates/__init__.py | costas-basdekis/aox | 63a90fb722f29d9b2d26041f9035f99b6b21615e | [
"MIT"
] | null | null | null | aox/boilerplate/boilerplates/__init__.py | costas-basdekis/aox | 63a90fb722f29d9b2d26041f9035f99b6b21615e | [
"MIT"
] | null | null | null | from .default_boilerplate import * # noqa: F401, F403
| 27.5 | 54 | 0.745455 | from .default_boilerplate import *
| true | true |
f71da79286c09974c9ab43177fa4bb6812abe8c6 | 858 | py | Python | ihatefacebook/facebook/page.py | hot3eed/ihatefacebook | fd199d9597fca53010ff5132404d8f4ac6efe1a8 | [
"Apache-2.0"
] | 1 | 2020-06-18T05:31:03.000Z | 2020-06-18T05:31:03.000Z | ihatefacebook/facebook/page.py | hot3eed/ihatefacebook | fd199d9597fca53010ff5132404d8f4ac6efe1a8 | [
"Apache-2.0"
] | null | null | null | ihatefacebook/facebook/page.py | hot3eed/ihatefacebook | fd199d9597fca53010ff5132404d8f4ac6efe1a8 | [
"Apache-2.0"
] | null | null | null | import re
import sys
from selenium import webdriver
def get_page_title(page_id):
"""
Get the title of a Facebook page. Reports an error and exits if no page was found.
:param page_id: Facebook ID of the page
:return:
string of the title of the page, if found
"""
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_experimental_option("prefs", {'profile.managed_default_content_settings.javascript': 2})
options.add_experimental_option("prefs", {'profile.managed_default_content_settings.images': 2})
driver = webdriver.Chrome(options=options)
url = "https://www.facebook.com/%s/" % page_id
driver.get(url)
try:
title = re.search('(.+)\s-', driver.title).group(1)
return title
except:
print("ERROR: Page not found.")
sys.exit(1)
| 30.642857 | 104 | 0.677156 | import re
import sys
from selenium import webdriver
def get_page_title(page_id):
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_experimental_option("prefs", {'profile.managed_default_content_settings.javascript': 2})
options.add_experimental_option("prefs", {'profile.managed_default_content_settings.images': 2})
driver = webdriver.Chrome(options=options)
url = "https://www.facebook.com/%s/" % page_id
driver.get(url)
try:
title = re.search('(.+)\s-', driver.title).group(1)
return title
except:
print("ERROR: Page not found.")
sys.exit(1)
| true | true |
f71da7e411b828d81ea079e642da4185b3730145 | 2,156 | py | Python | main.py | KazakovM/qbittorrent_rechecker | 0fa08903c63b45a54ccda7688c0f48f33b82533f | [
"MIT"
] | null | null | null | main.py | KazakovM/qbittorrent_rechecker | 0fa08903c63b45a54ccda7688c0f48f33b82533f | [
"MIT"
] | null | null | null | main.py | KazakovM/qbittorrent_rechecker | 0fa08903c63b45a54ccda7688c0f48f33b82533f | [
"MIT"
] | null | null | null | import json
from time import sleep, strftime, localtime
from qbittorrent import Client
def load_configs():
    """Read config.json from the working directory and publish the
    qBittorrent connection settings and the torrent size limits (in
    GiB) as module-level globals used by main()."""
    global HOST, PORT, LOGIN, PASSWORD, MIN_SIZE, MAX_SIZE
    with open('config.json', 'r', encoding="UTF-8") as file:
        cfg = json.load(file)
    HOST = cfg["HOST"]
    PORT = cfg["PORT"]
    LOGIN = cfg["LOGIN"]
    PASSWORD = cfg["PASSWORD"]
    MIN_SIZE = float(cfg["MIN_SIZE"])
    MAX_SIZE = float(cfg["MAX_SIZE"])
def get_time():
    """Current local time formatted as ``YYYY-MM-DD HH:MM:SS`` for log lines."""
    now = localtime()
    return strftime("%Y-%m-%d %H:%M:%S", now)
def main():
    """Poll qBittorrent forever: permanently delete incomplete torrents
    outside the configured size window and force a recheck of torrents
    stuck in the ``stalledDL`` state near completion.

    Relies on the module globals set by ``load_configs()``.  Never
    returns during normal operation (infinite polling loop); connection
    and API errors are logged and end the function.
    """
    try:
        qb = Client(f'http://{HOST}:{PORT}/')
        qb.login(LOGIN, PASSWORD)
        try:
            if qb is not None:
                while True:
                    torrents = qb.torrents()
                    for torrent in torrents:
                        sleep(2)  # without a delay the API may return "-1"
                        # 1073741824 bytes = 1 GiB.  The first character
                        # of the progress string is "1" only at 100%, so
                        # the check skips completed torrents.
                        if (MIN_SIZE*1073741824 > torrent['size'] or torrent['size'] > MAX_SIZE*1073741824) and float(str(torrent["progress"])[0]) != 1:
                            if torrent['size'] != 0:
                                print(f'{get_time()}: Torrent "{torrent["name"]}" is out of size limit: {round(torrent["size"]/1073741824, 2)} GB. Deleting...')
                                qb.delete_permanently(torrent['hash'])
                                sleep(3)
                        # NOTE(review): slicing progress to 4 chars
                        # truncates rather than rounds (e.g. "0.989" ->
                        # 0.98), so this fires only above ~98%;
                        # presumably intentional -- confirm.
                        if torrent['state'] == 'stalledDL' and float(str(torrent["progress"])[0:4]) > 0.98:
                            print(f'{get_time()}: Torrent "{torrent["name"]}" is stuck. Rechecking...')
                            qb.recheck(torrent['hash'])
                            qb.increase_priority(torrent['hash'])
                            sleep(300)  # after a recheck the torrent may briefly stay "stalled"; wait it out
        except Exception as e:
            print(f'{get_time()}: Failed to get torrent list or recheck stuck torrent: {e}')
    except Exception as e:
        print(f'{get_time()}: Failed to establish connection: {e}')
# Script entry point: load settings from config.json, then poll until
# interrupted or a connection error occurs.
if __name__ == "__main__":
    print(f'{get_time()}: Starting script...')
    load_configs()
    main()
from time import sleep, strftime, localtime
from qbittorrent import Client
def load_configs():
with open('config.json', 'r', encoding="UTF-8") as file:
js = json.load(file)
global HOST, PORT, LOGIN, PASSWORD, MIN_SIZE, MAX_SIZE
HOST = js["HOST"]
PORT = js["PORT"]
LOGIN = js["LOGIN"]
PASSWORD = js["PASSWORD"]
MIN_SIZE = float(js["MIN_SIZE"])
MAX_SIZE = float(js["MAX_SIZE"])
def get_time():
return strftime("%Y-%m-%d %H:%M:%S", localtime())
def main():
try:
qb = Client(f'http://{HOST}:{PORT}/')
qb.login(LOGIN, PASSWORD)
try:
if qb is not None:
while True:
torrents = qb.torrents()
for torrent in torrents:
sleep(2)
if (MIN_SIZE*1073741824 > torrent['size'] or torrent['size'] > MAX_SIZE*1073741824) and float(str(torrent["progress"])[0]) != 1:
if torrent['size'] != 0:
print(f'{get_time()}: Torrent "{torrent["name"]}" is out of size limit: {round(torrent["size"]/1073741824, 2)} GB. Deleting...')
qb.delete_permanently(torrent['hash'])
sleep(3)
if torrent['state'] == 'stalledDL' and float(str(torrent["progress"])[0:4]) > 0.98:
print(f'{get_time()}: Torrent "{torrent["name"]}" is stuck. Rechecking...')
qb.recheck(torrent['hash'])
qb.increase_priority(torrent['hash'])
sleep(300)
except Exception as e:
print(f'{get_time()}: Failed to get torrent list or recheck stuck torrent: {e}')
except Exception as e:
print(f'{get_time()}: Failed to establish connection: {e}')
if __name__ == "__main__":
print(f'{get_time()}: Starting script...')
load_configs()
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.