max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
Control/antennaControl.py | jerrydark/antenna-tracking | 1 | 6624151 | import GeneralSettings
from Utility.abstract_process import processAbstract
from Actuator.pitch_servo import PitchServo
from Actuator.yaw_servo import YawServo
from Sensors.imuYaw import imuYaw
from Sensors.imuPitch import imuPitch
from Vehicle.uavInteraction import mavlinkHandler
#from debugger import Debugger
from Sensors.gps import GPSClient
import time
import math
import sys
from multiprocessing import Value
class antennaControl(processAbstract):
    """Top-level antenna-tracking control process.

    Wires together the GPS/IMU sensor processes, the mavlink UAV data
    feed and the yaw/pitch servo actuators, then periodically computes
    pitch and bearing setpoints from the antenna and UAV GPS positions.
    """
    def __init__(self, antenna_data, uav_data, actuator_setpoint):
        processAbstract.__init__(self)
        # Shared-memory data holders used across the worker processes
        # (come from utility.multiprocessDataType).
        self.antenna_data = antenna_data
        self.uav_data = uav_data
        self.actuator_setpoint = actuator_setpoint
        # self.period = 0.2 # 200 ms of period might not be optimal
        # Compensate compass readings for the local magnetic declination.
        self.antenna_data.setYawOffset(math.radians(
            GeneralSettings.MAGNETIC_DECLINATION))
        self.yaw = YawServo(
            self.antenna_data,
            self.actuator_setpoint,
            GeneralSettings.servo_yaw_pin,
            0, 0)
        # NOTE(review): PitchServo is also given servo_yaw_pin below --
        # this looks like a copy/paste bug (a pitch servo pin was likely
        # intended); confirm against GeneralSettings before changing.
        self.pitch = PitchServo(
            self.antenna_data,
            self.actuator_setpoint,
            GeneralSettings.servo_yaw_pin,
            math.radians(10), math.radians(70))
        self.imuYaw = imuYaw(self.antenna_data)
        self.imuPitch = imuPitch(self.antenna_data)
        self.uav = mavlinkHandler(self.uav_data)
        # Seed the antenna position with configured defaults until the
        # GPS client provides a real fix.
        self.antenna_data.setLon(GeneralSettings.default_lon)
        self.antenna_data.setLat(GeneralSettings.default_lat)
        self.antenna_data.setAlt(GeneralSettings.default_rel_alt)
        #self.debugger = Debugger(self.antenna_data, self.uav_data, self.actuator_setpoint)
        self.gps = GPSClient(self.antenna_data)
        self.running = False
    def process(self):
        """Main loop: start all helper processes once, then keep
        recomputing pitch/yaw setpoints from the two GPS positions."""
        if not self.running:
            self.gps.start()
            self.imuYaw.start()
            self.imuPitch.start()
            self.uav.start()
            self.yaw.start()
            self.pitch.start()
            self.running = True
        while self.running:
            self.actuator_setpoint.setPitch(self._calculate_pitch(
                self.antenna_data.getLat(),
                self.antenna_data.getLon(),
                self.antenna_data.getAlt(),
                self.uav_data.getLat(),
                self.uav_data.getLon(),
                self.uav_data.getAlt()))
            _bearing = self._calculate_bearing(
                self.antenna_data.getLat(),
                self.antenna_data.getLon(),
                self.uav_data.getLat(),
                self.uav_data.getLon())
            # The bearing setpoint is corrected by the declination offset.
            self.actuator_setpoint.setYaw(
                _bearing + self.antenna_data.getYawOffset())
            time.sleep(0.1)  # ~10 Hz update rate
    def stop(self):
        """Request the control loop to exit."""
        self.running = False
    def soft_stop_everything(self):
        """Stop the control loop and softly stop all child processes."""
        self.running = False
        self.yaw.soft_process_stop()
        self.pitch.soft_process_stop()
        self.gps.soft_process_stop()
        self.imuYaw.soft_process_stop()
        self.imuPitch.soft_process_stop()
        self.uav.soft_process_stop()
        # time.sleep(1)
        # self.start()
    def _calculate_pitch(self, lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):
        """ Calculate the pitch using haversine formula """
        # Great-circle ground distance via haversine; the pitch is then
        # the elevation angle of the altitude delta over that distance.
        R = 6371000  # mean Earth radius in metres
        lat_sat = math.radians(lat_sat)
        lat_drone = math.radians(lat_drone)
        long_sat = math.radians(long_sat)
        long_drone = math.radians(long_drone)
        delta_long = long_drone - long_sat
        delta_lat = lat_drone - lat_sat
        delta_alt = alt_drone - alt_sat
        a = math.pow(math.sin(delta_lat / 2), 2) + math.cos(lat_sat) * \
            math.cos(lat_drone) * math.pow(math.sin(delta_long / 2), 2)
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        d = R * c
        pitch_angle = math.atan2(delta_alt, d)
        return pitch_angle  # radians
    def _calculate_bearing(self, lat_sat, long_sat, lat_drone, long_drone):
        """ Calculate the bearing based on antenna and uav gps coordinates"""
        lat_sat = math.radians(lat_sat)
        lat_drone = math.radians(lat_drone)
        long_sat = math.radians(long_sat)
        long_drone = math.radians(long_drone)
        delta_long = long_drone - long_sat
        delta_lat = lat_drone - lat_sat  # (computed but unused by the formula)
        y = math.sin(delta_long) * math.cos(lat_drone)
        x = math.cos(lat_sat) * math.sin(lat_drone) - \
            math.sin(lat_sat) * math.cos(lat_drone) * math.cos(delta_long)
        bearing_initial = math.atan2(y, x)
        return bearing_initial  # radians in (-pi, pi]
| import GeneralSettings
from Utility.abstract_process import processAbstract
from Actuator.pitch_servo import PitchServo
from Actuator.yaw_servo import YawServo
from Sensors.imuYaw import imuYaw
from Sensors.imuPitch import imuPitch
from Vehicle.uavInteraction import mavlinkHandler
#from debugger import Debugger
from Sensors.gps import GPSClient
import time
import math
import sys
from multiprocessing import Value
class antennaControl(processAbstract):
    """Antenna tracking control process: keeps the antenna pointed at
    the UAV by recomputing pitch/bearing setpoints from GPS data."""
    def __init__(self, antenna_data, uav_data, actuator_setpoint):
        processAbstract.__init__(self)
        # Shared multiprocess data holders (utility.multiprocessDataType).
        self.antenna_data = antenna_data
        self.uav_data = uav_data
        self.actuator_setpoint = actuator_setpoint
        # self.period = 0.2 # 200 ms of period might not be optimal
        # Correct compass readings for local magnetic declination.
        self.antenna_data.setYawOffset(math.radians(
            GeneralSettings.MAGNETIC_DECLINATION))
        self.yaw = YawServo(
            self.antenna_data,
            self.actuator_setpoint,
            GeneralSettings.servo_yaw_pin,
            0, 0)
        # NOTE(review): the yaw servo pin is reused for the pitch servo
        # below -- likely a copy/paste bug; confirm before changing.
        self.pitch = PitchServo(
            self.antenna_data,
            self.actuator_setpoint,
            GeneralSettings.servo_yaw_pin,
            math.radians(10), math.radians(70))
        self.imuYaw = imuYaw(self.antenna_data)
        self.imuPitch = imuPitch(self.antenna_data)
        self.uav = mavlinkHandler(self.uav_data)
        # Default position until the GPS client delivers a real fix.
        self.antenna_data.setLon(GeneralSettings.default_lon)
        self.antenna_data.setLat(GeneralSettings.default_lat)
        self.antenna_data.setAlt(GeneralSettings.default_rel_alt)
        #self.debugger = Debugger(self.antenna_data, self.uav_data, self.actuator_setpoint)
        self.gps = GPSClient(self.antenna_data)
        self.running = False
    def process(self):
        """Start helper processes once, then loop updating setpoints."""
        if not self.running:
            self.gps.start()
            self.imuYaw.start()
            self.imuPitch.start()
            self.uav.start()
            self.yaw.start()
            self.pitch.start()
            self.running = True
        while self.running:
            self.actuator_setpoint.setPitch(self._calculate_pitch(
                self.antenna_data.getLat(),
                self.antenna_data.getLon(),
                self.antenna_data.getAlt(),
                self.uav_data.getLat(),
                self.uav_data.getLon(),
                self.uav_data.getAlt()))
            _bearing = self._calculate_bearing(
                self.antenna_data.getLat(),
                self.antenna_data.getLon(),
                self.uav_data.getLat(),
                self.uav_data.getLon())
            self.actuator_setpoint.setYaw(
                _bearing + self.antenna_data.getYawOffset())
            time.sleep(0.1)  # ~10 Hz update rate
    def stop(self):
        """Request the control loop to exit."""
        self.running = False
    def soft_stop_everything(self):
        """Stop the loop and softly stop every child process."""
        self.running = False
        self.yaw.soft_process_stop()
        self.pitch.soft_process_stop()
        self.gps.soft_process_stop()
        self.imuYaw.soft_process_stop()
        self.imuPitch.soft_process_stop()
        self.uav.soft_process_stop()
        # time.sleep(1)
        # self.start()
    def _calculate_pitch(self, lat_sat, long_sat, alt_sat, lat_drone, long_drone, alt_drone):
        """ Calculate the pitch using haversine formula """
        R = 6371000  # mean Earth radius in metres
        lat_sat = math.radians(lat_sat)
        lat_drone = math.radians(lat_drone)
        long_sat = math.radians(long_sat)
        long_drone = math.radians(long_drone)
        delta_long = long_drone - long_sat
        delta_lat = lat_drone - lat_sat
        delta_alt = alt_drone - alt_sat
        a = math.pow(math.sin(delta_lat / 2), 2) + math.cos(lat_sat) * \
            math.cos(lat_drone) * math.pow(math.sin(delta_long / 2), 2)
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        d = R * c
        # Elevation angle of the altitude delta over the ground distance.
        pitch_angle = math.atan2(delta_alt, d)
        return pitch_angle
    def _calculate_bearing(self, lat_sat, long_sat, lat_drone, long_drone):
        """ Calculate the bearing based on antenna and uav gps coordinates"""
        lat_sat = math.radians(lat_sat)
        lat_drone = math.radians(lat_drone)
        long_sat = math.radians(long_sat)
        long_drone = math.radians(long_drone)
        delta_long = long_drone - long_sat
        delta_lat = lat_drone - lat_sat  # (unused by the formula)
        y = math.sin(delta_long) * math.cos(lat_drone)
        x = math.cos(lat_sat) * math.sin(lat_drone) - \
            math.sin(lat_sat) * math.cos(lat_drone) * math.cos(delta_long)
        bearing_initial = math.atan2(y, x)
        return bearing_initial
| en | 0.534621 | #from debugger import Debugger # classes for shared data across multiprocess # compre from utility.multiprocessDataType # self.period = 0.2 # 200 ms of period might not be optimal # processAbstract): #self.debugger = Debugger(self.antenna_data, self.uav_data, self.actuator_setpoint) # time.sleep(1) # self.start() Calculate the pitch using haversine formula Calculate the bearing based on antenna and uav gps coordinates | 2.419542 | 2 |
aiexplore/rls500/helper.py | kglavin/lunarip | 0 | 6624152 | <filename>aiexplore/rls500/helper.py
import matplotlib.pyplot as plt
from IPython import display
#derived from https://github.com/python-engineer/snake-ai-pytorch
plt.ion()
def plot_init(title):
    """Create the 3-row training dashboard figure.

    Returns the tuple (ax1, ax2, ax3): game score, model score and
    learning rate axes, all plotted against the number of games.
    """
    fig, (ax1,ax2,ax3) = plt.subplots(3, 1, figsize=(10, 9))
    fig.suptitle(title, fontsize=12, fontweight='bold')
    ax1.set_xlabel('Number of Games')
    ax1.set_ylabel('Game Score')
    ax2.set_xlabel('Number of Games')
    ax2.set_ylabel('Model Score')
    ax3.set_xlabel('Number of Games')
    ax3.set_ylabel('Learning Rate')
    return (ax1,ax2,ax3)
def plot(axes,scores, mean_scores,model_scores,running_avg_model_scores,learning_rates):
    """Redraw the three dashboard axes with the latest training series.

    axes -- the (ax1, ax2, ax3) tuple returned by plot_init; every other
    argument is a per-game sequence of values.
    """
    ax1,ax2,ax3 = axes
    # Game scores: raw (black) plus running mean (green).
    ax1.plot(scores,color='black')
    ax1.plot(mean_scores,color='green')
    # Model scores: raw (red) plus running average (green).
    ax2.plot(model_scores,color='red')
    ax2.plot(running_avg_model_scores,color='green')
    ax3.plot(learning_rates,color='blue')
    _ = plt.show(block=False)
def dashboard(initial_config, learning_score, learning, playing_score, title='Dashboard'):
    """Clear the notebook output and start a fresh dashboard figure.

    Parameters
    ----------
    initial_config : configuration shown for context (currently unused).
    learning_score : latest learning score (currently unused).
    learning : whether the agent is currently learning (currently unused).
    playing_score : latest playing score (currently unused).
    title : figure title. The original referenced an undefined global
        ``title`` (always raising NameError); it is now an explicit,
        backward-compatible keyword argument.
    """
    display.clear_output(wait=True)
    plt.clf()
    plt.title(title)
| <filename>aiexplore/rls500/helper.py
import matplotlib.pyplot as plt
from IPython import display
#derived from https://github.com/python-engineer/snake-ai-pytorch
plt.ion()
def plot_init(title):
    """Build the 3-row dashboard figure; return (ax1, ax2, ax3) for
    game score, model score and learning rate respectively."""
    fig, (ax1,ax2,ax3) = plt.subplots(3, 1, figsize=(10, 9))
    fig.suptitle(title, fontsize=12, fontweight='bold')
    ax1.set_xlabel('Number of Games')
    ax1.set_ylabel('Game Score')
    ax2.set_xlabel('Number of Games')
    ax2.set_ylabel('Model Score')
    ax3.set_xlabel('Number of Games')
    ax3.set_ylabel('Learning Rate')
    return (ax1,ax2,ax3)
def plot(axes,scores, mean_scores,model_scores,running_avg_model_scores,learning_rates):
    """Redraw the dashboard axes from plot_init with the latest series."""
    ax1,ax2,ax3 = axes
    ax1.plot(scores,color='black')
    ax1.plot(mean_scores,color='green')
    ax2.plot(model_scores,color='red')
    ax2.plot(running_avg_model_scores,color='green')
    ax3.plot(learning_rates,color='blue')
    # Non-blocking show so training can continue while the plot updates.
    _ = plt.show(block=False)
def dashboard(initial_config, learning_score, learning, playing_score, title='Dashboard'):
    """Clear the notebook output and start a fresh dashboard figure.

    The four status arguments are accepted for interface compatibility
    and currently unused. ``title`` used to be an undefined global
    (guaranteed NameError); it is now a keyword argument with a default,
    which keeps the call signature backward compatible.
    """
    display.clear_output(wait=True)
    plt.clf()
    plt.title(title)
| en | 0.22376 | #derived from https://github.com/python-engineer/snake-ai-pytorch ##display.clear_output(wait=True) #display.display(plt.gcf()) ##plt.clf() #plt.title(title) #plt.figure() #_ = plt.plot(scores) #_ = plt.plot(mean_scores) #ax1.set_ylim(ymin=0,ymax=60) #_ = plt.plot(model_scores) #ax2.set_ylim(ymin=0,ymax=5000) #ax3.set_ylim(ymin=0,ymax=0.01) #ax1.text(len(scores)-1, scores[-1], str(scores[-1])) #ax1.text(len(mean_scores)-1, mean_scores[-1], str(mean_scores[-1])) #ax2.text(len(model_scores)-1, model_scores[-1], str(model_scores[-1])) #plt.pause(.1) | 3.085175 | 3 |
lib/python/treadmill/appcfg/features/docker.py | krcooke/treadmill | 133 | 6624153 | """Configures dockerd inside the container.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import dockerutils
from treadmill import nodedata
from treadmill import subproc
from treadmill.appcfg.features import feature_base
_LOGGER = logging.getLogger(__name__)
class DockerdFeature(feature_base.Feature):
    """Feature that enables a Docker daemon inside the container."""
    def applies(self, manifest, runtime):
        # Docker-in-container is only configured for the linux runtime.
        return runtime == 'linux'
    def configure(self, manifest):
        """Append the dockerd and docker-authz services plus the Docker
        environment variables to the application manifest."""
        _LOGGER.info('Configuring dockerd.')
        # TODO: we need to move dockerd and docker authz to system-services
        # when they are stable
        # get registry address from node.json
        node_data = nodedata.get(self._tm_env.configs_dir)
        docker_conf = dockerutils.get_conf(manifest['environment'],
                                           node_data)
        manifest['services'].append(
            _generate_dockerd_service(docker_conf)
        )
        manifest['services'].append(
            _generate_docker_authz_service(docker_conf)
        )
        # Point docker clients inside the container at the local daemon.
        manifest['environ'].append(
            {'name': 'DOCKER_HOST', 'value': 'tcp://127.0.0.1:2375'}
        )
        manifest['docker'] = True
def _generate_docker_authz_service(_docker_cfg):
# full command include creating rest module cfg file and launch sproc
cmd = (
'exec $TREADMILL/bin/treadmill'
' sproc restapi'
' -m docker_authz.authzreq,docker_authz.authzres,docker_authz.activate'
' --cors-origin=".*"'
' -s {sock}'
).format(
sock='/run/docker/plugins/authz.sock',
)
docker_authz_svc = {
'name': 'docker-auth',
'proid': 'root',
'restart': {
'limit': 5,
'interval': 60,
},
'command': cmd,
'root': True,
'environ': [],
'config': None,
'downed': False,
'trace': False,
}
return docker_authz_svc
def _generate_dockerd_service(docker_cfg):
    """Build the dockerd manifest service entry.

    Assembles the dockerd command line with the node's default ulimits
    and the configured image registries, blocking every other registry.
    Advanced network features remain disabled.
    """
    flags = []
    # Map the rich ulimit dictionaries onto dockerd CLI flags.  The
    # "soft" value is deliberately ignored: dockerd has a known issue
    # comparing finite vs infinite values (errors on {Soft=0, Hard=-1}),
    # so the hard limit is used for both bounds.
    for ulimit in dockerutils.get_ulimits():
        flags.append('--default-ulimit')
        flags.append('{0}={1}:{1}'.format(ulimit['Name'], ulimit['Hard']))
    # Deny every registry by default, then whitelist the configured ones.
    flags.append('--block-registry="*"')
    for registry in docker_cfg['registries']:
        flags.append('--add-registry')
        flags.append(registry['host'])
        if registry.get('insecure', False):
            flags.append('--insecure-registry')
            flags.append(registry['host'])
    command = 'exec {}'.format(subproc.resolve('dockerd'))
    if flags:
        command += ' ' + ' '.join(flags)
    _LOGGER.info('dockerd cmd: %s', command)
    return {
        'name': 'dockerd',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': command,
        'root': True,
        'environ': [],
        'config': None,
        'downed': False,
        'trace': False,
    }
__all__ = (
'DockerdFeature',
)
| """Configures dockerd inside the container.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import dockerutils
from treadmill import nodedata
from treadmill import subproc
from treadmill.appcfg.features import feature_base
_LOGGER = logging.getLogger(__name__)
class DockerdFeature(feature_base.Feature):
    """Feature that enables a Docker daemon inside the container."""
    def applies(self, manifest, runtime):
        # Only supported for the linux runtime.
        return runtime == 'linux'
    def configure(self, manifest):
        """Add dockerd/docker-authz services and DOCKER_HOST to the manifest."""
        _LOGGER.info('Configuring dockerd.')
        # TODO: we need to move dockerd and docker authz to system-services
        # when they are stable
        # get registry address from node.json
        node_data = nodedata.get(self._tm_env.configs_dir)
        docker_conf = dockerutils.get_conf(manifest['environment'],
                                           node_data)
        manifest['services'].append(
            _generate_dockerd_service(docker_conf)
        )
        manifest['services'].append(
            _generate_docker_authz_service(docker_conf)
        )
        manifest['environ'].append(
            {'name': 'DOCKER_HOST', 'value': 'tcp://127.0.0.1:2375'}
        )
        manifest['docker'] = True
def _generate_docker_authz_service(_docker_cfg):
    """Return the manifest service entry for the docker authz REST server.

    The docker configuration argument is currently unused.
    """
    # full command include creating rest module cfg file and launch sproc
    cmd = (
        'exec $TREADMILL/bin/treadmill'
        ' sproc restapi'
        ' -m docker_authz.authzreq,docker_authz.authzres,docker_authz.activate'
        ' --cors-origin=".*"'
        ' -s {sock}'
    ).format(
        sock='/run/docker/plugins/authz.sock',
    )
    docker_authz_svc = {
        'name': 'docker-auth',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': cmd,
        'root': True,
        'environ': [],
        'config': None,
        'downed': False,
        'trace': False,
    }
    return docker_authz_svc
def _generate_dockerd_service(docker_cfg):
    """Configure docker daemon services."""
    # add dockerd service
    # we disable advanced network features
    command = 'exec {dockerd}'.format(
        dockerd=subproc.resolve('dockerd')
    )
    extra_cmd_params = ['']  # Start with a space
    # configure ulimits.
    ulimits = dockerutils.get_ulimits()
    # Format rich dictionary to dockerd-compatible cli flags.
    # Do not respect "soft" limit as dockerd has a known issue when comparing
    # finite vs infinite values; will error on {Soft=0, Hard=-1}
    for flag in ulimits:
        extra_cmd_params.append('--default-ulimit')
        extra_cmd_params.append('{}={}:{}'.format(flag['Name'],
                                                  flag['Hard'],
                                                  flag['Hard']))
    # We block all registries and only allow image pulls from our configured
    # registries.
    extra_cmd_params.append('--block-registry="*"')
    for registry in docker_cfg['registries']:
        extra_cmd_params.append('--add-registry')
        extra_cmd_params.append(registry['host'])
        if registry.get('insecure', False):
            extra_cmd_params.append('--insecure-registry')
            extra_cmd_params.append(registry['host'])
    # The leading '' element makes the joined flags start with a space.
    command += ' '.join(extra_cmd_params)
    _LOGGER.info('dockerd cmd: %s', command)
    dockerd_svc = {
        'name': 'dockerd',
        'proid': 'root',
        'restart': {
            'limit': 5,
            'interval': 60,
        },
        'command': command,
        'root': True,
        'environ': [],
        'config': None,
        'downed': False,
        'trace': False,
    }
    return dockerd_svc
__all__ = (
'DockerdFeature',
)
| en | 0.784467 | Configures dockerd inside the container. Feature to enabled docker daemon in container # TODO: we need to move dockerd and docker authz to system-services # when they are stable # get registry address from node.json # full command include creating rest module cfg file and launch sproc Configure docker daemon services. # add dockerd service # we disable advanced network features # Start with a space # configure ulimits. # Format rich dictionary to dockerd-compatible cli flags. # Do not respect "soft" limit as dockerd has a known issue when comparing # finite vs infinite values; will error on {Soft=0, Hard=-1} # We block all registries and only allow image pulls from our configured # registries. | 1.962025 | 2 |
driver.py | kyclark/python_plugins | 0 | 6624154 | <gh_stars>0
#!/usr/bin/env python3
"""Run function from imported module"""
import argparse
import importlib
import os
import pkgutil
from typing import NamedTuple, Callable, Dict
class Args(NamedTuple):
    """Parsed command-line arguments."""
    language: str  # plugin language key (a choice from the plugins dict)
    name: str      # name to greet
# --------------------------------------------------
def get_args(plugins: Dict[str, Callable]) -> Args:
    """Parse command-line arguments.

    The positional ``language`` argument is restricted to the keys of
    the discovered *plugins* mapping.
    """
    arg_parser = argparse.ArgumentParser(
        description='Run function from imported module',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('language',
                            metavar='lang',
                            type=str,
                            choices=plugins.keys(),
                            help='Language to use')
    arg_parser.add_argument('name',
                            metavar='name',
                            type=str,
                            help='Name to greet')
    parsed = arg_parser.parse_args()
    return Args(parsed.language, parsed.name)
# --------------------------------------------------
def main():
    """Discover greet_* plugins, parse arguments, print the greeting."""
    cwd = os.path.realpath(__file__)
    plugin_dir = os.path.join(os.path.dirname(cwd), 'plugins')
    # Import every plugins/greet_<lang> module, keyed by <lang>.
    plugins = {
        name.replace('greet_', ''): importlib.import_module('plugins.' + name)
        for _, name, _ in pkgutil.iter_modules(path=[plugin_dir])
        if name.startswith('greet_')
    }
    args = get_args(plugins)
    # Each plugin module is expected to expose a greet(name) callable.
    print(plugins[args.language].greet(args.name))
# --------------------------------------------------
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
"""Run function from imported module"""
import argparse
import importlib
import os
import pkgutil
from typing import NamedTuple, Callable, Dict
class Args(NamedTuple):
    """Parsed command-line arguments."""
    language: str  # plugin language key
    name: str      # name to greet
# --------------------------------------------------
def get_args(plugins: Dict[str, Callable]) -> Args:
    """Get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Run function from imported module',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The language choices are constrained to the discovered plugins.
    parser.add_argument('language',
                        type=str,
                        metavar='lang',
                        help='Language to use',
                        choices=plugins.keys())
    parser.add_argument('name',
                        type=str,
                        metavar='name',
                        help='Name to greet')
    args = parser.parse_args()
    return Args(args.language, args.name)
# --------------------------------------------------
def main():
    """Discover greet_* plugins, parse arguments, print the greeting."""
    cwd = os.path.realpath(__file__)
    plugin_dir = os.path.join(os.path.dirname(cwd), 'plugins')
    # Map <lang> -> imported plugins.greet_<lang> module.
    plugins = {
        name.replace('greet_', ''): importlib.import_module('plugins.' + name)
        for _, name, _ in pkgutil.iter_modules(path=[plugin_dir])
        if name.startswith('greet_')
    }
    args = get_args(plugins)
    print(plugins[args.language].greet(args.name))
# --------------------------------------------------
if __name__ == '__main__':
main() | en | 0.172707 | #!/usr/bin/env python3 Run function from imported module # -------------------------------------------------- Get command-line arguments # -------------------------------------------------- Make a jazz noise here # -------------------------------------------------- | 3.116658 | 3 |
tests/validators/test_global_maps.py | YVautrin/xmlschema | 95 | 6624155 | <filename>tests/validators/test_global_maps.py
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
import unittest
from xmlschema import XMLSchema10, XMLSchema11
from xmlschema.names import XSD_ELEMENT, XSD_STRING, XSD_SIMPLE_TYPE
class TestXsdGlobalsMaps(unittest.TestCase):
    """Tests for the global component maps of the XSD 1.0/1.1 meta-schemas.

    The hard-coded counts (158 globals for XSD 1.0, 183 for XSD 1.1) are
    specific to the meta-schema documents shipped with the library.
    """
    @classmethod
    def setUpClass(cls):
        # Build both meta-schemas once for the whole class.
        XMLSchema10.meta_schema.build()
        XMLSchema11.meta_schema.build()
    @classmethod
    def tearDownClass(cls):
        XMLSchema10.meta_schema.clear()
        XMLSchema11.meta_schema.clear()
    def test_maps_repr(self):
        self.assertEqual(
            repr(XMLSchema10.meta_schema.maps),
            "XsdGlobals(validator=MetaXMLSchema10(name='XMLSchema.xsd', "
            "namespace='http://www.w3.org/2001/XMLSchema'), validation='strict')"
        )
    def test_lookup(self):
        # Unknown qualified name -> KeyError.
        with self.assertRaises(KeyError):
            XMLSchema10.meta_schema.maps.lookup(XSD_ELEMENT, 'wrong')
        xs_string = XMLSchema10.meta_schema.maps.lookup(XSD_SIMPLE_TYPE, XSD_STRING)
        self.assertEqual(xs_string.name, XSD_STRING)
        # Invalid component tag -> ValueError.
        with self.assertRaises(ValueError):
            XMLSchema10.meta_schema.maps.lookup('simpleType', XSD_STRING)
    def test_clear(self):
        # Work on a copy so the shared meta-schema maps stay intact.
        maps = XMLSchema10.meta_schema.maps.copy()
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear(only_unbuilt=True)
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear()
        self.assertEqual(len(list(maps.iter_globals())), 0)
        maps.build()
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear(remove_schemas=True)
        self.assertEqual(len(list(maps.iter_globals())), 0)
        with self.assertRaises(ValueError):
            maps.build()  # missing XSD meta-schema
    def test_xsd_10_globals(self):
        self.assertEqual(len(XMLSchema10.meta_schema.maps.notations), 2)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.types), 92)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.attributes), 8)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.attribute_groups), 3)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.groups), 12)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.elements), 41)
        self.assertEqual(
            len([e.is_global() for e in XMLSchema10.meta_schema.maps.iter_globals()]), 158
        )
        self.assertEqual(len(XMLSchema10.meta_schema.maps.substitution_groups), 0)
    def test_xsd_11_globals(self):
        self.assertEqual(len(XMLSchema11.meta_schema.maps.notations), 2)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.types), 103)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.attributes), 14)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.attribute_groups), 4)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.groups), 13)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.elements), 47)
        self.assertEqual(
            len([e.is_global() for e in XMLSchema11.meta_schema.maps.iter_globals()]), 183
        )
        self.assertEqual(len(XMLSchema11.meta_schema.maps.substitution_groups), 1)
    def test_xsd_10_build(self):
        self.assertEqual(len([e for e in XMLSchema10.meta_schema.maps.iter_globals()]), 158)
        self.assertTrue(XMLSchema10.meta_schema.maps.built)
        # Rebuild from scratch and check the built flag is restored.
        XMLSchema10.meta_schema.maps.clear()
        XMLSchema10.meta_schema.maps.build()
        self.assertTrue(XMLSchema10.meta_schema.maps.built)
    def test_xsd_11_build(self):
        self.assertEqual(len([e for e in XMLSchema11.meta_schema.maps.iter_globals()]), 183)
        self.assertTrue(XMLSchema11.meta_schema.maps.built)
        XMLSchema11.meta_schema.maps.clear()
        XMLSchema11.meta_schema.maps.build()
        self.assertTrue(XMLSchema11.meta_schema.maps.built)
    def test_xsd_10_components(self):
        # Count every component reachable from the global definitions.
        total_counter = 0
        global_counter = 0
        for g in XMLSchema10.meta_schema.maps.iter_globals():
            for c in g.iter_components():
                total_counter += 1
                if c.is_global():
                    global_counter += 1
        self.assertEqual(global_counter, 158)
        self.assertEqual(total_counter, 808)
    def test_xsd_11_components(self):
        total_counter = 0
        global_counter = 0
        for g in XMLSchema11.meta_schema.maps.iter_globals():
            for c in g.iter_components():
                total_counter += 1
                if c.is_global():
                    global_counter += 1
        self.assertEqual(global_counter, 183)
        self.assertEqual(total_counter, 972)
    def test_xsd_11_restrictions(self):
        # XSD 1.1 'all' model content must restrict its base type content.
        all_model_type = XMLSchema11.meta_schema.types['all']
        self.assertTrue(
            all_model_type.content.is_restriction(all_model_type.base_type.content)
        )
if __name__ == '__main__':
import platform
header_template = "Test xmlschema's global maps with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
| <filename>tests/validators/test_global_maps.py
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
import unittest
from xmlschema import XMLSchema10, XMLSchema11
from xmlschema.names import XSD_ELEMENT, XSD_STRING, XSD_SIMPLE_TYPE
class TestXsdGlobalsMaps(unittest.TestCase):
    """Tests for the global maps of the XSD 1.0/1.1 meta-schemas; the
    hard-coded counts match the bundled meta-schema documents."""
    @classmethod
    def setUpClass(cls):
        XMLSchema10.meta_schema.build()
        XMLSchema11.meta_schema.build()
    @classmethod
    def tearDownClass(cls):
        XMLSchema10.meta_schema.clear()
        XMLSchema11.meta_schema.clear()
    def test_maps_repr(self):
        self.assertEqual(
            repr(XMLSchema10.meta_schema.maps),
            "XsdGlobals(validator=MetaXMLSchema10(name='XMLSchema.xsd', "
            "namespace='http://www.w3.org/2001/XMLSchema'), validation='strict')"
        )
    def test_lookup(self):
        with self.assertRaises(KeyError):
            XMLSchema10.meta_schema.maps.lookup(XSD_ELEMENT, 'wrong')
        xs_string = XMLSchema10.meta_schema.maps.lookup(XSD_SIMPLE_TYPE, XSD_STRING)
        self.assertEqual(xs_string.name, XSD_STRING)
        with self.assertRaises(ValueError):
            XMLSchema10.meta_schema.maps.lookup('simpleType', XSD_STRING)
    def test_clear(self):
        # Use a copy so the shared meta-schema maps stay intact.
        maps = XMLSchema10.meta_schema.maps.copy()
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear(only_unbuilt=True)
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear()
        self.assertEqual(len(list(maps.iter_globals())), 0)
        maps.build()
        self.assertEqual(len(list(maps.iter_globals())), 158)
        maps.clear(remove_schemas=True)
        self.assertEqual(len(list(maps.iter_globals())), 0)
        with self.assertRaises(ValueError):
            maps.build()  # missing XSD meta-schema
    def test_xsd_10_globals(self):
        self.assertEqual(len(XMLSchema10.meta_schema.maps.notations), 2)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.types), 92)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.attributes), 8)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.attribute_groups), 3)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.groups), 12)
        self.assertEqual(len(XMLSchema10.meta_schema.maps.elements), 41)
        self.assertEqual(
            len([e.is_global() for e in XMLSchema10.meta_schema.maps.iter_globals()]), 158
        )
        self.assertEqual(len(XMLSchema10.meta_schema.maps.substitution_groups), 0)
    def test_xsd_11_globals(self):
        self.assertEqual(len(XMLSchema11.meta_schema.maps.notations), 2)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.types), 103)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.attributes), 14)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.attribute_groups), 4)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.groups), 13)
        self.assertEqual(len(XMLSchema11.meta_schema.maps.elements), 47)
        self.assertEqual(
            len([e.is_global() for e in XMLSchema11.meta_schema.maps.iter_globals()]), 183
        )
        self.assertEqual(len(XMLSchema11.meta_schema.maps.substitution_groups), 1)
    def test_xsd_10_build(self):
        self.assertEqual(len([e for e in XMLSchema10.meta_schema.maps.iter_globals()]), 158)
        self.assertTrue(XMLSchema10.meta_schema.maps.built)
        XMLSchema10.meta_schema.maps.clear()
        XMLSchema10.meta_schema.maps.build()
        self.assertTrue(XMLSchema10.meta_schema.maps.built)
    def test_xsd_11_build(self):
        self.assertEqual(len([e for e in XMLSchema11.meta_schema.maps.iter_globals()]), 183)
        self.assertTrue(XMLSchema11.meta_schema.maps.built)
        XMLSchema11.meta_schema.maps.clear()
        XMLSchema11.meta_schema.maps.build()
        self.assertTrue(XMLSchema11.meta_schema.maps.built)
    def test_xsd_10_components(self):
        total_counter = 0
        global_counter = 0
        for g in XMLSchema10.meta_schema.maps.iter_globals():
            for c in g.iter_components():
                total_counter += 1
                if c.is_global():
                    global_counter += 1
        self.assertEqual(global_counter, 158)
        self.assertEqual(total_counter, 808)
    def test_xsd_11_components(self):
        total_counter = 0
        global_counter = 0
        for g in XMLSchema11.meta_schema.maps.iter_globals():
            for c in g.iter_components():
                total_counter += 1
                if c.is_global():
                    global_counter += 1
        self.assertEqual(global_counter, 183)
        self.assertEqual(total_counter, 972)
    def test_xsd_11_restrictions(self):
        all_model_type = XMLSchema11.meta_schema.types['all']
        self.assertTrue(
            all_model_type.content.is_restriction(all_model_type.base_type.content)
        )
if __name__ == '__main__':
import platform
header_template = "Test xmlschema's global maps with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
| en | 0.748873 | #!/usr/bin/env python # # Copyright (c), 2016-2020, SISSA (International School for Advanced Studies). # All rights reserved. # This file is distributed under the terms of the MIT License. # See the file 'LICENSE' in the root directory of the present # distribution, or http://opensource.org/licenses/MIT. # # @author <NAME> <<EMAIL>> # # missing XSD meta-schema | 2.302317 | 2 |
samples/sample_as_decorator.py | fabiofenoglio/py-load-limiter | 0 | 6624156 | import time
import random
import logging
from pyloadlimiter import LoadLimiter
logging.basicConfig(format='%(asctime)s %(threadName)s [%(name)s %(levelname)s] %(message)s', level=logging.DEBUG)
limiter = LoadLimiter(name='TestQueue80in20', maxload=80, period=20)
@limiter()
def do_things():
    """Cheap action guarded by the limiter with its default load."""
    logging.info('doing things!')
@limiter(load=5)
def do_expensive():
    """Action declared to cost 5 load units per call."""
    logging.info('doing expensive things!')
@limiter(load=15)
def do_really_expensive():
    """Action declared to cost 15 load units per call."""
    logging.info('doing REALLY expensive things!')
@limiter(load=15, wait=False)
def do_really_expensive_no_wait():
    """Same 15-unit cost but with wait=False -- the call may raise
    instead of blocking (see the try/except in the driver loop)."""
    logging.info('doing REALLY expensive things!')
# Demo driver: fire a random mix of cheap/expensive calls for 50 rounds.
for i in range(0, 50):
    r = random.randint(1, 10)
    if r <= 2:
        # The no-wait variant can fail when the limiter is saturated.
        try:
            do_really_expensive_no_wait()
        except Exception as e:
            logging.error('error calling do_really_expensive_no_wait: ', exc_info=1)
    elif r <= 4:
        do_really_expensive()
    elif r <= 6:
        do_expensive()
    else:
        do_things()
    sleep_factor = 1.0
    # Random pause of up to sleep_factor seconds between rounds.
    time.sleep(sleep_factor * random.randint(0, 1000)/1000)
| import time
import random
import logging
from pyloadlimiter import LoadLimiter
logging.basicConfig(format='%(asctime)s %(threadName)s [%(name)s %(levelname)s] %(message)s', level=logging.DEBUG)
limiter = LoadLimiter(name='TestQueue80in20', maxload=80, period=20)
@limiter()
def do_things():
logging.info('doing things!')
@limiter(load=5)
def do_expensive():
logging.info('doing expensive things!')
@limiter(load=15)
def do_really_expensive():
logging.info('doing REALLY expensive things!')
@limiter(load=15, wait=False)
def do_really_expensive_no_wait():
logging.info('doing REALLY expensive things!')
for i in range(0, 50):
r = random.randint(1, 10)
if r <= 2:
try:
do_really_expensive_no_wait()
except Exception as e:
logging.error('error calling do_really_expensive_no_wait: ', exc_info=1)
elif r <= 4:
do_really_expensive()
elif r <= 6:
do_expensive()
else:
do_things()
sleep_factor = 1.0
time.sleep(sleep_factor * random.randint(0, 1000)/1000)
| none | 1 | 2.583811 | 3 | |
data/f_mnist_data.py | duyndh/PixelDefend | 18 | 6624157 | <gh_stars>10-100
# Copyright (c) Microsoft Corporation. Licensed under the MIT license.
import os
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
def load_data(data_dir, subset):
    """Load Fashion-MNIST style data for a subset.

    Returns (data, labels): data is float with shape (N, 28, 28, 1) for the
    train split (rescaled to [0, 255]), labels are the class indices.
    Raises FileNotFoundError when the test batch file is missing and
    NotImplementedError for any other subset name.
    """
    if subset == 'train':
        mnist_data = input_data.read_data_sets(data_dir)
        images, labels = mnist_data.train.images, mnist_data.train.labels
        # Pixels arrive flattened in [0, 1]; restore HWC layout and 0-255 range.
        return np.reshape(images, [-1, 28, 28, 1]) * 255., labels
    if subset == 'test':
        batch_path = os.path.join(data_dir, 'test_batch')
        if not os.path.exists(batch_path):
            raise FileNotFoundError(os.path.join(data_dir, 'test_batch') + " Not found!")
        import pickle
        with open(batch_path, 'rb') as fin:
            d = pickle.load(fin, encoding='latin1')
        return (d['data'].reshape((-1, 28, 28, 1)),
                np.array(d['labels']).astype(np.uint8))
    raise NotImplementedError("subset {} is not supported!".format(subset))
class DataLoader(object):
    """Mini-batch iterator over a dataset loaded with load_data().

    - data_dir: directory where the dataset files are stored
    - subset: 'train' or 'test'
    - batch_size: number of examples fetched per iteration
    - rng: np.random.RandomState for reproducible shuffling
    - shuffle: permute the data once per pass when True
    - return_labels: yield (data, labels) tuples instead of data alone
    """

    def __init__(self, data_dir, subset, batch_size, rng=None, shuffle=False, return_labels=False):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.return_labels = return_labels

        # Create the storage folder on first use.
        if not os.path.exists(data_dir):
            print('creating folder', data_dir)
            os.makedirs(data_dir)

        self.data, self.labels = load_data(data_dir, subset)
        self.p = 0  # read cursor into self.data
        self.rng = np.random.RandomState(1) if rng is None else rng

    def get_observation_size(self):
        """Shape of a single example, e.g. (28, 28, 1)."""
        return self.data.shape[1:]

    def get_num_labels(self):
        """Number of classes, assuming labels are 0..max."""
        return np.amax(self.labels) + 1

    def reset(self):
        """Rewind the cursor to the start of the data."""
        self.p = 0

    def __iter__(self):
        return self

    def __next__(self, n=None):
        """Return the next batch of n examples (defaults to batch_size)."""
        count = self.batch_size if n is None else n

        # Lazily permute the whole dataset once at the start of each pass.
        if self.p == 0 and self.shuffle:
            order = self.rng.permutation(self.data.shape[0])
            self.data = self.data[order]
            self.labels = self.labels[order]

        # Exhausted: rewind for the next pass and signal the end.
        if self.p + count > self.data.shape[0]:
            self.reset()
            raise StopIteration

        batch = self.data[self.p: self.p + count]
        batch_labels = self.labels[self.p: self.p + count]
        # NOTE: the cursor always advances by batch_size, even when a
        # caller-supplied n overrides the batch length for this fetch.
        self.p += self.batch_size

        return (batch, batch_labels) if self.return_labels else batch

    next = __next__  # Python 2 compatibility
| # Copyright (c) Microsoft Corporation. Licensed under the MIT license.
import os
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
def load_data(data_dir, subset):
if subset == 'train':
mnist_data = input_data.read_data_sets(data_dir)
data, labels = mnist_data.train.images, mnist_data.train.labels
data = np.reshape(data, [-1, 28, 28, 1]) * 255.
return data, labels
elif subset == 'test':
if os.path.exists(os.path.join(data_dir, 'test_batch')):
import pickle
with open(os.path.join(data_dir, 'test_batch'), 'rb') as fin:
d = pickle.load(fin, encoding='latin1')
data = d['data'].reshape((-1, 28, 28, 1))
labels = np.array(d['labels']).astype(np.uint8)
return data, labels
else:
raise FileNotFoundError(os.path.join(data_dir, 'test_batch') + " Not found!")
else:
raise NotImplementedError("subset {} is not supported!".format(subset))
class DataLoader(object):
def __init__(self, data_dir, subset, batch_size, rng=None, shuffle=False, return_labels=False):
"""
- data_dir is location where to store files
- subset is train|test
- batch_size is int, of #examples to load at once
- rng is np.random.RandomState object for reproducibility
"""
self.data_dir = data_dir
self.batch_size = batch_size
self.shuffle = shuffle
self.return_labels = return_labels
# create temporary storage for the data, if not yet created
if not os.path.exists(data_dir):
print('creating folder', data_dir)
os.makedirs(data_dir)
self.data, self.labels = load_data(data_dir, subset)
self.p = 0
self.rng = np.random.RandomState(1) if rng is None else rng
def get_observation_size(self):
return self.data.shape[1:]
def get_num_labels(self):
return np.amax(self.labels) + 1
def reset(self):
self.p = 0
def __iter__(self):
return self
def __next__(self, n=None):
""" n is the number of examples to fetch """
if n is None: n = self.batch_size
# on first iteration lazily permute all data
if self.p == 0 and self.shuffle:
inds = self.rng.permutation(self.data.shape[0])
self.data = self.data[inds]
self.labels = self.labels[inds]
# on last iteration reset the counter and raise StopIteration
if self.p + n > self.data.shape[0]:
self.reset() # reset for next time we get called
raise StopIteration
# on intermediate iterations fetch the next batch
# make sure the dimension is (batch_size, 28, 28, 1)
x = self.data[self.p: self.p + n]
y = self.labels[self.p: self.p + n]
self.p += self.batch_size
if self.return_labels:
return x, y
else:
return x
next = __next__ # Python 2 compatibility (https://stackoverflow.com/questions/29578469/how-to-make-an-object-both-a-python2-and-python3-iterator) | en | 0.818301 | # Copyright (c) Microsoft Corporation. Licensed under the MIT license. - data_dir is location where to store files - subset is train|test - batch_size is int, of #examples to load at once - rng is np.random.RandomState object for reproducibility # create temporary storage for the data, if not yet created n is the number of examples to fetch # on first iteration lazily permute all data # on last iteration reset the counter and raise StopIteration # reset for next time we get called # on intermediate iterations fetch the next batch # make sure the dimension is (batch_size, 28, 28, 1) # Python 2 compatibility (https://stackoverflow.com/questions/29578469/how-to-make-an-object-both-a-python2-and-python3-iterator) | 3.053076 | 3 |
test/time_radius_batch.py | humanpose1/torch_radius_neighbors | 1 | 6624158 | # Goal: compare different implementation to see which one is faster
import torch
import matplotlib.pyplot as plt
import numpy as np
import neighbors
import time
from scipy.spatial import cKDTree
def compute_batch_radius_with_scipy(x, y, batch_x, batch_y, radius):
    """Reference batched radius search built on scipy's cKDTree.

    Each point gets a fourth coordinate of 2 * radius * batch_id, which
    pushes points from different batches farther apart than the search
    radius, so neighbours can only be found within the same batch.
    Returns, for each point of y, the list of x-indices within radius.
    """
    offset_x = 2 * radius * batch_x.view(-1, 1).to(x.dtype)
    offset_y = 2 * radius * batch_y.view(-1, 1).to(y.dtype)
    augmented_x = torch.cat([x, offset_x], dim=-1)
    augmented_y = torch.cat([y, offset_y], dim=-1)
    tree = cKDTree(augmented_x.detach().numpy())
    return tree.query_ball_point(augmented_y.detach().numpy(), radius)
if __name__ == "__main__":
    print("measure simply the time of execution radius search and compare with scipy")
    list_time_scipy = []
    list_time_nanoflann = []
    # Point-cloud sizes to benchmark: 30 steps between 10k and 200k points.
    list_size = np.linspace(10000, 200000, 30)
    for i, size in enumerate(list_size):
        radius = 0.1
        a = torch.randn(int(size), 3)
        # generate a random batch of 16 ids; sorted order is presumably
        # required by neighbors.batch_radius_search — confirm in its docs.
        b = torch.randint(0, 16, (int(size),))
        b = b.sort()[0]
        # Time the nanoflann-backed implementation.
        t0 = time.time()
        res = neighbors.batch_radius_search(a, a, b, b, radius, -1, 0)
        list_time_nanoflann.append(time.time()-t0)
        # Time the scipy cKDTree reference implementation on the same input.
        t0 = time.time()
        res = compute_batch_radius_with_scipy(a, a, b, b, radius)
        list_time_scipy.append(time.time()-t0)
    # Plot both timing curves against the point-cloud size.
    plt.plot(list_size, list_time_nanoflann, 'bo', label='with nanoflann')
    plt.plot(list_size, list_time_scipy, 'ro', label='with scipy')
    plt.title("time of execution for batch radius neighbors")
    plt.xlabel("size of the point cloud")
    plt.ylabel("time of execution")
    plt.legend()
    plt.show()
| # Goal: compare different implementation to see which one is faster
import torch
import matplotlib.pyplot as plt
import numpy as np
import neighbors
import time
from scipy.spatial import cKDTree
def compute_batch_radius_with_scipy(x, y, batch_x, batch_y, radius):
x_ = torch.cat([x, 2 * radius * batch_x.view(-1, 1).to(x.dtype)], dim=-1)
y_ = torch.cat([y, 2 * radius * batch_y.view(-1, 1).to(y.dtype)], dim=-1)
tree = cKDTree(x_.detach().numpy())
col = tree.query_ball_point(y_.detach().numpy(), radius)
return col
if __name__ == "__main__":
print("measure simply the time of execution radius search and compare with scipy")
list_time_scipy = []
list_time_nanoflann = []
list_size = np.linspace(10000, 200000, 30)
for i, size in enumerate(list_size):
radius = 0.1
a = torch.randn(int(size), 3)
# generate a random batch
b = torch.randint(0, 16, (int(size),))
b = b.sort()[0]
t0 = time.time()
res = neighbors.batch_radius_search(a, a, b, b, radius, -1, 0)
list_time_nanoflann.append(time.time()-t0)
t0 = time.time()
res = compute_batch_radius_with_scipy(a, a, b, b, radius)
list_time_scipy.append(time.time()-t0)
plt.plot(list_size, list_time_nanoflann, 'bo', label='with nanoflann')
plt.plot(list_size, list_time_scipy, 'ro', label='with scipy')
plt.title("time of execution for batch radius neighbors")
plt.xlabel("size of the point cloud")
plt.ylabel("time of execution")
plt.legend()
plt.show()
| en | 0.901472 | # Goal: compare different implementation to see which one is faster # generate a random batch | 2.818028 | 3 |
tailpos_sync/sync_pos.py | iRaySpace/tailpos-sync | 1 | 6624159 | from sync_methods import *
import frappe
import datetime
@frappe.whitelist()
def pull_data(data):
    """Return the names of all documents of data['doctype'].

    For the Item doctype, only enabled items are returned, together with
    their description and standard_rate.
    """
    # NOTE(review): data['doctype'] comes from the client and is formatted
    # straight into the SQL string, so a crafted value can inject SQL.
    # Validate it against frappe's known DocTypes before formatting.
    query = "SELECT name FROM `tab{0}`".format(data['doctype'])
    if data['doctype'] == "Item":
        query = "SELECT name, description, standard_rate FROM `tabItem` WHERE disabled=0"
    # Getting the resources
    res = frappe.db.sql(query, as_dict=True)
    return {"data": res}
@frappe.whitelist()
def sync_data(data):
    """Synchronise tailpos client records into ERPNext and return server changes.

    Pushes the client's records (honouring server-side deletions and
    modification timestamps), then returns the ERPNext-side changes to pull
    back, selected by data['typeOfSync'] ('forceSync' or 'sync').
    """
    # Check if there is Each and Weight in UOM
    uom_check()
    # Check if there are latest deleted records
    deleted_records = deleted_documents()
    # Delete records removed on the client
    delete_records(data['trashObject'])
    for i in range(0, len(data['tailposData'])):
        receipt_total = 0
        # Check if record is existing in deleted documents
        if deleted_records_check(data['tailposData'][i]['syncObject']['_id'], deleted_records):
            # BUGFIX: 'exist' was only assigned inside the try block, so a
            # failing query raised UnboundLocalError at the len(exist) check
            # below. Default to an empty result so a failed lookup falls
            # through to create_doc instead of crashing the whole sync.
            exist = ()
            try:
                exist = frappe.db.sql("SELECT * FROM" + "`tab" + data['tailposData'][i]['dbName'] + "` WHERE name=%s ", (data['tailposData'][i]['syncObject']['_id']))
            except Exception:
                print(frappe.get_traceback())
            if data['tailposData'][i]['dbName'] == "Receipts":
                # Add receipt lines and accumulate the receipt total
                receipt_total = add_receipt_lines(data['tailposData'], i)
            if len(exist) > 0:
                frappe_table = frappe.get_doc(data['tailposData'][i]['dbName'], data['tailposData'][i]['syncObject']['_id'])
            else:
                frappe_table = create_doc(data['tailposData'], i)
            # Check modified time: only write when the POS copy is newer
            update_data = check_modified(data['tailposData'][i]['syncObject']['dateUpdated'], frappe_table)
            if update_data:
                # Insert data
                insert_data(i, data['tailposData'], frappe_table, receipt_total)
    erpnext_data = ""
    if data['typeOfSync'] == "forceSync":
        # Fetch all data in ERPNext for selected tables
        erpnext_data = force_sync_from_erpnext_to_tailpos()
    elif data['typeOfSync'] == "sync":
        # Fetch Updated or Added data in ERPNext for selected tables
        erpnext_data = sync_from_erpnext_to_tailpos()
    return {"data": {"data": erpnext_data, "deleted_documents": deleted_records}}
def check_modified(data, frappe_table):
    """Decide whether the POS record is newer than the ERPNext document.

    data: client-side update time in milliseconds since the epoch.
    Returns True (and clears the document's date_updated field via db_set)
    when the document has no modified time or was modified before the POS
    update; returns False otherwise.
    """
    date_from_pos = datetime.datetime.fromtimestamp(data / 1000.0)
    # 'is None' instead of '== None'; both original branches performed the
    # same db_set + True, so they are collapsed into one condition.
    if frappe_table.modified is None or frappe_table.modified < date_from_pos:
        frappe_table.db_set("date_updated", None)
        return True
    return False
import frappe
import datetime
@frappe.whitelist()
def pull_data(data):
query = "SELECT name FROM `tab{0}`".format(data['doctype'])
if data['doctype'] == "Item":
query = "SELECT name, description, standard_rate FROM `tabItem` WHERE disabled=0"
# Getting the resources
res = frappe.db.sql(query, as_dict=True)
return {"data": res}
@frappe.whitelist()
def sync_data(data):
#Check if there is Each and Weight in UOM
uom_check()
#Check if there are latest deleted records
deleted_records = deleted_documents()
#Delete records
delete_records(data['trashObject'])
for i in range(0,len(data['tailposData'])):
receipt_total = 0
# Check if record is existing in deleted documents
if deleted_records_check(data['tailposData'][i]['syncObject']['_id'], deleted_records):
try:
exist = frappe.db.sql("SELECT * FROM" + "`tab" + data['tailposData'][i]['dbName'] + "` WHERE name=%s ", (data['tailposData'][i]['syncObject']['_id']))
except Exception:
print(frappe.get_traceback())
if data['tailposData'][i]['dbName'] == "Receipts":
#Add receipt lines
receipt_total = add_receipt_lines(data['tailposData'],i)
if len(exist) > 0:
frappe_table = frappe.get_doc(data['tailposData'][i]['dbName'], data['tailposData'][i]['syncObject']['_id'])
else:
frappe_table = create_doc(data['tailposData'],i)
#Check modified time
update_data = check_modified(data['tailposData'][i]['syncObject']['dateUpdated'],frappe_table)
if update_data:
#Insert data
insert_data(i,data['tailposData'],frappe_table,receipt_total)
erpnext_data = ""
if data['typeOfSync'] == "forceSync":
#Fetch all data in ERPNext for selected tables
erpnext_data = force_sync_from_erpnext_to_tailpos()
elif data['typeOfSync'] == "sync":
#Fetch Updated or Added data in ERPNext for selected tables
erpnext_data = sync_from_erpnext_to_tailpos()
return {"data": {"data": erpnext_data, "deleted_documents": deleted_records}}
def check_modified(data,frappe_table):
date_from_pos = datetime.datetime.fromtimestamp(data / 1000.0)
if frappe_table.modified == None:
update_data = True
frappe_table.db_set("date_updated", None)
else:
if frappe_table.modified < date_from_pos:
update_data = True
frappe_table.db_set('date_updated', None)
else:
update_data = False
return update_data | en | 0.874316 | # Getting the resources #Check if there is Each and Weight in UOM #Check if there are latest deleted records #Delete records # Check if record is existing in deleted documents #Add receipt lines #Check modified time #Insert data #Fetch all data in ERPNext for selected tables #Fetch Updated or Added data in ERPNext for selected tables | 2.319527 | 2 |
formlibrary/migrations/0002_auto_20171019_0107.py | sannleen/TolaActivity | 0 | 6624160 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-19 08:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3; avoid hand-editing once applied.

    initial = True

    dependencies = [
        ('formlibrary', '0001_initial'),
        ('indicators', '0001_initial'),
    ]

    operations = [
        # Attendance records may reference any number of disaggregation values.
        migrations.AddField(
            model_name='trainingattendance',
            name='disaggregation_value',
            field=models.ManyToManyField(blank=True, to='indicators.DisaggregationValue'),
        ),
        # Optional link to the indicator the training counts toward;
        # deleting the indicator cascades to the attendance record.
        migrations.AddField(
            model_name='trainingattendance',
            name='training_indicator',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='indicators.Indicator'),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-19 08:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('formlibrary', '0001_initial'),
('indicators', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='trainingattendance',
name='disaggregation_value',
field=models.ManyToManyField(blank=True, to='indicators.DisaggregationValue'),
),
migrations.AddField(
model_name='trainingattendance',
name='training_indicator',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='indicators.Indicator'),
),
]
| en | 0.751956 | # -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-10-19 08:07 | 1.456043 | 1 |
datasets/convert_dataset/generate_density_map.py | ZJU-lishuang/C-3-Framework | 0 | 6624161 | <reponame>ZJU-lishuang/C-3-Framework
#-*-encoding: utf-8 -*-
import glob
import os
import os.path as path
from PIL import Image
import scipy.io as scio
import numpy as np
import scipy.ndimage
import pickle
from tqdm import tqdm
import pdb
import json
import cv2
# gauss kernel
def gen_gauss_kernels(kernel_size=15, sigma=4):
    """Build a normalized 2-D Gaussian kernel.

    The kernel is produced by blurring a centered unit impulse and
    renormalizing, so its entries sum to exactly 1.
    """
    center = kernel_size // 2
    impulse = np.zeros((kernel_size, kernel_size), dtype=float)
    impulse[center, center] = 1
    # scipy.ndimage.filters was deprecated and removed in SciPy 1.15;
    # gaussian_filter lives directly in scipy.ndimage.
    kernel = scipy.ndimage.gaussian_filter(impulse, sigma, mode='constant')
    return kernel / kernel.sum()
def gaussian_filter_density(non_zero_points, map_h, map_w):
    """
    Fast gaussian filter implementation : using precomputed distances and kernels

    Stamps a fixed 15x15 Gaussian (sigma=4) onto an empty (map_h, map_w)
    float32 map at every annotated position, clipping at the borders.
    """
    gt_count = non_zero_points.shape[0]
    density_map = np.zeros((map_h, map_w), dtype=np.float32)

    # PERF: the kernel is identical for every point; build it once instead
    # of regenerating it on every loop iteration as the original did.
    half = 15 // 2
    kernel = gen_gauss_kernels(half * 2 + 1, 4)

    for i in range(gt_count):
        # Rows of non_zero_points hold (x, y): the second element indexes
        # rows of the map (clipped against map_h), the first indexes columns.
        point_y, point_x = non_zero_points[i]

        # Clip the stamp window to the map borders.
        # NOTE(review): the upper clips use map_h - 1 / map_w - 1, so the
        # last row/column never receives kernel mass — possible off-by-one,
        # kept as-is to preserve the generated ground truths.
        min_img_x = int(max(0, point_x - half))
        min_img_y = int(max(0, point_y - half))
        max_img_x = int(min(point_x + half + 1, map_h - 1))
        max_img_y = int(min(point_y + half + 1, map_w - 1))

        # Matching sub-window of the kernel for points near the border.
        kernel_x_min = int(half - point_x if point_x <= half else 0)
        kernel_y_min = int(half - point_y if point_y <= half else 0)
        kernel_x_max = int(kernel_x_min + max_img_x - min_img_x)
        kernel_y_max = int(kernel_y_min + max_img_y - min_img_y)

        density_map[min_img_x:max_img_x, min_img_y:max_img_y] += \
            kernel[kernel_x_min:kernel_x_max, kernel_y_min:kernel_y_max]
    return density_map
# Every output image side is rounded to a multiple of this value.
mod = 16
# Index -2 selects "NWPU" from the supported dataset list.
dataset = ['SHHA', 'SHHB', 'UCF-QNRF', 'UCF-CC-50', 'GCC', "NWPU", "JHU"][-2]

# root: dataset source folder; nroot: destination for the resized copies.
if dataset == 'SHHA':
    # ShanghaiTech_A
    root, nroot = path.join('ShanghaiTech_Crowd_Detecting', 'partA'), 'SHHA16'
elif dataset == 'SHHB':
    # ShanghaiTech_B
    root, nroot = path.join('ShanghaiTech_Crowd_Detecting', 'partB'), 'SHHB16'
elif dataset == 'UCF-QNRF':
    # UCF-QNRF
    root, nroot = '/home/lishuang/Disk/download/UCF-QNRF_ECCV18', 'UCF-QNRF_16'
elif dataset == 'UCF-CC-50':
    # UCF-CC-50
    root, nroot = 'UCF-CC-50', 'UCF-CC-50_16'
elif dataset == 'GCC':
    root, nroot = path.join('GCC', 'GCC-scene'), path.join('GCC-16')
elif dataset == 'NWPU':
    root, nroot = "/home/lishuang/Disk/download/NWPU-Crowd", "/home/lishuang/Disk/download/NWPU-Crowd1024"
elif dataset == 'JHU':
    root, nroot = "/home/lishuang/Disk/download/jhu_crowd_v2.0", "/home/lishuang/Disk/download/jhu_crowd_v2.0_1024"

# Collect the image paths; each dataset has a different folder layout.
if 'SHH' in dataset:
    # ShanghiTech A and B
    imgps = glob.glob(path.join(root, '*', 'img', '*.jpg'))
elif 'UCF' in dataset:
    # UCF-QNRF and UCF-CC-50
    imgps = glob.glob(path.join(root, '*', '*.jpg'))
elif 'GCC' in dataset:
    imgps = glob.glob(path.join(root, 'scene_*', 'pngs', '*.png'))
elif 'NWPU' in dataset:
    imgps = glob.glob(path.join(root, 'images', '*.jpg'))
elif 'JHU' in dataset:
    imgps = glob.glob(path.join(root, '*', 'images', '*.jpg'))

# Start offset into imgps, handy for resuming an interrupted run.
a = 0
for i, imgp in enumerate(imgps[a:]):
    print(f'[{i+a}]: {imgp}.')
    img = Image.open(imgp)
    img = img.convert('RGB')
    w, h = img.size

    # Load the head annotations as an (N, 2) array of pixel coordinates and
    # pick the destination folder; every dataset stores them differently.
    if 'SHH' in dataset:
        # ShanghiTech
        mat_path = imgp.replace('.jpg', '.mat').replace('img', 'ground_truth').replace('IMG_', 'GT_IMG_')
        imgNo = path.basename(imgp).replace('IMG_', '').replace('.jpg', '')
        nimgfold = path.join(nroot, 'train' if 'train' in imgp else 'test', 'img')
        matinfo = scio.loadmat(mat_path)
        gt = matinfo["image_info"][0, 0][0, 0][0].astype(int) - 1.
    elif 'UCF' in dataset:
        # UCF
        mat_path = imgp.replace('.jpg', '_ann.mat')
        imgNo = path.basename(imgp).replace('img_', '').replace('.jpg', '')
        if 'QNRF' in dataset:
            nimgfold = path.join(nroot, 'train' if 'Train' in imgp else 'test', 'img')
        else:
            nimgfold = path.join(nroot, 'all', 'img')
        matinfo = scio.loadmat(mat_path)
        gt = matinfo['annPoints'].astype(int) - 1.
    elif 'GCC' in dataset:
        mat_path = imgp.replace('.png', '.mat').replace('pngs', 'mats')
        imgNo = path.basename(imgp).replace('.png', '')
        matinfo = scio.loadmat(mat_path)
        gt = matinfo["image_info"][0, 0][0].astype(int)
        # Columns reversed so downstream sees the same coordinate order as
        # the other datasets — verify against the GCC .mat layout.
        gt = gt[:, ::-1]
        nimgfold = path.join(nroot, 'img')
    elif "NWPU" in dataset:
        json_path = imgp.replace('.jpg', '.json').replace('images/', 'jsons/')
        imgNo = path.basename(imgp).replace('.jpg', '')
        # Images without an annotation file are skipped entirely.
        if not os.path.exists(json_path):
            continue
        with open(json_path) as f:
            ImgInfo = json.load(f)
        gt = np.array(ImgInfo['points']).astype(int)
        nimgfold = path.join(nroot, 'img')
    elif "JHU" in dataset:
        txt_path = imgp.replace('.jpg', '.txt').replace('images/', 'gt/')
        imgNo = path.basename(imgp).replace('.jpg', '')
        with open(txt_path, 'r') as f:
            # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the documented replacement.
            l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=int)  # labels
        if len(l) > 0:
            gt = l[:, :2]
        else:
            gt = l
        if 'train' in imgp:
            nimgfold = path.join(nroot, 'train', 'img')
        elif 'val' in imgp:
            nimgfold = path.join(nroot, 'val', 'img')
        else:
            nimgfold = path.join(nroot, 'test', 'img')

    # New size: cap the long side at 1024 px, round both sides to mod.
    if max(w, h) > 1024:
        if w == max(w, h):
            nw, nh = 1024, round(h * 1024 / w / mod) * mod
        else:
            nh, nw = 1024, round(w * 1024 / h / mod) * mod
    else:
        nw, nh = round((w / mod)) * mod, round((h / mod)) * mod

    # new resized image save
    if not path.exists(nimgfold):
        os.makedirs(nimgfold)
    img.resize((nw, nh), Image.BILINEAR).save(path.join(nimgfold, imgNo + ('.jpg' if 'GCC' != dataset else '.png')))

    if len(gt) > 0:
        # Clamp the annotations into the image, then rescale to the new size.
        gt[:, 0] = gt[:, 0].clip(0, w - 1)
        gt[:, 1] = gt[:, 1].clip(0, h - 1)
        gt[:, 0] = (gt[:, 0] / w * nw).round().astype(int)
        gt[:, 1] = (gt[:, 1] / h * nh).round().astype(int)

    # new gt maps save
    # ngtfold = nimgfold.replace('img', 'mat')
    # if not path.exists(ngtfold):
    #     os.makedirs(ngtfold)
    # if "image_info" in matinfo:
    #     matinfo["image_info"][0,0][0,0][0] = gt
    # elif "annPoints" in matinfo:
    #     matinfo['annPoints'] = gt
    # scio.savemat(path.join(ngtfold, f'{imgNo}.mat'), matinfo)

    # new den csv save
    csvfold = nimgfold.replace('img', 'den')
    if not path.exists(csvfold):
        os.makedirs(csvfold)
    den = gaussian_filter_density(gt, nh, nw)
    np.savetxt(path.join(csvfold, f'{imgNo}.csv'), den, fmt="%.6f", delimiter=",")

    # Optionally dump a JET-colored heatmap of the density for inspection.
    SHOW_MASK = False
    if SHOW_MASK:
        heatmapshow = None
        heatmapshow = cv2.normalize(den, heatmapshow, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        heatmapshow = cv2.applyColorMap(heatmapshow, cv2.COLORMAP_JET)
        mgtfold = nimgfold.replace('img', 'mask')
        if not path.exists(mgtfold):
            os.makedirs(mgtfold)
        cv2.imwrite(path.join(mgtfold, f'{imgNo}.jpg'), heatmapshow)

    # Per-image completion marker (dump lost indentation; placement inside
    # the loop matches the per-image progress print above — confirm).
    print(f'-- OK --')
import glob
import os
import os.path as path
from PIL import Image
import scipy.io as scio
import numpy as np
import scipy.ndimage
import pickle
from tqdm import tqdm
import pdb
import json
import cv2
# gauss kernel
def gen_gauss_kernels(kernel_size=15, sigma=4):
kernel_shape = (kernel_size, kernel_size)
kernel_center = (kernel_size // 2, kernel_size // 2)
arr = np.zeros(kernel_shape).astype(float)
arr[kernel_center] = 1
arr = scipy.ndimage.filters.gaussian_filter(arr, sigma, mode='constant')
kernel = arr / arr.sum()
return kernel
def gaussian_filter_density(non_zero_points, map_h, map_w):
"""
Fast gaussian filter implementation : using precomputed distances and kernels
"""
gt_count = non_zero_points.shape[0]
density_map = np.zeros((map_h, map_w), dtype=np.float32)
for i in range(gt_count):
point_y, point_x = non_zero_points[i]
#print(point_x, point_y)
kernel_size = 15 // 2
kernel = gen_gauss_kernels(kernel_size * 2 + 1, 4)
min_img_x = int(max(0, point_x-kernel_size))
min_img_y = int(max(0, point_y-kernel_size))
max_img_x = int(min(point_x+kernel_size+1, map_h - 1))
max_img_y = int(min(point_y+kernel_size+1, map_w - 1))
#print(min_img_x, min_img_y, max_img_x, max_img_y)
kernel_x_min = int(kernel_size - point_x if point_x <= kernel_size else 0)
kernel_y_min = int(kernel_size - point_y if point_y <= kernel_size else 0)
kernel_x_max = int(kernel_x_min + max_img_x - min_img_x)
kernel_y_max = int(kernel_y_min + max_img_y - min_img_y)
#print(kernel_x_max, kernel_x_min, kernel_y_max, kernel_y_min)
density_map[min_img_x:max_img_x, min_img_y:max_img_y] += kernel[kernel_x_min:kernel_x_max, kernel_y_min:kernel_y_max]
return density_map
mod = 16
dataset = ['SHHA', 'SHHB', 'UCF-QNRF', 'UCF-CC-50', 'GCC',"NWPU","JHU"][-2]
if dataset == 'SHHA':
# ShanghaiTech_A
root, nroot = path.join('ShanghaiTech_Crowd_Detecting', 'partA'), 'SHHA16'
elif dataset == 'SHHB':
# ShanghaiTech_B
root, nroot = path.join('ShanghaiTech_Crowd_Detecting', 'partB'), 'SHHB16'
elif dataset == 'UCF-QNRF':
# UCF-QNRF
root, nroot = '/home/lishuang/Disk/download/UCF-QNRF_ECCV18', 'UCF-QNRF_16'
elif dataset == 'UCF-CC-50':
# UCF-CC-50
root, nroot = 'UCF-CC-50', 'UCF-CC-50_16'
elif dataset == 'GCC':
root, nroot = path.join('GCC', 'GCC-scene'), path.join('GCC-16')
elif dataset == 'NWPU':
root, nroot = "/home/lishuang/Disk/download/NWPU-Crowd","/home/lishuang/Disk/download/NWPU-Crowd1024"
elif dataset == 'JHU':
root, nroot = "/home/lishuang/Disk/download/jhu_crowd_v2.0", "/home/lishuang/Disk/download/jhu_crowd_v2.0_1024"
if 'SHH' in dataset:
# ShanghiTech A and B
imgps = glob.glob(path.join(root, '*', 'img', '*.jpg'))
elif 'UCF' in dataset:
#UCF-QNRF and UCF-CC-50
imgps = glob.glob(path.join(root, '*', '*.jpg'))
elif 'GCC' in dataset:
imgps = glob.glob(path.join(root, 'scene_*', 'pngs', '*.png'))
elif 'NWPU' in dataset:
imgps=glob.glob(path.join(root, 'images', '*.jpg'))
elif 'JHU' in dataset:
imgps=glob.glob(path.join(root, '*', 'images', '*.jpg'))
a = 0
for i, imgp in enumerate(imgps[a:]):
print(f'[{i+a}]: {imgp}.')
img = Image.open(imgp)
img = img.convert('RGB')
w, h = img.size
if 'SHH' in dataset:
# ShanghiTech
mat_path = imgp.replace('.jpg', '.mat').replace('img', 'ground_truth').replace('IMG_', 'GT_IMG_')
imgNo = path.basename(imgp).replace('IMG_', '').replace('.jpg', '')
nimgfold = path.join(nroot, 'train' if 'train' in imgp else 'test', 'img')
matinfo = scio.loadmat(mat_path)
gt = matinfo["image_info"][0,0][0,0][0].astype(int) - 1.
elif 'UCF' in dataset:
# UCF
mat_path = imgp.replace('.jpg', '_ann.mat')
imgNo = path.basename(imgp).replace('img_', '').replace('.jpg', '')
if 'QNRF' in dataset:
nimgfold = path.join(nroot, 'train' if 'Train' in imgp else 'test', 'img')
else:
nimgfold = path.join(nroot, 'all', 'img')
matinfo = scio.loadmat(mat_path)
gt = matinfo['annPoints'].astype(int) - 1.
elif 'GCC' in dataset:
mat_path = imgp.replace('.png', '.mat').replace('pngs', 'mats')
imgNo = path.basename(imgp).replace('.png', '')
matinfo = scio.loadmat(mat_path)
gt = matinfo["image_info"][0,0][0].astype(int)
gt = gt[:, ::-1]
nimgfold = path.join(nroot, 'img')
elif "NWPU" in dataset:
json_path=imgp.replace('.jpg', '.json').replace('images/', 'jsons/')
imgNo = path.basename(imgp).replace('.jpg', '')
if not os.path.exists(json_path):
continue
with open(json_path) as f:
ImgInfo = json.load(f)
gt=np.array(ImgInfo['points']).astype(int)
nimgfold = path.join(nroot, 'img')
elif "JHU" in dataset:
txt_path=imgp.replace('.jpg', '.txt').replace('images/', 'gt/')
imgNo = path.basename(imgp).replace('.jpg', '')
with open(txt_path, 'r') as f:
l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.int) # labels
if len(l) > 0:
gt=l[:,:2]
else:
gt=l
if 'train' in imgp:
nimgfold = path.join(nroot, 'train' , 'img')
elif 'val' in imgp:
nimgfold = path.join(nroot, 'val', 'img')
else:
nimgfold = path.join(nroot, 'test', 'img')
if max(w, h) > 1024:
if w == max(w, h):
nw, nh = 1024, round(h * 1024 / w / mod) * mod
else:
nh, nw = 1024, round(w * 1024 / h / mod) * mod
else:
nw, nh = round((w / mod)) * mod, round((h / mod)) * mod
# new resized image save
if not path.exists(nimgfold):
os.makedirs(nimgfold)
img.resize((nw, nh), Image.BILINEAR).save(path.join(nimgfold, imgNo + ('.jpg' if 'GCC' != dataset else '.png')))
if len(gt) > 0:
gt[:, 0] = gt[:, 0].clip(0, w - 1)
gt[:, 1] = gt[:, 1].clip(0, h - 1)
gt[:, 0] = (gt[:, 0] / w * nw).round().astype(int)
gt[:, 1] = (gt[:, 1] / h * nh).round().astype(int)
# new gt maps save
# ngtfold = nimgfold.replace('img', 'mat')
# if not path.exists(ngtfold):
# os.makedirs(ngtfold)
# if "image_info" in matinfo:
# matinfo["image_info"][0,0][0,0][0] = gt
# elif "annPoints" in matinfo:
# matinfo['annPoints'] = gt
# scio.savemat(path.join(ngtfold, f'{imgNo}.mat'), matinfo)
# new den csv save
csvfold = nimgfold.replace('img', 'den')
if not path.exists(csvfold):
os.makedirs(csvfold)
den = gaussian_filter_density(gt, nh, nw)
np.savetxt(path.join(csvfold, f'{imgNo}.csv'), den,fmt="%.6f", delimiter=",")
SHOW_MASK=False
if SHOW_MASK:
heatmapshow = None
heatmapshow = cv2.normalize(den, heatmapshow, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
heatmapshow = cv2.applyColorMap(heatmapshow, cv2.COLORMAP_JET)
mgtfold = nimgfold.replace('img', 'mask')
if not path.exists(mgtfold):
os.makedirs(mgtfold)
cv2.imwrite(path.join(mgtfold, f'{imgNo}.jpg'),heatmapshow)
print(f'-- OK --') | en | 0.437501 | #-*-encoding: utf-8 -*- # gauss kernel Fast gaussian filter implementation : using precomputed distances and kernels #print(point_x, point_y) #print(min_img_x, min_img_y, max_img_x, max_img_y) #print(kernel_x_max, kernel_x_min, kernel_y_max, kernel_y_min) # ShanghaiTech_A # ShanghaiTech_B # UCF-QNRF # UCF-CC-50 # ShanghiTech A and B #UCF-QNRF and UCF-CC-50 # ShanghiTech # UCF # labels # new resized image save # new gt maps save # ngtfold = nimgfold.replace('img', 'mat') # if not path.exists(ngtfold): # os.makedirs(ngtfold) # if "image_info" in matinfo: # matinfo["image_info"][0,0][0,0][0] = gt # elif "annPoints" in matinfo: # matinfo['annPoints'] = gt # scio.savemat(path.join(ngtfold, f'{imgNo}.mat'), matinfo) # new den csv save | 2.334446 | 2 |
nepal_forex/conf.py | sbishnu019/nepal-forex | 1 | 6624162 | from nepal_forex.datasets import CURRENCY_DATA
NRB_API = 'https://archive.nrb.org.np/exportForexJSON.php'
SUPPORTED_CURRENCIES = [
key for key in CURRENCY_DATA.keys()
]
| from nepal_forex.datasets import CURRENCY_DATA
NRB_API = 'https://archive.nrb.org.np/exportForexJSON.php'
SUPPORTED_CURRENCIES = [
key for key in CURRENCY_DATA.keys()
]
| none | 1 | 1.475188 | 1 | |
tests/test_process_data.py | fluothunder/data-in-python | 0 | 6624163 | <reponame>fluothunder/data-in-python<filename>tests/test_process_data.py
from pandas._testing import assert_frame_equal
import psda.process.process_data
from variables_for_test_process_data import *
def test_create_dict_of_districts():
new_dict = psda.process.process_data.create_dict_of_districts(df1)
# compare to expected_dict1:
assert new_dict.keys() == expected_dict1.keys()
keys_of_dicts = new_dict.keys()
for ky in keys_of_dicts:
assert_frame_equal(new_dict[ky], expected_dict1[ky])
def test_compute_ratio():
assert_frame_equal(psda.process.process_data.compute_ratio(df2), expected_df2)
| from pandas._testing import assert_frame_equal
import psda.process.process_data
from variables_for_test_process_data import *
def test_create_dict_of_districts():
new_dict = psda.process.process_data.create_dict_of_districts(df1)
# compare to expected_dict1:
assert new_dict.keys() == expected_dict1.keys()
keys_of_dicts = new_dict.keys()
for ky in keys_of_dicts:
assert_frame_equal(new_dict[ky], expected_dict1[ky])
def test_compute_ratio():
assert_frame_equal(psda.process.process_data.compute_ratio(df2), expected_df2) | en | 0.873869 | # compare to expected_dict1: | 2.525064 | 3 |
attys-scope/ecg_analysis.py | glasgowneuro/attys-python-examples | 1 | 6624164 | import numpy as np
from scipy import signal as signal
"""
ECG R-peak detector and heart rate detector
The detector uses the matched filter approach by creating
an IIR bandpass filter which looks like an R peak and
thus is a recursive matched filter. One could also say
it's a causal wavelet or perhaps just a bandpass filter
which covers the frequency range of the R peak. It's all
the same in different forms!
As an input the detector just gets the data samples
at a given sampling rate and then it detects the r-peak and
heart rate from it.
It also has a callback function which is called when
a heartbeat is detected (implemented as a listener).
"""
class heartrate_detector:
    """Matched-filter R-peak detector and heart-rate estimator.

    Feed raw ECG samples (in V) to detect() one at a time at the sampling
    rate given to the constructor; the latest heart rate (BPM) is kept in
    self.bpm.
    """

    def __init__(self, _fs):
        """Create a detector for a stream sampled at _fs Hz."""
        # How fast the adaptive threshold follows changes in ECG amplitude.
        # Realistic values 0.1 .. 1.0:
        #   0.1 = slow recovery after an artefact but no wrong detections
        #   1.0 = fast recovery after an artefact but possibly wrong detections
        self.adaptive_threshold_decay_constant = 0.25
        # Detection threshold is 0.6 x the tracked peak amplitude.
        self.threshold_factor = 0.6
        # Above this level the bandpass-filtered ECG is treated as an artefact.
        self.artefact_threshold = 1
        # Ignore the first 1000 samples to let the filter settle.
        self.ignoreECGdetector = 1000
        # Adaptive amplitude estimate of the detector output.
        self.amplitude = 0.0
        # Sample counter and timestamp of the previous R peak.
        # BUG FIX: these initialisations sat unreachably after the `return`
        # inside getAmplitude(), so detect() crashed on first real use.
        self.timestamp = 0
        self.t2 = 0
        # Refractory window in which no R peak is detected.
        self.doNotDetect = 0
        self.ignoreRRvalue = 2
        # 2nd-order Butterworth bandpass around the R-peak band (12.5-27.5 Hz).
        center = 20
        width = 15
        f1 = center - width / 2
        f2 = center + width / 2
        self.bp_b, self.bp_a = signal.butter(
            2, [f1 / _fs * 2.0, f2 / _fs * 2.0], 'bandpass')
        # Filter state (delay lines) so incoming data can be streamed
        # through sample by sample.
        self.bp_z = signal.lfiltic(self.bp_b, self.bp_a, [0])
        # Sampling rate in Hz.
        self.samplingRateInHz = _fs
        # Heart rate in BPM.
        self.bpm = 0

    def getAmplitude(self):
        """Return the current adaptive amplitude estimate."""
        # BUG FIX: the original returned the undefined bare name `amplitude`.
        return self.amplitude

    def detect(self, v):
        """Process one ECG sample (V at the configured rate); update self.bpm."""
        h, self.bp_z = signal.lfilter(self.bp_b, self.bp_a, [v], zi=self.bp_z)
        if self.ignoreECGdetector > 0:
            self.ignoreECGdetector -= 1
            return
        h = h * h
        if np.sqrt(h) > self.artefact_threshold:
            # Artefact: mute the detector for one second.
            # BUG FIX: the original assigned to bare locals here, so the
            # mute never took effect and `samplingRateInHz` raised NameError.
            self.ignoreECGdetector = self.samplingRateInHz
            self.ignoreRRvalue = 2
            return
        if h > self.amplitude:
            self.amplitude = h
        self.amplitude = self.amplitude - (
            self.adaptive_threshold_decay_constant * self.amplitude
            / self.samplingRateInHz)
        if self.doNotDetect > 0:
            self.doNotDetect -= 1
        else:
            self.threshold = self.threshold_factor * self.amplitude
            if h > self.threshold:
                t = (self.timestamp - self.t2) / self.samplingRateInHz
                tbpm = 1 / t * 60 if t > 0 else 0
                if 30 < tbpm < 250:
                    if self.ignoreRRvalue > 0:
                        self.ignoreRRvalue -= 1
                    else:
                        self.bpm = tbpm
                        print(self.bpm, " BPM")
                else:
                    # Implausible interval: demand a few good beats again.
                    self.ignoreRRvalue = 3
                self.t2 = self.timestamp
                # Refractory period of 1/5 s.
                self.doNotDetect = self.samplingRateInHz / 5
        self.timestamp += 1
| import numpy as np
from scipy import signal as signal
"""
ECG R-peak detector and heart rate detector
The detector uses the matched filter approach by creating
an IIR bandpass filter which looks like an R peak and
thus is a recursive matched filter. One could also say
it's a causal wavelet or perhaps just a bandpass filter
which covers the frequency range of the R peak. It's all
the same in different forms!
As an input the detector just gets the data samples
at a given sampling rate and then it detects the r-peak and
heart rate from it.
It also has a callback function which is called when
a heartbeat is detected (implemented as a listener).
"""
class heartrate_detector:
def __init__(self,_fs):
# how fast the adaptive threshold follows changes in ECG
# amplitude. Realisic values: 0.1 .. 1.0
# 0.1 = slow recovery after an artefact but no wrong detections
# 1 = fast recovery after an artefact but possibly wrong detections
self.adaptive_threshold_decay_constant = 0.25
# the threshold for the detection is 0.6 times smaller than the amplitude
self.threshold_factor = 0.6
# 0.5mV as the thereshold the bandpass filtered ECG is an artefact
self.artefact_threshold = 1
# ignores 1000 samples to let the filter settle
self.ignoreECGdetector = 1000
#adaptive amplitude value of the detector output
self.amplitude = 0.0
def getAmplitude(self):
return amplitude
self.timestamp = 0
# previous timestamp
self.t2 = 0
# timewindow not to detect an R peak
self.doNotDetect = 0
self.ignoreRRvalue = 2
# create a 2nd order order bandpass filter
center = 20
width = 15
f1 = center-width/2
f2 = center+width/2
self.bp_b, self.bp_a = signal.butter(2, [f1/_fs*2.0, f2/_fs*2.0 ], 'bandpass')
# create the memory of the filter (=delay lines) so that
# incoming data can be shifted through sample by sample
self.bp_z = signal.lfiltic(self.bp_b, self.bp_a, [0])
# sampling rate in Hz
self.samplingRateInHz = _fs
# heartrate in BPM
self.bpm = 0;
# detect r peaks
# input: ECG samples at the specified sampling rate and in V
def detect(self,v):
#print('v=',v)
h,self.bp_z = signal.lfilter(self.bp_b, self.bp_a, [v], zi = self.bp_z)
if (self.ignoreECGdetector > 0):
self.ignoreECGdetector = self.ignoreECGdetector - 1
return
h = h * h
if (np.sqrt(h) > self.artefact_threshold):
# ignore signal for 1 sec
ignoreECGdetector = samplingRateInHz;
ignoreRRvalue = 2
return
if (h > self.amplitude):
self.amplitude = h
self.amplitude = self.amplitude - self.adaptive_threshold_decay_constant * self.amplitude / self.samplingRateInHz
if (self.doNotDetect > 0):
self.doNotDetect = self.doNotDetect - 1
else:
self.threshold = self.threshold_factor * self.amplitude
if (h > self.threshold):
t = (self.timestamp - self.t2) / self.samplingRateInHz;
if t>0:
tbpm = 1 / t * 60
else:
tbpm = 0
if ((tbpm > 30) and (tbpm < 250)):
if (self.ignoreRRvalue > 0):
self.ignoreRRvalue = self.ignoreRRvalue - 1
else:
self.bpm = tbpm
print(self.bpm," BPM")
else:
self.ignoreRRvalue = 3
self.t2 = self.timestamp
# advoid 1/5 sec
self.doNotDetect = self.samplingRateInHz / 5;
self.timestamp = self.timestamp + 1
| en | 0.915713 | ECG R-peak detector and heart rate detector The detector uses the matched filter approach by creating an IIR bandpass filter which looks like an R peak and thus is a recursive matched filter. One could also say it's a causal wavelet or perhaps just a bandpass filter which covers the frequency range of the R peak. It's all the same in different forms! As an input the detector just gets the data samples at a given sampling rate and then it detects the r-peak and heart rate from it. It also has a callback function which is called when a heartbeat is detected (implemented as a listener). # how fast the adaptive threshold follows changes in ECG # amplitude. Realisic values: 0.1 .. 1.0 # 0.1 = slow recovery after an artefact but no wrong detections # 1 = fast recovery after an artefact but possibly wrong detections # the threshold for the detection is 0.6 times smaller than the amplitude # 0.5mV as the thereshold the bandpass filtered ECG is an artefact # ignores 1000 samples to let the filter settle #adaptive amplitude value of the detector output # previous timestamp # timewindow not to detect an R peak # create a 2nd order order bandpass filter # create the memory of the filter (=delay lines) so that # incoming data can be shifted through sample by sample # sampling rate in Hz # heartrate in BPM # detect r peaks # input: ECG samples at the specified sampling rate and in V #print('v=',v) # ignore signal for 1 sec # advoid 1/5 sec | 3.443749 | 3 |
mvnx/__init__.py | edscher/mvnx | 7 | 6624165 | import numpy as np
from mvnx.models import MVNX
import argparse
import errno
import os
import warnings
import traceback
def load(*args, **kwargs):
    """Parse an MVNX file: thin wrapper around mvnx.models.MVNX(*args, **kwargs)."""
    return MVNX(*args, **kwargs)
def main():
    """CLI entry point: parse an MVNX file (optionally one modality) to .npy/.npz."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", help="the MVNX file to parse")
    parser.add_argument("-m", "--modality", help="the modality to parse")
    parser.add_argument("-o", "--output", help="filepath to save parsed data to (saves as .npy)")
    parser.add_argument("-c", "--compress", help="Compress the saved object")
    args = parser.parse_args()

    if args.file is None and args.modality is None:
        parser.print_help()
        return

    # BUG FIX: the default output path used to be derived from args.file
    # *before* checking it, so "-m <mod>" without "-f" crashed with
    # AttributeError instead of reaching the FileNotFoundError below.
    if not args.output and args.file:
        args.output = f'{args.file.split(".")[0]}.npy'

    try:
        if args.file:
            print(f'Writing {args.file} to {args.output}')
            mvnx = MVNX(args.file)
            if args.modality:
                modality = mvnx.parse_modality(args.modality)
                if args.compress:
                    np.savez_compressed(args.output, modality)
                else:
                    np.save(args.output, modality)
            elif args.output:
                if args.compress:
                    np.savez_compressed(args.output, mvnx)
                else:
                    np.save(args.output, mvnx)
            else:
                warnings.warn('No output location selected, printing to terminal instead')
        else:
            # A modality was requested without an input file.
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), args.file)
    except Exception:
        traceback.print_exc()
| import numpy as np
from mvnx.models import MVNX
import argparse
import errno
import os
import warnings
import traceback
def load(*args, **kwargs):
return MVNX(*args, **kwargs)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help="the MVNX file to parse")
parser.add_argument("-m", "--modality", help="the modality to parse")
parser.add_argument("-o", "--output", help="filepath to save parsed data to (saves as .npy)")
parser.add_argument("-c", "--compress", help="Compress the saved object")
args = parser.parse_args()
if not args.output:
args.output = f'{args.file.split(".")[0]}.npy'
if (args.file == None and args.modality == None):
parser.print_help()
else:
try:
if args.file:
print(f'Writing {args.file} to {args.output}')
mvnx = MVNX(args.file)
if args.modality:
modality = mvnx.parse_modality(args.modality)
if args.compress:
np.savez_compressed(args.output, modality)
else:
np.save(args.output, modality)
elif args.output:
if args.compress:
np.savez_compressed(args.output, mvnx)
else:
np.save(args.output, mvnx)
else:
warnings.warn('No output location selected, printing to terminal instead')
else:
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), args.file)
except Exception as e:
traceback.print_exc()
if __name__ == "__main__":
main()
| none | 1 | 2.251492 | 2 | |
features/hooks/trie.py | caktux/pyethereum | 1 | 6624166 | import os
from os import path
class TrieHook(object):
    """behave hooks that give each feature an isolated on-disk trie database."""

    # Throw-away LevelDB location used only by the tests.
    db_dir = "tmp"
    db_file_name = "trie-test.db"

    def __init__(self):
        self.db_path = path.join(self.db_dir, self.db_file_name)

    def before_feature(self, context, feature):
        """Create a fresh Trie for the feature and expose it as context.trie."""
        # Imported lazily so merely loading this hooks module does not
        # require pyethereum to be installed.
        from pyethereum import trie
        self._create_dir()
        self._delete_db()
        context.trie = trie.Trie(self.db_path)
        self._load_fixture()

    def after_feature(self, context, feature):
        """Drop the trie and destroy its database once the feature is done."""
        del context.trie
        self._delete_db()

    def _create_dir(self):
        # Make sure the database directory exists before LevelDB touches it.
        if not path.exists(self.db_dir):
            os.mkdir(self.db_dir)

    def _delete_db(self):
        import leveldb
        leveldb.DestroyDB(self.db_path)

    def _load_fixture(self):
        # Hook point for preloading data into the trie; intentionally empty.
        pass

    def before_scenario(self, context, scenario):
        pass

    def after_scenario(self, context, scenario):
        pass
hook = TrieHook()
| import os
from os import path
class TrieHook(object):
db_dir = "tmp"
db_file_name = "trie-test.db"
def __init__(self):
self.db_path = path.join(self.db_dir, self.db_file_name)
def before_feature(self, context, feature):
from pyethereum import trie
self._create_dir()
self._delete_db()
context.trie = trie.Trie(self.db_path)
self._load_fixture()
def after_feature(self, context, feature):
del context.trie
self._delete_db()
def _create_dir(self):
if not path.exists(self.db_dir):
os.mkdir(self.db_dir)
def _delete_db(self):
import leveldb
leveldb.DestroyDB(self.db_path)
def _load_fixture(self):
pass
def before_scenario(self, context, scenario):
pass
def after_scenario(self, context, scenario):
pass
hook = TrieHook()
| none | 1 | 2.227122 | 2 | |
colortest.py | IGBC/scripts | 0 | 6624167 | <reponame>IGBC/scripts<gh_stars>0
#!/usr/bin/env python
import sys

# Show every ANSI rendition: attribute codes 0-9 against foreground
# colours 30-38 and background colours 40-48, each cell labelled with
# its own escape codes.
write = sys.stdout.write
for background in range(40, 49):
    for foreground in range(30, 39):
        for attribute in range(10):
            write("\33[%d;%d;%dm%d;%d;%d\33[m " %
                  (attribute, foreground, background,
                   attribute, foreground, background))
        write("\n")
    write("\n")
| #!/usr/bin/env python
import sys
write = sys.stdout.write
for i in range(40, 49):
for j in range(30, 39):
for k in range(0, 10):
write("\33[%d;%d;%dm%d;%d;%d\33[m " % (k, j, i, k, j, i))
write("\n")
write("\n") | ru | 0.26433 | #!/usr/bin/env python | 2.775143 | 3 |
run_docs.py | fvbehr/cmdx | 128 | 6624168 | """Run each example in the README as a doctest"""
| """Run each example in the README as a doctest"""
| en | 0.91144 | Run each example in the README as a doctest | 1.265596 | 1 |
actions/formatters.py | seekM/talon_new_api_starter_pack | 1 | 6624169 | from talon import Module, Context, actions, ui
from talon.voice import Capture
from ..utils import sentence_text, text, parse_word, parse_words
ctx = Context()
key = actions.key
words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(",")
def surround(by):
    """Build a per-word formatter that wraps the whole phrase in *by*.

    The returned callable takes (index, word, is_last) and prefixes the
    first word and suffixes the last word with *by*.
    """
    def wrap(index, word, is_last):
        prefix = by if index == 0 else ""
        suffix = by if is_last else ""
        return prefix + word + suffix
    return wrap
def get_formatted_string(words, fmt):
    """Apply the single formatter named `fmt` to `words`; return the joined phrase."""
    tmp = []
    spaces = True
    for i, w in enumerate(words):
        w = parse_word(w)
        # Each formatters entry is (smash, func); smash=True means the
        # formatter wants no separator between words.
        smash, func = formatters[fmt]
        w = func(i, w, i == len(words) - 1)
        spaces = spaces and not smash
        tmp.append(w)
    words = tmp
    # Join with spaces unless the formatter is a smashing one.
    sep = " "
    if not spaces:
        sep = ""
    return sep.join(words)
def FormatText(m):
    """Apply every formatter word spoken in `m` to the dictated (or selected) text.

    NOTE(review): `Word`, `clip` and `edit` are referenced but never imported
    in this file - this legacy-API code path will raise NameError if it runs.
    """
    fmt = []
    # A trailing "over" terminates the command and is not a formatter name.
    if m._words[-1] == "over":
        m._words = m._words[:-1]
    for w in m._words:
        if isinstance(w, Word):
            fmt.append(w.word)
    try:
        words = parse_words(m)
    except AttributeError:
        # No dictation captured: fall back to formatting the current selection.
        with clip.capture() as s:
            edit.copy()
        words = s.get().split(" ")
    if not words:
        return
    tmp = []
    spaces = True
    for i, w in enumerate(words):
        w = parse_word(w)
        # Apply the spoken formatters right-to-left, like function composition.
        for name in reversed(fmt):
            smash, func = formatters[name]
            w = func(i, w, i == len(words) - 1)
            spaces = spaces and not smash
        tmp.append(w)
    words = tmp
    sep = " "
    if not spaces:
        sep = ""
    actions.insert(sep.join(words))
formatters = {
# True -> no separator
"dunder": (True, lambda i, word, _: "__%s__" % word if i == 0 else word),
"camel": (True, lambda i, word, _: word if i == 0 else word.capitalize()),
"hammer" : (True, lambda i, word, _: word.capitalize()),
"snake": (True, lambda i, word, _: word.lower() if i == 0 else "_" + word.lower()),
"smash": (True, lambda i, word, _: word),
"kebab": (True, lambda i, word, _: word if i == 0 else "-" + word),
"packed": (True, lambda i, word, _: word if i == 0 else "::" + word),
"allcaps": (False, lambda i, word, _: word.upper()),
"alldown": (False, lambda i, word, _: word.lower()),
"dubstring": (False, surround('"')),
"string": (False, surround("'")),
"padded": (False, surround(" ")),
"dotted": (True, lambda i, word, _: word if i == 0 else "." + word),
"slasher": (True, lambda i, word, _: "/" + word),
"sentence": (False, lambda i, word, _: word.capitalize() if i == 0 else word),
"title": (False, lambda i, word, _: word.capitalize() if i == 0 or word not in words_to_keep_lowercase else word)
}
mod = Module()
mod.list('formatters', desc='list of formatters')
@mod.action_class
class Actions:
    def to_sentence(m: Capture):
        """Insert the captured phrase as sentence-cased text."""
        sentence_text(m)

    def to_text(m: Capture):
        """Insert the captured phrase as plain text."""
        text(m)
@ctx.capture('format_text', rule='{self.formatters} <dgndictation>')
def format_text(m):
    # m._words[0] is the spoken formatter name (also in m.formatters[0]);
    # the remaining words are the phrase to be formatted.
    return get_formatted_string(m._words[1:], m.formatters[0])
ctx.lists['self.formatters'] = formatters.keys()
| from talon import Module, Context, actions, ui
from talon.voice import Capture
from ..utils import sentence_text, text, parse_word, parse_words
ctx = Context()
key = actions.key
words_to_keep_lowercase = "a,an,the,at,by,for,in,is,of,on,to,up,and,as,but,or,nor".split(",")
def surround(by):
def func(i, word, last):
if i == 0:
word = by + word
if last:
word += by
return word
return func
def get_formatted_string(words, fmt):
tmp = []
spaces = True
for i, w in enumerate(words):
w = parse_word(w)
smash, func = formatters[fmt]
w = func(i, w, i == len(words) - 1)
spaces = spaces and not smash
tmp.append(w)
words = tmp
sep = " "
if not spaces:
sep = ""
return sep.join(words)
def FormatText(m):
fmt = []
if m._words[-1] == "over":
m._words = m._words[:-1]
for w in m._words:
if isinstance(w, Word):
fmt.append(w.word)
try:
words = parse_words(m)
except AttributeError:
with clip.capture() as s:
edit.copy()
words = s.get().split(" ")
if not words:
return
tmp = []
spaces = True
for i, w in enumerate(words):
w = parse_word(w)
for name in reversed(fmt):
smash, func = formatters[name]
w = func(i, w, i == len(words) - 1)
spaces = spaces and not smash
tmp.append(w)
words = tmp
sep = " "
if not spaces:
sep = ""
actions.insert(sep.join(words))
formatters = {
# True -> no separator
"dunder": (True, lambda i, word, _: "__%s__" % word if i == 0 else word),
"camel": (True, lambda i, word, _: word if i == 0 else word.capitalize()),
"hammer" : (True, lambda i, word, _: word.capitalize()),
"snake": (True, lambda i, word, _: word.lower() if i == 0 else "_" + word.lower()),
"smash": (True, lambda i, word, _: word),
"kebab": (True, lambda i, word, _: word if i == 0 else "-" + word),
"packed": (True, lambda i, word, _: word if i == 0 else "::" + word),
"allcaps": (False, lambda i, word, _: word.upper()),
"alldown": (False, lambda i, word, _: word.lower()),
"dubstring": (False, surround('"')),
"string": (False, surround("'")),
"padded": (False, surround(" ")),
"dotted": (True, lambda i, word, _: word if i == 0 else "." + word),
"slasher": (True, lambda i, word, _: "/" + word),
"sentence": (False, lambda i, word, _: word.capitalize() if i == 0 else word),
"title": (False, lambda i, word, _: word.capitalize() if i == 0 or word not in words_to_keep_lowercase else word)
}
mod = Module()
mod.list('formatters', desc='list of formatters')
@mod.action_class
class Actions:
def to_sentence(m: Capture):
"""Sentence formatter"""
sentence_text(m)
def to_text(m: Capture):
"""text formatter"""
text(m)
@ctx.capture('format_text', rule='{self.formatters} <dgndictation>')
def format_text(m):
return get_formatted_string(m._words[1:], m.formatters[0])
ctx.lists['self.formatters'] = formatters.keys()
| en | 0.27142 | # True -> no separator Sentence formatter text formatter | 2.954164 | 3 |
Iniciante-Begginer/Python/#1051 - #1100/#1071-SomaDeImparesConsecutivosI.py | jocelinoFG017/Beecrowd-judge-solutions | 2 | 6624170 | <filename>Iniciante-Begginer/Python/#1051 - #1100/#1071-SomaDeImparesConsecutivosI.py
X = int(input())
Y = int(input())
soma = 0
if X % 2 == 0:
for i in range(X, Y, -1):
if i % 2 != 0:
soma = soma + i
else:
X -= 1
for i in range(X, Y, -1):
if i % 2 != 0:
soma = soma + i
print(soma)
| <filename>Iniciante-Begginer/Python/#1051 - #1100/#1071-SomaDeImparesConsecutivosI.py
X = int(input())
Y = int(input())
soma = 0
if X % 2 == 0:
for i in range(X, Y, -1):
if i % 2 != 0:
soma = soma + i
else:
X -= 1
for i in range(X, Y, -1):
if i % 2 != 0:
soma = soma + i
print(soma)
| es | 0.262093 | #1051 - #1100/#1071-SomaDeImparesConsecutivosI.py | 3.524248 | 4 |
test.py | NPS-Cisco-2019/OCR | 0 | 6624171 | import argparse
import os
import time
import cv2 as cv;
from nms import nms;
from math import degrees, sin, cos
from matplotlib import pyplot as plt
import numpy as np
from config import *
from text_from_im import text_from_image_path
# Smoke-test OCR over images/1.png .. images/99.png, logging each attempt;
# stop at the first failure and print the exception.
initLog()
for i in range(1, 100):
    try:
        log(f'[{i}]')
        image_path = str(i)+'.png'
        text_from_image_path(os.path.join(os.getcwd(), 'images/'+image_path))
    except Exception as e:
        print(e)
        break
break | import argparse
import os
import time
import cv2 as cv;
from nms import nms;
from math import degrees, sin, cos
from matplotlib import pyplot as plt
import numpy as np
from config import *
from text_from_im import text_from_image_path
initLog()
for i in range(1, 100):
try:
log(f'[{i}]')
image_path = str(i)+'.png'
text_from_image_path(os.path.join(os.getcwd(), 'images/'+image_path))
except Exception as e:
print(e)
break | none | 1 | 2.225598 | 2 | |
original-client/dike/__init__.py | earaujoassis/knock-knock | 2 | 6624172 | <filename>original-client/dike/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from .settings import *
from .errors import *
from .network_info import get_remote_ip, get_local_ip
from .remote_server import revise_domain_entry
| <filename>original-client/dike/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from .settings import *
from .errors import *
from .network_info import get_remote_ip, get_local_ip
from .remote_server import revise_domain_entry
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.297008 | 1 |
xcffibaer_lib/__init__.py | whitelynx/xcffibaer | 1 | 6624173 | <reponame>whitelynx/xcffibaer<filename>xcffibaer_lib/__init__.py
'''A themeable status bar written in Python, using xcffib.
'''
from . import chunks
from .Bar import Bar
from .FSReader import FSReader
from .Store import Store
from .Window import Window
from .XSetup import XSetup
__all__ = ['chunks', 'Bar', 'FSReader', 'Store', 'Window', 'XSetup']
| '''A themeable status bar written in Python, using xcffib.
'''
from . import chunks
from .Bar import Bar
from .FSReader import FSReader
from .Store import Store
from .Window import Window
from .XSetup import XSetup
__all__ = ['chunks', 'Bar', 'FSReader', 'Store', 'Window', 'XSetup'] | en | 0.690795 | A themeable status bar written in Python, using xcffib. | 1.604029 | 2 |
incasem/torch/loss/cross_entropy_loss_debug.py | kirchhausenlab/incasem | 0 | 6624174 | import logging
import torch
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class CrossEntropyLossDebug(torch.nn.CrossEntropyLoss):
    """CrossEntropyLoss that logs input/target checksums before computing.

    Identical runs must log identical sums, which helps spot
    non-determinism between training runs.
    """
    # def __init__(self):
    #     super().__init__(reduction="none")

    def forward(self, input, target):
        # Debug checksums of both tensors.
        logger.debug(f'{float(input.sum())=}')
        logger.debug(f'{float(target.sum())=}')
        # Delegate to the functional form with this instance's settings,
        # matching nn.CrossEntropyLoss.forward behaviour.
        return torch.nn.functional.cross_entropy(
            input,
            target,
            weight=self.weight,
            ignore_index=self.ignore_index,
            reduction=self.reduction)
        # return torch.mean(loss)
| import logging
import torch
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class CrossEntropyLossDebug(torch.nn.CrossEntropyLoss):
# def __init__(self):
# super().__init__(reduction="none")
def forward(self, input, target):
logger.debug(f'{float(input.sum())=}')
logger.debug(f'{float(target.sum())=}')
return torch.nn.functional.cross_entropy(
input,
target,
weight=self.weight,
ignore_index=self.ignore_index,
reduction=self.reduction)
# return torch.mean(loss)
| en | 0.295921 | # def __init__(self): # super().__init__(reduction="none") # return torch.mean(loss) | 2.484581 | 2 |
classifier.py | americast/glow-pytorch | 1 | 6624175 | """Train script.
Usage:
infer_celeba.py <hparams> <dataset_root> <z_dir>
"""
import os
import cv2
import random
import torch
import vision
import numpy as np
from docopt import docopt
from torchvision import transforms
from glow.builder import build
from glow.config import JsonConfig
from glow.utils import load
from tqdm import tqdm
import pudb
from random import random
import pickle
from PIL import Image
from torch import optim
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from classifier_utils import *
def save_images(images, names):
    """Save float images (values in [0, 1], BGR channel order) as 8-bit PNGs under dps/."""
    # BUG FIX: the original ensured "pictures/infer/" existed but then wrote
    # every file into "dps/", so saving failed whenever dps/ was missing.
    if not os.path.exists("dps/"):
        os.makedirs("dps/")
    for img, name in zip(images, names):
        img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
        cv2.imwrite("dps/{}.png".format(name), img)
        print("Saved as dps/{}.png".format(name))
def run_z(graph, z):
    """Decode latent z through the Glow graph and return a 64x64 image array."""
    graph.eval()
    x = graph(z=torch.tensor([z]), eps_std=0.3, reverse=True)
    # CHW tensor -> HWC numpy image.
    img = x[0].permute(1, 2, 0).detach().cpu().numpy()
    # Channel flip for cv2 - presumably the model emits RGB and this
    # produces BGR for cv2.imwrite; confirm against the training pipeline.
    img = img[:, :, ::-1]
    img = cv2.resize(img, (64, 64))
    return img
def get_base_indices(filename):
    """Read pic_list/<filename> and return zero-based image indices.

    Each non-empty line has the form "<dir>/<number>.<ext>"; <number> is
    1-based on disk, so 1 is subtracted.
    """
    base_indices = []
    # A context manager guarantees the file is closed (the original leaked
    # the handle on error), and iterating the file object replaces the
    # manual while/readline/break loop.
    with open("pic_list/" + filename, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Blank lines used to crash the int() conversion; skip them.
                continue
            base_indices.append(int(line.split("/")[1].split(".")[0]) - 1)
    return base_indices
def get_n_indices(base_indices, n):
    """Draw n distinct values from base_indices by rejection sampling.

    Positions are drawn uniformly at random and a draw is retried while
    its value was already chosen, so duplicated values in base_indices
    weight the selection exactly as in the original loop.
    """
    chosen = []
    while len(chosen) < n:
        candidate = base_indices[int(random() * len(base_indices))]
        if candidate not in chosen:
            chosen.append(candidate)
    return chosen
def KL(P, Q):
    """Kullback-Leibler divergence D(P || Q) = sum(P * log(P / Q)).

    P and Q must be positive numpy arrays of the same shape.
    """
    # NOTE(review): the original defined an unused `epsilon` and claimed it
    # guarded against zeros, but never applied it. The dead code is removed
    # rather than silently changing the numerics: zeros in P or Q still
    # produce nan/inf exactly as before.
    return np.sum(P * np.log(P / Q))
class classifier_data(Dataset):
    """Dataset of growing image-list prefixes for four attribute groups.

    Each item is a pair (n, r): take the first n paths of group r, where r
    indexes (male_neutral, female_neutral, male_smiling, female_smiling).
    """

    def __init__(self, male_neutral, female_neutral, male_smiling, female_smiling, transform=None, val=False):
        """
        Args:
            male_neutral / female_neutral / male_smiling / female_smiling:
                lists of image file paths, one list per attribute group.
            transform (callable, optional): Optional transform to be applied
                on a sample.
            val (bool): select the validation split instead of the 70%
                training split.
        """
        self.transform = transform
        # Training split: prefix lengths 1 .. 70% of the smiling-male list.
        self.idx = range(1, int(0.7*len(male_smiling)) + 1)
        if val:
            # NOTE(review): int - range raises TypeError, so this validation
            # branch cannot work as written - confirm the intended split.
            self.idx = len(male_smiling) - self.idx
        # One entry per (prefix length, group id) pair.
        self.idx_all = []
        for i in self.idx:
            self.idx_all.append((i, 0))
            self.idx_all.append((i, 1))
            self.idx_all.append((i, 2))
            self.idx_all.append((i, 3))
        self.male_neutral, self.female_neutral, self.male_smiling, self.female_smiling = male_neutral, female_neutral, male_smiling, female_smiling

    def __len__(self):
        # Number of (prefix length, group) pairs.
        return len(self.idx_all)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        n, r = self.idx_all[idx]
        all_here = self.male_neutral[:n], self.female_neutral[:n], self.male_smiling[:n], self.female_smiling[:n]
        imgs_all = torch.zeros(int(len(self.idx_all) / 4), 3, 64, 64)
        # NOTE(review): this method is truncated in the source - the loop
        # below has no body and nothing is returned, so the class cannot be
        # used as-is.
        for i, each in enumerate(all_here[r]):
class enc_classifier(nn.Module):
    """Set encoder: conv + MLP per image, max-pooled over the set into one
    feature vector, then classified into 4 attribute groups."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 16, 3)
        # self.resnet = resnet50(num_classes=768)
        # 61504 = 16 channels * 62 * 62 (a 64x64 input through one 3x3 conv).
        self.fc_0 = nn.Linear(61504, 2048)
        self.fc_1 = nn.Linear(2048, 2048)
        self.fc_2 = nn.Linear(2048, 4)

    def forward(self, data, n, feat_in = None):
        """Encode the first n images of the batch-of-1 set `data`.

        Returns (features (1, 2048), softmax class scores (1, 4)).
        NOTE(review): `feat_in` is accepted but unused on the live path.
        """
        data = data.squeeze(0)[:n,:,:,:]
        x = self.conv(data)
        x = F.dropout(x)
        x = x.flatten(start_dim = 1)
        x = F.relu(x)
        x = self.fc_0(x)
        x = F.dropout(x)
        x = F.relu(x)
        try:
            # Permutation-invariant pooling over the n set elements.
            x = x.max(dim = 0)[0]
        except:
            # NOTE(review): bare except dropping into the pudb debugger -
            # a debugging leftover, not production error handling.
            pu.db
        x = F.relu(x)
        feat_org = self.fc_1(x)
        feat = F.relu(feat_org)
        final = self.fc_2(feat)
        final = F.softmax(final)
        return feat.reshape(1, -1), final.reshape(1, -1)
        # NOTE(review): everything below the return above is unreachable
        # dead code, including the trailing `return 0`.
        # if feat_in == None:
        #     # _, x1 = self.resnet(data[0])
        #     # _, x2 = self.resnet(data[1])
        #     x1, x2 = x1.squeeze(), x2.squeeze()
        # else:
        #     x1 = feat_in[0]
        #     x2 = feat_in[1]
        # merged = torch.cat([x1, x2], dim = -1)
        # x = F.relu(merged)
        # y1 = F.relu(self.fc_1(x))
        # y = F.softmax(self.fc_2(y1))
        return 0
if __name__ == "__main__":
EPOCHS = 100
LR = 1e-5
f = open("pic_list/['Male']~['Smiling']", "r")
male_neutral = []
while True:
line = f.readline()
if not line: break
male_neutral.append(line.strip())
f.close()
f = open("pic_list/['Smiling']~['Male']", "r")
female_smiling = []
while True:
line = f.readline()
if not line: break
female_smiling.append(line.strip())
f.close()
f = open("pic_list/[]~['Smiling', 'Male']", "r")
female_neutral = []
while True:
line = f.readline()
if not line: break
female_neutral.append(line.strip())
f.close()
male_smiling = ["pictures/smile_1000/male_smiling_"+str(x)+".png" for x in range(400, 1000)]
f = open("pic_list/['Smiling', 'Male']~[]", "r")
while True:
line = f.readline()
if not line: break
male_smiling.append(line.strip())
f.close()
hparams = JsonConfig("hparams/celeba.json")
# set transform of dataset
transform = transforms.Compose([
transforms.CenterCrop(hparams.Data.center_crop),
transforms.Resize(hparams.Data.resize),
transforms.ToTensor()])
transformed_dataset = classifier_data(male_neutral, female_neutral, male_smiling, female_smiling, transform=transform, cut_len=1000)
dataloader = DataLoader(transformed_dataset, batch_size=1, shuffle=True, num_workers=32)
model = enc_classifier().cuda()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
for E in tqdm(range(EPOCHS)):
print("\n")
losses = []
c = 0
for data, n, r in dataloader:
data, n, r = data.to("cuda"), n.to("cuda"), r.to("cuda")
c+=1
_, y = model(data, n)
out = loss(y, r)
# pu.db
print(str(c)+"/"+str(len(transformed_dataset))+"; "+"r: "+str(r)+"; loss: "+str(out)+" ", end="\r")
optimizer.zero_grad()
dataloader = DataLoader(transformed_dataset, batch_size=1, shuffle=True, num_workers=16)
model = enc_classifier()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
for E in tqdm(range(EPOCHS)):
print("\n")
losses = []
c = 0
for data, n, r in dataloader:
c+=1
_, y = model(data, n)
out = loss(y, r)
# pu.db
print(str(c)+"/"+str(len(transformed_dataset))+"; "+"r: "+str(r)+"; loss: "+str(out)+" ", end="\r")
optimizer.zero_grad()
out.backward()
optimizer.step()
losses.append(out)
print()
loss_here = sum(losses)/len(losses)
print("Avg loss in epoch "+str(E)+": "+str(loss_here))
if E == 0:
avg_loss = loss_here
if loss_here <= avg_loss:
avg_loss = loss_here
torch.save(model.state_dict(), "./classifier_model.pt")
f = open("classifier_model_details", "w")
f.write("loss: "+str(loss_here)+"\nEpoch: "+str(E)+"\n")
print("Model saved!")
"""
args = docopt(__doc__)
hparams = args["<hparams>"]
dataset_root = args["<dataset_root>"]
z_dir = args["<z_dir>"]
assert os.path.exists(dataset_root), (
"Failed to find root dir `{}` of dataset.".format(dataset_root))
assert os.path.exists(hparams), (
"Failed to find hparams josn `{}`".format(hparams))
if not os.path.exists(z_dir):
print("Generate Z to {}".format(z_dir))
os.makedirs(z_dir)
generate_z = True
else:
print("Load Z from {}".format(z_dir))
generate_z = False
hparams = JsonConfig("hparams/celeba.json")
dataset = vision.Datasets["celeba"]
# set transform of dataset
transform = transforms.Compose([
transforms.CenterCrop(hparams.Data.center_crop),
transforms.Resize(hparams.Data.resize),
transforms.ToTensor()])
# build
built = build(hparams, True)
load('trained.pkg', built['graph'], device=torch.device('cpu'))
graph = built['graph']
dataset = dataset(dataset_root, transform=transform)
# get Z
if not generate_z:
# try to load
try:
delta_Z = []
for i in range(hparams.Glow.y_classes):
z = np.load(os.path.join(z_dir, "detla_z_{}.npy".format(i)))
delta_Z.append(z)
except FileNotFoundError:
# need to generate
generate_z = True
print("Failed to load {} Z".format(hparams.Glow.y_classes))
quit()
if generate_z:
delta_Z = graph.generate_attr_deltaz(dataset)
for i, z in enumerate(delta_Z):
np.save(os.path.join(z_dir, "detla_z_{}.npy".format(i)), z)
print("Finish generating")
smile_avg_dist = []
smile_gen_dist = []
# img = cv2.imread("octocatgurumi.png")
# img = img[:, :, ::-1]
# img = cv2.resize(img, (64, 64))
# img = (torch.tensor(img).permute(2, 0, 1))/256.0
z_base_male_smiling_1 = graph.generate_z(dataset[0]["x"])
z_base_male_smiling_2 = graph.generate_z(dataset[1]["x"])
# imgs_male_smiling.append(img)
z_base_male_smiling = (z_base_male_smiling_1) + z_base_male_smiling_2
smile_avg_dist.append(z_base_male_smiling)
images_male_smiling = []
names = []
images_male_smiling.append(run_z(graph, z_base_male_smiling))
names.append("octogengurumi")
save_images(images_male_smiling, names)
"""
| """Train script.
Usage:
infer_celeba.py <hparams> <dataset_root> <z_dir>
"""
import os
import cv2
import random
import torch
import vision
import numpy as np
from docopt import docopt
from torchvision import transforms
from glow.builder import build
from glow.config import JsonConfig
from glow.utils import load
from tqdm import tqdm
import pudb
from random import random
import pickle
from PIL import Image
from torch import optim
from torch import nn
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from classifier_utils import *
def save_images(images, names):
if not os.path.exists("pictures/infer/"):
os.makedirs("pictures/infer/")
for img, name in zip(images, names):
img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
cv2.imwrite("dps/{}.png".format(name), img)
# cv2.imshow("img", img)
# cv2.waitKey()
print("Saved as dps/{}.png".format(name))
def run_z(graph, z):
graph.eval()
x = graph(z=torch.tensor([z]), eps_std=0.3, reverse=True)
img = x[0].permute(1, 2, 0).detach().cpu().numpy()
img = img[:, :, ::-1]
img = cv2.resize(img, (64, 64))
return img
def get_base_indices(filename):
base_indices = []
f = open("pic_list/"+filename, "r")
while True:
line = f.readline()
if not line: break
base_indices.append(int(line.strip().split("/")[1].split(".")[0]) - 1)
f.close()
return base_indices
def get_n_indices(base_indices, n):
indices_here = []
for i in range(n):
num = int(random() * len(base_indices))
while base_indices[num] in indices_here:
num = int(random() * len(base_indices))
indices_here.append(base_indices[num])
return indices_here
def KL(P,Q):
""" Epsilon is used here to avoid conditional code for
checking that neither P nor Q is equal to 0. """
epsilon = 0.00001
# You may want to instead make copies to avoid changing the np arrays.
# P = P+epsilon
# Q = Q+epsilon
divergence = np.sum(P*np.log(P/Q))
return divergence
class classifier_data(Dataset):
"""Face Landmarks dataset."""
def __init__(self, male_neutral, female_neutral, male_smiling, female_smiling, transform=None, val=False):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.transform = transform
self.idx = range(1, int(0.7*len(male_smiling)) + 1)
if val:
self.idx = len(male_smiling) - self.idx
self.idx_all = []
for i in self.idx:
self.idx_all.append((i, 0))
self.idx_all.append((i, 1))
self.idx_all.append((i, 2))
self.idx_all.append((i, 3))
self.male_neutral, self.female_neutral, self.male_smiling, self.female_smiling = male_neutral, female_neutral, male_smiling, female_smiling
def __len__(self):
return len(self.idx_all)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
n, r = self.idx_all[idx]
all_here = self.male_neutral[:n], self.female_neutral[:n], self.male_smiling[:n], self.female_smiling[:n]
imgs_all = torch.zeros(int(len(self.idx_all) / 4), 3, 64, 64)
for i, each in enumerate(all_here[r]):
class enc_classifier(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 16, 3)
# self.resnet = resnet50(num_classes=768)
self.fc_0 = nn.Linear(61504, 2048)
self.fc_1 = nn.Linear(2048, 2048)
self.fc_2 = nn.Linear(2048, 4)
def forward(self, data, n, feat_in = None):
data = data.squeeze(0)[:n,:,:,:]
x = self.conv(data)
x = F.dropout(x)
x = x.flatten(start_dim = 1)
x = F.relu(x)
x = self.fc_0(x)
x = F.dropout(x)
x = F.relu(x)
try:
x = x.max(dim = 0)[0]
except:
pu.db
x = F.relu(x)
feat_org = self.fc_1(x)
feat = F.relu(feat_org)
final = self.fc_2(feat)
final = F.softmax(final)
return feat.reshape(1, -1), final.reshape(1, -1)
# if feat_in == None:
# # _, x1 = self.resnet(data[0])
# # _, x2 = self.resnet(data[1])
# x1, x2 = x1.squeeze(), x2.squeeze()
# else:
# x1 = feat_in[0]
# x2 = feat_in[1]
# merged = torch.cat([x1, x2], dim = -1)
# x = F.relu(merged)
# y1 = F.relu(self.fc_1(x))
# y = F.softmax(self.fc_2(y1))
return 0
if __name__ == "__main__":
EPOCHS = 100
LR = 1e-5
f = open("pic_list/['Male']~['Smiling']", "r")
male_neutral = []
while True:
line = f.readline()
if not line: break
male_neutral.append(line.strip())
f.close()
f = open("pic_list/['Smiling']~['Male']", "r")
female_smiling = []
while True:
line = f.readline()
if not line: break
female_smiling.append(line.strip())
f.close()
f = open("pic_list/[]~['Smiling', 'Male']", "r")
female_neutral = []
while True:
line = f.readline()
if not line: break
female_neutral.append(line.strip())
f.close()
male_smiling = ["pictures/smile_1000/male_smiling_"+str(x)+".png" for x in range(400, 1000)]
f = open("pic_list/['Smiling', 'Male']~[]", "r")
while True:
line = f.readline()
if not line: break
male_smiling.append(line.strip())
f.close()
hparams = JsonConfig("hparams/celeba.json")
# set transform of dataset
transform = transforms.Compose([
transforms.CenterCrop(hparams.Data.center_crop),
transforms.Resize(hparams.Data.resize),
transforms.ToTensor()])
transformed_dataset = classifier_data(male_neutral, female_neutral, male_smiling, female_smiling, transform=transform, cut_len=1000)
dataloader = DataLoader(transformed_dataset, batch_size=1, shuffle=True, num_workers=32)
model = enc_classifier().cuda()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
for E in tqdm(range(EPOCHS)):
print("\n")
losses = []
c = 0
for data, n, r in dataloader:
data, n, r = data.to("cuda"), n.to("cuda"), r.to("cuda")
c+=1
_, y = model(data, n)
out = loss(y, r)
# pu.db
print(str(c)+"/"+str(len(transformed_dataset))+"; "+"r: "+str(r)+"; loss: "+str(out)+" ", end="\r")
optimizer.zero_grad()
dataloader = DataLoader(transformed_dataset, batch_size=1, shuffle=True, num_workers=16)
model = enc_classifier()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LR)
for E in tqdm(range(EPOCHS)):
print("\n")
losses = []
c = 0
for data, n, r in dataloader:
c+=1
_, y = model(data, n)
out = loss(y, r)
# pu.db
print(str(c)+"/"+str(len(transformed_dataset))+"; "+"r: "+str(r)+"; loss: "+str(out)+" ", end="\r")
optimizer.zero_grad()
out.backward()
optimizer.step()
losses.append(out)
print()
loss_here = sum(losses)/len(losses)
print("Avg loss in epoch "+str(E)+": "+str(loss_here))
if E == 0:
avg_loss = loss_here
if loss_here <= avg_loss:
avg_loss = loss_here
torch.save(model.state_dict(), "./classifier_model.pt")
f = open("classifier_model_details", "w")
f.write("loss: "+str(loss_here)+"\nEpoch: "+str(E)+"\n")
print("Model saved!")
"""
args = docopt(__doc__)
hparams = args["<hparams>"]
dataset_root = args["<dataset_root>"]
z_dir = args["<z_dir>"]
assert os.path.exists(dataset_root), (
"Failed to find root dir `{}` of dataset.".format(dataset_root))
assert os.path.exists(hparams), (
"Failed to find hparams josn `{}`".format(hparams))
if not os.path.exists(z_dir):
print("Generate Z to {}".format(z_dir))
os.makedirs(z_dir)
generate_z = True
else:
print("Load Z from {}".format(z_dir))
generate_z = False
hparams = JsonConfig("hparams/celeba.json")
dataset = vision.Datasets["celeba"]
# set transform of dataset
transform = transforms.Compose([
transforms.CenterCrop(hparams.Data.center_crop),
transforms.Resize(hparams.Data.resize),
transforms.ToTensor()])
# build
built = build(hparams, True)
load('trained.pkg', built['graph'], device=torch.device('cpu'))
graph = built['graph']
dataset = dataset(dataset_root, transform=transform)
# get Z
if not generate_z:
# try to load
try:
delta_Z = []
for i in range(hparams.Glow.y_classes):
z = np.load(os.path.join(z_dir, "detla_z_{}.npy".format(i)))
delta_Z.append(z)
except FileNotFoundError:
# need to generate
generate_z = True
print("Failed to load {} Z".format(hparams.Glow.y_classes))
quit()
if generate_z:
delta_Z = graph.generate_attr_deltaz(dataset)
for i, z in enumerate(delta_Z):
np.save(os.path.join(z_dir, "detla_z_{}.npy".format(i)), z)
print("Finish generating")
smile_avg_dist = []
smile_gen_dist = []
# img = cv2.imread("octocatgurumi.png")
# img = img[:, :, ::-1]
# img = cv2.resize(img, (64, 64))
# img = (torch.tensor(img).permute(2, 0, 1))/256.0
z_base_male_smiling_1 = graph.generate_z(dataset[0]["x"])
z_base_male_smiling_2 = graph.generate_z(dataset[1]["x"])
# imgs_male_smiling.append(img)
z_base_male_smiling = (z_base_male_smiling_1) + z_base_male_smiling_2
smile_avg_dist.append(z_base_male_smiling)
images_male_smiling = []
names = []
images_male_smiling.append(run_z(graph, z_base_male_smiling))
names.append("octogengurumi")
save_images(images_male_smiling, names)
"""
| en | 0.392543 | Train script. Usage: infer_celeba.py <hparams> <dataset_root> <z_dir> # cv2.imshow("img", img) # cv2.waitKey() Epsilon is used here to avoid conditional code for checking that neither P nor Q is equal to 0. # You may want to instead make copies to avoid changing the np arrays. # P = P+epsilon # Q = Q+epsilon Face Landmarks dataset. Args: csv_file (string): Path to the csv file with annotations. root_dir (string): Directory with all the images. transform (callable, optional): Optional transform to be applied on a sample. # self.resnet = resnet50(num_classes=768) # if feat_in == None: # # _, x1 = self.resnet(data[0]) # # _, x2 = self.resnet(data[1]) # x1, x2 = x1.squeeze(), x2.squeeze() # else: # x1 = feat_in[0] # x2 = feat_in[1] # merged = torch.cat([x1, x2], dim = -1) # x = F.relu(merged) # y1 = F.relu(self.fc_1(x)) # y = F.softmax(self.fc_2(y1)) # set transform of dataset # pu.db # pu.db args = docopt(__doc__) hparams = args["<hparams>"] dataset_root = args["<dataset_root>"] z_dir = args["<z_dir>"] assert os.path.exists(dataset_root), ( "Failed to find root dir `{}` of dataset.".format(dataset_root)) assert os.path.exists(hparams), ( "Failed to find hparams josn `{}`".format(hparams)) if not os.path.exists(z_dir): print("Generate Z to {}".format(z_dir)) os.makedirs(z_dir) generate_z = True else: print("Load Z from {}".format(z_dir)) generate_z = False hparams = JsonConfig("hparams/celeba.json") dataset = vision.Datasets["celeba"] # set transform of dataset transform = transforms.Compose([ transforms.CenterCrop(hparams.Data.center_crop), transforms.Resize(hparams.Data.resize), transforms.ToTensor()]) # build built = build(hparams, True) load('trained.pkg', built['graph'], device=torch.device('cpu')) graph = built['graph'] dataset = dataset(dataset_root, transform=transform) # get Z if not generate_z: # try to load try: delta_Z = [] for i in range(hparams.Glow.y_classes): z = np.load(os.path.join(z_dir, "detla_z_{}.npy".format(i))) 
delta_Z.append(z) except FileNotFoundError: # need to generate generate_z = True print("Failed to load {} Z".format(hparams.Glow.y_classes)) quit() if generate_z: delta_Z = graph.generate_attr_deltaz(dataset) for i, z in enumerate(delta_Z): np.save(os.path.join(z_dir, "detla_z_{}.npy".format(i)), z) print("Finish generating") smile_avg_dist = [] smile_gen_dist = [] # img = cv2.imread("octocatgurumi.png") # img = img[:, :, ::-1] # img = cv2.resize(img, (64, 64)) # img = (torch.tensor(img).permute(2, 0, 1))/256.0 z_base_male_smiling_1 = graph.generate_z(dataset[0]["x"]) z_base_male_smiling_2 = graph.generate_z(dataset[1]["x"]) # imgs_male_smiling.append(img) z_base_male_smiling = (z_base_male_smiling_1) + z_base_male_smiling_2 smile_avg_dist.append(z_base_male_smiling) images_male_smiling = [] names = [] images_male_smiling.append(run_z(graph, z_base_male_smiling)) names.append("octogengurumi") save_images(images_male_smiling, names) | 2.137887 | 2 |
program_synthesis/algolisp/dataset/code_base.py | kavigupta/program_synthesis | 123 | 6624176 | <reponame>kavigupta/program_synthesis
import six
import copy
import math
class MetaNode(type):
node_types = {}
attr_types = set()
def __new__(mcls, name, bases, dct):
attrs = list(dct.get('attrs', {}))
for attr, _, _ in attrs:
mcls.attr_types.add(attr)
dct['attrs'] = []
for base in bases:
if hasattr(base, 'attrs'):
dct['attrs'] += base.attrs
dct['attrs'] += attrs
new_class = type.__new__(mcls, name, bases, dct)
mcls.node_types[name] = new_class
return new_class
@six.add_metaclass(MetaNode)
class Node(object):
attrs = ()
def __init__(self, *args, **kwargs):
self.set_attrs(*args, **kwargs)
def set_attrs(self, *args, **kwargs):
values = kwargs
cargs = 0
for (attr_name, attr_type, _), value in zip(self.attrs, args):
if attr_name in values:
raise ValueError("Unexpected positional argument: %s, expected: %s" % (value, attr_name))
if attr_type is not None:
if isinstance(attr_type, tuple):
attr_type, var_type = attr_type
if not isinstance(value.get_type(), var_type):
raise ValueError("Unexpected type of var/expr %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value.get_type()), var_type
))
if not isinstance(value, attr_type):
raise ValueError("Unexpected type of %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
setattr(self, attr_name, value)
cargs += 1
for attr_name, attr_type, attr_default in self.attrs[len(args):]:
if attr_name in values:
value = values[attr_name]
cargs += 1
else:
value = copy.deepcopy(attr_default)
if attr_type is not None:
if isinstance(attr_type, tuple):
attr_type, var_type = attr_type
if not isinstance(value.get_type(), var_type):
raise ValueError("Unexpected type of var/expr %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
if not isinstance(value, attr_type):
raise ValueError("Unexpected type of %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
setattr(self, attr_name, value)
if cargs != len(args) + len(values):
raise ValueError("Unexpected arguments: %s" % (values))
def __eq__(self, other):
if type(other) is not type(self):
return False
for attr, _, _ in self.attrs:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __str__(self):
attrs = ['%s=%s' % (attr, getattr(self, attr))
for attr, _, _ in self.attrs]
if attrs:
return "%s[%s]" % (
type(self).__name__,
','.join(attrs))
return type(self).__name__
| import six
import copy
import math
class MetaNode(type):
node_types = {}
attr_types = set()
def __new__(mcls, name, bases, dct):
attrs = list(dct.get('attrs', {}))
for attr, _, _ in attrs:
mcls.attr_types.add(attr)
dct['attrs'] = []
for base in bases:
if hasattr(base, 'attrs'):
dct['attrs'] += base.attrs
dct['attrs'] += attrs
new_class = type.__new__(mcls, name, bases, dct)
mcls.node_types[name] = new_class
return new_class
@six.add_metaclass(MetaNode)
class Node(object):
attrs = ()
def __init__(self, *args, **kwargs):
self.set_attrs(*args, **kwargs)
def set_attrs(self, *args, **kwargs):
values = kwargs
cargs = 0
for (attr_name, attr_type, _), value in zip(self.attrs, args):
if attr_name in values:
raise ValueError("Unexpected positional argument: %s, expected: %s" % (value, attr_name))
if attr_type is not None:
if isinstance(attr_type, tuple):
attr_type, var_type = attr_type
if not isinstance(value.get_type(), var_type):
raise ValueError("Unexpected type of var/expr %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value.get_type()), var_type
))
if not isinstance(value, attr_type):
raise ValueError("Unexpected type of %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
setattr(self, attr_name, value)
cargs += 1
for attr_name, attr_type, attr_default in self.attrs[len(args):]:
if attr_name in values:
value = values[attr_name]
cargs += 1
else:
value = copy.deepcopy(attr_default)
if attr_type is not None:
if isinstance(attr_type, tuple):
attr_type, var_type = attr_type
if not isinstance(value.get_type(), var_type):
raise ValueError("Unexpected type of var/expr %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
if not isinstance(value, attr_type):
raise ValueError("Unexpected type of %s.%s argument: %s, expected: %s" % (
type(self).__name__, attr_name, type(value), attr_type
))
setattr(self, attr_name, value)
if cargs != len(args) + len(values):
raise ValueError("Unexpected arguments: %s" % (values))
def __eq__(self, other):
if type(other) is not type(self):
return False
for attr, _, _ in self.attrs:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self)
def __str__(self):
attrs = ['%s=%s' % (attr, getattr(self, attr))
for attr, _, _ in self.attrs]
if attrs:
return "%s[%s]" % (
type(self).__name__,
','.join(attrs))
return type(self).__name__ | none | 1 | 2.823328 | 3 | |
examples/imdb_id.py | frrmack/googlesearch | 31 | 6624177 | from googlesearch import GoogleSearch
def imdb_id_for_movie(movie_name):
"""Retrieve the imdb id for a movie
from the name (and year if there are remakes)"""
query = 'site:imdb.com %s' % movie_name
url = GoogleSearch( query ).top_url()
import re
imdb_id = re.search('/tt[0-9]+/', url).group(0).strip('/')
print 'The imdb id for %s is %s' % (movie_name, imdb_id)
return imdb_id
if __name__ == '__main__':
imdb_id_for_movie("Total Recall 1990")
| from googlesearch import GoogleSearch
def imdb_id_for_movie(movie_name):
"""Retrieve the imdb id for a movie
from the name (and year if there are remakes)"""
query = 'site:imdb.com %s' % movie_name
url = GoogleSearch( query ).top_url()
import re
imdb_id = re.search('/tt[0-9]+/', url).group(0).strip('/')
print 'The imdb id for %s is %s' % (movie_name, imdb_id)
return imdb_id
if __name__ == '__main__':
imdb_id_for_movie("Total Recall 1990")
| en | 0.747431 | Retrieve the imdb id for a movie from the name (and year if there are remakes) | 3.694204 | 4 |
chord/__init__.py | dobisel/postmin | 0 | 6624178 | from .rollup import app
from . import song
from . import artist
from . import index
__version__ = '0.1.0'
| from .rollup import app
from . import song
from . import artist
from . import index
__version__ = '0.1.0'
| none | 1 | 1.175557 | 1 | |
multitask_test.py | Jontahan/kvad | 0 | 6624179 | import numpy as np
from gw_collect import Gridworld
import pygame as pg
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from vicero.algorithms.deepqlearning import DQN
from multitask_env import MultitaskEnvironment
import random
def plot(history):
plt.figure(2)
plt.clf()
durations_t = torch.DoubleTensor(history)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy(), c='lightgray', linewidth=1)
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy(), c='green')
plt.pause(0.001)
class LinRegNet(nn.Module):
def __init__(self, n_inputs, n_outputs):
super(LinRegNet, self).__init__()
self.fc1 = nn.Linear(n_inputs, 32)
self.fc2 = nn.Linear(32, n_outputs)
self.linreg = nn.Linear(n_inputs, n_outputs)
def forward(self, x):
x = torch.flatten(x)
#return self.linreg(x)
x = F.relu(self.fc1(x))
return self.fc2(x)
env_list = []
for i in range(10):
env_list.append(Gridworld(width=4, height=4, cell_size=32, seed=i))
env = MultitaskEnvironment(env_list)
dqn = DQN(env, qnet=LinRegNet(64, 4).double(), plotter=plot, render=False, memory_length=2000, gamma=.99, alpha=.001, epsilon_start=0.1, plot_durations=True)
dqn.train(500, 4, plot=True)
for seed in env.env_scores:
print('seed={}, score={}'.format(seed, np.mean(env.env_scores[seed]))) | import numpy as np
from gw_collect import Gridworld
import pygame as pg
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from vicero.algorithms.deepqlearning import DQN
from multitask_env import MultitaskEnvironment
import random
def plot(history):
plt.figure(2)
plt.clf()
durations_t = torch.DoubleTensor(history)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy(), c='lightgray', linewidth=1)
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy(), c='green')
plt.pause(0.001)
class LinRegNet(nn.Module):
def __init__(self, n_inputs, n_outputs):
super(LinRegNet, self).__init__()
self.fc1 = nn.Linear(n_inputs, 32)
self.fc2 = nn.Linear(32, n_outputs)
self.linreg = nn.Linear(n_inputs, n_outputs)
def forward(self, x):
x = torch.flatten(x)
#return self.linreg(x)
x = F.relu(self.fc1(x))
return self.fc2(x)
env_list = []
for i in range(10):
env_list.append(Gridworld(width=4, height=4, cell_size=32, seed=i))
env = MultitaskEnvironment(env_list)
dqn = DQN(env, qnet=LinRegNet(64, 4).double(), plotter=plot, render=False, memory_length=2000, gamma=.99, alpha=.001, epsilon_start=0.1, plot_durations=True)
dqn.train(500, 4, plot=True)
for seed in env.env_scores:
print('seed={}, score={}'.format(seed, np.mean(env.env_scores[seed]))) | en | 0.073983 | #return self.linreg(x) | 2.443635 | 2 |
custom_components/electrolux_status/const.py | tomeko12/homeassistant_electrolux_status | 17 | 6624180 | <filename>custom_components/electrolux_status/const.py
"""The electrolux Status constants."""
# Base component constants
NAME = "Elettrolux status"
DOMAIN = "electrolux_status"
DOMAIN_DATA = f"{DOMAIN}_data"
# Icons
ICON = "mdi:format-quote-close"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
# Platforms
BINARY_SENSOR = "binary_sensor"
SENSOR = "sensor"
BUTTON = "button"
PLATFORMS = [BINARY_SENSOR, SENSOR, BUTTON]
# Configuration and options
CONF_ENABLED = "enabled"
CONF_USERNAME = "username"
CONF_PASSWORD = "password"
CONF_REGION = "region"
CONF_SCAN_INTERVAL = "scan_interval"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_SCAN_INTERVAL = 30
DEFAULT_REGION = "emea"
icon_mapping = {
"Pause": "mdi:pause",
"Resume": "mdi:play-pause",
"Start": "mdi:play",
"Stop": "mdi:stop",
"TURN ON": "mdi:power-on",
"ON": "mdi:power-on",
"TURN OFF": "mdi:power-off",
"OFF": "mdi:power-off"}
| <filename>custom_components/electrolux_status/const.py
"""The electrolux Status constants."""
# Base component constants
NAME = "Elettrolux status"
DOMAIN = "electrolux_status"
DOMAIN_DATA = f"{DOMAIN}_data"
# Icons
ICON = "mdi:format-quote-close"
# Device classes
BINARY_SENSOR_DEVICE_CLASS = "connectivity"
# Platforms
BINARY_SENSOR = "binary_sensor"
SENSOR = "sensor"
BUTTON = "button"
PLATFORMS = [BINARY_SENSOR, SENSOR, BUTTON]
# Configuration and options
CONF_ENABLED = "enabled"
CONF_USERNAME = "username"
CONF_PASSWORD = "password"
CONF_REGION = "region"
CONF_SCAN_INTERVAL = "scan_interval"
# Defaults
DEFAULT_NAME = DOMAIN
DEFAULT_SCAN_INTERVAL = 30
DEFAULT_REGION = "emea"
icon_mapping = {
"Pause": "mdi:pause",
"Resume": "mdi:play-pause",
"Start": "mdi:play",
"Stop": "mdi:stop",
"TURN ON": "mdi:power-on",
"ON": "mdi:power-on",
"TURN OFF": "mdi:power-off",
"OFF": "mdi:power-off"}
| en | 0.583246 | The electrolux Status constants. # Base component constants # Icons # Device classes # Platforms # Configuration and options # Defaults | 1.590289 | 2 |
Tensorflow_network/seq2SeqSummarisation.py | quilan78/MSC_project | 0 | 6624181 | import tensorflow as tf
import numpy as np
import time
import sys
sys.path.append('/home/dwt17/MSc_project/neural_sum_1/code/Commons/')
sys.path.append('../Commons/')
from read_data import *
from vocab import *
from treatedData import *
from LRP_output import *
from batch import *
from model import *
import os
class Seq2SeqSummarisation:
def __init__(self, cellSize = 128, batch_size = 15, max_encoding_length = 200, max_decoding_length = 50, vocab_size = 2000, embedding_size = 64, learning_rate = 0.0001, learning_decay = 0.8, minimum_rate = 0.000001, nbre_epoch = 50, display_batch_freq = 2, gradient_clip = 5, beam_width = 10, save_frequency = 1, coverage=False, pointer=False) :
self.beam_width = beam_width
self.model = Model(cellSize = cellSize, batch_size = batch_size, max_encoding_length = max_encoding_length, max_decoding_length = max_decoding_length, vocab_size = vocab_size, embedding_size = embedding_size, learning_rate = learning_rate, learning_decay = learning_decay, minimum_rate =minimum_rate, nbre_epoch = nbre_epoch, display_batch_freq = display_batch_freq, gradient_clip = gradient_clip, beam_width = beam_width, save_frequency = save_frequency, coverage=coverage, pointer=pointer)
def train(self, nb_data = 100, create_batches=True, load_from_checkpoint=False, checkpoint_path = "../../Train/Model/model1.ckpt", tensorboard_path="../../Train/tensorboard/", writting_path_batches="../../Data/Batches", data_path="../../Data/finished_files/"):
#Initialising the Model
self.model.init_graph_and_data(task="train", nb_data = nb_data, create_batches=create_batches, writting_path_batches=writting_path_batches, data_path=data_path)
#Training variables
display_batch_freq = self.model.display_batch_freq
save_frequency = self.model.save_frequency
nbre_epoch = self.model.nbre_epoch
nb_batches = self.model.nb_batches
learning_rate = self.model.learning_rate
learning_decay = self.model.learning_decay
minimum_rate = self.model.minimum_rate
save_frequency = self.model.save_frequency
#Data loader
batch_loader = Batch()
#For testing on CPU
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"
full_start = time.time()
# Blocks the execution to only one CPU
session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
with tf.Session(config=session_conf) as sess:
#with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#print(tf.trainable_variables())
if load_from_checkpoint:
print("Restoring saved network")
self.model.saver.restore(sess, checkpoint_path)
print("Last version of the network loaded")
#For tensorboard
train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
#batch_loader.load_object(4)
step = 0
for epoch in range(nbre_epoch):
average_loss = 0
epoch_start = time.time()
summed_loss = 0
print("START OF EPOCH {}/{}".format(epoch+1, nbre_epoch))
for batch in range(nb_batches):
batch_loader.load_object(batch, filepath=writting_path_batches)
start_time = time.time()
summary, loss, _ = sess.run([self.model.merged, self.model.tf_loss, self.model.tf_update_step],{
self.model.tf_input_enc_batch : batch_loader.input_enc,
self.model.tf_input_dec_batch : batch_loader.input_dec,
self.model.tf_target_dec_batch : batch_loader.target_dec,
self.model.tf_input_enc_seq_lengths : batch_loader.input_enc_seq,
self.model.tf_input_dec_seq_lengths : batch_loader.input_dec_seq,
self.model.tf_learning_rate : learning_rate,
self.model.tf_batch_max_oov : batch_loader.max_oovs
})
end_time = time.time()
time_spent = end_time - start_time
average_loss += loss
if batch % display_batch_freq == 0:
print("EPOCH {}/{}, BATCH {}/{}, Loss {}, Time {}, rate {}".format(epoch+1,
nbre_epoch,
batch,
nb_batches,
loss,
time_spent,
learning_rate))
train_writer.add_summary(summary, step)
step += 1
print("Average epoch loss : {}".format(average_loss/nb_batches))
if learning_rate * learning_decay > minimum_rate:
learning_rate *= learning_decay
if epoch % save_frequency == 0:
print("Saving the model")
save_path = self.model.saver.save(sess, checkpoint_path)
print("Model saved")
epoch_end = time.time()
print("EPOCH TIME : {} h".format((epoch_end-epoch_start)/3600))
print("Training finished")
print("Saving the model")
save_path = self.model.saver.save(sess, checkpoint_path)
print("Model saved")
full_stop = time.time()
print("FULL TIME : {} h".format((full_stop-full_start)/3600))
def infer(self, nb_data = 10, checkpoint_path = "../../Train/Model/model1.ckpt", save_path ="../../Output/", data_path="../../Data/finished_files/"):
self.model.init_graph_and_data(task="test", nb_data=nb_data, data_path=data_path)
#Training variables
nb_batches = self.model.nb_batches
batch_size = self.model.batch_size
# Blocks the execution to only one CPU
session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
with tf.Session(config=session_conf) as sess:
sess.run(tf.global_variables_initializer())
print(tf.trainable_variables())
train_writer = tf.summary.FileWriter("tensorboard/", sess.graph)
print("Restoring saved network")
self.model.saver.restore(sess, checkpoint_path)
print("Last version of the network loaded")
#For tensorboard
print("START OF INFERENCE")
for batch in range(nb_batches):
for elem in range(batch_size): #We infer on one element at the time
id_ = batch * batch_size + elem + 1
print("START OF INFERENCE FOR WORD {}/{}".format(id_, nb_batches*batch_size))
start_time = time.time()
#We tile the input to match the batch size
input_inf = [[word for word in self.model.input_enc_batches[batch][elem]] for i in range(batch_size)]
input_length_inf = [self.model.input_enc_seq_lengths[batch][elem] for i in range(batch_size)]
#Without beam
encoder_outputs, prediction_greedy, logits_greedy, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
self.model.tf_input_enc_batch : input_inf,
self.model.tf_input_enc_seq_lengths : input_length_inf,
self.model.tf_batch_max_oov : self.model.max_oovs[batch]
})
#With beam
"""
encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
self.model.tf_input_enc_batch : input_inf,
self.model.tf_input_enc_seq_lengths : input_length_inf,
self.model.tf_batch_max_oov : self.model.max_oovs[batch]
})
"""
#print("hello")
#print(alignment_history)
#print(encoder_outputs)
#print(keys)
end_time = time.time()
time_spent = end_time - start_time
print("Element {}/{}, Time {}".format(id_, nb_batches*batch_size, time_spent))
#Getting the first summary(they are all identical)
greedy_seq_num = prediction_greedy[0]
#Getting the best summary from the beam search
#beam_seq_num = prediction_beam[0,:,0]
beam_seq_num = []
#print(beam_seq_num)
greedy_seq = self.model.vocab.TransalteAnswer(greedy_seq_num)
beam_seq = self.model.vocab.TransalteAnswer(beam_seq_num)
original_text = self.model.save_enc_input[batch][elem]
input_text = self.model.vocab.TransalteAnswer(self.model.input_enc_batches[batch][elem])
original_summary = self.model.save_dec_output[batch][elem]
input_summary = self.model.vocab.TransalteAnswer(self.model.target_dec_batches[batch][elem])
print("Starting saving to file element {}".format(id_))
treated = TreatedData(original_text=original_text, input_text=input_text, original_summary=original_summary, input_summary=input_summary, keys=keys, encoder_outputs=encoder_outputs, alignment_history=alignment_history, input_seq=self.model.input_enc_batches[batch][elem], enc_state=enc_state, greed_seq_num=greedy_seq_num, greed_seq=greedy_seq, beam_seq_number=beam_seq_num, beam_seq=beam_seq, logits = logits_greedy, id_ = id_)
treated.saveToFileText(filepath=save_path+"Text/")
treated.save_object(filepath= save_path+"Obj/")
print("Finished saving to file element {}".format(id_))
    def experiment(self, nb_data = 10, checkpoint_path = "../../Experiment/Model/model1.ckpt", save_path ="../../Experiment/Results/1/", data_path="../../Data/finished_files/", exp_data_path="../../Experiment/ModifiedTexts/1/"):
        """Greedily decode summaries for 10 modified texts from LRP experiment files.

        Unlike ``infer`` this reads each input from an ``LRP_output`` JSON file
        under *exp_data_path*, feeds it with a fixed input length of 400 and no
        OOV tokens, and saves only the input text and the greedy decode as
        ``TreatedData`` under *save_path*.
        """
        self.model.init_graph_and_data(task="test", nb_data=nb_data, data_path=data_path)
        # Blocks the execution to only one CPU
        session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
        with tf.Session(config=session_conf) as sess:
            sess.run(tf.global_variables_initializer())
            #print(tf.trainable_variables())
            print("Restoring saved network")
            self.model.saver.restore(sess, checkpoint_path)
            print("Last version of the network loaded")
            #For tensorboard
            batch_size = self.model.batch_size
            data = LRP_output()
            print("START OF INFERENCE")
            # Hard-coded to 10 experiment files (0..9) under exp_data_path.
            for batch in range(10):
                #for elem in range(batch_size): #We infer on one element at the time
                id_ = batch
                print("START OF INFERENCE FOR WORD {}/{}".format(id_, 10))
                start_time = time.time()
                data.load_json(batch, filepath=exp_data_path)
                #We tile the input to match the batch size
                input_inf = self.model.vocab.TranslateBatches(np.array([np.array([data.input_text for i in range(batch_size)])]))[0]
                input_length_inf = [400 for i in range(batch_size)]
                #Without beam
                encoder_outputs, prediction_greedy, logits_greedy, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                    self.model.tf_input_enc_batch : input_inf,
                    self.model.tf_input_enc_seq_lengths : input_length_inf,
                    self.model.tf_batch_max_oov : 0
                    })
                #With beam
                """
                encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                self.model.tf_input_enc_batch : input_inf,
                self.model.tf_input_enc_seq_lengths : input_length_inf,
                self.model.tf_batch_max_oov : self.model.max_oovs[batch]
                })
                """
                #print("hello")
                #print(alignment_history)
                #print(encoder_outputs)
                #print(keys)
                end_time = time.time()
                time_spent = end_time - start_time
                print("Element {}/{}, Time {}".format(id_, 10, time_spent))
                #Getting the first summary(they are all identical)
                greedy_seq_num = prediction_greedy[0]
                #Getting the best summary from the beam search
                #beam_seq_num = prediction_beam[0,:,0]
                beam_seq_num = []
                #print(beam_seq_num)
                greedy_seq = self.model.vocab.TransalteAnswer(greedy_seq_num)
                beam_seq = self.model.vocab.TransalteAnswer(beam_seq_num)
                # NOTE(review): original_text/original_summary/input_summary and
                # beam_seq are collected but never passed to TreatedData below.
                original_text = data.original_text
                input_text = data.input_text
                original_summary = data.original_summary
                input_summary = data.input_summary
                print("Starting saving to file element {}".format(id_))
                treated = TreatedData(input_text=input_text, greed_seq=greedy_seq, id_ = id_)
                treated.saveToFileText(filepath=save_path+"Text/")
                treated.save_object(filepath= save_path+"Obj/")
                print("Finished saving to file element {}".format(id_))
if __name__ == "__main__":
    # Resume training from the last checkpoint over 20000 pre-built batches.
    net = Seq2SeqSummarisation()
    net.train(nb_data = 20000, create_batches=False, load_from_checkpoint=True)
#net.infer() | import tensorflow as tf
import numpy as np
import time
import sys
sys.path.append('/home/dwt17/MSc_project/neural_sum_1/code/Commons/')
sys.path.append('../Commons/')
from read_data import *
from vocab import *
from treatedData import *
from LRP_output import *
from batch import *
from model import *
import os
class Seq2SeqSummarisation:
    def __init__(self, cellSize = 128, batch_size = 15, max_encoding_length = 200, max_decoding_length = 50, vocab_size = 2000, embedding_size = 64, learning_rate = 0.0001, learning_decay = 0.8, minimum_rate = 0.000001, nbre_epoch = 50, display_batch_freq = 2, gradient_clip = 5, beam_width = 10, save_frequency = 1, coverage=False, pointer=False) :
        """Build the underlying seq2seq ``Model`` with the given hyper-parameters.

        All arguments are forwarded verbatim to ``Model``; ``beam_width`` is
        additionally kept on the wrapper for beam-search decoding.
        """
        self.beam_width = beam_width
        self.model = Model(cellSize = cellSize, batch_size = batch_size, max_encoding_length = max_encoding_length, max_decoding_length = max_decoding_length, vocab_size = vocab_size, embedding_size = embedding_size, learning_rate = learning_rate, learning_decay = learning_decay, minimum_rate =minimum_rate, nbre_epoch = nbre_epoch, display_batch_freq = display_batch_freq, gradient_clip = gradient_clip, beam_width = beam_width, save_frequency = save_frequency, coverage=coverage, pointer=pointer)
    def train(self, nb_data = 100, create_batches=True, load_from_checkpoint=False, checkpoint_path = "../../Train/Model/model1.ckpt", tensorboard_path="../../Train/tensorboard/", writting_path_batches="../../Data/Batches", data_path="../../Data/finished_files/"):
        """Train the model over pre-serialised batches and checkpoint it.

        Runs ``nbre_epoch`` epochs over the ``nb_batches`` batches stored under
        *writting_path_batches*, decays the learning rate after each epoch
        (floored at ``minimum_rate``), writes TensorBoard summaries to
        *tensorboard_path* and saves the model to *checkpoint_path* every
        ``save_frequency`` epochs and once again at the end.
        """
        #Initialising the Model
        self.model.init_graph_and_data(task="train", nb_data = nb_data, create_batches=create_batches, writting_path_batches=writting_path_batches, data_path=data_path)
        #Training variables
        display_batch_freq = self.model.display_batch_freq
        save_frequency = self.model.save_frequency
        nbre_epoch = self.model.nbre_epoch
        nb_batches = self.model.nb_batches
        learning_rate = self.model.learning_rate
        learning_decay = self.model.learning_decay
        minimum_rate = self.model.minimum_rate
        save_frequency = self.model.save_frequency
        #Data loader
        batch_loader = Batch()
        #For testing on CPU
        #os.environ["CUDA_VISIBLE_DEVICES"]="-1"
        full_start = time.time()
        # Blocks the execution to only one CPU
        session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
        with tf.Session(config=session_conf) as sess:
            #with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            #print(tf.trainable_variables())
            if load_from_checkpoint:
                print("Restoring saved network")
                self.model.saver.restore(sess, checkpoint_path)
                print("Last version of the network loaded")
            #For tensorboard
            train_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)
            #batch_loader.load_object(4)
            step = 0
            for epoch in range(nbre_epoch):
                average_loss = 0
                epoch_start = time.time()
                summed_loss = 0
                print("START OF EPOCH {}/{}".format(epoch+1, nbre_epoch))
                for batch in range(nb_batches):
                    # Batches were serialised to disk beforehand; load the current one.
                    batch_loader.load_object(batch, filepath=writting_path_batches)
                    start_time = time.time()
                    summary, loss, _ = sess.run([self.model.merged, self.model.tf_loss, self.model.tf_update_step],{
                        self.model.tf_input_enc_batch : batch_loader.input_enc,
                        self.model.tf_input_dec_batch : batch_loader.input_dec,
                        self.model.tf_target_dec_batch : batch_loader.target_dec,
                        self.model.tf_input_enc_seq_lengths : batch_loader.input_enc_seq,
                        self.model.tf_input_dec_seq_lengths : batch_loader.input_dec_seq,
                        self.model.tf_learning_rate : learning_rate,
                        self.model.tf_batch_max_oov : batch_loader.max_oovs
                        })
                    end_time = time.time()
                    time_spent = end_time - start_time
                    average_loss += loss
                    if batch % display_batch_freq == 0:
                        print("EPOCH {}/{}, BATCH {}/{}, Loss {}, Time {}, rate {}".format(epoch+1,
                            nbre_epoch,
                            batch,
                            nb_batches,
                            loss,
                            time_spent,
                            learning_rate))
                    train_writer.add_summary(summary, step)
                    step += 1
                print("Average epoch loss : {}".format(average_loss/nb_batches))
                # Exponential learning-rate decay, never going below minimum_rate.
                if learning_rate * learning_decay > minimum_rate:
                    learning_rate *= learning_decay
                if epoch % save_frequency == 0:
                    print("Saving the model")
                    save_path = self.model.saver.save(sess, checkpoint_path)
                    print("Model saved")
                epoch_end = time.time()
                print("EPOCH TIME : {} h".format((epoch_end-epoch_start)/3600))
            print("Training finished")
            print("Saving the model")
            save_path = self.model.saver.save(sess, checkpoint_path)
            print("Model saved")
        full_stop = time.time()
        print("FULL TIME : {} h".format((full_stop-full_start)/3600))
    def infer(self, nb_data = 10, checkpoint_path = "../../Train/Model/model1.ckpt", save_path ="../../Output/", data_path="../../Data/finished_files/"):
        """Greedily decode summaries for ``nb_data`` test examples and save them.

        The graph has a fixed batch size, so each example is tiled to a full
        batch, decoded greedily, and written out both as text and as a pickled
        ``TreatedData`` object under *save_path* (``Text/`` and ``Obj/``).
        """
        self.model.init_graph_and_data(task="test", nb_data=nb_data, data_path=data_path)
        #Training variables
        nb_batches = self.model.nb_batches
        batch_size = self.model.batch_size
        # Blocks the execution to only one CPU
        session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
        with tf.Session(config=session_conf) as sess:
            sess.run(tf.global_variables_initializer())
            print(tf.trainable_variables())
            train_writer = tf.summary.FileWriter("tensorboard/", sess.graph)
            print("Restoring saved network")
            self.model.saver.restore(sess, checkpoint_path)
            print("Last version of the network loaded")
            #For tensorboard
            print("START OF INFERENCE")
            for batch in range(nb_batches):
                for elem in range(batch_size): #We infer on one element at the time
                    id_ = batch * batch_size + elem + 1
                    print("START OF INFERENCE FOR WORD {}/{}".format(id_, nb_batches*batch_size))
                    start_time = time.time()
                    #We tile the input to match the batch size
                    input_inf = [[word for word in self.model.input_enc_batches[batch][elem]] for i in range(batch_size)]
                    input_length_inf = [self.model.input_enc_seq_lengths[batch][elem] for i in range(batch_size)]
                    #Without beam
                    encoder_outputs, prediction_greedy, logits_greedy, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                        self.model.tf_input_enc_batch : input_inf,
                        self.model.tf_input_enc_seq_lengths : input_length_inf,
                        self.model.tf_batch_max_oov : self.model.max_oovs[batch]
                        })
                    #With beam
                    """
                    encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                    self.model.tf_input_enc_batch : input_inf,
                    self.model.tf_input_enc_seq_lengths : input_length_inf,
                    self.model.tf_batch_max_oov : self.model.max_oovs[batch]
                    })
                    """
                    #print("hello")
                    #print(alignment_history)
                    #print(encoder_outputs)
                    #print(keys)
                    end_time = time.time()
                    time_spent = end_time - start_time
                    print("Element {}/{}, Time {}".format(id_, nb_batches*batch_size, time_spent))
                    #Getting the first summary(they are all identical)
                    greedy_seq_num = prediction_greedy[0]
                    #Getting the best summary from the beam search
                    #beam_seq_num = prediction_beam[0,:,0]
                    beam_seq_num = []
                    #print(beam_seq_num)
                    greedy_seq = self.model.vocab.TransalteAnswer(greedy_seq_num)
                    beam_seq = self.model.vocab.TransalteAnswer(beam_seq_num)
                    original_text = self.model.save_enc_input[batch][elem]
                    input_text = self.model.vocab.TransalteAnswer(self.model.input_enc_batches[batch][elem])
                    original_summary = self.model.save_dec_output[batch][elem]
                    input_summary = self.model.vocab.TransalteAnswer(self.model.target_dec_batches[batch][elem])
                    print("Starting saving to file element {}".format(id_))
                    treated = TreatedData(original_text=original_text, input_text=input_text, original_summary=original_summary, input_summary=input_summary, keys=keys, encoder_outputs=encoder_outputs, alignment_history=alignment_history, input_seq=self.model.input_enc_batches[batch][elem], enc_state=enc_state, greed_seq_num=greedy_seq_num, greed_seq=greedy_seq, beam_seq_number=beam_seq_num, beam_seq=beam_seq, logits = logits_greedy, id_ = id_)
                    treated.saveToFileText(filepath=save_path+"Text/")
                    treated.save_object(filepath= save_path+"Obj/")
                    print("Finished saving to file element {}".format(id_))
    def experiment(self, nb_data = 10, checkpoint_path = "../../Experiment/Model/model1.ckpt", save_path ="../../Experiment/Results/1/", data_path="../../Data/finished_files/", exp_data_path="../../Experiment/ModifiedTexts/1/"):
        """Greedily decode summaries for 10 modified texts from LRP experiment files.

        Unlike ``infer`` this reads each input from an ``LRP_output`` JSON file
        under *exp_data_path*, feeds it with a fixed input length of 400 and no
        OOV tokens, and saves only the input text and the greedy decode as
        ``TreatedData`` under *save_path*.
        """
        self.model.init_graph_and_data(task="test", nb_data=nb_data, data_path=data_path)
        # Blocks the execution to only one CPU
        session_conf = tf.ConfigProto(device_count = {'CPU': 1, 'GPU' : 1},inter_op_parallelism_threads=1,intra_op_parallelism_threads=1)
        with tf.Session(config=session_conf) as sess:
            sess.run(tf.global_variables_initializer())
            #print(tf.trainable_variables())
            print("Restoring saved network")
            self.model.saver.restore(sess, checkpoint_path)
            print("Last version of the network loaded")
            #For tensorboard
            batch_size = self.model.batch_size
            data = LRP_output()
            print("START OF INFERENCE")
            # Hard-coded to 10 experiment files (0..9) under exp_data_path.
            for batch in range(10):
                #for elem in range(batch_size): #We infer on one element at the time
                id_ = batch
                print("START OF INFERENCE FOR WORD {}/{}".format(id_, 10))
                start_time = time.time()
                data.load_json(batch, filepath=exp_data_path)
                #We tile the input to match the batch size
                input_inf = self.model.vocab.TranslateBatches(np.array([np.array([data.input_text for i in range(batch_size)])]))[0]
                input_length_inf = [400 for i in range(batch_size)]
                #Without beam
                encoder_outputs, prediction_greedy, logits_greedy, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                    self.model.tf_input_enc_batch : input_inf,
                    self.model.tf_input_enc_seq_lengths : input_length_inf,
                    self.model.tf_batch_max_oov : 0
                    })
                #With beam
                """
                encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
                self.model.tf_input_enc_batch : input_inf,
                self.model.tf_input_enc_seq_lengths : input_length_inf,
                self.model.tf_batch_max_oov : self.model.max_oovs[batch]
                })
                """
                #print("hello")
                #print(alignment_history)
                #print(encoder_outputs)
                #print(keys)
                end_time = time.time()
                time_spent = end_time - start_time
                print("Element {}/{}, Time {}".format(id_, 10, time_spent))
                #Getting the first summary(they are all identical)
                greedy_seq_num = prediction_greedy[0]
                #Getting the best summary from the beam search
                #beam_seq_num = prediction_beam[0,:,0]
                beam_seq_num = []
                #print(beam_seq_num)
                greedy_seq = self.model.vocab.TransalteAnswer(greedy_seq_num)
                beam_seq = self.model.vocab.TransalteAnswer(beam_seq_num)
                # NOTE(review): original_text/original_summary/input_summary and
                # beam_seq are collected but never passed to TreatedData below.
                original_text = data.original_text
                input_text = data.input_text
                original_summary = data.original_summary
                input_summary = data.input_summary
                print("Starting saving to file element {}".format(id_))
                treated = TreatedData(input_text=input_text, greed_seq=greedy_seq, id_ = id_)
                treated.saveToFileText(filepath=save_path+"Text/")
                treated.save_object(filepath= save_path+"Obj/")
                print("Finished saving to file element {}".format(id_))
if __name__ == "__main__":
    # Resume training from the last checkpoint over 20000 pre-built batches.
    net = Seq2SeqSummarisation()
    net.train(nb_data = 20000, create_batches=False, load_from_checkpoint=True)
#net.infer() | en | 0.453774 | #Initialising the Model #Training variables #Data loader #For testing on CPU #os.environ["CUDA_VISIBLE_DEVICES"]="-1" # Blocks the execution to only one CPU #with tf.Session() as sess: #print(tf.trainable_variables()) #For tensorboard #batch_loader.load_object(4) #Training variables # Blocks the execution to only one CPU #For tensorboard #We infer on one element at the time #We tile the input to match the batch size #Without beam #With beam encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
self.model.tf_input_enc_batch : input_inf,
self.model.tf_input_enc_seq_lengths : input_length_inf,
self.model.tf_batch_max_oov : self.model.max_oovs[batch]
}) #print("hello") #print(alignment_history) #print(encoder_outputs) #print(keys) #Getting the first summary(they are all identical) #Getting the best summary from the beam search #beam_seq_num = prediction_beam[0,:,0] #print(beam_seq_num) # Blocks the execution to only one CPU #print(tf.trainable_variables()) #For tensorboard #for elem in range(batch_size): #We infer on one element at the time #We tile the input to match the batch size #Without beam #With beam encoder_outputs, prediction_greedy, logits_greedy, prediction_beam, enc_state, alignment_history, keys = sess.run([self.model.enc_outputs, self.model.output_prediction_greedy, self.model.logits_prediction_greedy, self.model.output_prediction_beam, self.model.encoder_state, self.model.dec_states_greedy.alignment_history.stack(), self.model.attention_mechanism.values,],{
self.model.tf_input_enc_batch : input_inf,
self.model.tf_input_enc_seq_lengths : input_length_inf,
self.model.tf_batch_max_oov : self.model.max_oovs[batch]
}) #print("hello") #print(alignment_history) #print(encoder_outputs) #print(keys) #Getting the first summary(they are all identical) #Getting the best summary from the beam search #beam_seq_num = prediction_beam[0,:,0] #print(beam_seq_num) #net.infer() | 2.187162 | 2 |
tools/perf/core/external_modules.py | zealoussnow/chromium | 14,668 | 6624182 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow importing external modules which may be missing in some platforms.
These modules are normally provided by the vpython environment manager. But
some platforms, e.g. CromeOs, do not have access to this facility.
To be safe, instead of e.g.:
import pandas
clients should do:
from core.external_modules import pandas
Tests that require pandas to work can be skipped as follows:
from core.external_modules import pandas
@unittest.skipIf(pandas is None, 'pandas not available')
class TestsForMyModule(unittest.TestCase):
def testSomeBehavior(self):
# test some behavior that requires pandas module.
Finally, scripts that to work properly require any of these external
dependencies should call:
from core import external_modules
if __name__ == '__main__':
external_modules.RequireModules()
# the rest of your script here.
to exit early with a suitable error message if the dependencies are not
satisfied.
"""
import sys
try:
import numpy # pylint: disable=import-error
except ImportError:
numpy = None
try:
import pandas # pylint: disable=import-error
except ImportError:
pandas = None
def RequireModules():
if numpy is None or pandas is None:
sys.exit(
'ERROR: Some required python modules are not available.\n\n'
'Make sure to run this script using vpython or ensure that '
'module dependencies listed in src/.vpython are satisfied.')
| # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Allow importing external modules which may be missing in some platforms.
These modules are normally provided by the vpython environment manager. But
some platforms, e.g. CromeOs, do not have access to this facility.
To be safe, instead of e.g.:
import pandas
clients should do:
from core.external_modules import pandas
Tests that require pandas to work can be skipped as follows:
from core.external_modules import pandas
@unittest.skipIf(pandas is None, 'pandas not available')
class TestsForMyModule(unittest.TestCase):
def testSomeBehavior(self):
# test some behavior that requires pandas module.
Finally, scripts that to work properly require any of these external
dependencies should call:
from core import external_modules
if __name__ == '__main__':
external_modules.RequireModules()
# the rest of your script here.
to exit early with a suitable error message if the dependencies are not
satisfied.
"""
import sys
try:
import numpy # pylint: disable=import-error
except ImportError:
numpy = None
try:
import pandas # pylint: disable=import-error
except ImportError:
pandas = None
def RequireModules():
  """Abort the process when any of the optional modules failed to import."""
  have_all_modules = numpy is not None and pandas is not None
  if have_all_modules:
    return
  sys.exit(
      'ERROR: Some required python modules are not available.\n\n'
      'Make sure to run this script using vpython or ensure that '
      'module dependencies listed in src/.vpython are satisfied.')
| en | 0.744731 | # Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Allow importing external modules which may be missing in some platforms. These modules are normally provided by the vpython environment manager. But some platforms, e.g. CromeOs, do not have access to this facility. To be safe, instead of e.g.: import pandas clients should do: from core.external_modules import pandas Tests that require pandas to work can be skipped as follows: from core.external_modules import pandas @unittest.skipIf(pandas is None, 'pandas not available') class TestsForMyModule(unittest.TestCase): def testSomeBehavior(self): # test some behavior that requires pandas module. Finally, scripts that to work properly require any of these external dependencies should call: from core import external_modules if __name__ == '__main__': external_modules.RequireModules() # the rest of your script here. to exit early with a suitable error message if the dependencies are not satisfied. # pylint: disable=import-error # pylint: disable=import-error | 2.368362 | 2 |
script.py | SombiriX/csvcompare | 0 | 6624183 | import csv
LINES_CSV = 'lines.csv'
MVR_CSV = 'mvr.csv'
OUT_CSV = 'out.csv'
WORD_LEN = 4
INJURY_WORDS = ['injury', 'fatal', 'pi', 'homicide', 'death']
with open(OUT_CSV, 'w', newline='') as out_csvfile:
fieldnames = ['svc_code', 'description', 'augusta_risk_type', 'bodily_injury']
writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
with open(MVR_CSV, newline='') as mvr_csvfile:
mvr_reader = csv.DictReader(mvr_csvfile, delimiter='\t')
for mvr_row in mvr_reader:
matching_indexes = []
injury_flag = False
with open(LINES_CSV, newline='') as lines_csvfile:
lines_reader = csv.DictReader(lines_csvfile, delimiter='\t')
for lines_row in lines_reader:
lines_words = [
''.join(ch for ch in x if ch.isalnum())
for x in lines_row['line_def'].split(' ')
if len(x) > WORD_LEN
]
if any([word in mvr_row['desc'] for word in lines_words]):
# Add line index to outputs for this row
matching_indexes.append(lines_row['index'])
injury_flag = any([word in mvr_row['desc'] for word in INJURY_WORDS])
if not matching_indexes:
matching_indexes.append('Unknown')
writer.writerow({
'svc_code': mvr_row['svc_code'],
'description': mvr_row['desc'].upper(),
'augusta_risk_type': ', '.join(matching_indexes),
'bodily_injury': injury_flag
})
| import csv
# Cross-reference MVR violation descriptions (mvr.csv) against risk-type
# line definitions (lines.csv) and write one matched row per MVR record
# to out.csv.  All three files are tab-separated.
LINES_CSV = 'lines.csv'
MVR_CSV = 'mvr.csv'
OUT_CSV = 'out.csv'
WORD_LEN = 4  # only words longer than this participate in matching
INJURY_WORDS = ['injury', 'fatal', 'pi', 'homicide', 'death']

# Parse the line definitions once up front instead of re-opening and
# re-parsing lines.csv for every MVR row.
line_defs = []
with open(LINES_CSV, newline='') as lines_csvfile:
    lines_reader = csv.DictReader(lines_csvfile, delimiter='\t')
    for lines_row in lines_reader:
        lines_words = [
            ''.join(ch for ch in x if ch.isalnum())
            for x in lines_row['line_def'].split(' ')
            if len(x) > WORD_LEN
        ]
        line_defs.append((lines_row['index'], lines_words))

with open(OUT_CSV, 'w', newline='') as out_csvfile:
    fieldnames = ['svc_code', 'description', 'augusta_risk_type', 'bodily_injury']
    writer = csv.DictWriter(out_csvfile, fieldnames=fieldnames, delimiter='\t')
    writer.writeheader()
    with open(MVR_CSV, newline='') as mvr_csvfile:
        mvr_reader = csv.DictReader(mvr_csvfile, delimiter='\t')
        for mvr_row in mvr_reader:
            desc = mvr_row['desc']
            # The injury check depends only on the MVR description, so it is
            # evaluated once per row.  (The original recomputed it on every
            # lines.csv row and left it False when lines.csv had no data rows.)
            # NOTE(review): substring matching means 'pi' also matches words
            # such as 'expired' -- confirm that is intended.
            injury_flag = any(word in desc for word in INJURY_WORDS)
            # Collect the index of every line definition that shares a
            # significant word with this description.
            matching_indexes = [
                index for index, words in line_defs
                if any(word in desc for word in words)
            ]
            if not matching_indexes:
                matching_indexes.append('Unknown')
            writer.writerow({
                'svc_code': mvr_row['svc_code'],
                'description': desc.upper(),
                'augusta_risk_type': ', '.join(matching_indexes),
                'bodily_injury': injury_flag
            })
| en | 0.753603 | # Add line index to outputs for this row | 2.949727 | 3 |
celery_tasks/sms/tasks.py | fightingfish008/tornado-extensions | 5 | 6624184 | import json
import logging
import uuid
from celery_tasks.main import celery_app
from celery_tasks.sms.dysms_python.demo_sms_send import send_sms
# 获取日志器
logger = logging.getLogger('myapp')
# 阿里云发送短信验证码
@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
logger.info(sms_code)
__business_id = uuid.uuid1()
params = {'code': sms_code}
params = json.dumps(params)
try:
smsResponse = send_sms(__business_id, mobile, "啄鸟云医", "SMS_148862221", template_param=params)
except Exception as e:
logger.error('发送短信异常: mobile: %s sms_code: %s', mobile, sms_code)
else:
jn = json.loads(smsResponse.decode())
logger.error(jn)
if jn.get("Code") != "OK":
logger.error('发送短信失败: mobile: %s sms_code: %s' % (mobile, sms_code))
| import json
import logging
import uuid
from celery_tasks.main import celery_app
from celery_tasks.sms.dysms_python.demo_sms_send import send_sms
# Obtain the application logger
logger = logging.getLogger('myapp')
# Aliyun: send an SMS verification code (Celery task)
@celery_app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
    """Celery task: send the verification code *sms_code* to *mobile* via Aliyun.

    Failures are logged instead of raised, so callers receive no error signal.
    """
    logger.info(sms_code)
    # Business id forwarded to the Aliyun helper; uuid1 makes it unique per call.
    __business_id = uuid.uuid1()
    params = {'code': sms_code}
    params = json.dumps(params)
    try:
        # Sign name and template code are fixed for this application.
        smsResponse = send_sms(__business_id, mobile, "啄鸟云医", "SMS_148862221", template_param=params)
    except Exception as e:
        # "SMS sending raised an exception"
        logger.error('发送短信异常: mobile: %s sms_code: %s', mobile, sms_code)
    else:
        jn = json.loads(smsResponse.decode())
        # NOTE(review): logs the full response at ERROR level even on success -- confirm intended.
        logger.error(jn)
        if jn.get("Code") != "OK":
            # "SMS sending failed": gateway returned a non-OK code.
            logger.error('发送短信失败: mobile: %s sms_code: %s' % (mobile, sms_code))
| zh | 0.789269 | # 获取日志器 # 阿里云发送短信验证码 | 2.126781 | 2 |
setup.py | LABSN/TDTpy | 5 | 6624185 | from os import path
from setuptools import setup
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: System :: Hardware',
'License :: OSI Approved'
]
here = path.dirname(path.abspath(__file__))
with open(path.join(here, 'README.rst')) as f:
long_description = f.read().strip()
long_description += '''
Source code: http://github.com/LABSN/tdtpy
Documentation: http://tdtpy.readthedocs.org
'''
name = 'TDTPy'
# get the version (don't import tdt here, so dependencies are not needed)
version = None
with open(path.join('tdt', '__init__.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('\'')
break
if version is None:
raise RuntimeError('Could not determine version')
setup(
name=name,
version=version,
author='The TDTPy development team',
author_email='<EMAIL>',
packages=['tdt',
'tdt.actxobjects',
'tdt.components',
'tdt.device'],
url='http://tdtpy.readthedocs.org',
license='BSD (3-clause)',
description='Module for communicating with TDT\'s System 3 hardware',
long_description=long_description,
install_requires=['pypiwin32', 'numpy'],
extras_require={
'test': ['pytest'],
},
package_data={'tdt': ['components/*.rcx']},
classifiers=CLASSIFIERS,
command_options={
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
'release': ('setup.py', version),
'source_dir': ('setup.py', 'docs'),
},
}
)
| from os import path
from setuptools import setup

# Trove classifiers describing the package on PyPI.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Topic :: Scientific/Engineering',
    'Topic :: System :: Hardware',
    'License :: OSI Approved'
]

# Long description: the README plus pointers to source and documentation.
here = path.dirname(path.abspath(__file__))
with open(path.join(here, 'README.rst')) as f:
    long_description = f.read().strip()
long_description += '''
Source code: http://github.com/LABSN/tdtpy
Documentation: http://tdtpy.readthedocs.org
'''

name = 'TDTPy'
# get the version (don't import tdt here, so dependencies are not needed)
version = None
with open(path.join('tdt', '__init__.py'), 'r') as fid:
    for line in (line.strip() for line in fid):
        if line.startswith('__version__'):
            version = line.split('=')[1].strip().strip('\'')
            break
if version is None:
    raise RuntimeError('Could not determine version')

setup(
    name=name,
    version=version,
    author='The TDTPy development team',
    author_email='<EMAIL>',
    packages=['tdt',
              'tdt.actxobjects',
              'tdt.components',
              'tdt.device'],
    url='http://tdtpy.readthedocs.org',
    license='BSD (3-clause)',
    description='Module for communicating with TDT\'s System 3 hardware',
    long_description=long_description,
    install_requires=['pypiwin32', 'numpy'],
    extras_require={
        'test': ['pytest'],
    },
    package_data={'tdt': ['components/*.rcx']},
    classifiers=CLASSIFIERS,
    # Sphinx configuration consumed by `setup.py build_sphinx`.
    command_options={
        'build_sphinx': {
            'project': ('setup.py', name),
            'version': ('setup.py', version),
            'release': ('setup.py', version),
            'source_dir': ('setup.py', 'docs'),
        },
    }
)
| en | 0.715033 | Source code: http://github.com/LABSN/tdtpy
Documentation: http://tdtpy.readthedocs.org # get the version (don't import tdt here, so dependencies are not needed) | 1.692637 | 2 |
login/migrations/0009_auto_20151115_1859.py | mdsafwan/Deal-My-Stuff | 0 | 6624186 | <filename>login/migrations/0009_auto_20151115_1859.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0008_user_login_status'),
]
operations = [
migrations.AlterField(
model_name='user_login',
name='User_ID',
field=models.ForeignKey(related_name='User_ID_Loggedin', db_column=b'User_ID', to='login.user_details'),
),
]
| <filename>login/migrations/0009_auto_20151115_1859.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare user_login.User_ID as an FK to login.user_details.

    Maps the field onto the legacy 'User_ID' database column and exposes the
    reverse relation as 'User_ID_Loggedin'.
    """

    # Applied after the login-status migration.
    dependencies = [
        ('login', '0008_user_login_status'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user_login',
            name='User_ID',
            field=models.ForeignKey(related_name='User_ID_Loggedin', db_column=b'User_ID', to='login.user_details'),
        ),
    ]
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.379103 | 1 |
django_ini_settings/load.py | sebastianphelps/django-local-settings | 1 | 6624187 | """Load settings from a file to insert into your django settings.py"""
import ConfigParser
import os
from statics import ANY, NAME_TO_UPPER
from settings_types import (DictItemSetting, StringSetting,
BoolSetting, IntSetting)
# Default mapping from (ini section, option) pairs to setting descriptors.
# Exact (section, option) keys take precedence over (section, ANY) wildcard
# entries, which match any option in that section and derive the Django
# setting name from the option via NAME_TO_UPPER (see match_setting).
CONFIG_FILE_MAPPING = {
    ("database", "user"): DictItemSetting(("DATABASES", "default", "USER")),
    ("database", "password"): DictItemSetting(("DATABASES", "default", "PASSWORD")),
    ("database", "engine"): DictItemSetting(("DATABASES", "default", "ENGINE")),
    ("database", "name"): DictItemSetting(("DATABASES", "default", "NAME")),
    ("database", "host"): DictItemSetting(("DATABASES", "default", "HOST")),
    ("database", "port"): DictItemSetting(("DATABASES", "default", "PORT")),
    ("security", "secret_key"): StringSetting("SECRET_KEY"),
    ("security", "debug"): BoolSetting("DEBUG"),
    ("urls", "media_url"): StringSetting("MEDIA_URL"),
    ("urls", "static_url"): StringSetting("STATIC_URL"),
    ("string", ANY): StringSetting(NAME_TO_UPPER),
    ("bool", ANY): BoolSetting(NAME_TO_UPPER),
    ("dict_item", ANY): DictItemSetting(NAME_TO_UPPER),
    ("int", ANY): IntSetting(NAME_TO_UPPER),
}
def match_setting(section, option, config_mapping):
    """Look up the setting descriptor for an ini ``section``/``option`` pair.

    An exact ``(section, option)`` key wins; otherwise a ``(section, ANY)``
    wildcard entry for the section is used; otherwise ``None`` is returned.
    """
    exact_key = (section, option)
    if exact_key in config_mapping:
        return config_mapping[exact_key]
    wildcard_key = (section, ANY)
    if wildcard_key in config_mapping:
        return config_mapping[wildcard_key]
    return None
def load_ini(ini_file=None, settings_module=None, config_mapping=None):
    """Read *ini_file* and apply its values to *settings_module*.

    Each (section, option) pair found in the ini file is resolved through
    *config_mapping* (default: ``CONFIG_FILE_MAPPING``) via ``match_setting``;
    options with no mapping entry are silently skipped.  When *ini_file* is
    None the path is taken from the ``DJANGO_CONFIG_FILE`` environment
    variable.

    Typical usage from a Django settings module::

        import sys
        from django_ini_settings.load import load_ini
        load_ini(ini_file="/etc/application/application.ini",
                 settings_module=sys.modules[__name__])

    Raises ``Exception`` when no ini file can be determined or when
    *settings_module* is missing.  An unreadable/missing file is ignored
    (``ConfigParser.read`` then returns an empty list).
    """
    if config_mapping is None:
        config_mapping = CONFIG_FILE_MAPPING
    if ini_file is None:
        # Fall back to the environment variable; empty or unset is an error.
        if os.getenv("DJANGO_CONFIG_FILE", ""):
            ini_file = os.getenv("DJANGO_CONFIG_FILE")
        else:
            raise Exception("No ini file provided, can't load settings.")
    if settings_module is None:
        raise Exception("Can't find the settings module")
    config = ConfigParser.ConfigParser()
    read_files = config.read(ini_file)
    if len(read_files) > 0:
        for section in config.sections():
            for option in config.options(section):
                setting = match_setting(section, option, config_mapping)
                if setting is None:
                    # There were no matches
                    continue
                value = config.get(section, option)
                setting.set_value(settings_module, section, option, value)
import ConfigParser
import os
from statics import ANY, NAME_TO_UPPER
from settings_types import (DictItemSetting, StringSetting,
BoolSetting, IntSetting)
CONFIG_FILE_MAPPING = {
("database", "user"): DictItemSetting(("DATABASES", "default", "USER")),
("database", "password"): DictItemSetting(("DATABASES", "default", "PASSWORD")),
("database", "engine"): DictItemSetting(("DATABASES", "default", "ENGINE")),
("database", "name"): DictItemSetting(("DATABASES", "default", "NAME")),
("database", "host"): DictItemSetting(("DATABASES", "default", "HOST")),
("database", "port"): DictItemSetting(("DATABASES", "default", "PORT")),
("security", "secret_key"): StringSetting("SECRET_KEY"),
("security", "debug"): BoolSetting("DEBUG"),
("urls", "media_url"): StringSetting("MEDIA_URL"),
("urls", "static_url"): StringSetting("STATIC_URL"),
("string", ANY): StringSetting(NAME_TO_UPPER),
("bool", ANY): BoolSetting(NAME_TO_UPPER),
("dict_item", ANY): DictItemSetting(NAME_TO_UPPER),
("int", ANY): IntSetting(NAME_TO_UPPER),
}
def match_setting(section, option, config_mapping):
    """Find a setting in the config mapping based on the section and option in
    the config.
    First search for the exact match of section+option. Then look for
    section+ANY. Returns None when neither key is present.
    """
    if (section, option) in config_mapping:
        # Exact match
        return config_mapping[(section, option)]
    elif (section, ANY) in config_mapping:
        # Match section with any option
        return config_mapping[(section, ANY)]
    return None
def load_ini(ini_file=None, settings_module=None, config_mapping=None):
"""
Load an ini file into your django settings.py
To use, put something like the following in your settings module:
import sys
from django_ini_settings.load import load_ini
load_ini(ini_file="/etc/application/application.ini",
settings_module=sys.modules[__name__])
If you want to customise the mappings from the ini to your settings then,
you can do something like this:
import sys
from django_ini_settings.load import load_ini
from django_ini_settings.settings_types import (DictItemSetting, StringSetting,
BoolSetting, IntSetting)
from django_ini_settings.statics import ANY, NAME_TO_UPPER
CONFIG_FILE_MAPPING = {
("database", "user"): DictItemSetting(("DATABASES", "default", "USER")),
("database", "password"): DictItemSetting(("DATABASES", "default", "PASSWORD")),
("database", "engine"): DictItemSetting(("DATABASES", "default", "ENGINE")),
("database", "name"): DictItemSetting(("DATABASES", "default", "NAME")),
("database", "host"): DictItemSetting(("DATABASES", "default", "HOST")),
("database", "port"): DictItemSetting(("DATABASES", "default", "PORT")),
("security", "secret_key"): StringSetting("SECRET_KEY"),
("security", "debug"): BoolSetting("DEBUG"),
("urls", "media_url"): StringSetting("MEDIA_URL"),
("urls", "static_url"): StringSetting("STATIC_URL"),
("application", "max_file_size"): IntSetting("MAX_FILE_SIZE"),
("application", ANY): StringSetting(NAME_TO_UPPER),
}
load_ini(ini_file="/etc/application/application.ini",
settings_module=sys.modules[__name__],
config_mapping=CONFIG_FILE_MAPPING)
"""
if config_mapping is None:
config_mapping = CONFIG_FILE_MAPPING
if ini_file is None:
if os.getenv("DJANGO_CONFIG_FILE", ""):
ini_file = os.getenv("DJANGO_CONFIG_FILE")
else:
raise Exception("No ini file provided, can't load settings.")
if settings_module is None:
raise Exception("Can't find the settings module")
config = ConfigParser.ConfigParser()
read_files = config.read(ini_file)
if len(read_files) > 0:
for section in config.sections():
for option in config.options(section):
setting = match_setting(section, option, config_mapping)
if setting is None:
# There were no matches
continue
value = config.get(section, option)
setting.set_value(settings_module, section, option, value) | en | 0.412406 | Load settings from a file to insert into your django settings.py Find a setting in the config mapping based of the section and option in the config. First search for the exact match of section+option. Then look for section+ANY. # Exact match # Match section with any option Load an ini file into your django settings.py To use, put something like the following in your settings module: import sys from django_ini_settings.load import load_ini load_ini(ini_file="/etc/application/application.ini", settings_module=sys.modules[__name__]) If you want to customise the mappings from the ini to your settings then, you can do something like this: import sys from django_ini_settings.load import load_ini from django_ini_settings.settings_types import (DictItemSetting, StringSetting, BoolSetting, IntSetting) from django_ini_settings.statics import ANY, NAME_TO_UPPER CONFIG_FILE_MAPPING = { ("database", "user"): DictItemSetting(("DATABASES", "default", "USER")), ("database", "password"): DictItemSetting(("DATABASES", "default", "PASSWORD")), ("database", "engine"): DictItemSetting(("DATABASES", "default", "ENGINE")), ("database", "name"): DictItemSetting(("DATABASES", "default", "NAME")), ("database", "host"): DictItemSetting(("DATABASES", "default", "HOST")), ("database", "port"): DictItemSetting(("DATABASES", "default", "PORT")), ("security", "secret_key"): StringSetting("SECRET_KEY"), ("security", "debug"): BoolSetting("DEBUG"), ("urls", "media_url"): StringSetting("MEDIA_URL"), ("urls", "static_url"): StringSetting("STATIC_URL"), ("application", "max_file_size"): IntSetting("MAX_FILE_SIZE"), ("application", ANY): StringSetting(NAME_TO_UPPER), } load_ini(ini_file="/etc/application/application.ini", settings_module=sys.modules[__name__], config_mapping=CONFIG_FILE_MAPPING) # There were no matches | 2.823797 | 3 |
azurelinuxagent/common/cgroupstelemetry.py | deathly809/WALinuxAgent | 0 | 6624188 | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import errno
import threading
from collections import namedtuple
from datetime import datetime as dt
from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroup import CpuCgroup, CGroupContollers
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.logger import EVERY_SIX_HOURS
from azurelinuxagent.common.resourceusage import MemoryResourceUsage, ProcessInfo
# (category, counter, instance, value) tuple emitted for each polled metric.
MetricValue = namedtuple('Metric', ['category', 'counter', 'instance', 'value'])
# Pairs a "<pid> | <name> | <cmdline>" key with its accumulated Metric.
StatmMetricValue = namedtuple('StatmMetricValue', ['pid_name_cmdline', 'resource_metric'])
# Separator used when building process-info summary strings.
DELIM = " | "
# Placeholders used when /proc/<pid>/{comm,cmdline} cannot be read.
DEFAULT_PROCESS_NAME = "NO_PROCESS_FOUND"
DEFAULT_PROCESS_COMMANDLINE = "NO_CMDLINE_FOUND"
class MetricsCategory(object):
    # Category names attached to MetricValue tuples when reporting telemetry.
    MEMORY_CATEGORY = "Memory"
    PROCESS_CATEGORY = "Process"
class MetricsCounter(object):
    # Counter names attached to MetricValue tuples when reporting telemetry.
    PROCESSOR_PERCENT_TIME = "% Processor Time"
    TOTAL_MEM_USAGE = "Total Memory Usage"
    MAX_MEM_USAGE = "Max Memory Usage"
    MEM_USED_BY_PROCESS = "Memory Used by Process"
class CGroupsTelemetry(object):
    """
    Tracks a set of cgroups and accumulates/reports their CPU and memory
    usage metrics. All state is class-level and guarded by a re-entrant
    lock so the tracker can be shared across threads.
    """
    _tracked = []  # cgroup objects currently being polled
    _cgroup_metrics = {}  # cgroup name -> CgroupMetrics accumulator
    _rlock = threading.RLock()

    @staticmethod
    def get_process_info_summary(process_id):
        """
        Return "<pid> | <process name> | <command line>" for process_id.

        The /proc/<pid>/{comm,cmdline} files can cease to exist at any time
        (the process can die, or finish), so lookup failures fall back to the
        DEFAULT_* placeholders instead of raising.
        """
        process_cmdline = DEFAULT_PROCESS_COMMANDLINE
        process_name = DEFAULT_PROCESS_NAME
        # BUGFIX: the original used "X if not None else Y", whose condition
        # ("not None") is always True, so the fallback branch was dead. Fall
        # back explicitly when the lookup returns None.
        try:
            cmdline = ProcessInfo.get_proc_cmdline(process_id)
            if cmdline is not None:
                process_cmdline = cmdline
        except Exception as e:
            logger.periodic_info(EVERY_SIX_HOURS, "[PERIODIC] {0}", ustr(e))
        try:
            name = ProcessInfo.get_proc_name(process_id)
            if name is not None:
                process_name = name
        except Exception as e:
            logger.periodic_info(EVERY_SIX_HOURS, "[PERIODIC] {0}", ustr(e))
        return process_id + DELIM + process_name + DELIM + process_cmdline

    @staticmethod
    def _get_metrics_list(metric):
        """Flatten a Metric into [avg, min, max, median, count, first, last]."""
        return [metric.average(), metric.min(), metric.max(), metric.median(), metric.count(),
                metric.first_poll_time(), metric.last_poll_time()]

    @staticmethod
    def _process_cgroup_metric(cgroup_metrics):
        """
        Convert a CgroupMetrics accumulator into a nested dict keyed by
        resource ("cpu", "memory", "proc_statm_memory"); resources with no
        samples are omitted.
        """
        memory_usage = cgroup_metrics.get_memory_metrics()
        max_memory_usage = cgroup_metrics.get_max_memory_metrics()
        cpu_usage = cgroup_metrics.get_cpu_metrics()
        memory_usage_per_process = cgroup_metrics.get_proc_statm_memory_metrics()
        processed_extension = {}
        if cpu_usage.count() > 0:
            processed_extension["cpu"] = {"cur_cpu": CGroupsTelemetry._get_metrics_list(cpu_usage)}
        if memory_usage.count() > 0:
            # setdefault replaces the original "if key in dict" merge pattern.
            processed_extension.setdefault("memory", {})["cur_mem"] = \
                CGroupsTelemetry._get_metrics_list(memory_usage)
        if max_memory_usage.count() > 0:
            processed_extension.setdefault("memory", {})["max_mem"] = \
                CGroupsTelemetry._get_metrics_list(max_memory_usage)
        for pid_process_memory in memory_usage_per_process:
            processed_extension.setdefault("proc_statm_memory", {})[pid_process_memory.pid_name_cmdline] = \
                CGroupsTelemetry._get_metrics_list(pid_process_memory.resource_metric)
        return processed_extension

    @staticmethod
    def track_cgroup(cgroup):
        """
        Adds the given item to the list of tracked cgroups (no-op when the
        cgroup path is already tracked).
        """
        if isinstance(cgroup, CpuCgroup):
            # Take a baseline so the first poll reports a delta, not totals.
            cgroup.initialize_cpu_usage()
        with CGroupsTelemetry._rlock:
            if not CGroupsTelemetry.is_tracked(cgroup.path):
                CGroupsTelemetry._tracked.append(cgroup)
                logger.info("Started tracking new cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path))

    @staticmethod
    def is_tracked(path):
        """
        Returns true if the given item is in the list of tracked items.
        O(n) operation, but limited to the few cgroup objects we have.
        """
        with CGroupsTelemetry._rlock:
            for cgroup in CGroupsTelemetry._tracked:
                if path == cgroup.path:
                    return True
        return False

    @staticmethod
    def stop_tracking(cgroup):
        """
        Stop tracking the given cgroup (raises ValueError if not tracked).
        """
        with CGroupsTelemetry._rlock:
            CGroupsTelemetry._tracked.remove(cgroup)
            logger.info("Stopped tracking cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path))

    @staticmethod
    def report_all_tracked():
        """
        The report_all_tracked's purpose is to collect the data from the tracked
        cgroups and process the metric into a data structure by
        _process_cgroup_metric. The perf metric is added into the data structure
        and returned to the caller.
        The report_all_tracked would be removed soon - in favor of sending
        report_metric directly, when polling the data from tracked groups.
        :return collected_metrics: dictionary of cgroups metrics.
        """
        collected_metrics = {}
        for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items():
            perf_metric = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics)
            if perf_metric:
                collected_metrics[name] = perf_metric
            # Samples are consumed by this report; start accumulating afresh.
            cgroup_metrics.clear()
        # Doing cleanup after the metrics have already been collected.
        for key in [key for key in CGroupsTelemetry._cgroup_metrics if
                    CGroupsTelemetry._cgroup_metrics[key].marked_for_delete]:
            del CGroupsTelemetry._cgroup_metrics[key]
        return collected_metrics

    @staticmethod
    def poll_all_tracked():
        """
        Poll every tracked cgroup once, accumulate the samples, and return
        the list of MetricValue tuples collected during this poll. Cgroups
        that are no longer active are dropped from tracking and their
        accumulators marked for deletion.
        """
        metrics = []
        with CGroupsTelemetry._rlock:
            # Iterate a copy: stop_tracking() mutates the list mid-loop.
            for cgroup in CGroupsTelemetry._tracked[:]:
                if cgroup.name not in CGroupsTelemetry._cgroup_metrics:
                    CGroupsTelemetry._cgroup_metrics[cgroup.name] = CgroupMetrics()
                try:
                    if cgroup.controller == CGroupContollers.CPU:
                        current_cpu_usage = cgroup.get_cpu_usage()
                        CGroupsTelemetry._cgroup_metrics[cgroup.name].add_cpu_usage(current_cpu_usage)
                        metrics.append(MetricValue(MetricsCategory.PROCESS_CATEGORY, MetricsCounter.
                                                   PROCESSOR_PERCENT_TIME, cgroup.name, current_cpu_usage))
                    elif cgroup.controller == CGroupContollers.MEMORY:
                        current_memory_usage = cgroup.get_memory_usage()
                        CGroupsTelemetry._cgroup_metrics[cgroup.name].add_memory_usage(current_memory_usage)
                        metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.
                                                   TOTAL_MEM_USAGE, cgroup.name, current_memory_usage))
                        max_memory_usage = cgroup.get_max_memory_usage()
                        CGroupsTelemetry._cgroup_metrics[cgroup.name].add_max_memory_usage(max_memory_usage)
                        metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE,
                                                   cgroup.name, max_memory_usage))
                        # Per-process memory from /proc/<pid>/statm for every
                        # process currently in the cgroup.
                        pids = cgroup.get_tracked_processes()
                        for pid in pids:
                            try:
                                mem_usage_from_procstatm = MemoryResourceUsage.get_memory_usage_from_proc_statm(pid)
                                metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.
                                                           MEM_USED_BY_PROCESS, CGroupsTelemetry.get_process_info_summary(pid),
                                                           mem_usage_from_procstatm))
                                CGroupsTelemetry._cgroup_metrics[cgroup.name].add_proc_statm_memory(
                                    CGroupsTelemetry.get_process_info_summary(pid), mem_usage_from_procstatm)
                            except Exception as e:
                                # ENOENT just means the process exited between
                                # listing and reading; only log other errors.
                                if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT:
                                    logger.periodic_warn(logger.EVERY_HOUR, "[PERIODIC] Could not collect proc_statm "
                                                                            "for pid {0}. Error : {1}", pid, ustr(e))
                    else:
                        # Caught by the outer handler below and logged.
                        raise CGroupsException('CGroup controller {0} is not supported for cgroup {1}'.format(
                            cgroup.controller, cgroup.name))
                except Exception as e:
                    # There can be scenarios when the CGroup has been deleted by the time we are fetching the values
                    # from it. This would raise IOError with file entry not found (ERRNO: 2). We do not want to log
                    # every occurrence of such case as it would be very verbose. We do want to log all the other
                    # exceptions which could occur, which is why we do a periodic log for all the other errors.
                    if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT:
                        logger.periodic_warn(logger.EVERY_HOUR, '[PERIODIC] Could not collect metrics for cgroup '
                                                                '{0}. Error : {1}'.format(cgroup.name, ustr(e)))
                if not cgroup.is_active():
                    CGroupsTelemetry.stop_tracking(cgroup)
                    CGroupsTelemetry._cgroup_metrics[cgroup.name].marked_for_delete = True
        return metrics

    @staticmethod
    def prune_all_tracked():
        """Drop every tracked cgroup that is no longer active."""
        with CGroupsTelemetry._rlock:
            # Iterate a copy: stop_tracking() mutates the list mid-loop.
            for cgroup in CGroupsTelemetry._tracked[:]:
                if not cgroup.is_active():
                    CGroupsTelemetry.stop_tracking(cgroup)

    @staticmethod
    def reset():
        """Discard all tracked cgroups and accumulated metrics."""
        with CGroupsTelemetry._rlock:
            del CGroupsTelemetry._tracked[:]  # empty the list in place
            CGroupsTelemetry._cgroup_metrics = {}
class CgroupMetrics(object):
    """Accumulates the metric samples polled for a single cgroup."""

    def __init__(self):
        self._memory_usage = Metric()
        self._max_memory_usage = Metric()
        self._cpu_usage = Metric()
        self._proc_statm_mem = {}  # process summary -> Metric of statm samples
        # Once set, new samples are ignored and the owner may discard us.
        self.marked_for_delete = False

    def add_memory_usage(self, usage):
        """Record a current-memory sample (ignored once marked for deletion)."""
        if self.marked_for_delete:
            return
        self._memory_usage.append(usage)

    def add_max_memory_usage(self, usage):
        """Record a max-memory sample (ignored once marked for deletion)."""
        if self.marked_for_delete:
            return
        self._max_memory_usage.append(usage)

    def add_cpu_usage(self, usage):
        """Record a CPU-usage sample (ignored once marked for deletion)."""
        if self.marked_for_delete:
            return
        self._cpu_usage.append(usage)

    def add_proc_statm_memory(self, pid, usage):
        """Record a per-process statm sample keyed by the process summary."""
        if self.marked_for_delete:
            return
        self._proc_statm_mem.setdefault(pid, Metric()).append(usage)

    def get_memory_metrics(self):
        """Metric of current-memory samples."""
        return self._memory_usage

    def get_max_memory_metrics(self):
        """Metric of max-memory samples."""
        return self._max_memory_usage

    def get_cpu_metrics(self):
        """Metric of CPU-usage samples."""
        return self._cpu_usage

    def get_proc_statm_memory_metrics(self):
        """
        :return: StatmMetricValue tuples of pid and metric
        """
        return [StatmMetricValue(key, metric) for key, metric in self._proc_statm_mem.items()]

    def clear(self):
        """Reset every accumulator, keeping the marked_for_delete flag."""
        for metric in (self._memory_usage, self._max_memory_usage, self._cpu_usage):
            metric.clear()
        self._proc_statm_mem.clear()
class Metric(object):
    """A series of polled samples plus the first/last poll timestamps."""

    def __init__(self):
        self._data = []
        self._first_poll_time = None
        self._last_poll_time = None

    def append(self, data):
        """Add one sample, stamping the first/last poll times."""
        if self._first_poll_time is None:
            # Set once, on the very first sample.
            self._first_poll_time = dt.utcnow()
        self._data.append(data)
        self._last_poll_time = dt.utcnow()

    def clear(self):
        """Discard all samples and timestamps."""
        self._first_poll_time = None
        self._last_poll_time = None
        del self._data[:]  # empty the list in place

    def average(self):
        """Arithmetic mean of the samples, or None if there are none."""
        if not self._data:
            return None
        return float(sum(self._data)) / float(len(self._data))

    def max(self):
        """Largest sample, or None if there are none."""
        if not self._data:
            return None
        return max(self._data)

    def min(self):
        """Smallest sample, or None if there are none."""
        if not self._data:
            return None
        return min(self._data)

    def median(self):
        """Middle sample (mean of the two middle ones for even counts)."""
        ordered = sorted(self._data)
        size = len(ordered)
        if size < 1:
            return None
        mid = (size - 1) // 2
        if size % 2:
            return ordered[mid]
        return (ordered[mid] + ordered[mid + 1]) / 2.0

    def count(self):
        """Number of samples recorded since the last clear()."""
        return len(self._data)

    def first_poll_time(self):
        """str() of the first poll timestamp ("None" when empty)."""
        return str(self._first_poll_time)

    def last_poll_time(self):
        """str() of the most recent poll timestamp ("None" when empty)."""
        return str(self._last_poll_time)
| # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
import errno
import threading
from collections import namedtuple
from datetime import datetime as dt
from azurelinuxagent.common import logger
from azurelinuxagent.common.cgroup import CpuCgroup, CGroupContollers
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.logger import EVERY_SIX_HOURS
from azurelinuxagent.common.resourceusage import MemoryResourceUsage, ProcessInfo
MetricValue = namedtuple('Metric', ['category', 'counter', 'instance', 'value'])
StatmMetricValue = namedtuple('StatmMetricValue', ['pid_name_cmdline', 'resource_metric'])
DELIM = " | "
DEFAULT_PROCESS_NAME = "NO_PROCESS_FOUND"
DEFAULT_PROCESS_COMMANDLINE = "NO_CMDLINE_FOUND"
class MetricsCategory(object):
MEMORY_CATEGORY = "Memory"
PROCESS_CATEGORY = "Process"
class MetricsCounter(object):
PROCESSOR_PERCENT_TIME = "% Processor Time"
TOTAL_MEM_USAGE = "Total Memory Usage"
MAX_MEM_USAGE = "Max Memory Usage"
MEM_USED_BY_PROCESS = "Memory Used by Process"
class CGroupsTelemetry(object):
"""
"""
_tracked = []
_cgroup_metrics = {}
_rlock = threading.RLock()
@staticmethod
def get_process_info_summary(process_id):
process_cmdline = DEFAULT_PROCESS_COMMANDLINE
process_name = DEFAULT_PROCESS_NAME
# The ProcessName and ProcessCommandLine can generate Exception if the file /proc/<pid>/{comm,cmdline} cease to
# exist; eg: the process can die, or finish. Which is why we need Default Names, in case we fail to fetch the
# details from those files.
try:
process_cmdline = ProcessInfo.get_proc_cmdline(process_id) if not None else DEFAULT_PROCESS_COMMANDLINE
except Exception as e:
logger.periodic_info(EVERY_SIX_HOURS, "[PERIODIC] {0}", ustr(e))
try:
process_name = ProcessInfo.get_proc_name(process_id) if not None else DEFAULT_PROCESS_NAME
except Exception as e:
logger.periodic_info(EVERY_SIX_HOURS, "[PERIODIC] {0}", ustr(e))
return process_id + DELIM + process_name + DELIM + process_cmdline
@staticmethod
def _get_metrics_list(metric):
return [metric.average(), metric.min(), metric.max(), metric.median(), metric.count(),
metric.first_poll_time(), metric.last_poll_time()]
@staticmethod
def _process_cgroup_metric(cgroup_metrics):
memory_usage = cgroup_metrics.get_memory_metrics()
max_memory_usage = cgroup_metrics.get_max_memory_metrics()
cpu_usage = cgroup_metrics.get_cpu_metrics()
memory_usage_per_process = cgroup_metrics.get_proc_statm_memory_metrics()
processed_extension = {}
if cpu_usage.count() > 0:
processed_extension["cpu"] = {"cur_cpu": CGroupsTelemetry._get_metrics_list(cpu_usage)}
if memory_usage.count() > 0:
if "memory" in processed_extension:
processed_extension["memory"]["cur_mem"] = CGroupsTelemetry._get_metrics_list(memory_usage)
else:
processed_extension["memory"] = {"cur_mem": CGroupsTelemetry._get_metrics_list(memory_usage)}
if max_memory_usage.count() > 0:
if "memory" in processed_extension:
processed_extension["memory"]["max_mem"] = CGroupsTelemetry._get_metrics_list(max_memory_usage)
else:
processed_extension["memory"] = {"max_mem": CGroupsTelemetry._get_metrics_list(max_memory_usage)}
for pid_process_memory in memory_usage_per_process:
if "proc_statm_memory" in processed_extension:
processed_extension["proc_statm_memory"][pid_process_memory.pid_name_cmdline] = \
CGroupsTelemetry._get_metrics_list(pid_process_memory.resource_metric)
else:
processed_extension["proc_statm_memory"] = {pid_process_memory.pid_name_cmdline:
CGroupsTelemetry._get_metrics_list(pid_process_memory.resource_metric)}
return processed_extension
@staticmethod
def track_cgroup(cgroup):
"""
Adds the given item to the dictionary of tracked cgroups
"""
if isinstance(cgroup, CpuCgroup):
# set the current cpu usage
cgroup.initialize_cpu_usage()
with CGroupsTelemetry._rlock:
if not CGroupsTelemetry.is_tracked(cgroup.path):
CGroupsTelemetry._tracked.append(cgroup)
logger.info("Started tracking new cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path))
@staticmethod
def is_tracked(path):
"""
Returns true if the given item is in the list of tracked items
O(n) operation. But limited to few cgroup objects we have.
"""
with CGroupsTelemetry._rlock:
for cgroup in CGroupsTelemetry._tracked:
if path == cgroup.path:
return True
return False
@staticmethod
def stop_tracking(cgroup):
"""
Stop tracking the cgroups for the given name
"""
with CGroupsTelemetry._rlock:
CGroupsTelemetry._tracked.remove(cgroup)
logger.info("Stopped tracking cgroup: {0}, path: {1}".format(cgroup.name, cgroup.path))
@staticmethod
def report_all_tracked():
"""
The report_all_tracked's purpose is to collect the data from the tracked cgroups and process the metric into a
data structure by _process_cgroup_metric. The perf metric is added into the data structure and returned to the
caller.
The report_all_tracked would be removed soon - in favor of sending report_metric directly, when polling the data
from tracked groups.
:return collected_metrics: dictionary of cgroups metrics.
"""
collected_metrics = {}
for name, cgroup_metrics in CGroupsTelemetry._cgroup_metrics.items():
perf_metric = CGroupsTelemetry._process_cgroup_metric(cgroup_metrics)
if perf_metric:
collected_metrics[name] = perf_metric
cgroup_metrics.clear()
# Doing cleanup after the metrics have already been collected.
for key in [key for key in CGroupsTelemetry._cgroup_metrics if
CGroupsTelemetry._cgroup_metrics[key].marked_for_delete]:
del CGroupsTelemetry._cgroup_metrics[key]
return collected_metrics
@staticmethod
def poll_all_tracked():
metrics = []
with CGroupsTelemetry._rlock:
for cgroup in CGroupsTelemetry._tracked[:]:
if cgroup.name not in CGroupsTelemetry._cgroup_metrics:
CGroupsTelemetry._cgroup_metrics[cgroup.name] = CgroupMetrics()
try:
if cgroup.controller == CGroupContollers.CPU:
current_cpu_usage = cgroup.get_cpu_usage()
CGroupsTelemetry._cgroup_metrics[cgroup.name].add_cpu_usage(current_cpu_usage)
metrics.append(MetricValue(MetricsCategory.PROCESS_CATEGORY, MetricsCounter.
PROCESSOR_PERCENT_TIME, cgroup.name, current_cpu_usage))
elif cgroup.controller == CGroupContollers.MEMORY:
current_memory_usage = cgroup.get_memory_usage()
CGroupsTelemetry._cgroup_metrics[cgroup.name].add_memory_usage(current_memory_usage)
metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.
TOTAL_MEM_USAGE, cgroup.name, current_memory_usage))
max_memory_usage = cgroup.get_max_memory_usage()
CGroupsTelemetry._cgroup_metrics[cgroup.name].add_max_memory_usage(max_memory_usage)
metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.MAX_MEM_USAGE,
cgroup.name, max_memory_usage))
pids = cgroup.get_tracked_processes()
for pid in pids:
try:
mem_usage_from_procstatm = MemoryResourceUsage.get_memory_usage_from_proc_statm(pid)
metrics.append(MetricValue(MetricsCategory.MEMORY_CATEGORY, MetricsCounter.
MEM_USED_BY_PROCESS, CGroupsTelemetry.get_process_info_summary(pid),
mem_usage_from_procstatm))
CGroupsTelemetry._cgroup_metrics[cgroup.name].add_proc_statm_memory(
CGroupsTelemetry.get_process_info_summary(pid), mem_usage_from_procstatm)
except Exception as e:
if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT:
logger.periodic_warn(logger.EVERY_HOUR, "[PERIODIC] Could not collect proc_statm "
"for pid {0}. Error : {1}", pid, ustr(e))
else:
raise CGroupsException('CGroup controller {0} is not supported for cgroup {1}'.format(
cgroup.controller, cgroup.name))
except Exception as e:
# There can be scenarios when the CGroup has been deleted by the time we are fetching the values
# from it. This would raise IOError with file entry not found (ERRNO: 2). We do not want to log
# every occurrences of such case as it would be very verbose. We do want to log all the other
# exceptions which could occur, which is why we do a periodic log for all the other errors.
if not isinstance(e, (IOError, OSError)) or e.errno != errno.ENOENT:
logger.periodic_warn(logger.EVERY_HOUR, '[PERIODIC] Could not collect metrics for cgroup '
'{0}. Error : {1}'.format(cgroup.name, ustr(e)))
if not cgroup.is_active():
CGroupsTelemetry.stop_tracking(cgroup)
CGroupsTelemetry._cgroup_metrics[cgroup.name].marked_for_delete = True
return metrics
@staticmethod
def prune_all_tracked():
with CGroupsTelemetry._rlock:
for cgroup in CGroupsTelemetry._tracked[:]:
if not cgroup.is_active():
CGroupsTelemetry.stop_tracking(cgroup)
@staticmethod
def reset():
with CGroupsTelemetry._rlock:
CGroupsTelemetry._tracked *= 0 # emptying the list
CGroupsTelemetry._cgroup_metrics = {}
class CgroupMetrics(object):
    """Accumulates the metric samples polled for a single cgroup."""
    def __init__(self):
        self._memory_usage = Metric()
        self._max_memory_usage = Metric()
        self._cpu_usage = Metric()
        # process summary string -> Metric of /proc/<pid>/statm samples
        self._proc_statm_mem = {}
        # Once set, new samples are ignored and the owner may discard us.
        self.marked_for_delete = False
    def add_memory_usage(self, usage):
        # Record a current-memory sample (ignored once marked for deletion).
        if not self.marked_for_delete:
            self._memory_usage.append(usage)
    def add_max_memory_usage(self, usage):
        # Record a max-memory sample (ignored once marked for deletion).
        if not self.marked_for_delete:
            self._max_memory_usage.append(usage)
    def add_cpu_usage(self, usage):
        # Record a CPU-usage sample (ignored once marked for deletion).
        if not self.marked_for_delete:
            self._cpu_usage.append(usage)
    def add_proc_statm_memory(self, pid, usage):
        # Record a per-process statm sample keyed by the process summary.
        if not self.marked_for_delete:
            if pid not in self._proc_statm_mem:
                self._proc_statm_mem[pid] = Metric()
            self._proc_statm_mem[pid].append(usage)
    def get_memory_metrics(self):
        """Metric of current-memory samples."""
        return self._memory_usage
    def get_max_memory_metrics(self):
        """Metric of max-memory samples."""
        return self._max_memory_usage
    def get_cpu_metrics(self):
        """Metric of CPU-usage samples."""
        return self._cpu_usage
    def get_proc_statm_memory_metrics(self):
        """
        :return: StatmMetricValue tuples of pid and metric
        """
        return [StatmMetricValue(pid_name_cmdline, metric) for pid_name_cmdline, metric in self._proc_statm_mem.items()]
    def clear(self):
        # Reset every accumulator; the marked_for_delete flag is kept.
        self._memory_usage.clear()
        self._max_memory_usage.clear()
        self._cpu_usage.clear()
        self._proc_statm_mem.clear()
class Metric(object):
    """A series of polled samples plus the first/last poll timestamps."""

    def __init__(self):
        self._data = []
        self._first_poll_time = None
        self._last_poll_time = None

    def append(self, data):
        """Add one sample, stamping the first/last poll times."""
        if self._first_poll_time is None:
            # Set once, on the very first sample.
            self._first_poll_time = dt.utcnow()
        self._data.append(data)
        self._last_poll_time = dt.utcnow()

    def clear(self):
        """Discard all samples and timestamps."""
        self._first_poll_time = None
        self._last_poll_time = None
        del self._data[:]  # empty the list in place

    def average(self):
        """Arithmetic mean of the samples, or None if there are none."""
        if not self._data:
            return None
        return float(sum(self._data)) / float(len(self._data))

    def max(self):
        """Largest sample, or None if there are none."""
        if not self._data:
            return None
        return max(self._data)

    def min(self):
        """Smallest sample, or None if there are none."""
        if not self._data:
            return None
        return min(self._data)

    def median(self):
        """Middle sample (mean of the two middle ones for even counts)."""
        ordered = sorted(self._data)
        size = len(ordered)
        if size < 1:
            return None
        mid = (size - 1) // 2
        if size % 2:
            return ordered[mid]
        return (ordered[mid] + ordered[mid + 1]) / 2.0

    def count(self):
        """Number of samples recorded since the last clear()."""
        return len(self._data)

    def first_poll_time(self):
        """str() of the first poll timestamp ("None" when empty)."""
        return str(self._first_poll_time)

    def last_poll_time(self):
        """str() of the most recent poll timestamp ("None" when empty)."""
        return str(self._last_poll_time)
| en | 0.884439 | # Copyright 2018 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # The ProcessName and ProcessCommandLine can generate Exception if the file /proc/<pid>/{comm,cmdline} cease to # exist; eg: the process can die, or finish. Which is why we need Default Names, in case we fail to fetch the # details from those files. Adds the given item to the dictionary of tracked cgroups # set the current cpu usage Returns true if the given item is in the list of tracked items O(n) operation. But limited to few cgroup objects we have. Stop tracking the cgroups for the given name The report_all_tracked's purpose is to collect the data from the tracked cgroups and process the metric into a data structure by _process_cgroup_metric. The perf metric is added into the data structure and returned to the caller. The report_all_tracked would be removed soon - in favor of sending report_metric directly, when polling the data from tracked groups. :return collected_metrics: dictionary of cgroups metrics. # Doing cleanup after the metrics have already been collected. # There can be scenarios when the CGroup has been deleted by the time we are fetching the values # from it. This would raise IOError with file entry not found (ERRNO: 2). We do not want to log # every occurrences of such case as it would be very verbose. 
We do want to log all the other # exceptions which could occur, which is why we do a periodic log for all the other errors. # emptying the list :return: StatmMetricValue tuples of pid and metric # We only want to do it first time. | 1.852029 | 2 |
source/infrastructure/quicksight/template_source.py | aws-solutions/improving-forecast-accuracy-with-machine-learning | 3 | 6624189 | # #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
import logging
import os
from typing import Dict, Optional
logger = logging.getLogger("cdk-helper")
class TemplateSource:
    """Resolves the ARN of the solution's published QuickSight source template.

    Configuration comes from environment variables:
      * ENABLE_QUICKSIGHT         - "1"/"true"/"yes" (case-insensitive) enables QuickSight
      * DIST_ACCOUNT_ID           - AWS account that owns the published template
      * DIST_QUICKSIGHT_NAMESPACE - namespace prefix used in the template name
    """

    def __init__(self, solution_name: str, solution_version: str):
        self.solution_name = solution_name
        self.solution_version = solution_version
        # BUGFIX: os.environ values are strings, so a bare truthiness test treated
        # any non-empty value (including "False"/"no") as enabled. Parse the flag
        # into a real boolean instead.
        self.quicksight_enabled = os.environ.get(
            "ENABLE_QUICKSIGHT", ""
        ).strip().lower() in ("1", "true", "yes")
        self.dist_account_id = os.environ.get("DIST_ACCOUNT_ID", None)
        self.dist_quicksight_namespace = os.environ.get(
            "DIST_QUICKSIGHT_NAMESPACE", None
        )

    def _enabled(self):
        # QuickSight support needs the enable flag plus a solution name and version.
        return self.quicksight_enabled and self.solution_name and self.solution_version

    @property
    def arn(self) -> Optional[str]:
        """ARN of the source template in us-east-1, or None when QuickSight is disabled."""
        if not self._enabled():
            logger.info("QuickSight is not enabled")
            return None
        # Template name convention: <namespace>_<solution>_<version with dots -> underscores>.
        quicksight_template_name = "_".join(
            [
                self.dist_quicksight_namespace,
                self.solution_name,
                self.solution_version.replace(".", "_"),
            ]
        )
        return ":".join(
            [
                "arn:aws:quicksight:us-east-1",
                self.dist_account_id,
                f"template/{quicksight_template_name}",
            ]
        )

    @property
    def mappings(self) -> Dict:
        """CloudFormation mapping entry for the template ARN (empty string when disabled)."""
        if self._enabled():
            return {"QuickSightSourceTemplateArn": self.arn}
        else:
            return {"QuickSightSourceTemplateArn": ""}
| # #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
import logging
import os
from typing import Dict, Optional
logger = logging.getLogger("cdk-helper")
class TemplateSource:
    """Builds the ARN of the QuickSight source template for this solution.

    Configuration is read from the ENABLE_QUICKSIGHT, DIST_ACCOUNT_ID and
    DIST_QUICKSIGHT_NAMESPACE environment variables.
    """

    def __init__(self, solution_name: str, solution_version: str):
        self.solution_name = solution_name
        self.solution_version = solution_version
        # NOTE(review): os.environ.get returns a *string*; any non-empty value
        # (including "False") is truthy in _enabled() below — confirm whether
        # this flag should be parsed into a real boolean.
        self.quicksight_enabled = os.environ.get("ENABLE_QUICKSIGHT", False)
        self.dist_account_id = os.environ.get("DIST_ACCOUNT_ID", None)
        self.dist_quicksight_namespace = os.environ.get(
            "DIST_QUICKSIGHT_NAMESPACE", None
        )

    def _enabled(self):
        # QuickSight support requires the enable flag plus a solution name and version.
        return self.quicksight_enabled and self.solution_name and self.solution_version

    @property
    def arn(self) -> Optional[str]:
        """ARN of the published template in us-east-1, or None when disabled."""
        if self._enabled():
            # Template name convention: <namespace>_<solution>_<version, dots -> underscores>.
            quicksight_template_name = "_".join(
                [
                    self.dist_quicksight_namespace,
                    self.solution_name,
                    self.solution_version.replace(".", "_"),
                ]
            )
            return ":".join(
                [
                    "arn:aws:quicksight:us-east-1",
                    self.dist_account_id,
                    f"template/{quicksight_template_name}",
                ]
            )
        else:
            logger.info("QuickSight is not enabled")
            return None

    @property
    def mappings(self) -> Dict:
        """CloudFormation mapping for the template ARN ('' when disabled)."""
        if self._enabled():
            return {"QuickSightSourceTemplateArn": self.arn}
        else:
            return {"QuickSightSourceTemplateArn": ""}
| en | 0.500207 | # ##################################################################################################################### # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # # with the License. You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for # # the specific language governing permissions and limitations under the License. # # ##################################################################################################################### | 1.814725 | 2 |
datasets/flickr/label_finder.py | TimK1998/SemanticSynthesisForScoreBasedModels | 0 | 6624190 | import os
from PIL import Image
import torchvision.transforms.functional as F
from collections import defaultdict
import torch
def find_labels():
    """Count, per segmentation label id, how many Flickr masks contain that label."""
    data_csv = 'flickr_landscapes_train_split.txt'
    targets_dir = '/export/data/compvis/datasets/rfw/segmentation/flickr_segmentation_v2/'
    with open(data_csv, 'r') as f:
        image_paths = f.read().splitlines()
    # Masks mirror the image paths but are stored as PNG files.
    targets = [os.path.join(targets_dir, p.replace('.jpg', '.png')) for p in image_paths]
    labels = defaultdict(int)
    for i, path in enumerate(targets):
        mask = F.to_tensor(Image.open(path)) * 255
        # Collapse the channel dimension and collect the unique label ids in this mask.
        ids = torch.unique(torch.squeeze(mask.long(), dim=0))
        for k in range(ids.shape[0]):
            labels[str(ids[k].item())] += 1
        if i % 10 == 0:
            print(f'Img {i}/{len(targets)}')
    print(labels)


if __name__ == '__main__':
    find_labels()
| import os
from PIL import Image
import torchvision.transforms.functional as F
from collections import defaultdict
import torch
def find_labels():
    """Count, for each segmentation label id, how many Flickr masks contain it."""
    data_csv = 'flickr_landscapes_train_split.txt'
    targets_dir = '/export/data/compvis/datasets/rfw/segmentation/flickr_segmentation_v2/'
    targets = []
    with open(data_csv, 'r') as f:
        image_paths = f.read().splitlines()
    for p in image_paths:
        # Masks mirror the image paths but are stored as PNG files.
        targets.append(os.path.join(targets_dir, p.replace('.jpg', '.png')))
    labels = defaultdict(int)
    for i in range(len(targets)):
        target = Image.open(targets[i])
        # Multiply by 255 to undo to_tensor's scaling — assumes 8-bit label
        # masks; TODO confirm mask bit depth.
        target = F.to_tensor(target) * 255
        target = target.long()
        target = torch.squeeze(target, dim=0)
        # Unique label ids present in this mask.
        target = torch.unique(target)
        for k in range(target.shape[0]):
            id = target[k].item()
            labels[str(id)] += 1
        if i % 10 == 0:
            print(f'Img {i}/{len(targets)}')
    print(labels)


if __name__ == '__main__':
    find_labels()
setup.py | abdala9512/auto-MLU | 0 | 6624191 | <reponame>abdala9512/auto-MLU
from setuptools import find_packages, setup
setup(
name='auto-mlu',
packages=find_packages(include=['automlu']),
version='0.3.0',
description='Utilities for Machine learning development',
author='<NAME>',
license='MIT',
install_requires=["scikit-learn"],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests',
) | from setuptools import find_packages, setup
setup(
name='auto-mlu',
packages=find_packages(include=['automlu']),
version='0.3.0',
description='Utilities for Machine learning development',
author='<NAME>',
license='MIT',
install_requires=["scikit-learn"],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests',
) | none | 1 | 1.112359 | 1 | |
crest/filing/midi/__init__.py | yokaze/crest-python | 0 | 6624192 | <reponame>yokaze/crest-python<filename>crest/filing/midi/__init__.py
#
# __init__.py
# crest-python
#
# Copyright (C) 2017 <NAME>
# Distributed under the MIT License.
#
from crest.filing.midi import chunk
from crest.filing.midi import track
__all__ = [
chunk.__name__.split('.')[-1],
track.__name__.split('.')[-1]
]
| #
# __init__.py
# crest-python
#
# Copyright (C) 2017 <NAME>
# Distributed under the MIT License.
#
from crest.filing.midi import chunk
from crest.filing.midi import track
__all__ = [
chunk.__name__.split('.')[-1],
track.__name__.split('.')[-1]
] | en | 0.517227 | # # __init__.py # crest-python # # Copyright (C) 2017 <NAME> # Distributed under the MIT License. # | 1.472269 | 1 |
peak/peak.py | VanNostrandLab/peak | 0 | 6624193 | <filename>peak/peak.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pipeline for using IDR to identify a set of reproducible peaks given eClIP dataset with two or three replicates.
"""
import os
import sys
import math
import argparse
import itertools
from subprocess import DEVNULL
import cmder
import inflect
import pandas as pd
from seqflow import Flow, task, logger
parser = argparse.ArgumentParser(description=__doc__, prog='peak')
parser.add_argument('--ip_bams', nargs='+', help='Space separated IP bam files (at least 2 files).')
parser.add_argument('--input_bams', nargs='+', help='Space separated INPUT bam files (at least 2 files).')
parser.add_argument('--peak_beds', nargs='+', help="Space separated peak bed files (at least 2 files).")
parser.add_argument('--outdir', type=str, help="Path to output directory, default: current work directory.")
parser.add_argument('--ids', nargs='+', help="Optional space separated short IDs (e.g., S1, S2, S3) for datasets, "
"default: S1 and S2 for 2 replicates dataset and S1, S2, S3 for 3"
"replicates dataset.")
parser.add_argument('--read_type', help="Read type of eCLIP experiment, either SE or PE.", default='PE')
parser.add_argument('--species', type=str, help="Short code for species, e.g., hg19, mm10, default: hg19.")
parser.add_argument('--l2fc', type=float, help="Only consider peaks at or above this l2fc cutoff, default: 3",
default=3.0)
parser.add_argument('--l10p', type=float, help="Only consider peaks at or above this l10p cutoff, default:3",
default=3.0)
parser.add_argument('--idr', type=float, help="Only consider peaks at or above this idr score cutoff, default: 0.01",
default=0.01)
parser.add_argument('--cores', type=int, help='Maximum number of CPU cores for parallel processing, default: 1',
default=1)
parser.add_argument('--dry_run', action='store_true',
help='Print out steps and inputs/outputs of each step without actually running the pipeline.')
parser.add_argument('--debug', action='store_true', help='Invoke debug mode (only for develop purpose).')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
def validate_paths():
    """Validate CLI inputs, symlink them into the output directory, and build lookup tables.

    Returns a tuple ``(bams, files, basenames, outdir, need_to_remove, args)`` where
    ``files`` maps each sample basename to ``(ip_bam, input_bam, peak_bed, entropy_bed)``
    and ``need_to_remove`` lists the temporary symlinks created here.
    Exits the process on any validation failure.
    """
    def files_exist(files, tag):
        # Abort unless every path in *files* exists and is a regular file;
        # returns the absolute paths.
        if not files:
            logger.error(f'No {tag} were provided, aborted.')
            sys.exit(1)
        engine, paths = inflect.engine(), []
        for i, file in enumerate(files, start=1):
            if os.path.exists(file):
                if not os.path.isfile(file):
                    logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" is not a file.')
                    sys.exit(1)
                else:
                    paths.append(os.path.abspath(file))
            else:
                logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" does not exist.')
                sys.exit(1)
        return paths

    def link_file(file, link):
        # Create the symlink only once; repeated runs reuse the existing link.
        if not os.path.exists(link):
            os.symlink(file, link)
        return link

    ip_bams = files_exist(args.ip_bams, 'IP bams')
    input_bams = files_exist(args.input_bams, 'INPUT bams')
    peak_beds = files_exist(args.peak_beds, 'Peak beds')
    outdir = args.outdir or os.getcwd()
    if os.path.exists(outdir):
        if not os.path.isdir(outdir):
            logger.error(f'Outdir "{outdir}" is a file not a directory.')
            sys.exit(1)
    else:
        # NOTE(review): these two messages are informational but logged at
        # error level — consider logger.info/warning.
        logger.error(f'Outdir "{outdir}" does not exist, try to create ...')
        os.mkdir(outdir)
        logger.error(f'Successfully created Outdir "{outdir}".')
    bams, files, basenames, need_to_remove, name_codes = [], {}, [], [], {}
    # When no short IDs are given, fall back to empty names (replaced per-sample below).
    ids = args.ids if args.ids else [''] * len(peak_beds)
    if len(ip_bams) == len(input_bams) == len(peak_beds) == len(ids):
        if ip_bams:
            for i, (ip_bam, input_bam, peak_bed, name) in enumerate(zip(ip_bams, input_bams, peak_beds, ids), start=1):
                if peak_bed.endswith('.peak.clusters.bed'):
                    # Inputs already follow the pipeline naming convention: use them in place.
                    link_ip_bam, link_input_bam, link_bed = ip_bam, input_bam, peak_bed
                    bams.extend([ip_bam, input_bam])
                    basename = name or right_replace(os.path.basename(ip_bam), '.bam', '')
                else:
                    # Otherwise symlink everything into outdir under a short sample code.
                    basename = name if name else f'S{i}'
                    link_ip_bam = link_file(ip_bam, os.path.join(outdir, f'{basename}.IP.bam'))
                    link_input_bam = link_file(input_bam, os.path.join(outdir, f'{basename}.INPUT.bam'))
                    link_bed = link_file(peak_bed, os.path.join(outdir, f'{basename}.peak.clusters.bed'))
                    bams.extend([link_ip_bam, link_input_bam])
                    need_to_remove.extend([link_ip_bam, link_input_bam, link_bed])
                    name_codes[basename] = (ip_bam, input_bam, peak_bed)
                suffix = 'peak.clusters.normalized.compressed.annotated.entropy.bed'
                # NOTE(review): stray space in f'{basename }' is harmless but worth tidying.
                files[basename] = (link_ip_bam, link_input_bam, link_bed, os.path.join(outdir, f'{basename }.{suffix}'))
                basenames.append(basename)
        else:
            logger.error('Dataset does not have enough sample to proceed.')
            sys.exit(1)
    else:
        logger.error('Unequal number of files provided!')
        sys.exit(1)
    if len(basenames) != len(set(basenames)):
        logger.error('Dataset contains duplicated basenames, process aborted!')
        sys.exit(1)
    if name_codes:
        # Persist the short-code -> original-path mapping for traceability.
        with open(os.path.join(outdir, 'name.maps.tsv'), 'w') as o:
            o.write('CODE\tIP_BAM\tINPUT_BAM\tPEAK_BED\n')
            o.writelines(f'{k}\t{v[0]}\t{v[1]}\t{v[2]}\n' for k, v in name_codes.items())
    return bams, files, basenames, outdir, need_to_remove, args
def right_replace(s, src, tar):
    """Replace the trailing suffix *src* of *s* with *tar*; return *s* unchanged otherwise.

    Unlike str.replace, only the suffix occurrence is touched.
    """
    if s.endswith(src):
        # Use len(s) - len(src) rather than the slice s[:-len(src)]: when
        # src == '' the old slice evaluated to s[:0] and silently dropped
        # the whole string.
        return s[:len(s) - len(src)] + tar
    return s
# Resolve, validate and (when needed) symlink all inputs once at import time;
# `options` is the canonical parsed-argument object used by every task below.
bams, files, basenames, outdir, need_to_remove, options = validate_paths()
env = os.environ.copy()
if options.debug:
    # Debug mode: prefer the helper scripts next to this file over copies on PATH.
    env['PATH'] = f'{os.path.dirname(os.path.abspath(__file__))}:{env["PATH"]}'
@task(inputs=bams, cpus=args.cores,
      outputs=lambda i: right_replace(os.path.join(outdir, os.path.basename(i)), '.bam', '.mapped.reads.count.txt'))
def count_mapped_reads(bam, txt):
    """Write the number of mapped reads in *bam* (samtools, excluding flag 0x4) to *txt*."""
    cmder.run(f'samtools view -c -F 0x4 {bam} > {txt}',
              msg=f'Count mapped reads in {bam} ...', pmt=True)
def get_mapped_reads(bam):
    """Read back the mapped-read count written by count_mapped_reads for *bam*."""
    count_file = right_replace(os.path.basename(bam), '.bam', '.mapped.reads.count.txt')
    with open(os.path.join(outdir, count_file)) as handle:
        return int(handle.read().strip())
@task(inputs=[v[2] for v in files.values()],
      outputs=lambda i: right_replace(os.path.join(outdir, os.path.basename(i)), '.bed', '.normalized.bed'),
      parent=count_mapped_reads, cpus=args.cores)
def normalize_peak(bed, normalized_bed):
    """Normalize IP peak read counts against the INPUT sample via overlap_peak.pl."""
    # Look up the sample's bam pair by stripping the well-known peak-bed suffix.
    ip_bam, input_bam, peak_bed, _ = files[right_replace(os.path.basename(bed), '.peak.clusters.bed', '')]
    # Mapped-read totals come from the parent count_mapped_reads task.
    ip_read_count, input_read_count = get_mapped_reads(ip_bam), get_mapped_reads(input_bam)
    cmd = ['overlap_peak.pl', ip_bam, input_bam, peak_bed, ip_read_count, input_read_count,
           options.read_type, normalized_bed, right_replace(normalized_bed, '.bed', '.tsv')]
    cmder.run(cmd, env=env, msg=f'Normalizing peaks in {peak_bed} ...', pmt=True)
    return normalized_bed
@task(inputs=normalize_peak, outputs=lambda i: right_replace(i, '.bed', '.compressed.bed'), cpus=args.cores)
def compress_peak(normalized_bed, compressed_bed):
    """Merge/compress normalized peaks via compress_peak.pl (emits both .bed and .tsv)."""
    cmd = ['compress_peak.pl', right_replace(normalized_bed, '.bed', '.tsv'),
           compressed_bed, right_replace(compressed_bed, '.bed', '.tsv')]
    cmder.run(cmd, env=env, msg=f'Compressing peaks in {normalized_bed} ...', pmt=True)
    return compressed_bed
@task(inputs=compress_peak, outputs=lambda i: right_replace(i, '.bed', '.annotated.tsv'), cpus=args.cores)
def annotate_peak(compressed_bed, annotated_tsv):
    """Annotate compressed peaks with gene/feature info for the configured species."""
    cmd = ['annotate_peak.pl', right_replace(compressed_bed, '.bed', '.tsv'),
           annotated_tsv, right_replace(annotated_tsv, '.tsv', '.bed'), options.species]
    cmder.run(cmd, env=env, msg=f'Annotating peaks in {compressed_bed} ...', pmt=True)
    return annotated_tsv
def calculate_entropy(tsv, output, ip_read_count, input_read_count):
    """Compute per-peak relative entropy and write the entropy TSVs plus a BED for IDR.

    :param tsv: annotated peak table for one sample (header row is skipped).
    :param output: path of the ``.entropy.bed`` file to write (ranked input for IDR).
    :param ip_read_count: total mapped reads in the IP bam.
    :param input_read_count: total mapped reads in the INPUT bam.
    :return: the *output* path.
    Exits the process when no peak passes the l2fc filter.
    """
    logger.info(f'Calculating entropy for {tsv} ...')
    columns = ['chrom', 'start', 'end', 'peak', 'ip_reads', 'input_reads',
               'p', 'v', 'method', 'status', 'l10p', 'l2fc',
               'ensg_overlap', 'feature_type', 'feature_ensg', 'gene', 'region']
    df = pd.read_csv(tsv, sep='\t', header=None, names=columns, skiprows=[0])
    # Keep only non-depleted peaks; the strict l2fc/l10p cutoffs are applied later.
    df = df[df.l2fc >= 0]
    # df = df[(df.l2fc >= options.l2fc) & (df.l10p >= options.l10p)]
    if df.empty:
        # BUGFIX: this message referenced the undefined name `bed` (NameError at
        # runtime) and described the filter as "> 0" while the code uses ">= 0".
        logger.error(f'No valid peaks found in {tsv} (l2fc >= 0 failed).')
        sys.exit(1)
    # pi/qi are the fractions of total IP/INPUT reads that fall in each peak.
    df['pi'] = df['ip_reads'] / ip_read_count
    df['qi'] = df['input_reads'] / input_read_count
    # Relative entropy term; zero when the peak is not enriched in IP.
    df['entropy'] = df.apply(lambda row: 0 if row.pi <= row.qi else row.pi * math.log2(row.pi / row.qi), axis=1)
    df['excess_reads'] = df['pi'] - df['qi']
    entropy = output.replace('.entropy.bed', '.entropy.tsv')
    dd = df.copy()
    dd = dd.rename(columns={'chrom': '# chrom'})
    dd.to_csv(entropy, index=False, columns=['# chrom'] + columns[1:] + ['entropy'], sep='\t')
    excess_read = output.replace('.bed', '.excess.reads.tsv')
    dd.to_csv(excess_read, index=False, columns=['# chrom'] + columns[1:] + ['entropy', 'excess_reads'], sep='\t')
    # Strand is encoded as the third ':'-separated field of the peak name.
    df['strand'] = df.peak.str.split(':', expand=True)[2]
    df['l2fc'] = df['l2fc'].map('{:.15f}'.format)
    df['entropy'] = df['entropy'].map('{:.10f}'.format)
    # For IDR 2.0.2, columns 'excess_reads', 'pi', and 'qi' need to be excluded for .entropy.bed
    # For IDR 2.0.3, columns 'excess_reads', 'pi', and 'qi' need to be retained for .entropy.bed
    columns = ['chrom', 'start', 'end', 'l2fc', 'entropy', 'strand', 'excess_reads', 'pi', 'qi']
    df.to_csv(output, index=False, columns=columns, sep='\t', header=False)
    logger.info(f'Calculating entropy for {tsv} complete.')
    return output
@task(inputs=annotate_peak, outputs=lambda i: right_replace(i, '.tsv', '.entropy.bed'), cpus=args.cores)
def entropy_peak(annotated_tsv, entropy_bed):
    """Compute entropy for one sample's annotated peaks; ends the pipeline for single-sample runs."""
    if len(files) < 2:
        # IDR needs at least two replicates, so there is nothing further to do.
        logger.warning('Calculating peak entropy skipped (# samples < 2), pipeline ends here.')
        cleanup()
        sys.exit(0)
    basename = right_replace(os.path.basename(annotated_tsv), '.peak.clusters.normalized.compressed.annotated.tsv', '')
    ip_bam, input_bam, peak_bed, _ = files[basename]
    ip_read_count, input_read_count = get_mapped_reads(ip_bam), get_mapped_reads(input_bam)
    calculate_entropy(annotated_tsv, entropy_bed, ip_read_count, input_read_count)
    return entropy_bed
@task(inputs=[], parent=entropy_peak, cpus=args.cores,
      outputs=[os.path.join(outdir, f'{key1}.vs.{key2}.idr.out')
               for key1, key2 in itertools.combinations(basenames, 2)])
def run_idr(bed, out):
    """Run IDR on each pairwise combination of replicate entropy BEDs, ranking by column 5 (entropy)."""
    if len(files) >= 2:
        key1, key2 = right_replace(os.path.basename(out), '.idr.out', '').split('.vs.')
        # files[key][3] is the sample's .entropy.bed produced upstream.
        entropy_bed1, entropy_bed2 = files[key1][3], files[key2][3]
        cmd = ['idr', '--sample', entropy_bed1, entropy_bed2, '--input-file-type', 'bed', '--rank', '5',
               '--peak-merge-method', 'max', '--plot', '-o', out]
        cmder.run(cmd, msg=f'Running IDR to rank peaks in {entropy_bed1} and\n{" " * 40}{entropy_bed2} ...',
                  pmt=True)
    else:
        logger.warning('Identifying IDR peaks skipped (# samples < 2).')
@task(inputs=[], parent=run_idr, cpus=args.cores,
      outputs=[os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
               for key1, key2 in itertools.combinations(basenames, 2)])
def parse_idr(out, bed):
    """Convert raw IDR output into a BED of peaks passing the configured cutoffs."""
    if len(files) >= 2:
        key1, key2 = right_replace(os.path.basename(bed), '.idr.out.bed', '').split('.vs.')
        idr_out = os.path.join(outdir, f'{key1}.vs.{key2}.idr.out')
        idr_bed = os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
        if len(files) == 2:
            # Two replicates: delegate filtering (l2fc/l10p/idr) to the perl helper.
            entropy_bed1, entropy_bed2 = files[key1][3], files[key2][3]
            cmd = ['parse_idr_peaks_2.pl', idr_out,
                   right_replace(entropy_bed1, '.bed', '.tsv'), right_replace(entropy_bed2, '.bed', '.tsv'), idr_bed,
                   options.l2fc, options.l10p, options.idr]
            cmder.run(cmd, env=env, msg=f'Parsing IDR peaks in {idr_out} ...', pmt=True)
        else:
            # Three replicates: filter here by the scaled IDR score in column 5
            # of the idr output (presumably the tool's -125*log2(IDR) scaling,
            # capped at 1000 — confirm against the IDR version in use).
            idr_cutoffs = {0.001: 1000, 0.005: 955, 0.01: 830, 0.02: 705, 0.03: 632, 0.04: 580, 0.05: 540,
                           0.06: 507, 0.07: 479, 0.08: 455, 0.09: 434,
                           0.1: 415, 0.2: 290, 0.3: 217, 0.4: 165, 0.5: 125, 1: 0}
            with open(idr_out) as f, open(idr_bed, 'w') as o:
                for line in f:
                    fields = line.strip().split('\t')
                    chrom, start, stop, _, idr_score, strand = fields[:6]
                    if float(idr_score) >= idr_cutoffs[options.idr]:
                        o.write(f'{chrom}\t{start}\t{stop}\t.\t.\t{strand}\n')
    else:
        logger.warning('Parsing IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')], parent=parse_idr)
def intersect_idr(bed, intersected_bed):
    """Intersect pairwise IDR results into a single peak set (trivial copy for 2 replicates)."""
    if len(files) == 2:
        idr_out = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out')
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        idr_intersected_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.intersected.bed')
        # With only one pair there is nothing to intersect; keep a copy for downstream naming.
        cmder.run(f'cp {idr_out} {idr_intersected_bed}')
        need_to_remove.append(idr_intersected_bed)
    elif len(files) == 3:
        idr_intersected_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.intersected.bed')
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        bed1, bed2, bed3 = [os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
                            for key1, key2 in itertools.combinations(basenames, 2)]
        tmp_bed = right_replace(idr_intersected_bed, '.bed', '.tmp.bed')
        # Chain two pairwise bedtools intersections to get peaks shared by all three pairs.
        cmder.run(f'bedtools intersect -a {bed1} -b {bed2} > {tmp_bed}', msg='Intersecting IDR beds ...')
        cmder.run(f'bedtools intersect -a {tmp_bed} -b {bed3} > {idr_intersected_bed}', msg='Intersecting IDR beds ...')
        cmder.run(f'rm {tmp_bed}')
        entropy_beds = [os.path.join(outdir, f'{key}.peak.clusters.normalized.compressed.annotated.entropy.tsv')
                        for key in basenames]
        cmd = ['parse_idr_peaks_3.pl', idr_intersected_bed] + entropy_beds + [f'{idr_bed}',
                                                                             options.l2fc, options.l10p, options.idr]
        cmder.run(cmd, env=env, msg=f'Parsing intersected IDR peaks in {idr_bed} ...', pmt=True)
    else:
        logger.warning('Intersecting IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{key}.idr.normalized.bed') for key in basenames],
      parent=intersect_idr, cpus=args.cores)
def normalize_idr(bed, idr_normalized_bed):
    """Re-normalize the merged IDR peak set against each sample's IP/INPUT bams."""
    if len(files) >= 2:
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        key = right_replace(os.path.basename(idr_normalized_bed), '.idr.normalized.bed', '')
        ip_bam, input_bam, peak_bed, _ = files[key]
        cmd = ['overlap_peak.pl', ip_bam, input_bam, idr_bed,
               get_mapped_reads(ip_bam), get_mapped_reads(input_bam),
               options.read_type, idr_normalized_bed, right_replace(idr_normalized_bed, '.bed', '.tsv')]
        cmder.run(cmd, env=env, msg=f'Normalizing IDR peaks for sample {key} ...', pmt=True)
    else:
        logger.warning('Normalizing IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{".vs.".join([key for key in basenames])}.reproducible.peaks.bed')],
      parent=normalize_idr)
def reproducible_peak(inputs, reproducible_bed):
    """Identify the final reproducible peak set via the replicate-count-specific perl helper.

    :return: the reproducible peaks BED path, or '' when fewer than two samples exist.
    """
    if len(files) >= 2:
        # Helper script is chosen by replicate count (reproducible_peaks_2.pl / _3.pl).
        script = f'reproducible_peaks_{len(files)}.pl'
        custom = right_replace(reproducible_bed, '.peaks.bed', '.peaks.custom.tsv')
        idr_normalized_full_beds, entropy_full_beds, reproducible_txts = [], [], []
        for (ip_bam, input_bam, peak_bed, _) in files.values():
            name = right_replace(os.path.basename(peak_bed), '.peak.clusters.bed', '')
            idr_normalized_full_beds.append(os.path.join(outdir, f'{name}.idr.normalized.tsv'))
            suffix = 'peak.clusters.normalized.compressed.annotated.entropy.tsv'
            entropy_full_beds.append(os.path.join(outdir, f'{name}.{suffix}'))
            reproducible_txts.append(os.path.join(outdir, f'{name}.reproducible.peaks.tsv'))
        cmd = [script] + idr_normalized_full_beds + reproducible_txts
        cmd += [reproducible_bed, custom] + entropy_full_beds
        # 3 replicates consume the intersected bed; 2 replicates the raw idr output.
        cmd += [os.path.join(outdir, f'{".vs.".join(basenames)}.idr{".intersected.bed" if len(files) == 3 else ".out"}')]
        cmd += [options.l2fc, options.l10p, options.idr]
        cmder.run(cmd, env=env, msg='Identifying reproducible peaks ...', pmt=True)
        return reproducible_bed
    else:
        logger.warning('Identifying reproducible peaks skipped (# samples < 2).')
        return ''
@task(inputs=reproducible_peak,
      outputs=lambda i: i.replace('.reproducible.peaks.bed', '.annotated.reproducible.peaks.bed'))
def annotate_reproducible_peak(bed, out):
    """Annotate the reproducible peak set; the intermediate .tsv is deleted afterwards."""
    if bed:
        cmd = ['annotate_peak.pl', bed, out, right_replace(out, '.bed', '.tsv'), options.species]
        cmder.run(cmd, env=env, msg=f'Annotating peaks in {bed} ...', pmt=True)
        os.unlink(right_replace(out, '.bed', '.tsv'))
    else:
        logger.warning('No reproducible peak bed file, annotation skipped!')
def cleanup():
    """Remove the temporary symlinks/copies recorded in need_to_remove."""
    # TODO: need to handle this better, see the way for cleaning up in seCLIP
    if need_to_remove:
        logger.info('Cleaning up ...')
        for file in need_to_remove:
            # Shelling out per file keeps behavior consistent with the rest of
            # the pipeline's cmder usage.
            cmder.run(f'rm {file}')
        logger.info('Cleaning up complete.')
def main():
    """Build and execute the Peak workflow, then remove temporary files."""
    workflow = Flow('Peak', description=__doc__.strip())
    workflow.run(dry_run=options.dry_run, cpus=options.cores)
    cleanup()


if __name__ == '__main__':
    main()
| <filename>peak/peak.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pipeline for using IDR to identify a set of reproducible peaks given eClIP dataset with two or three replicates.
"""
import os
import sys
import math
import argparse
import itertools
from subprocess import DEVNULL
import cmder
import inflect
import pandas as pd
from seqflow import Flow, task, logger
parser = argparse.ArgumentParser(description=__doc__, prog='peak')
parser.add_argument('--ip_bams', nargs='+', help='Space separated IP bam files (at least 2 files).')
parser.add_argument('--input_bams', nargs='+', help='Space separated INPUT bam files (at least 2 files).')
parser.add_argument('--peak_beds', nargs='+', help="Space separated peak bed files (at least 2 files).")
parser.add_argument('--outdir', type=str, help="Path to output directory, default: current work directory.")
parser.add_argument('--ids', nargs='+', help="Optional space separated short IDs (e.g., S1, S2, S3) for datasets, "
"default: S1 and S2 for 2 replicates dataset and S1, S2, S3 for 3"
"replicates dataset.")
parser.add_argument('--read_type', help="Read type of eCLIP experiment, either SE or PE.", default='PE')
parser.add_argument('--species', type=str, help="Short code for species, e.g., hg19, mm10, default: hg19.")
parser.add_argument('--l2fc', type=float, help="Only consider peaks at or above this l2fc cutoff, default: 3",
default=3.0)
parser.add_argument('--l10p', type=float, help="Only consider peaks at or above this l10p cutoff, default:3",
default=3.0)
parser.add_argument('--idr', type=float, help="Only consider peaks at or above this idr score cutoff, default: 0.01",
default=0.01)
parser.add_argument('--cores', type=int, help='Maximum number of CPU cores for parallel processing, default: 1',
default=1)
parser.add_argument('--dry_run', action='store_true',
help='Print out steps and inputs/outputs of each step without actually running the pipeline.')
parser.add_argument('--debug', action='store_true', help='Invoke debug mode (only for develop purpose).')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
def validate_paths():
    """Validate CLI inputs, symlink them into the output directory, and build lookup tables.

    Returns ``(bams, files, basenames, outdir, need_to_remove, args)``; exits the
    process on any validation failure.
    """
    def files_exist(files, tag):
        # Abort unless every path exists and is a regular file; returns absolute paths.
        if not files:
            logger.error(f'No {tag} were provided, aborted.')
            sys.exit(1)
        engine, paths = inflect.engine(), []
        for i, file in enumerate(files, start=1):
            if os.path.exists(file):
                if not os.path.isfile(file):
                    logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" is not a file.')
                    sys.exit(1)
                else:
                    paths.append(os.path.abspath(file))
            else:
                logger.error(f'The {engine.ordinal(i)} file in {tag} "{file}" does not exist.')
                sys.exit(1)
        return paths

    def link_file(file, link):
        # Create the symlink only once; repeated runs reuse the existing link.
        if not os.path.exists(link):
            os.symlink(file, link)
        return link

    ip_bams = files_exist(args.ip_bams, 'IP bams')
    input_bams = files_exist(args.input_bams, 'INPUT bams')
    peak_beds = files_exist(args.peak_beds, 'Peak beds')
    outdir = args.outdir or os.getcwd()
    if os.path.exists(outdir):
        if not os.path.isdir(outdir):
            logger.error(f'Outdir "{outdir}" is a file not a directory.')
            sys.exit(1)
    else:
        # NOTE(review): informational messages logged at error level.
        logger.error(f'Outdir "{outdir}" does not exist, try to create ...')
        os.mkdir(outdir)
        logger.error(f'Successfully created Outdir "{outdir}".')
    bams, files, basenames, need_to_remove, name_codes = [], {}, [], [], {}
    ids = args.ids if args.ids else [''] * len(peak_beds)
    if len(ip_bams) == len(input_bams) == len(peak_beds) == len(ids):
        if ip_bams:
            for i, (ip_bam, input_bam, peak_bed, name) in enumerate(zip(ip_bams, input_bams, peak_beds, ids), start=1):
                if peak_bed.endswith('.peak.clusters.bed'):
                    # Inputs already follow the naming convention: use them in place.
                    link_ip_bam, link_input_bam, link_bed = ip_bam, input_bam, peak_bed
                    bams.extend([ip_bam, input_bam])
                    basename = name or right_replace(os.path.basename(ip_bam), '.bam', '')
                else:
                    # Otherwise symlink everything into outdir under a short sample code.
                    basename = name if name else f'S{i}'
                    link_ip_bam = link_file(ip_bam, os.path.join(outdir, f'{basename}.IP.bam'))
                    link_input_bam = link_file(input_bam, os.path.join(outdir, f'{basename}.INPUT.bam'))
                    link_bed = link_file(peak_bed, os.path.join(outdir, f'{basename}.peak.clusters.bed'))
                    bams.extend([link_ip_bam, link_input_bam])
                    need_to_remove.extend([link_ip_bam, link_input_bam, link_bed])
                    name_codes[basename] = (ip_bam, input_bam, peak_bed)
                suffix = 'peak.clusters.normalized.compressed.annotated.entropy.bed'
                files[basename] = (link_ip_bam, link_input_bam, link_bed, os.path.join(outdir, f'{basename }.{suffix}'))
                basenames.append(basename)
        else:
            logger.error('Dataset does not have enough sample to proceed.')
            sys.exit(1)
    else:
        logger.error('Unequal number of files provided!')
        sys.exit(1)
    if len(basenames) != len(set(basenames)):
        logger.error('Dataset contains duplicated basenames, process aborted!')
        sys.exit(1)
    if name_codes:
        # Persist the short-code -> original-path mapping for traceability.
        with open(os.path.join(outdir, 'name.maps.tsv'), 'w') as o:
            o.write('CODE\tIP_BAM\tINPUT_BAM\tPEAK_BED\n')
            o.writelines(f'{k}\t{v[0]}\t{v[1]}\t{v[2]}\n' for k, v in name_codes.items())
    return bams, files, basenames, outdir, need_to_remove, args
def right_replace(s, src, tar):
    """Replace the trailing suffix *src* of *s* with *tar*; return *s* unchanged otherwise.

    Unlike str.replace, only the suffix occurrence is touched.
    """
    if s.endswith(src):
        # len(s) - len(src) avoids the s[:-0] pitfall of s[:-len(src)] when src == ''.
        return s[:len(s) - len(src)] + tar
    return s
# Resolve, validate and (when needed) symlink all inputs once at import time.
bams, files, basenames, outdir, need_to_remove, options = validate_paths()
env = os.environ.copy()
if options.debug:
    # Debug mode: prefer the helper scripts next to this file over copies on PATH.
    env['PATH'] = f'{os.path.dirname(os.path.abspath(__file__))}:{env["PATH"]}'
@task(inputs=bams, cpus=args.cores,
      outputs=lambda i: right_replace(os.path.join(outdir, os.path.basename(i)), '.bam', '.mapped.reads.count.txt'))
def count_mapped_reads(bam, txt):
    """Write the number of mapped reads in *bam* (samtools, excluding flag 0x4) to *txt*."""
    cmd = f'samtools view -c -F 0x4 {bam} > {txt}'
    cmder.run(cmd, msg=f'Count mapped reads in {bam} ...', pmt=True)
def get_mapped_reads(bam):
    """Read back the mapped-read count that count_mapped_reads wrote for *bam*."""
    with open(os.path.join(outdir, right_replace(os.path.basename(bam), '.bam', '.mapped.reads.count.txt'))) as f:
        return int(f.read().strip())
@task(inputs=[v[2] for v in files.values()],
      outputs=lambda i: right_replace(os.path.join(outdir, os.path.basename(i)), '.bed', '.normalized.bed'),
      parent=count_mapped_reads, cpus=args.cores)
def normalize_peak(bed, normalized_bed):
    """Normalize peaks in *bed* by IP/INPUT read depth via overlap_peak.pl; returns the normalized BED path."""
    # The sample key is the basename of the linked peak BED; files maps it to
    # (ip_bam, input_bam, peak_bed, entropy_bed).
    ip_bam, input_bam, peak_bed, _ = files[right_replace(os.path.basename(bed), '.peak.clusters.bed', '')]
    ip_read_count, input_read_count = get_mapped_reads(ip_bam), get_mapped_reads(input_bam)
    # Argument order is fixed by overlap_peak.pl; the last argument is the
    # full-table .tsv companion of the normalized BED.
    cmd = ['overlap_peak.pl', ip_bam, input_bam, peak_bed, ip_read_count, input_read_count,
           options.read_type, normalized_bed, right_replace(normalized_bed, '.bed', '.tsv')]
    cmder.run(cmd, env=env, msg=f'Normalizing peaks in {peak_bed} ...', pmt=True)
    return normalized_bed
@task(inputs=normalize_peak, outputs=lambda i: right_replace(i, '.bed', '.compressed.bed'), cpus=args.cores)
def compress_peak(normalized_bed, compressed_bed):
    """Merge/compress the normalized peaks via compress_peak.pl; returns the compressed BED path."""
    # compress_peak.pl consumes the .tsv companion of the normalized BED and
    # emits both a compressed BED and its .tsv table.
    cmd = ['compress_peak.pl', right_replace(normalized_bed, '.bed', '.tsv'),
           compressed_bed, right_replace(compressed_bed, '.bed', '.tsv')]
    cmder.run(cmd, env=env, msg=f'Compressing peaks in {normalized_bed} ...', pmt=True)
    return compressed_bed
@task(inputs=compress_peak, outputs=lambda i: right_replace(i, '.bed', '.annotated.tsv'), cpus=args.cores)
def annotate_peak(compressed_bed, annotated_tsv):
    """Annotate compressed peaks with gene/region info for the given species; returns the annotated .tsv path."""
    cmd = ['annotate_peak.pl', right_replace(compressed_bed, '.bed', '.tsv'),
           annotated_tsv, right_replace(annotated_tsv, '.tsv', '.bed'), options.species]
    cmder.run(cmd, env=env, msg=f'Annotating peaks in {compressed_bed} ...', pmt=True)
    return annotated_tsv
def calculate_entropy(tsv, output, ip_read_count, input_read_count):
    """Compute per-peak relative entropy and write the IDR input BED.

    Reads the annotated peak table *tsv*, keeps peaks with l2fc >= 0, then
    computes pi (IP read fraction), qi (INPUT read fraction), the relative
    entropy pi * log2(pi / qi) (0 when pi <= qi) and excess reads pi - qi.
    Writes an .entropy.tsv table, an .excess.reads.tsv table, and the
    headerless *output* BED that IDR consumes.

    :param tsv: annotated peak table produced by annotate_peak.
    :param output: path of the .entropy.bed file to write.
    :param ip_read_count: total mapped reads in the IP BAM.
    :param input_read_count: total mapped reads in the INPUT BAM.
    :return: *output*, the path of the written entropy BED.
    """
    logger.info(f'Calculating entropy for {tsv} ...')
    columns = ['chrom', 'start', 'end', 'peak', 'ip_reads', 'input_reads',
               'p', 'v', 'method', 'status', 'l10p', 'l2fc',
               'ensg_overlap', 'feature_type', 'feature_ensg', 'gene', 'region']
    df = pd.read_csv(tsv, sep='\t', header=None, names=columns, skiprows=[0])
    df = df[df.l2fc >= 0]
    # df = df[(df.l2fc >= options.l2fc) & (df.l10p >= options.l10p)]
    if df.empty:
        # Bug fix: the original f-string referenced the undefined name `bed`,
        # so this branch raised NameError instead of logging the real problem.
        logger.error(f'No valid peaks found in {tsv} (l2fc > 0 failed).')
        sys.exit(1)
    df['pi'] = df['ip_reads'] / ip_read_count
    df['qi'] = df['input_reads'] / input_read_count
    df['entropy'] = df.apply(lambda row: 0 if row.pi <= row.qi else row.pi * math.log2(row.pi / row.qi), axis=1)
    df['excess_reads'] = df['pi'] - df['qi']
    entropy = output.replace('.entropy.bed', '.entropy.tsv')
    dd = df.copy()
    dd = dd.rename(columns={'chrom': '# chrom'})
    dd.to_csv(entropy, index=False, columns=['# chrom'] + columns[1:] + ['entropy'], sep='\t')
    excess_read = output.replace('.bed', '.excess.reads.tsv')
    dd.to_csv(excess_read, index=False, columns=['# chrom'] + columns[1:] + ['entropy', 'excess_reads'], sep='\t')
    # Strand is encoded as the third ':'-separated field of the peak name.
    df['strand'] = df.peak.str.split(':', expand=True)[2]
    df['l2fc'] = df['l2fc'].map('{:.15f}'.format)
    df['entropy'] = df['entropy'].map('{:.10f}'.format)
    # For IDR 2.0.2, columns 'excess_reads', 'pi', and 'qi' need to be excluded for .entropy.bed
    # For IDR 2.0.3, columns 'excess_reads', 'pi', and 'qi' need to be retained for .entropy.bed
    columns = ['chrom', 'start', 'end', 'l2fc', 'entropy', 'strand', 'excess_reads', 'pi', 'qi']
    df.to_csv(output, index=False, columns=columns, sep='\t', header=False)
    logger.info(f'Calculating entropy for {tsv} complete.')
    return output
@task(inputs=annotate_peak, outputs=lambda i: right_replace(i, '.tsv', '.entropy.bed'), cpus=args.cores)
def entropy_peak(annotated_tsv, entropy_bed):
    """Produce the per-sample entropy BED for IDR; exits early (successfully) when fewer than 2 samples exist."""
    if len(files) < 2:
        # IDR needs at least two replicates; stop the whole pipeline cleanly.
        logger.warning('Calculating peak entropy skipped (# samples < 2), pipeline ends here.')
        cleanup()
        sys.exit(0)
    basename = right_replace(os.path.basename(annotated_tsv), '.peak.clusters.normalized.compressed.annotated.tsv', '')
    ip_bam, input_bam, peak_bed, _ = files[basename]
    ip_read_count, input_read_count = get_mapped_reads(ip_bam), get_mapped_reads(input_bam)
    calculate_entropy(annotated_tsv, entropy_bed, ip_read_count, input_read_count)
    return entropy_bed
@task(inputs=[], parent=entropy_peak, cpus=args.cores,
      outputs=[os.path.join(outdir, f'{key1}.vs.{key2}.idr.out')
               for key1, key2 in itertools.combinations(basenames, 2)])
def run_idr(bed, out):
    """Run IDR on each pair of samples' entropy BEDs, ranking by column 5 (the entropy score)."""
    if len(files) >= 2:
        # Recover the two sample keys from the pairwise output filename.
        key1, key2 = right_replace(os.path.basename(out), '.idr.out', '').split('.vs.')
        entropy_bed1, entropy_bed2 = files[key1][3], files[key2][3]
        cmd = ['idr', '--sample', entropy_bed1, entropy_bed2, '--input-file-type', 'bed', '--rank', '5',
               '--peak-merge-method', 'max', '--plot', '-o', out]
        cmder.run(cmd, msg=f'Running IDR to rank peaks in {entropy_bed1} and\n{" " * 40}{entropy_bed2} ...',
                  pmt=True)
    else:
        logger.warning('Identifying IDR peaks skipped (# samples < 2).')
@task(inputs=[], parent=run_idr, cpus=args.cores,
      outputs=[os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
               for key1, key2 in itertools.combinations(basenames, 2)])
def parse_idr(out, bed):
    """Turn each pairwise IDR output into a filtered BED of peaks passing the IDR cutoff."""
    if len(files) >= 2:
        key1, key2 = right_replace(os.path.basename(bed), '.idr.out.bed', '').split('.vs.')
        idr_out = os.path.join(outdir, f'{key1}.vs.{key2}.idr.out')
        idr_bed = os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
        if len(files) == 2:
            # Two replicates: delegate filtering (l2fc/l10p/IDR cutoffs) to
            # the helper script, which also pulls values from the entropy tables.
            entropy_bed1, entropy_bed2 = files[key1][3], files[key2][3]
            cmd = ['parse_idr_peaks_2.pl', idr_out,
                   right_replace(entropy_bed1, '.bed', '.tsv'), right_replace(entropy_bed2, '.bed', '.tsv'), idr_bed,
                   options.l2fc, options.l10p, options.idr]
            cmder.run(cmd, env=env, msg=f'Parsing IDR peaks in {idr_out} ...', pmt=True)
        else:
            # Three replicates: filter here on the scaled IDR score that the
            # idr tool writes in column 5; this table maps an IDR threshold to
            # the minimum acceptable scaled score (presumably
            # min(int(-125*log2(IDR)), 1000) — confirm against the IDR docs).
            idr_cutoffs = {0.001: 1000, 0.005: 955, 0.01: 830, 0.02: 705, 0.03: 632, 0.04: 580, 0.05: 540,
                           0.06: 507, 0.07: 479, 0.08: 455, 0.09: 434,
                           0.1: 415, 0.2: 290, 0.3: 217, 0.4: 165, 0.5: 125, 1: 0}
            with open(idr_out) as f, open(idr_bed, 'w') as o:
                for line in f:
                    fields = line.strip().split('\t')
                    chrom, start, stop, _, idr_score, strand = fields[:6]
                    if float(idr_score) >= idr_cutoffs[options.idr]:
                        o.write(f'{chrom}\t{start}\t{stop}\t.\t.\t{strand}\n')
    else:
        logger.warning('Parsing IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')], parent=parse_idr)
def intersect_idr(bed, intersected_bed):
    """Intersect the pairwise IDR BEDs into one combined set (trivial copy for 2 samples, bedtools chain for 3)."""
    if len(files) == 2:
        # With two samples there is a single pairwise result; the declared
        # output BED was already written by parse_idr, so only the
        # .intersected.bed alias needs to be created (and removed later).
        idr_out = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out')
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        idr_intersected_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.intersected.bed')
        cmder.run(f'cp {idr_out} {idr_intersected_bed}')
        need_to_remove.append(idr_intersected_bed)
    elif len(files) == 3:
        # Chain two bedtools intersections over the three pairwise BEDs,
        # then let parse_idr_peaks_3.pl apply the cutoffs and write the
        # combined idr_bed.
        idr_intersected_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.intersected.bed')
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        bed1, bed2, bed3 = [os.path.join(outdir, f'{key1}.vs.{key2}.idr.out.bed')
                            for key1, key2 in itertools.combinations(basenames, 2)]
        tmp_bed = right_replace(idr_intersected_bed, '.bed', '.tmp.bed')
        cmder.run(f'bedtools intersect -a {bed1} -b {bed2} > {tmp_bed}', msg='Intersecting IDR beds ...')
        cmder.run(f'bedtools intersect -a {tmp_bed} -b {bed3} > {idr_intersected_bed}', msg='Intersecting IDR beds ...')
        cmder.run(f'rm {tmp_bed}')
        entropy_beds = [os.path.join(outdir, f'{key}.peak.clusters.normalized.compressed.annotated.entropy.tsv')
                        for key in basenames]
        cmd = ['parse_idr_peaks_3.pl', idr_intersected_bed] + entropy_beds + [f'{idr_bed}',
               options.l2fc, options.l10p, options.idr]
        cmder.run(cmd, env=env, msg=f'Parsing intersected IDR peaks in {idr_bed} ...', pmt=True)
    else:
        logger.warning('Intersecting IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{key}.idr.normalized.bed') for key in basenames],
      parent=intersect_idr, cpus=args.cores)
def normalize_idr(bed, idr_normalized_bed):
    """Re-normalize the combined IDR peak set against each sample's own IP/INPUT BAMs."""
    if len(files) >= 2:
        idr_bed = os.path.join(outdir, f'{".vs.".join(basenames)}.idr.out.bed')
        key = right_replace(os.path.basename(idr_normalized_bed), '.idr.normalized.bed', '')
        ip_bam, input_bam, peak_bed, _ = files[key]
        cmd = ['overlap_peak.pl', ip_bam, input_bam, idr_bed,
               get_mapped_reads(ip_bam), get_mapped_reads(input_bam),
               options.read_type, idr_normalized_bed, right_replace(idr_normalized_bed, '.bed', '.tsv')]
        cmder.run(cmd, env=env, msg=f'Normalizing IDR peaks for sample {key} ...', pmt=True)
    else:
        logger.warning('Normalizing IDR peaks skipped (# samples < 2).')
@task(inputs=[], outputs=[os.path.join(outdir, f'{".vs.".join([key for key in basenames])}.reproducible.peaks.bed')],
      parent=normalize_idr)
def reproducible_peak(inputs, reproducible_bed):
    """Call the sample-count-specific reproducible_peaks script to produce the final reproducible peak BED."""
    if len(files) >= 2:
        # Helper script is chosen by replicate count (reproducible_peaks_2.pl
        # or reproducible_peaks_3.pl); its argument order is positional.
        script = f'reproducible_peaks_{len(files)}.pl'
        custom = right_replace(reproducible_bed, '.peaks.bed', '.peaks.custom.tsv')
        idr_normalized_full_beds, entropy_full_beds, reproducible_txts = [], [], []
        for (ip_bam, input_bam, peak_bed, _) in files.values():
            name = right_replace(os.path.basename(peak_bed), '.peak.clusters.bed', '')
            idr_normalized_full_beds.append(os.path.join(outdir, f'{name}.idr.normalized.tsv'))
            suffix = 'peak.clusters.normalized.compressed.annotated.entropy.tsv'
            entropy_full_beds.append(os.path.join(outdir, f'{name}.{suffix}'))
            reproducible_txts.append(os.path.join(outdir, f'{name}.reproducible.peaks.tsv'))
        cmd = [script] + idr_normalized_full_beds + reproducible_txts
        cmd += [reproducible_bed, custom] + entropy_full_beds
        # 3 replicates use the intersected BED, 2 replicates the raw IDR output.
        cmd += [os.path.join(outdir, f'{".vs.".join(basenames)}.idr{".intersected.bed" if len(files) == 3 else ".out"}')]
        cmd += [options.l2fc, options.l10p, options.idr]
        cmder.run(cmd, env=env, msg='Identifying reproducible peaks ...', pmt=True)
        return reproducible_bed
    else:
        logger.warning('Identifying reproducible peaks skipped (# samples < 2).')
        return ''
@task(inputs=reproducible_peak,
      outputs=lambda i: i.replace('.reproducible.peaks.bed', '.annotated.reproducible.peaks.bed'))
def annotate_reproducible_peak(bed, out):
    """Annotate the final reproducible peak BED; *bed* is '' when the previous step was skipped."""
    if bed:
        cmd = ['annotate_peak.pl', bed, out, right_replace(out, '.bed', '.tsv'), options.species]
        cmder.run(cmd, env=env, msg=f'Annotating peaks in {bed} ...', pmt=True)
        # The intermediate .tsv companion is not part of the final outputs.
        os.unlink(right_replace(out, '.bed', '.tsv'))
    else:
        logger.warning('No reproducible peak bed file, annotation skipped!')
def cleanup():
    """Remove the symlinks and temporary files accumulated in need_to_remove."""
    # TODO: need to handle this better, see the way for cleaning up in seCLIP
    if need_to_remove:
        logger.info('Cleaning up ...')
        for file in need_to_remove:
            cmder.run(f'rm {file}')
        logger.info('Cleaning up complete.')
def main():
    """Assemble the task flow defined above, run it, then clean up temporaries."""
    flow = Flow('Peak', description=__doc__.strip())
    flow.run(dry_run=options.dry_run, cpus=options.cores)
    cleanup()
# Standard script entry point.
if __name__ == '__main__':
    main()
| en | 0.599077 | #!/usr/bin/env python # -*- coding: utf-8 -*- Pipeline for using IDR to identify a set of reproducible peaks given eClIP dataset with two or three replicates. # df = df[(df.l2fc >= options.l2fc) & (df.l10p >= options.l10p)] # For IDR 2.0.2, columns 'excess_reads', 'pi', and 'qi' need to be excluded for .entropy.bed # For IDR 2.0.3, columns 'excess_reads', 'pi', and 'qi' need to be retained for .entropy.bed # samples < 2), pipeline ends here.') # samples < 2).') # samples < 2).') # samples < 2).') # samples < 2).') # samples < 2).') # TODO: need to handle this better, see the way for cleaning up in seCLIP | 2.666365 | 3 |
python/blazingdb/messages/blazingdb/protocol/interpreter/ExecutePlanResponse.py | HubBucket-Team/blazingdb-protocol | 1 | 6624194 | <gh_stars>1-10
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: interpreter
import flatbuffers
class ExecutePlanResponse(object):
    """Generated FlatBuffers accessor for the interpreter.ExecutePlanResponse
    table (fields: resultToken at vtable offset 4, nodeConnection at 6).
    Do not edit by hand; regenerate with flatc from the schema."""
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsExecutePlanResponse(cls, buf, offset):
        """Wrap the root table of a serialized buffer in an accessor object."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ExecutePlanResponse()
        x.Init(buf, n + offset)
        return x
    # ExecutePlanResponse
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # ExecutePlanResponse
    def ResultToken(self):
        # uint64 field; a zero vtable offset means "absent", default 0.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0
    # ExecutePlanResponse
    def NodeConnection(self):
        # Nested NodeConnection table; returns None when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = self._tab.Indirect(o + self._tab.Pos)
            from .NodeConnection import NodeConnection
            obj = NodeConnection()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None
# Generated builder helpers: start a 2-field table, set each field by its
# vtable slot (0 -> resultToken, 1 -> nodeConnection), then finish it.
def ExecutePlanResponseStart(builder): builder.StartObject(2)
def ExecutePlanResponseAddResultToken(builder, resultToken): builder.PrependUint64Slot(0, resultToken, 0)
def ExecutePlanResponseAddNodeConnection(builder, nodeConnection): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(nodeConnection), 0)
def ExecutePlanResponseEnd(builder): return builder.EndObject()
| # automatically generated by the FlatBuffers compiler, do not modify
# namespace: interpreter
import flatbuffers
class ExecutePlanResponse(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsExecutePlanResponse(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ExecutePlanResponse()
x.Init(buf, n + offset)
return x
# ExecutePlanResponse
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ExecutePlanResponse
def ResultToken(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# ExecutePlanResponse
def NodeConnection(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Indirect(o + self._tab.Pos)
from .NodeConnection import NodeConnection
obj = NodeConnection()
obj.Init(self._tab.Bytes, x)
return obj
return None
def ExecutePlanResponseStart(builder): builder.StartObject(2)
def ExecutePlanResponseAddResultToken(builder, resultToken): builder.PrependUint64Slot(0, resultToken, 0)
def ExecutePlanResponseAddNodeConnection(builder, nodeConnection): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(nodeConnection), 0)
def ExecutePlanResponseEnd(builder): return builder.EndObject() | en | 0.55727 | # automatically generated by the FlatBuffers compiler, do not modify # namespace: interpreter # ExecutePlanResponse # ExecutePlanResponse # ExecutePlanResponse | 2.069959 | 2 |
backend/tests/test_password_reset_requests.py | fjacob21/mididecweb | 0 | 6624195 | <reponame>fjacob21/mididecweb
from datetime import datetime
import pytz
from src.stores import MemoryStore
from src.passwordresetrequests import PasswordResetRequests
def generate_request(passwordresetrequests):
    """Create and return a reset request using fixed test credentials."""
    return passwordresetrequests.add('username', 'email')
def test_add():
    """Adding a request stores it and exposes it via count and list."""
    requests = PasswordResetRequests(MemoryStore())
    created = generate_request(requests)
    assert created
    assert requests.count == 1
    assert requests.list[0] == created
def test_double_add():
    """A pending request is reused on re-add; an accepted one is replaced."""
    requests = PasswordResetRequests(MemoryStore())
    first = generate_request(requests)
    assert first
    assert requests.count == 1
    assert requests.list[0] == first
    second = generate_request(requests)
    assert second
    assert first == second
    second.accept()
    third = generate_request(requests)
    assert third
    assert first != third
def test_delete():
    """delete() ignores unknown ids and removes a request by its request_id."""
    requests = PasswordResetRequests(MemoryStore())
    request = generate_request(requests)
    requests.delete('')
    assert requests.count == 1
    assert requests.list[0] == request
    requests.delete(request.request_id)
    assert requests.count == 0
def test_get():
    """get() returns the stored request when looked up by request_id."""
    requests = PasswordResetRequests(MemoryStore())
    request = generate_request(requests)
    assert request
    assert requests.count == 1
    assert requests.get(request.request_id) == request
def test_generate_request_id():
    """generate_request_id() yields a truthy plain str."""
    now = datetime.now(pytz.timezone("America/New_York"))
    requests = PasswordResetRequests(MemoryStore())
    rid = requests.generate_request_id(now, 'username', 'email')
    assert rid
    assert type(rid) == str
| from datetime import datetime
import pytz
from src.stores import MemoryStore
from src.passwordresetrequests import PasswordResetRequests
def generate_request(passwordresetrequests):
r = passwordresetrequests.add('username', 'email')
return r
def test_add():
store = MemoryStore()
passwordresetrequests = PasswordResetRequests(store)
r = generate_request(passwordresetrequests)
assert r
assert passwordresetrequests.count == 1
assert passwordresetrequests.list[0] == r
def test_double_add():
store = MemoryStore()
passwordresetrequests = PasswordResetRequests(store)
r = generate_request(passwordresetrequests)
assert r
assert passwordresetrequests.count == 1
assert passwordresetrequests.list[0] == r
r2 = generate_request(passwordresetrequests)
assert r2
assert r == r2
r2.accept()
r3 = generate_request(passwordresetrequests)
assert r3
assert r != r3
def test_delete():
store = MemoryStore()
passwordresetrequests = PasswordResetRequests(store)
r = generate_request(passwordresetrequests)
passwordresetrequests.delete('')
assert passwordresetrequests.count == 1
assert passwordresetrequests.list[0] == r
passwordresetrequests.delete(r.request_id)
assert passwordresetrequests.count == 0
def test_get():
store = MemoryStore()
passwordresetrequests = PasswordResetRequests(store)
r = generate_request(passwordresetrequests)
assert r
assert passwordresetrequests.count == 1
assert passwordresetrequests.get(r.request_id) == r
def test_generate_request_id():
date = datetime.now(pytz.timezone("America/New_York"))
store = MemoryStore()
passwordresetrequests = PasswordResetRequests(store)
rid = passwordresetrequests.generate_request_id(date, 'username', 'email')
assert rid
assert type(rid) == str | none | 1 | 2.546816 | 3 | |
server/connections.py | jrburga/JamOn | 0 | 6624196 | import sys
import logging
import time
import socket
from messages import *
from urllib2 import urlopen
# Bind address for the listening socket.
IP = '0.0.0.0'
try:
    # Best-effort lookup of this host's public IP via an external service.
    PUBLIC = urlopen('http://ip.42.pl/raw').read()
except:
    # NOTE(review): bare except swallows every error so the server can start
    # offline — consider narrowing to urllib2.URLError / socket errors.
    PUBLIC = '0.0.0.0'
PORT = 21385
MAX_CONNS = 20
# presumably seconds — TODO confirm where TIMEOUT is applied
TIMEOUT = 10
# bytes requested per socket recv() call
MSG_SIZE = 2**10
class Connection(object):
    """Wrapper around an accepted socket that sends and receives JSON-encoded
    Message objects (Python 2 code — note the print statement below)."""
    def __init__(self, connection, address):
        self.conn = connection
        self.addr = address
        # Unique connection identifier (CPython object id of this wrapper).
        self.id = id(self)
        self._closed = False
    @property
    def ip(self):
        # host part of the (host, port) address tuple
        return self.addr[0]
    @property
    def port(self):
        # port part of the (host, port) address tuple
        return self.addr[1]
    @property
    def closed(self):
        return self._closed
    def _parse(self, msg_string):
        # Split back-to-back JSON objects ("...}{...") into separate strings.
        # NOTE(review): this breaks if '}{' or '*' ever appears inside a
        # message's string payload — confirm payloads cannot contain them.
        return msg_string.replace('}{', '}*{').split('*')
    def send(self, message):
        """Serialize *message* with toJSON and write it to the socket."""
        assert isinstance(message, Message), 'Can only send type: Message'
        msg = toJSON(message)
        self.conn.send(msg)
    def recv(self, size=MSG_SIZE):
        '''
        Receive up to *size* bytes (blocking) and decode however many complete
        JSON messages they contain. An empty read is treated as a lost
        connection and closes the socket. Returns the (possibly empty) list
        of decoded messages.
        '''
        messages = []
        msg_string = self.conn.recv(size)
        if msg_string:
            # NOTE(review): _empties is reset here but never initialized in
            # __init__ nor read anywhere in this class — looks like leftover
            # state; confirm before removing.
            self._empties = 0
            for json_str in self._parse(msg_string):
                messages.append(fromJSON(json_str))
        else:
            print self, 'we appear to have lost connection'
            self.close()
        return messages
    def close(self):
        self.conn.close()
        self._closed = True
    def __repr__(self):
        return '<Connection:%i - %r>' % (self.id, self.addr)
| import sys
import logging
import time
import socket
from messages import *
from urllib2 import urlopen
IP = '0.0.0.0'
try:
PUBLIC = urlopen('http://ip.42.pl/raw').read()
except:
PUBLIC = '0.0.0.0'
PORT = 21385
MAX_CONNS = 20
TIMEOUT = 10
MSG_SIZE = 2**10
class Connection(object):
def __init__(self, connection, address):
self.conn = connection
self.addr = address
self.id = id(self)
self._closed = False
@property
def ip(self):
return self.addr[0]
@property
def port(self):
return self.addr[1]
@property
def closed(self):
return self._closed
def _parse(self, msg_string):
return msg_string.replace('}{', '}*{').split('*')
def send(self, message):
assert isinstance(message, Message), 'Can only send type: Message'
msg = toJSON(message)
self.conn.send(msg)
def recv(self, size=MSG_SIZE):
'''
receive any number of messages at a given time
blocking
'''
messages = []
msg_string = self.conn.recv(size)
if msg_string:
self._empties = 0
for json_str in self._parse(msg_string):
messages.append(fromJSON(json_str))
else:
print self, 'we appear to have lost connection'
self.close()
return messages
def close(self):
self.conn.close()
self._closed = True
def __repr__(self):
return '<Connection:%i - %r>' % (self.id, self.addr)
| en | 0.849206 | receive any number of messages at a given time blocking | 2.920888 | 3 |
condition.py | StevenVanDijk/bridgeBiddingBuddy | 0 | 6624197 | <reponame>StevenVanDijk/bridgeBiddingBuddy<gh_stars>0
from typing import List
from bidding import Bidding
infinite : int = 1000
class Condition:
    """Abstract base for bidding predicates: eval() tests a Bidding and
    explain() describes the test in human-readable form."""
    def eval(self, bidding: Bidding) -> bool:
        """Return True when this condition holds for *bidding* (abstract; base returns None)."""
        pass
    def explain(self) -> str:
        """Return a human-readable description of the condition (abstract; base returns None)."""
        pass
class NoBidsMadeCondition(Condition):
    """Holds while the auction is still empty (no bids in bidding.current)."""
    def __init__(self):
        super().__init__()
    def eval(self, bidding):
        return len(bidding.current) == 0
    def explain(self):
        return 'No bids were made'
class MaxNrPassesCondition(Condition):
    """Compares the number of 'pass' bids in the auction against a threshold."""
    # threshold number of passes this condition compares against
    maxPasses: int
    def __init__(self, maxPasses):
        super().__init__()
        self.maxPasses = maxPasses
    def eval(self, bidding):
        # NOTE(review): eval is True when the pass count EXCEEDS maxPasses,
        # while explain() claims "Less passes than ..." — one of the two looks
        # inverted; confirm the intended semantics against callers.
        return bidding.count('pass') > self.maxPasses
    def explain(self):
        return "Less passes than " + str(self.maxPasses)
class LastBidEqualToCondition(Condition):
    """Holds when at least one bid exists and the most recent bid equals *bid*."""
    # the bid string to compare the most recent bid against
    bid: str
    def __init__(self, bid):
        super().__init__()
        self.bid = bid
    def eval(self, bidding: Bidding):
        return len(bidding.current) > 0 and bidding.current[-1] == self.bid
    def explain(self):
        return "Last bid was " + self.bid
class HighestPointsBetweenCondition(Condition):
    """Holds when bidding.nrOfPoints lies in the inclusive range [min, max]."""
    # inclusive lower bound on the point count
    min: int
    # inclusive upper bound on the point count
    max: int
    def __init__(self, min, max):
        super().__init__()
        self.min = min
        self.max = max
    def eval(self, bidding: Bidding):
        return bidding.nrOfPoints >= self.min and bidding.nrOfPoints <= self.max
    def explain(self):
        return "Highest points between " + str(self.min) + " and " + str(self.max)
class OrCondition(Condition):
    """Holds when at least one of its two sub-conditions holds."""
    # the two sub-conditions combined with logical OR
    conditions: List[Condition]
    def __init__(self, cond1, cond2):
        super().__init__()
        self.conditions = [cond1, cond2]
    def eval(self, bidding):
        # Note: the list comprehension evaluates BOTH sub-conditions before
        # any() runs — no short-circuiting, unlike a plain `or`.
        return any([condition.eval(bidding) for condition in self.conditions])
    def explain(self):
        return "Either: " + str([cond.explain() for cond in self.conditions])
class HighestSeriesBetweenCondition(Condition):
    """Holds when bidding.highestSeries lies in the inclusive range [min, max]."""
    # inclusive lower bound on the highest series
    min: int
    # inclusive upper bound on the highest series
    max: int
    def __init__(self, min, max):
        super().__init__()
        self.min = min
        self.max = max
    def eval(self, bidding):
        return bidding.highestSeries >= self.min and bidding.highestSeries <= self.max
    def explain(self):
        return "Highest series between " + str(self.min) + " and " + str(self.max)
class LowestSeriesBetweenCondition(Condition):
    """Holds when bidding.lowestSeries lies in the inclusive range [min, max]."""
    # inclusive lower bound on the lowest series
    min: int
    # inclusive upper bound on the lowest series
    max: int
    def __init__(self, min, max):
        super().__init__()
        self.min = min
        self.max = max
    def eval(self, bidding):
        return bidding.lowestSeries >= self.min and bidding.lowestSeries <= self.max
    def explain(self):
        return "Lowest series between " + str(self.min) + " and " + str(self.max)
| from typing import List
from bidding import Bidding
infinite : int = 1000
class Condition:
def eval(self, bidding: Bidding) -> bool:
pass
def explain(self) -> str:
pass
class NoBidsMadeCondition(Condition):
def __init__(self):
super().__init__()
def eval(self, bidding):
return len(bidding.current) == 0
def explain(self):
return 'No bids were made'
class MaxNrPassesCondition(Condition):
maxPasses: int
def __init__(self, maxPasses):
super().__init__()
self.maxPasses = maxPasses
def eval(self, bidding):
return bidding.count('pass') > self.maxPasses
def explain(self):
return "Less passes than " + str(self.maxPasses)
class LastBidEqualToCondition(Condition):
bid: str
def __init__(self, bid):
super().__init__()
self.bid = bid
def eval(self, bidding: Bidding):
return len(bidding.current) > 0 and bidding.current[-1] == self.bid
def explain(self):
return "Last bid was " + self.bid
class HighestPointsBetweenCondition(Condition):
min: int
max: int
def __init__(self, min, max):
super().__init__()
self.min = min
self.max = max
def eval(self, bidding: Bidding):
return bidding.nrOfPoints >= self.min and bidding.nrOfPoints <= self.max
def explain(self):
return "Highest points between " + str(self.min) + " and " + str(self.max)
class OrCondition(Condition):
conditions: List[Condition]
def __init__(self, cond1, cond2):
super().__init__()
self.conditions = [cond1, cond2]
def eval(self, bidding):
return any([condition.eval(bidding) for condition in self.conditions])
def explain(self):
return "Either: " + str([cond.explain() for cond in self.conditions])
class HighestSeriesBetweenCondition(Condition):
min: int
max: int
def __init__(self, min, max):
super().__init__()
self.min = min
self.max = max
def eval(self, bidding):
return bidding.highestSeries >= self.min and bidding.highestSeries <= self.max
def explain(self):
return "Highest series between " + str(self.min) + " and " + str(self.max)
class LowestSeriesBetweenCondition(Condition):
min: int
max: int
def __init__(self, min, max):
super().__init__()
self.min = min
self.max = max
def eval(self, bidding):
return bidding.lowestSeries >= self.min and bidding.lowestSeries <= self.max
def explain(self):
return "Lowest series between " + str(self.min) + " and " + str(self.max) | none | 1 | 3.407275 | 3 | |
pyvibdmc/simulation_utilities/Constants.py | McCoyGroup/pyvibdmc | 6 | 6624198 | __all__ = ['Constants', 'get_atomic_num', 'get_atomic_string']
massDict = {'H': 1.00782503, 'D': 2.01410178, 'T': 3.01604928, 'He': 4.00260325, 'Li': 7.01600344, 'Be': 9.01218306,
'B': 11.00930536, 'C': 12.0, 'N': 14.003074, 'O': 15.99491462, 'F': 18.99840316, 'Ne': 19.99244018,
'Na': 22.98976928,
'Mg': 23.9850417, 'Al': 26.98153853, 'Si': 27.97692653, 'P': 30.973762, 'S': 31.97207117, 'Cl': 34.96885268,
'Ar': 39.96238312, 'K': 38.96370649, 'Ca': 39.96259086, 'Sc': 44.95590828, 'Ti': 47.94794198,
'V': 50.94395704,
'Cr': 51.94050623, 'Mn': 54.93804391, 'Fe': 55.93493633, 'Co': 58.93319429, 'Ni': 57.93534241,
'Cu': 62.92959772,
'Zn': 63.92914201, 'Ga': 68.9255735, 'Ge': 73.92117776, 'As': 74.92159457, 'Se': 79.9165218,
'Br': 78.9183376,
'Kr': 83.91149773, 'Rb': 84.91178974, 'Sr': 87.9056125, 'Y': 88.9058403, 'Zr': 89.9046977, 'Nb': 92.906373,
'Mo': 97.90540482, 'Tc': 96.9063667, 'Ru': 101.9043441, 'Rh': 102.905498, 'Pd': 105.9034804,
'Ag': 106.9050916,
'Cd': 113.90336509, 'In': 114.90387878, 'Sn': 119.90220163, 'Sb': 120.903812, 'Te': 129.90622275,
'I': 126.9044719,
'Xe': 131.90415509, 'Cs': 132.90545196, 'Ba': 137.905247, 'La': 138.9063563, 'Ce': 139.9054431,
'Pr': 140.9076576,
'Nd': 141.907729, 'Pm': 144.9127559, 'Sm': 151.9197397, 'Eu': 152.921238, 'Gd': 157.9241123,
'Tb': 158.9253547,
'Dy': 163.9291819, 'Ho': 164.9303288, 'Er': 165.9302995, 'Tm': 168.9342179, 'Yb': 173.9388664,
'Lu': 174.9407752,
'Hf': 179.946557, 'Ta': 180.9479958, 'W': 183.95093092, 'Re': 186.9557501, 'Os': 191.961477,
'Ir': 192.9629216,
'Pt': 194.9647917, 'Au': 196.96656879, 'Hg': 201.9706434, 'Tl': 204.9744278, 'Pb': 207.9766525,
'Bi': 208.9803991,
'Po': 208.9824308, 'At': 209.9871479, 'Rn': 210.9906011, 'Fr': 223.019736, 'Ra': 223.0185023,
'Ac': 227.0277523,
'Th': 232.0380558, 'Pa': 231.0358842, 'U': 238.0507884, 'Np': 236.04657, 'Pu': 238.0495601,
'Am': 241.0568293,
'Cm': 243.0613893, 'Bk': 247.0703073, 'Cf': 249.0748539, 'Es': 252.08298, 'Fm': 257.0951061,
'Md': 258.0984315,
'No': 259.10103, 'Lr': 262.10961, 'Rf': 267.12179, 'Db': 268.12567, 'Sg': 271.13393, 'Bh': 272.13826,
'Hs': 270.13429,
'Mt': 276.15159, 'Ds': 281.16451, 'Rg': 280.16514, 'Cn': 285.17712, 'Nh': 284.17873, 'Fl': 289.19042,
'Mc': 288.19274,
'Lv': 293.20449, 'Ts': 292.20746, 'Og': 294.21392}
def get_atomic_num(atms):
    """Map element symbols to their 1-based positions in ``massDict``.

    For light elements this position equals the atomic number (H -> 1);
    note that the 'D' and 'T' isotope entries shift every symbol after 'H'
    by two, so later positions are dictionary indices rather than true
    atomic numbers.

    :param atms: A list of atomic element symbols, or a single symbol string
    :return: A list with the 1-based massDict position of each symbol
    :raises ValueError: if a symbol is not present in massDict
    """
    # Bug fix: a bare multi-character string such as 'He' used to be iterated
    # character by character ('H', 'e') and raised ValueError, despite the
    # docstring promising single-string support; wrap it first.
    if isinstance(atms, str):
        atms = [atms]
    atm_strings = list(massDict.keys())
    return [atm_strings.index(n) + 1 for n in atms]
def get_atomic_string(atomic_num):
    """Inverse of :func:`get_atomic_num`: map 1-based massDict positions back to symbols.

    :param atomic_num: A single 1-based position or a list of them, as
        returned by ``get_atomic_num``
    :return: A list of atomic element symbols
    """
    if type(atomic_num) is not list: atomic_num = [atomic_num]
    atm_strings = list(massDict.keys())
    # Bug fix: get_atomic_num returns index + 1, so the lookup must subtract 1.
    # The old ``atm_strings[anum]`` was off by one — get_atomic_string(1)
    # returned 'D', breaking the round trip get_atomic_string(get_atomic_num(['H'])).
    return [atm_strings[anum - 1] for anum in atomic_num]
class Constants:
    """Unit-conversion helpers for DMC simulations.

    Handles energy (wavenumbers), distance (angstroms), and mass (amu)
    conversions to and from atomic units. Can be expanded upon.
    (Originally contributed by <NAME> — "silly little class".)
    """
    # Multiplicative factors taking each named unit into atomic units.
    atomic_units = {
        "wavenumbers": 4.556335281212229e-6,
        "angstroms": 1 / 0.529177,
        "amu": 1.000000000000000000 / 6.02213670000e23 / 9.10938970000e-28  # 1822.88839 g/mol -> a.u.
    }

    @classmethod
    def convert(cls, val, unit, to_AU=True):
        """Convert *val* (scalar or np.ndarray) between *unit* and atomic units.

        :param val: The value or values that will be converted
        :param unit: The non-atomic unit to convert from (to_AU=True) or to
        :param to_AU: If True, convert *unit* -> a.u.; if False, a.u. -> *unit*
        :return: The converted value(s)
        """
        factor = cls.atomic_units[unit]
        if to_AU:
            return val * factor
        return val / factor

    @classmethod
    def mass(cls, atom, to_AU=True):
        """Return the mass of element symbol *atom*, in a.u. (default) or amu.

        :param atom: The string symbol of an atomic element (massDict key)
        :param to_AU: If True, return atomic units; if False, return amu
        """
        amu_mass = massDict[atom]
        return cls.convert(amu_mass, 'amu') if to_AU else amu_mass

    @classmethod
    def reduced_mass(cls, atoms, to_AU=True):
        """Return the reduced mass of a diatomic given as 'X-Y' (e.g. 'O-H').

        :param atoms: Two element symbols joined by '-'
        :param to_AU: If True, return atomic units; if False, return amu
        """
        # Only the first two '-'-separated symbols are used, matching the
        # original behavior for longer strings.
        parts = atoms.split('-')
        m1 = massDict[parts[0]]
        m2 = massDict[parts[1]]
        if to_AU:
            m1 = cls.convert(m1, 'amu')
            m2 = cls.convert(m2, 'amu')
        return m1 * m2 / (m1 + m2)
| __all__ = ['Constants', 'get_atomic_num', 'get_atomic_string']
massDict = {'H': 1.00782503, 'D': 2.01410178, 'T': 3.01604928, 'He': 4.00260325, 'Li': 7.01600344, 'Be': 9.01218306,
'B': 11.00930536, 'C': 12.0, 'N': 14.003074, 'O': 15.99491462, 'F': 18.99840316, 'Ne': 19.99244018,
'Na': 22.98976928,
'Mg': 23.9850417, 'Al': 26.98153853, 'Si': 27.97692653, 'P': 30.973762, 'S': 31.97207117, 'Cl': 34.96885268,
'Ar': 39.96238312, 'K': 38.96370649, 'Ca': 39.96259086, 'Sc': 44.95590828, 'Ti': 47.94794198,
'V': 50.94395704,
'Cr': 51.94050623, 'Mn': 54.93804391, 'Fe': 55.93493633, 'Co': 58.93319429, 'Ni': 57.93534241,
'Cu': 62.92959772,
'Zn': 63.92914201, 'Ga': 68.9255735, 'Ge': 73.92117776, 'As': 74.92159457, 'Se': 79.9165218,
'Br': 78.9183376,
'Kr': 83.91149773, 'Rb': 84.91178974, 'Sr': 87.9056125, 'Y': 88.9058403, 'Zr': 89.9046977, 'Nb': 92.906373,
'Mo': 97.90540482, 'Tc': 96.9063667, 'Ru': 101.9043441, 'Rh': 102.905498, 'Pd': 105.9034804,
'Ag': 106.9050916,
'Cd': 113.90336509, 'In': 114.90387878, 'Sn': 119.90220163, 'Sb': 120.903812, 'Te': 129.90622275,
'I': 126.9044719,
'Xe': 131.90415509, 'Cs': 132.90545196, 'Ba': 137.905247, 'La': 138.9063563, 'Ce': 139.9054431,
'Pr': 140.9076576,
'Nd': 141.907729, 'Pm': 144.9127559, 'Sm': 151.9197397, 'Eu': 152.921238, 'Gd': 157.9241123,
'Tb': 158.9253547,
'Dy': 163.9291819, 'Ho': 164.9303288, 'Er': 165.9302995, 'Tm': 168.9342179, 'Yb': 173.9388664,
'Lu': 174.9407752,
'Hf': 179.946557, 'Ta': 180.9479958, 'W': 183.95093092, 'Re': 186.9557501, 'Os': 191.961477,
'Ir': 192.9629216,
'Pt': 194.9647917, 'Au': 196.96656879, 'Hg': 201.9706434, 'Tl': 204.9744278, 'Pb': 207.9766525,
'Bi': 208.9803991,
'Po': 208.9824308, 'At': 209.9871479, 'Rn': 210.9906011, 'Fr': 223.019736, 'Ra': 223.0185023,
'Ac': 227.0277523,
'Th': 232.0380558, 'Pa': 231.0358842, 'U': 238.0507884, 'Np': 236.04657, 'Pu': 238.0495601,
'Am': 241.0568293,
'Cm': 243.0613893, 'Bk': 247.0703073, 'Cf': 249.0748539, 'Es': 252.08298, 'Fm': 257.0951061,
'Md': 258.0984315,
'No': 259.10103, 'Lr': 262.10961, 'Rf': 267.12179, 'Db': 268.12567, 'Sg': 271.13393, 'Bh': 272.13826,
'Hs': 270.13429,
'Mt': 276.15159, 'Ds': 281.16451, 'Rg': 280.16514, 'Cn': 285.17712, 'Nh': 284.17873, 'Fl': 289.19042,
'Mc': 288.19274,
'Lv': 293.20449, 'Ts': 292.20746, 'Og': 294.21392}
def get_atomic_num(atms):
    """
    Map element symbols to atomic numbers.

    Relies on ``massDict`` being insertion-ordered by atomic number, so an
    element's 1-based position in the key list is its atomic number.

    :param atms: A list of atomic element symbols, or a single symbol string
    :return: A list with the atomic number of each symbol
    """
    if isinstance(atms, str):
        # A bare symbol such as 'He' would otherwise be iterated character by
        # character ('H', 'e') and crash on the lookup; treat a single string
        # as a one-element list, as the docstring promises.
        atms = [atms]
    atm_strings = list(massDict.keys())
    return [atm_strings.index(n) + 1 for n in atms]
def get_atomic_string(atomic_num):
    """
    Map atomic numbers back to element symbols (inverse of ``get_atomic_num``).

    :param atomic_num: An atomic number (1-based) or a list of atomic numbers
    :return: A list of atomic element symbols
    """
    if type(atomic_num) is not list: atomic_num = [atomic_num]
    atm_strings = list(massDict.keys())
    # get_atomic_num returns index + 1, so subtract 1 here to invert the
    # mapping; indexing with the raw atomic number was off by one (e.g. 1,
    # hydrogen, resolved to 'He').
    return [atm_strings[anum - 1] for anum in atomic_num]
class Constants:
    """
    Unit-conversion helpers for DMC calculations.

    Handles energy (wavenumbers), distance (angstroms), and mass (amu)
    conversions to and from atomic units. Can be expanded upon.
    """
    # Multiplicative factors taking one of the named unit to atomic units.
    atomic_units = {
        "wavenumbers": 4.556335281212229e-6,
        "angstroms": 1 / 0.529177,
        "amu": 1.000000000000000000 / 6.02213670000e23 / 9.10938970000e-28  # 1822.88839 g/mol -> a.u.
    }

    @classmethod
    def convert(cls, val, unit, to_AU=True):
        """
        Convert between atomic units and the named unit.

        :param val: The value or values that will be converted
        :type val: np.ndarray
        :param unit: The (non-atomic) unit to convert from or to; a key of ``atomic_units``
        :type unit: str
        :param to_AU: If True, convert *val* from ``unit`` to a.u.; if False, convert from a.u. to ``unit``
        :type to_AU: bool
        :return: converted values
        """
        factor = cls.atomic_units[unit]
        return (val * factor) if to_AU else (val / factor)

    @classmethod
    def mass(cls, atom, to_AU=True):
        """
        Given a string that corresponds to an atomic element, output the atomic mass of that element.

        :param atom: The symbol of an atomic element (a key of ``massDict``)
        :type atom: str
        :param to_AU: If True, return the mass in atomic units; if False, in amu
        :type to_AU: bool
        :return: mass in atomic units unless ``to_AU`` is False, then amu
        """
        m = massDict[atom]
        if to_AU:
            m = cls.convert(m, 'amu')
        return m

    @classmethod
    def reduced_mass(cls, atoms, to_AU=True):
        """
        Given a string like 'O-H' or 'N-N', output the reduced mass of that diatomic.

        :param atoms: Two element symbols joined by '-'
        :type atoms: str
        :param to_AU: If True, return the reduced mass in atomic units; if False, in amu
        :type to_AU: bool
        :return: reduced mass in atomic units unless ``to_AU`` is False, then amu
        """
        parts = atoms.split('-')
        # Delegate lookup + unit conversion to mass() so the logic lives in
        # exactly one place.
        mass1 = cls.mass(parts[0], to_AU=to_AU)
        mass2 = cls.mass(parts[1], to_AU=to_AU)
        return mass1 * mass2 / (mass1 + mass2)
| en | 0.518136 | :param atms: A list (or single string) of atomic element symbols :return: The atomic numbers of each of the atom strings you provide :param atomic_num: The atomic numbers of each of the atom strings you provide :return: A list of atomic element symbols Thanks, <NAME>, for this silly little class. Converter that handles energy, distance, and mass conversions for DMC. Can be expanded upon. # 1822.88839 g/mol -> a.u. :param val: The value or values that will be converted :type val: np.ndarray :param unit: The units (not atomic units) that we will be converting to or from :type unit: str :param to_AU: If true, converting from non-a.u. to a.u. If false, converting to a.u. from non-a.u. :type to_AU:boolean :return: converted values Given a string that corresponds to an atomic element, output the atomic mass of that element :param atom: The string of an atomic element :type atom:str :param to_AU: If true, converting from non-a.u. to a.u. If false, converting to a.u. from non-a.u. :type to_AU:boolean :return: mass in atomic units unless user changes to_AU to False, then AMU Given a string like 'O-H' or 'N-N' , output the reduced mass of that diatomic :param atoms: A string that is composed of two atoms :type atom:str :param to_AU: If true, converting from non-a.u. to a.u. If false, converting to a.u. from non-a.u. :type to_AU:boolean :return: mass in atomic units unless user changes to_AU to False, then AMU | 1.74423 | 2 |
api/redis_own.py | MetaExp/backend | 1 | 6624199 | <reponame>MetaExp/backend
import redis
import logging
import pickle
from util.datastructures import MetaPath
from typing import List, Tuple
from util.config import REDIS_HOST, REDIS_PORT, REDIS_PASSWORD
class Redis:
    """Persistence layer for the meta paths of one data set, backed by Redis.

    Meta-path objects are pickled before storage; the type-id maps are Redis
    hashes, so their keys and values come back as ``bytes``.
    """

    def __init__(self, data_set_name: str):
        # The anonymized placeholder '<PASSWORD>' in the original was not
        # valid Python; REDIS_PASSWORD (imported from util.config alongside
        # host and port) is the intended credential.
        self._client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
        self.data_set = data_set_name
        self.logger = logging.getLogger('MetaExp.{}'.format(__class__.__name__))

    def meta_paths(self, start_type: str, end_type: str) -> List:
        """Return every embedded MetaPath stored for the (start, end) node-type pair."""
        self.logger.debug("Retrieving meta paths...")
        pickled_list = self._client.lrange("{}_{}_{}_embedded".format(self.data_set, start_type, end_type), 0, -1)
        self.logger.debug("Number of meta paths for {} and {} is {}".format(start_type, end_type, len(pickled_list)))
        return [pickle.loads(pickled_entry) for pickled_entry in pickled_list]

    def id_to_edge_type_map(self):
        """Map edge-type id -> edge-type name (``bytes`` keys/values from Redis)."""
        return self._client.hgetall("{}_edge_type_map".format(self.data_set))

    def id_to_node_type_map(self):
        """Map node-type id -> node-type name (``bytes`` keys/values from Redis)."""
        return self._client.hgetall("{}_node_type_map".format(self.data_set))

    def node_type_to_id_map(self):
        """Inverse of :meth:`id_to_node_type_map`."""
        return self._client.hgetall("{}_node_type_map_reverse".format(self.data_set))

    def edge_type_to_id_map(self):
        """Inverse of :meth:`id_to_edge_type_map`."""
        return self._client.hgetall("{}_edge_type_map_reverse".format(self.data_set))

    def get_all_meta_paths(self):
        """Collect the meta paths stored under every node-type pair of this data set."""
        result = []
        # TODO: Match only keys without '_embedding'
        for key in self._client.keys(pattern='{}_[0-9-]*_[0-9-]*'.format(self.data_set)):
            result.extend([pickle.loads(pickled_entry) for pickled_entry in self._client.lrange(key, 0, -1)])
        return result

    def store_embeddings(self, mp_embeddings_list: List[Tuple[MetaPath, List[float]]]):
        """Translate id-based meta paths to named ones and persist them with
        their embedding and structural value."""
        # Fetch the two translation maps once instead of twice per meta path;
        # the maps do not change while this loop runs on the same client.
        node_type_map, edge_type_map = self.id_to_node_type_map(), self.id_to_edge_type_map()
        for mp_object, embedding in mp_embeddings_list:
            mp = mp_object.get_representation('UI')
            structural_value = mp_object.get_structural_value()
            self.logger.debug("Got mp {}".format(mp))
            # Even positions hold node ids, odd positions edge ids.
            start_type, end_type = node_type_map[str(mp[0]).encode()].decode(), node_type_map[str(mp[-1]).encode()].decode()
            node_list = [node_type_map[str(node).encode()].decode() for node in mp[::2]]
            edge_list = [edge_type_map[str(edge).encode()].decode() for edge in mp[1::2]]
            meta_path = MetaPath(nodes=node_list, edges=edge_list)
            meta_path.store_embedding(embedding)
            meta_path.store_structural_value(structural_value)
            self.logger.debug("Created meta path object {}".format(meta_path))
            self._client.lpush("{}_{}_{}_embedded".format(self.data_set, start_type, end_type), pickle.dumps(meta_path))
| import redis
import logging
import pickle
from util.datastructures import MetaPath
from typing import List, Tuple
from util.config import REDIS_HOST, REDIS_PORT, REDIS_PASSWORD
class Redis:
def __init__(self, data_set_name: str):
self._client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=<PASSWORD>)
self.data_set = data_set_name
self.logger = logging.getLogger('MetaExp.{}'.format(__class__.__name__))
def meta_paths(self, start_type: str, end_type: str) -> List:
self.logger.debug("Retrieving meta paths...")
pickled_list = self._client.lrange("{}_{}_{}_embedded".format(self.data_set, start_type, end_type), 0, -1)
self.logger.debug("Number of meta paths for {} and {} is {}".format(start_type, end_type, len(pickled_list)))
return [pickle.loads(pickled_entry) for pickled_entry in pickled_list]
def id_to_edge_type_map(self):
return self._client.hgetall("{}_edge_type_map".format(self.data_set))
def id_to_node_type_map(self):
return self._client.hgetall("{}_node_type_map".format(self.data_set))
def node_type_to_id_map(self):
return self._client.hgetall("{}_node_type_map_reverse".format(self.data_set))
def edge_type_to_id_map(self):
return self._client.hgetall("{}_edge_type_map_reverse".format(self.data_set))
def get_all_meta_paths(self):
result = []
# TODO: Match only keys without '_embedding'
for key in self._client.keys(pattern='{}_[0-9-]*_[0-9-]*'.format(self.data_set)):
result.extend([pickle.loads(pickled_entry) for pickled_entry in self._client.lrange(key, 0, -1)])
return result
def store_embeddings(self, mp_embeddings_list: List[Tuple[MetaPath, List[float]]]):
for mp_object, embedding in mp_embeddings_list:
mp = mp_object.get_representation('UI')
structural_value = mp_object.get_structural_value()
self.logger.debug("Got mp {}".format(mp))
node_type_map, edge_type_map = self.id_to_node_type_map(), self.id_to_edge_type_map()
start_type, end_type = node_type_map[str(mp[0]).encode()].decode(), node_type_map[str(mp[-1]).encode()].decode()
node_list = [node_type_map[str(node).encode()].decode() for node in mp[::2]]
edge_list = [edge_type_map[str(edge).encode()].decode() for edge in mp[1::2]]
meta_path = MetaPath(nodes=node_list, edges=edge_list)
meta_path.store_embedding(embedding)
meta_path.store_structural_value(structural_value)
self.logger.debug("Created meta path object {}".format(meta_path))
self._client.lpush("{}_{}_{}_embedded".format(self.data_set, start_type, end_type), pickle.dumps(meta_path)) | en | 0.297871 | # TODO: Match only keys without '_embedding' | 2.28334 | 2 |
mognet/cli/nodes.py | IBM/project-mognet | 0 | 6624200 | import asyncio
from datetime import datetime
from typing import List, Optional
import tabulate
import typer
from mognet.cli.cli_state import state
from mognet.cli.models import OutputFormat
from mognet.cli.run_in_loop import run_in_loop
from mognet.model.result import Result
from mognet.primitives.queries import StatusResponseMessage
from mognet.tools.dates import now_utc
from pydantic import BaseModel, Field
group = typer.Typer()
@group.command("status")
@run_in_loop
async def status(
    format: OutputFormat = typer.Option(OutputFormat.TEXT, metavar="format"),
    text_label_format: str = typer.Option(
        "{name}(id={id!r}, state={state!r})",
        metavar="text-label-format",
        help="Label format for text format",
    ),
    json_indent: int = typer.Option(2, metavar="json-indent"),
    poll: Optional[int] = typer.Option(
        None,
        metavar="poll",
        help="Polling interval, in seconds (default=None)",
    ),
    timeout: int = typer.Option(
        30,
        help="Timeout for querying nodes",
    ),
):
    """Query each node for their status"""
    async with state["app_instance"] as app:
        while True:
            # 1. Broadcast a status query and collect replies until `timeout`
            #    elapses; nodes that answer after the deadline are ignored.
            each_node_status: List[StatusResponseMessage] = []

            async def read_status():
                async for node_status in app.get_current_status_of_nodes():
                    each_node_status.append(node_status)

            try:
                await asyncio.wait_for(read_status(), timeout=timeout)
            except asyncio.TimeoutError:
                # Expected: the reply stream does not terminate on its own,
                # so the timeout is the normal way out of read_status().
                pass

            # 2. Gather every request id mentioned by any node, then fetch the
            #    corresponding Result objects in one batched backend call.
            all_result_ids = set()
            for node_status in each_node_status:
                all_result_ids.update(node_status.payload.running_request_ids)

            all_results_by_id = {
                r.id: r
                for r in await app.result_backend.get_many(
                    *all_result_ids,
                )
                if r is not None
            }

            # 3. Assemble the per-node report, oldest request first; results
            #    missing from the backend are silently dropped.
            report = _CliStatusReport()

            for node_status in each_node_status:
                running_requests = [
                    all_results_by_id[r]
                    for r in node_status.payload.running_request_ids
                    if r in all_results_by_id
                ]
                running_requests.sort(key=lambda r: r.created or now_utc())
                report.node_status.append(
                    _CliStatusReport.NodeStatus(
                        node_id=node_status.node_id, running_requests=running_requests
                    )
                )

            # 4. Render. When polling, clear the screen between refreshes.
            if poll:
                typer.clear()

            if format == "text":
                table_headers = ("Node name", "Running requests")
                table_data = [
                    (
                        n.node_id,
                        "\n".join(
                            text_label_format.format(**r.dict())
                            for r in n.running_requests
                        )
                        or "(Empty)",
                    )
                    for n in report.node_status
                ]
                typer.echo(
                    f"{len(report.node_status)} nodes replied as of {datetime.now()}:"
                )
                typer.echo(tabulate.tabulate(table_data, headers=table_headers))
            elif format == "json":
                typer.echo(report.json(indent=json_indent, ensure_ascii=False))

            # Run once unless a polling interval was requested.
            if not poll:
                break
            await asyncio.sleep(poll)
class _CliStatusReport(BaseModel):
    """Aggregated output of the `status` command: one entry per replying node."""

    class NodeStatus(BaseModel):
        # Identifier the node reported itself under.
        node_id: str
        # Results of the requests that node is executing, sorted oldest first.
        running_requests: List[Result]

    # One NodeStatus per node that answered the status broadcast.
    node_status: List[NodeStatus] = Field(default_factory=list)
| import asyncio
from datetime import datetime
from typing import List, Optional
import tabulate
import typer
from mognet.cli.cli_state import state
from mognet.cli.models import OutputFormat
from mognet.cli.run_in_loop import run_in_loop
from mognet.model.result import Result
from mognet.primitives.queries import StatusResponseMessage
from mognet.tools.dates import now_utc
from pydantic import BaseModel, Field
group = typer.Typer()
@group.command("status")
@run_in_loop
async def status(
format: OutputFormat = typer.Option(OutputFormat.TEXT, metavar="format"),
text_label_format: str = typer.Option(
"{name}(id={id!r}, state={state!r})",
metavar="text-label-format",
help="Label format for text format",
),
json_indent: int = typer.Option(2, metavar="json-indent"),
poll: Optional[int] = typer.Option(
None,
metavar="poll",
help="Polling interval, in seconds (default=None)",
),
timeout: int = typer.Option(
30,
help="Timeout for querying nodes",
),
):
"""Query each node for their status"""
async with state["app_instance"] as app:
while True:
each_node_status: List[StatusResponseMessage] = []
async def read_status():
async for node_status in app.get_current_status_of_nodes():
each_node_status.append(node_status)
try:
await asyncio.wait_for(read_status(), timeout=timeout)
except asyncio.TimeoutError:
pass
all_result_ids = set()
for node_status in each_node_status:
all_result_ids.update(node_status.payload.running_request_ids)
all_results_by_id = {
r.id: r
for r in await app.result_backend.get_many(
*all_result_ids,
)
if r is not None
}
report = _CliStatusReport()
for node_status in each_node_status:
running_requests = [
all_results_by_id[r]
for r in node_status.payload.running_request_ids
if r in all_results_by_id
]
running_requests.sort(key=lambda r: r.created or now_utc())
report.node_status.append(
_CliStatusReport.NodeStatus(
node_id=node_status.node_id, running_requests=running_requests
)
)
if poll:
typer.clear()
if format == "text":
table_headers = ("Node name", "Running requests")
table_data = [
(
n.node_id,
"\n".join(
text_label_format.format(**r.dict())
for r in n.running_requests
)
or "(Empty)",
)
for n in report.node_status
]
typer.echo(
f"{len(report.node_status)} nodes replied as of {datetime.now()}:"
)
typer.echo(tabulate.tabulate(table_data, headers=table_headers))
elif format == "json":
typer.echo(report.json(indent=json_indent, ensure_ascii=False))
if not poll:
break
await asyncio.sleep(poll)
class _CliStatusReport(BaseModel):
class NodeStatus(BaseModel):
node_id: str
running_requests: List[Result]
node_status: List[NodeStatus] = Field(default_factory=list)
| en | 0.956978 | Query each node for their status | 2.449694 | 2 |
python/Intro/sys/myImages.py | Joaxin/GitComments | 0 | 6624201 | <filename>python/Intro/sys/myImages.py
# Filename extensions (lower-case, no dot) that we recognize as images.
picFormats = ['jpg', 'png', 'gif', 'jpeg', 'bmp', 'webp']

print('Plz enter a file name:')
file = input()

# Look at the text after the last dot, compared case-insensitively.
extension = file.lower().split(".")[-1]
if extension in picFormats:
    print(file + ' is an image file.')
else:
    print('Maybe not an image file. ')
picFormats = ['jpg', 'png', 'gif', 'jpeg', 'bmp', 'webp']
print('Plz enter a file name:')
file = input()
if file.lower().split(".")[-1] not in picFormats:
print('Maybe not an image file. ')
else:
print(file + ' is an image file.') | none | 1 | 3.825533 | 4 | |
sdk/python/pulumi_aws/route53/zone.py | Charliekenney23/pulumi-aws | 0 | 6624202 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Zone(pulumi.CustomResource):
    # NOTE(review): this class is tfgen-generated ("do not edit by hand");
    # the annotations below are review comments only.
    comment: pulumi.Output[str]
    """
    A comment for the hosted zone. Defaults to 'Managed by Terraform'.
    """
    delegation_set_id: pulumi.Output[str]
    """
    The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones.
    """
    force_destroy: pulumi.Output[bool]
    """
    Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone.
    """
    name: pulumi.Output[str]
    """
    This is the name of the hosted zone.
    """
    name_servers: pulumi.Output[list]
    """
    A list of name servers in associated (or default) delegation set.
    Find more about delegation sets in [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html).
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the zone.
    """
    vpcs: pulumi.Output[list]
    """
    Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below.
    """
    zone_id: pulumi.Output[str]
    """
    The Hosted Zone ID. This can be referenced by zone records.
    """
    def __init__(__self__, resource_name, opts=None, comment=None, delegation_set_id=None, force_destroy=None, name=None, tags=None, vpcs=None, __name__=None, __opts__=None):
        """
        Manages a Route53 Hosted Zone.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] comment: A comment for the hosted zone. Defaults to 'Managed by Terraform'.
        :param pulumi.Input[str] delegation_set_id: The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones.
        :param pulumi.Input[bool] force_destroy: Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone.
        :param pulumi.Input[str] name: This is the name of the hosted zone.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the zone.
        :param pulumi.Input[list] vpcs: Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below.
        """
        # Legacy aliases: __name__/__opts__ predate resource_name/opts and are
        # still honored (with a DeprecationWarning) for old call sites.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        __props__ = dict()
        # NOTE(review): the attribute docstring above says the default comment
        # is 'Managed by Terraform', but the generated code applies
        # 'Managed by Pulumi' -- the mismatch comes from the bridge; confirm
        # before relying on either value.
        if comment is None:
            comment = 'Managed by Pulumi'
        __props__['comment'] = comment
        __props__['delegation_set_id'] = delegation_set_id
        __props__['force_destroy'] = force_destroy
        __props__['name'] = name
        __props__['tags'] = tags
        __props__['vpcs'] = vpcs
        # Output-only properties start as None and are filled in by the engine.
        __props__['name_servers'] = None
        __props__['zone_id'] = None
        super(Zone, __self__).__init__(
            'aws:route53/zone:Zone',
            resource_name,
            __props__,
            opts)

    def translate_output_property(self, prop):
        # Provider camelCase property name -> Python snake_case attribute.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Python snake_case attribute -> provider camelCase property name.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Zone(pulumi.CustomResource):
comment: pulumi.Output[str]
"""
A comment for the hosted zone. Defaults to 'Managed by Terraform'.
"""
delegation_set_id: pulumi.Output[str]
"""
The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones.
"""
force_destroy: pulumi.Output[bool]
"""
Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone.
"""
name: pulumi.Output[str]
"""
This is the name of the hosted zone.
"""
name_servers: pulumi.Output[list]
"""
A list of name servers in associated (or default) delegation set.
Find more about delegation sets in [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html).
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the zone.
"""
vpcs: pulumi.Output[list]
"""
Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below.
"""
zone_id: pulumi.Output[str]
"""
The Hosted Zone ID. This can be referenced by zone records.
"""
def __init__(__self__, resource_name, opts=None, comment=None, delegation_set_id=None, force_destroy=None, name=None, tags=None, vpcs=None, __name__=None, __opts__=None):
"""
Manages a Route53 Hosted Zone.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] comment: A comment for the hosted zone. Defaults to 'Managed by Terraform'.
:param pulumi.Input[str] delegation_set_id: The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones.
:param pulumi.Input[bool] force_destroy: Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone.
:param pulumi.Input[str] name: This is the name of the hosted zone.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the zone.
:param pulumi.Input[list] vpcs: Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if comment is None:
comment = 'Managed by Pulumi'
__props__['comment'] = comment
__props__['delegation_set_id'] = delegation_set_id
__props__['force_destroy'] = force_destroy
__props__['name'] = name
__props__['tags'] = tags
__props__['vpcs'] = vpcs
__props__['name_servers'] = None
__props__['zone_id'] = None
super(Zone, __self__).__init__(
'aws:route53/zone:Zone',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| en | 0.830499 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** A comment for the hosted zone. Defaults to 'Managed by Terraform'. The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones. Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone. This is the name of the hosted zone. A list of name servers in associated (or default) delegation set. Find more about delegation sets in [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html). A mapping of tags to assign to the zone. Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below. The Hosted Zone ID. This can be referenced by zone records. Manages a Route53 Hosted Zone. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] comment: A comment for the hosted zone. Defaults to 'Managed by Terraform'. :param pulumi.Input[str] delegation_set_id: The ID of the reusable delegation set whose NS records you want to assign to the hosted zone. Conflicts with `vpc` as delegation sets can only be used for public zones. :param pulumi.Input[bool] force_destroy: Whether to destroy all records (possibly managed outside of Terraform) in the zone when destroying the zone. :param pulumi.Input[str] name: This is the name of the hosted zone. :param pulumi.Input[dict] tags: A mapping of tags to assign to the zone. 
:param pulumi.Input[list] vpcs: Configuration block(s) specifying VPC(s) to associate with a private hosted zone. Conflicts with the `delegation_set_id` argument in this resource and any [`aws_route53_zone_association` resource](https://www.terraform.io/docs/providers/aws/r/route53_zone_association.html) specifying the same zone ID. Detailed below. | 1.741416 | 2 |
app/static/ToCompileSASS.py | smlopezza/SofiaLearning_public | 0 | 6624203 | 1# -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 19:29:39 2019
@author: <NAME>
"""
import sass
import os
# Compile every stylesheet source under ./sass into minified CSS in ./css.
sass.compile(dirname=('sass', 'css'), output_style='compressed')

# Echo the generated bundle so the compilation result can be inspected.
with open('css/styles.css') as compiled_css:
    print(compiled_css.read())
| 1# -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 19:29:39 2019
@author: <NAME>
"""
import sass
import os
sass.compile(dirname=('sass', 'css'), output_style='compressed')
with open('css/styles.css') as styles_css:
print(styles_css.read())
| en | 0.761806 | # -*- coding: utf-8 -*- Created on Sun Dec 15 19:29:39 2019 @author: <NAME> | 2.041638 | 2 |
stuff/tests/utils.py | fenimore/stuff | 1 | 6624204 | <reponame>fenimore/stuff
import os
def _data(filename: str) -> str:
    """Return the text of a fixture stored in the ``data`` directory next to this module."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    with open(os.path.join(data_dir, filename), "r") as fixture:
        return fixture.read()
| import os
def _data(filename: str) -> str:
file_path = os.path.join(
os.path.dirname(__file__), "data", filename
)
with open(file_path, "r") as file:
return file.read() | none | 1 | 2.772364 | 3 | |
tests/test_append.py | wrwrwr/argparse-oappend | 0 | 6624205 | from oappend import OverrideAppendArgumentParser
class OAppendTests:
    """Behavioral tests for the 'oappend' (override-append) argparse action."""

    def test_default(self):
        # With no -n flags on the command line, the declared default survives.
        parser = OverrideAppendArgumentParser()
        parser.add_argument('-n', action='oappend', default=['a', 'b'])
        parsed = parser.parse_args([])
        assert parsed.n == ['a', 'b']

    def test_append(self):
        # Explicit -n values replace the default entirely instead of extending it.
        parser = OverrideAppendArgumentParser()
        parser.add_argument('-n', action='oappend', default=['a', 'b'])
        parsed = parser.parse_args(['-nc', '-nd'])
        assert parsed.n == ['c', 'd']

    def test_type(self):
        # Values pass through the declared type callable before being stored.
        parser = OverrideAppendArgumentParser()
        parser.add_argument('-n', action='oappend', type=int, default=[1, 2])
        parsed = parser.parse_args(['-n3', '-n4'])
        assert parsed.n == [3, 4]
| from oappend import OverrideAppendArgumentParser
class OAppendTests:
def test_default(self):
parser = OverrideAppendArgumentParser()
parser.add_argument('-n', action='oappend', default=['a', 'b'])
args = parser.parse_args([])
assert args.n == ['a', 'b']
def test_append(self):
parser = OverrideAppendArgumentParser()
parser.add_argument('-n', action='oappend', default=['a', 'b'])
args = parser.parse_args(['-nc', '-nd'])
assert args.n == ['c', 'd']
def test_type(self):
parser = OverrideAppendArgumentParser()
parser.add_argument('-n', action='oappend', type=int, default=[1, 2])
args = parser.parse_args(['-n3', '-n4'])
assert args.n == [3, 4]
| none | 1 | 3.160761 | 3 | |
binary trees/deleteNode_BT.py | mkoryor/Python | 0 | 6624206 |
# Python3 program to illustrate deletion in a Binary Tree
# class to create a node with data, left child and right child.
class Node:
    """A single binary-tree node: a payload plus optional left/right children."""

    def __init__(self, data):
        self.data = data
        # Children start out absent; the caller attaches them explicitly.
        self.left = None
        self.right = None
# Inorder traversal of a binary tree
def inorder(temp):
    """Print the subtree rooted at *temp* in left-node-right order,
    space-separated on a single line (no trailing newline)."""
    # Iterative traversal with an explicit stack instead of recursion.
    stack, node = [], temp
    while stack or node:
        while node:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.data, end=" ")
        node = node.right
# function to delete the given deepest node (d_node) in binary tree
def deleteDeepest(root, d_node):
    """
    Unlink d_node from the tree by locating its parent with a BFS from root
    and clearing the matching child pointer.

    If d_node is the root itself there is no parent pointer to clear, so the
    function simply returns; callers (deletion) handle the single-node tree
    before invoking this helper.
    """
    q = []
    q.append(root)
    while len(q):
        temp = q.pop(0)
        if temp is d_node:
            # Target reached as the search root: nothing to unlink. (The
            # original wrote 'temp = None' here, which only rebound a local
            # variable and had no effect on the tree.)
            return
        if temp.right:
            if temp.right is d_node:
                temp.right = None
                return
            else:
                q.append(temp.right)
        if temp.left:
            if temp.left is d_node:
                temp.left = None
                return
            else:
                q.append(temp.left)
# function to delete element in binary tree
def deletion(root, key):
    """
    Delete one node holding *key* from the binary tree rooted at *root*.

    Standard BFS deletion: the matching node's data is overwritten with the
    deepest node's data, and the deepest node is then unlinked via
    deleteDeepest. If the key occurs more than once, the last match in BFS
    order is the one replaced. Returns the (possibly emptied) root.
    """
    if root is None:
        return None
    if root.left is None and root.right is None:
        # Single-node tree: it becomes empty only when the root matches.
        # (The original compared 'root.key', an attribute Node never defines,
        # which raised AttributeError here; Node stores its value in 'data'.)
        if root.data == key:
            return None
        else:
            return root
    key_node = None
    q = []
    q.append(root)
    while len(q):
        temp = q.pop(0)
        if temp.data == key:
            key_node = temp
        if temp.left:
            q.append(temp.left)
        if temp.right:
            q.append(temp.right)
    if key_node:
        # 'temp' is the last node visited by the BFS, i.e. the deepest,
        # rightmost node: move its value into the matched node and drop it.
        x = temp.data
        deleteDeepest(root, temp)
        key_node.data = x
    return root
# Driver code: build a small test tree, delete one key, and print the inorder
# traversal before and after.
if __name__=='__main__':
    #          10
    #         /  \
    #       11    9
    #      /  \  /  \
    #     7   12 15  8
    root = Node(10)
    root.left = Node(11)
    root.left.left = Node(7)
    root.left.right = Node(12)
    root.right = Node(9)
    root.right.left = Node(15)
    root.right.right = Node(8)
    print("The tree before the deletion:")
    inorder(root)
    key = 11
    root = deletion(root, key)
    print()
    print("The tree after the deletion;")
    inorder(root)
# Inorder traversal before deletion : 7 11 12 10 15 9 8
# Inorder traversal after deletion : 7 8 12 10 15 9
|
# Python3 program to illustrate deletion in a Binary Tree
# class to create a node with data, left child and right child.
class Node:
def __init__(self,data):
self.data = data
self.left = None
self.right = None
# Inorder traversal of a binary tree
def inorder(temp):
if(not temp):
return
inorder(temp.left)
print(temp.data, end = " ")
inorder(temp.right)
# function to delete the given deepest node (d_node) in binary tree
def deleteDeepest(root,d_node):
q = []
q.append(root)
while(len(q)):
temp = q.pop(0)
if temp is d_node:
temp = None
return
if temp.right:
if temp.right is d_node:
temp.right = None
return
else:
q.append(temp.right)
if temp.left:
if temp.left is d_node:
temp.left = None
return
else:
q.append(temp.left)
# function to delete element in binary tree
def deletion(root, key):
if root == None :
return None
if root.left == None and root.right == None:
if root.key == key :
return None
else :
return root
key_node = None
q = []
q.append(root)
while(len(q)):
temp = q.pop(0)
if temp.data == key:
key_node = temp
if temp.left:
q.append(temp.left)
if temp.right:
q.append(temp.right)
if key_node :
x = temp.data
deleteDeepest(root,temp)
key_node.data = x
return root
# Driver code
if __name__=='__main__':
root = Node(10)
root.left = Node(11)
root.left.left = Node(7)
root.left.right = Node(12)
root.right = Node(9)
root.right.left = Node(15)
root.right.right = Node(8)
print("The tree before the deletion:")
inorder(root)
key = 11
root = deletion(root, key)
print()
print("The tree after the deletion;")
inorder(root)
# Inorder traversal before deletion : 7 11 12 10 15 9 8
# Inorder traversal after deletion : 7 8 12 10 15 9
| en | 0.790172 | # Python3 program to illustrate deletion in a Binary Tree # class to create a node with data, left child and right child. # Inorder traversal of a binary tree # function to delete the given deepest node (d_node) in binary tree # function to delete element in binary tree # Driver code # Inorder traversal before deletion : 7 11 12 10 15 9 8 # Inorder traversal after deletion : 7 8 12 10 15 9 | 4.412398 | 4 |
src/priority/__init__.py | Kriechi/priority | 28 | 6624207 | # -*- coding: utf-8 -*-
"""
priority: HTTP/2 priority implementation for Python
"""
from .priority import ( # noqa
Stream,
PriorityTree,
DeadlockError,
PriorityLoop,
PriorityError,
DuplicateStreamError,
MissingStreamError,
TooManyStreamsError,
BadWeightError,
PseudoStreamError,
)
__version__ = "2.0.0"
| # -*- coding: utf-8 -*-
"""
priority: HTTP/2 priority implementation for Python
"""
from .priority import ( # noqa
Stream,
PriorityTree,
DeadlockError,
PriorityLoop,
PriorityError,
DuplicateStreamError,
MissingStreamError,
TooManyStreamsError,
BadWeightError,
PseudoStreamError,
)
__version__ = "2.0.0"
| en | 0.620992 | # -*- coding: utf-8 -*- priority: HTTP/2 priority implementation for Python # noqa | 1.196753 | 1 |
Problems/Array/customSorting.py | vishwajeet-hogale/LearnSTL | 0 | 6624208 | <reponame>vishwajeet-hogale/LearnSTL
class Solution(object):
    def customSortString(self, order, str):
        """
        Reorder the characters of `str` so that they follow the relative
        order given by `order` (LeetCode 791, "Custom Sort String").

        :type order: str
        :type str: str
        :rtype: str

        Characters of `str` that do not appear in `order` are appended
        afterwards in ascending (sorted) order — identical to the
        original's alphabetical a-z sweep for lowercase input.

        Fixes two defects of the original implementation:
        - quadratic string concatenation (`ans += c` in nested loops) is
          replaced by a single ``join``;
        - characters outside ``'a'..'z'`` that were absent from `order`
          were silently dropped by the hard-coded ``range(26)`` loop;
          they are now emitted like any other leftover character.
        """
        # 1. count every character of the target string
        counts = {}
        for c in str:
            counts[c] = counts.get(c, 0) + 1
        # 2. emit characters in the order dictated by `order`;
        #    pop() so step 3 only sees characters not mentioned in `order`
        parts = []
        for c in order:
            parts.append(c * counts.pop(c, 0))
        # 3. append the remaining characters (sorted() preserves the
        #    original alphabetical ordering for lowercase input)
        for c in sorted(counts):
            parts.append(c * counts[c])
        return "".join(parts)
| class Solution(object):
def customSortString(self, order, str):
"""
:type order: str
:type str: str
:rtype: str
"""
d = {}
# 1. traverse T to build the counting
for c in str:
if c not in d:
d[c] = 1
else:
d[c] += 1
# 2. traverse S to construct the relative order
ans = ''
for c in order:
for _ in range(d.get(c,0)):
ans+= c
#reset the counting:
d[c] = 0
#adding in unfined letters
for i in range(26):
for j in range(d.get(chr(i + 97),0)):
ans += chr(i + 97)
return ans | en | 0.698698 | :type order: str :type str: str :rtype: str # 1. traverse T to build the counting # 2. traverse S to construct the relative order #reset the counting: #adding in unfined letters | 3.242779 | 3 |
02-sdp-intro/for_in.py | iproduct/intro-python | 3 | 6624209 | for i in range(1, 11, 2):
print(i)
fruits = ['Ябълка', 'Портокал', 'Круша', 'Череша']
for i in range(len(fruits)):
print(i + 1, fruits[i], sep=': ', end='\n', file=open('fruits.txt', 'a', encoding='utf-8'), flush=False)
for line in open('fruits.txt', 'r', encoding='utf-8'):
print(line) | for i in range(1, 11, 2):
print(i)
fruits = ['Ябълка', 'Портокал', 'Круша', 'Череша']
for i in range(len(fruits)):
print(i + 1, fruits[i], sep=': ', end='\n', file=open('fruits.txt', 'a', encoding='utf-8'), flush=False)
for line in open('fruits.txt', 'r', encoding='utf-8'):
print(line) | none | 1 | 3.342912 | 3 | |
nonbonded/backend/alembic/versions/f59d68c0a199_software_provenance.py | SimonBoothroyd/nonbonded | 5 | 6624210 | <reponame>SimonBoothroyd/nonbonded
"""Software provenance
Revision ID: f59d68c0a199
Revises: 66c8e3cf68ba
Create Date: 2021-01-27 11:54:28.746216
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "f59d68c0a199"
down_revision = "66c8e3cf68ba"
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision.

    Creates the ``software_provenance`` table (name/version pairs of the
    software used) plus four many-to-many association tables linking
    benchmark / optimization results to the software environments used
    for their calculation and for their analysis.

    NOTE(review): uniqueness is declared on ``version`` alone, not on
    (``name``, ``version``) — two packages sharing a version string
    would collide; confirm this is intended.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "software_provenance",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=20), nullable=False),
        sa.Column("version", sa.String(length=32), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("version"),
    )
    op.create_index(
        op.f("ix_software_provenance_id"), "software_provenance", ["id"], unique=False
    )
    # Association tables: composite primary key (results_id, software_id),
    # each column a foreign key into the respective results / provenance table.
    op.create_table(
        "benchmark_analysis_environment",
        sa.Column("results_id", sa.Integer(), nullable=False),
        sa.Column("software_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["results_id"],
            ["benchmark_results.id"],
        ),
        sa.ForeignKeyConstraint(
            ["software_id"],
            ["software_provenance.id"],
        ),
        sa.PrimaryKeyConstraint("results_id", "software_id"),
    )
    op.create_table(
        "benchmark_calculation_environment",
        sa.Column("results_id", sa.Integer(), nullable=False),
        sa.Column("software_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["results_id"],
            ["benchmark_results.id"],
        ),
        sa.ForeignKeyConstraint(
            ["software_id"],
            ["software_provenance.id"],
        ),
        sa.PrimaryKeyConstraint("results_id", "software_id"),
    )
    op.create_table(
        "optimization_analysis_environment",
        sa.Column("results_id", sa.Integer(), nullable=False),
        sa.Column("software_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["results_id"],
            ["optimization_results.id"],
        ),
        sa.ForeignKeyConstraint(
            ["software_id"],
            ["software_provenance.id"],
        ),
        sa.PrimaryKeyConstraint("results_id", "software_id"),
    )
    op.create_table(
        "optimization_calculation_environment",
        sa.Column("results_id", sa.Integer(), nullable=False),
        sa.Column("software_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["results_id"],
            ["optimization_results.id"],
        ),
        sa.ForeignKeyConstraint(
            ["software_id"],
            ["software_provenance.id"],
        ),
        sa.PrimaryKeyConstraint("results_id", "software_id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision.

    Drops the four association tables first (they hold foreign keys into
    ``software_provenance``), then the index, then ``software_provenance``
    itself — the reverse of the order they were created in upgrade().
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("optimization_calculation_environment")
    op.drop_table("optimization_analysis_environment")
    op.drop_table("benchmark_calculation_environment")
    op.drop_table("benchmark_analysis_environment")
    op.drop_index(op.f("ix_software_provenance_id"), table_name="software_provenance")
    op.drop_table("software_provenance")
    # ### end Alembic commands ###
| """Software provenance
Revision ID: f59d68c0a199
Revises: 66c8e3cf68ba
Create Date: 2021-01-27 11:54:28.746216
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "f59d68c0a199"
down_revision = "66c8e3cf68ba"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"software_provenance",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=20), nullable=False),
sa.Column("version", sa.String(length=32), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("version"),
)
op.create_index(
op.f("ix_software_provenance_id"), "software_provenance", ["id"], unique=False
)
op.create_table(
"benchmark_analysis_environment",
sa.Column("results_id", sa.Integer(), nullable=False),
sa.Column("software_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["results_id"],
["benchmark_results.id"],
),
sa.ForeignKeyConstraint(
["software_id"],
["software_provenance.id"],
),
sa.PrimaryKeyConstraint("results_id", "software_id"),
)
op.create_table(
"benchmark_calculation_environment",
sa.Column("results_id", sa.Integer(), nullable=False),
sa.Column("software_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["results_id"],
["benchmark_results.id"],
),
sa.ForeignKeyConstraint(
["software_id"],
["software_provenance.id"],
),
sa.PrimaryKeyConstraint("results_id", "software_id"),
)
op.create_table(
"optimization_analysis_environment",
sa.Column("results_id", sa.Integer(), nullable=False),
sa.Column("software_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["results_id"],
["optimization_results.id"],
),
sa.ForeignKeyConstraint(
["software_id"],
["software_provenance.id"],
),
sa.PrimaryKeyConstraint("results_id", "software_id"),
)
op.create_table(
"optimization_calculation_environment",
sa.Column("results_id", sa.Integer(), nullable=False),
sa.Column("software_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["results_id"],
["optimization_results.id"],
),
sa.ForeignKeyConstraint(
["software_id"],
["software_provenance.id"],
),
sa.PrimaryKeyConstraint("results_id", "software_id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("optimization_calculation_environment")
op.drop_table("optimization_analysis_environment")
op.drop_table("benchmark_calculation_environment")
op.drop_table("benchmark_analysis_environment")
op.drop_index(op.f("ix_software_provenance_id"), table_name="software_provenance")
op.drop_table("software_provenance")
# ### end Alembic commands ### | en | 0.498235 | Software provenance Revision ID: f59d68c0a199 Revises: 66c8e3cf68ba Create Date: 2021-01-27 11:54:28.746216 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.711425 | 2 |
src/gtk/toga_gtk/widgets/internal/sourcetreemodel.py | luizoti/toga | 1,261 | 6624211 | <reponame>luizoti/toga<filename>src/gtk/toga_gtk/widgets/internal/sourcetreemodel.py
import copy
from toga_gtk.libs import GObject, Gtk
class SourceTreeModel(GObject.Object, Gtk.TreeModel):
    """
    A full Gtk.TreeModel implementation backed by a toga.source.ListSource or
    toga.source.TreeSource.
    It stores a reference to every node in the source.
    TODO: If the source is a TreeSource, it uses the Node._parent attribute.
    Maybe a method could be added (like index()) to the TreeSource to access it.

    Fix over the original: do_iter_next/do_iter_previous caught ValueError
    around a dict lookup and list index that raise KeyError/IndexError, so a
    stale iterator crashed GTK iteration instead of being invalidated.
    """
    def __init__(self, columns, is_tree):
        """
        Args:
            columns (list(dict(str, any))): the columns excluding first column which is always the row object.
                each ``dict`` must have:

                - an ``attr`` entry, with a string value naming the attribute to get from the row
                - a ``type`` entry, with the column type (``str``, ``Gtk.Pixbuf``, ...)
            is_tree (bool): the model must know if it's for a tree or a list to set flags
        """
        super().__init__()
        self.source = None
        self.columns = columns
        self.is_tree = is_tree
        # by storing the row and calling index later, we can opt-in for this performance
        # boost and don't have to track iterators (we would have to if we stored indices).
        self.flags = Gtk.TreeModelFlags.ITERS_PERSIST
        if not is_tree:
            self.flags |= Gtk.TreeModelFlags.LIST_ONLY
        # stamp will be increased each time the data source changes. -1 is always invalid
        self.stamp = 0
        # the pool maps integer (the only thing we can store in Gtk.TreeIter) to row object.
        # It's purged on data source change and on remove
        self.pool = {}
        # roots is an array of root elements in the data source.
        # they are kept here to support the clear() notification without parameters
        self.roots = []  # maybe a deque would be more efficient. This can be changed later
        self.index_in_parent = {}

    def clear(self):
        """Called from toga impl widget: emit row-deleted for every row
        and drop all cached references."""
        if self.is_tree:
            self._remove_children_rec([], self.roots)
        else:
            for i, node in reversed(list(enumerate(self.roots))):
                self.row_deleted(Gtk.TreePath.new_from_indices([i]))
                self._clear_user_data(node)

    def change_source(self, source):
        """Called from toga impl widget: swap the backing data source.
        Bumping the stamp invalidates every outstanding Gtk.TreeIter."""
        if self.source:
            self.clear()
        self.source = source
        self.stamp += 1

    def insert(self, row):
        """Called from toga impl widget: notify GTK that `row` was
        inserted into the source (and that its parent gained its first
        child, if applicable)."""
        it = self._create_iter(user_data=row)
        index = self.source.index(row)
        if not self.is_tree or self.is_root(row):
            self.roots.insert(index, row)
            parent = self.source
        else:
            parent = row._parent
        self._update_index_in_parent(parent, index)
        parent_indices = self._get_indices(parent) if parent is not self.source else []
        if self.is_tree and not self.is_root(row) and (len(row._parent) == 1):
            parent_it = self._create_iter(user_data=row._parent)
            parent_p = Gtk.TreePath.new_from_indices(parent_indices)
            self.row_has_child_toggled(parent_p, parent_it)
        p = Gtk.TreePath.new_from_indices(parent_indices + [index])
        self.row_inserted(p, it)

    def change(self, row):
        """Called from toga impl widget: notify GTK that `row`'s data changed."""
        indices = self._get_indices(row)
        self.row_changed(Gtk.TreePath.new_from_indices(indices),
                         self._create_iter(user_data=row))

    def remove(self, row, index, parent=None):
        """Called from toga impl widget: notify GTK that `row` (at
        `index` under `parent`, or at the top level when parent is None)
        was removed, recursing into its children first."""
        # todo: could get index from index_in_parent
        if parent is None:
            indices = []
            del self.roots[index]
            parent = self.source
        else:
            indices = self._get_indices(parent)
        indices.append(index)
        if self.is_tree and row.can_have_children():
            self._remove_children_rec(indices, row)
        self.row_deleted(Gtk.TreePath.new_from_indices(indices))
        self._clear_user_data(row)
        self._update_index_in_parent(parent, index)
        if self.is_tree and parent is not None and (len(parent) == 0):
            parent_it = self._create_iter(user_data=parent)
            parent_indices = copy.copy(indices[:-1])
            parent_p = Gtk.TreePath.new_from_indices(parent_indices)
            self.row_has_child_toggled(parent_p, parent_it)

    def _remove_children_rec(self, indices, parent):
        # Depth-first, reverse-order removal so earlier indices stay valid
        # while later siblings are deleted.
        for i, node in reversed(list(enumerate(parent))):
            indices.append(i)
            if node.can_have_children():
                self._remove_children_rec(indices, node)
            self.row_deleted(Gtk.TreePath.new_from_indices(indices))
            self._clear_user_data(node)
            del indices[-1]

    def path_to_node(self, row):
        """Called from toga impl widget: Gtk.TreePath for `row`
        (empty path when the row is unknown)."""
        indices = self._get_indices(row)
        if indices is not None:
            return Gtk.TreePath.new_from_indices(indices)
        return Gtk.TreePath()

    def do_get_column_type(self, index_):
        """Gtk.TreeModel: column 0 is the row object itself; the rest
        come from the `columns` spec."""
        if index_ == 0:
            return object
        return self.columns[index_ - 1]['type']

    def do_get_flags(self):
        """Gtk.TreeModel"""
        return self.flags

    def do_get_iter(self, path):
        """Gtk.TreeModel: iterator for `path`, or an invalid iterator
        (stamp -1) when the path does not resolve."""
        indices = path.get_indices()
        r = self._get_row(indices)
        if r is None:
            return (False, Gtk.TreeIter(stamp=-1))
        return (True, self._create_iter(user_data=r))

    def do_get_n_columns(self):
        """Gtk.TreeModel: the row-object column plus the declared columns."""
        return len(self.columns) + 1

    def do_get_path(self, iter_):
        """Gtk.TreeModel: path for `iter_`, or an empty path when the
        iterator is stale or unknown."""
        if iter_ is None or iter_.stamp != self.stamp:
            return Gtk.TreePath()
        r = self._get_user_data(iter_)
        indices = self._get_indices(r)
        if indices is None:
            return Gtk.TreePath()
        return Gtk.TreePath.new_from_indices(indices)

    def do_get_value(self, iter_, column):
        """Gtk.TreeModel: value at (`iter_`, `column`); column 0 returns
        the row object itself."""
        if iter_ is None or iter_.stamp != self.stamp:
            return None
        row = self._get_user_data(iter_)
        if column == 0:
            return row
        if row is None:
            return None
        # workaround icon+name tuple breaking gtk tree
        ret = getattr(row, self.columns[column - 1]['attr'])
        if isinstance(ret, tuple):
            ret = ret[1]
        return ret

    def do_iter_children(self, parent):
        """Gtk.TreeModel: iterator on the first child of `parent`
        (or the first root when parent is None)."""
        if parent is None:
            r = self.source
        else:
            r = self._get_user_data(parent)
        if self._row_has_child(r, 0):
            return (True, self._create_iter(user_data=r[0]))
        return (False, Gtk.TreeIter(stamp=-1))

    def do_iter_has_child(self, iter_):
        """Gtk.TreeModel"""
        if iter_ is None:
            return len(self.source) > 0
        if iter_.stamp == self.stamp:
            r = self._get_user_data(iter_)
            ret = self._row_has_child(r, 0)
            return ret
        return False

    def do_iter_n_children(self, iter_):
        """Gtk.TreeModel"""
        if iter_ is None:
            r = self.source
        elif iter_.stamp == self.stamp:
            r = self._get_user_data(iter_)
        else:
            r = None
        if self._row_has_child(r, 0):
            return len(r)
        return 0

    def do_iter_next(self, iter_):
        """Gtk.TreeModel: advance `iter_` to the next sibling in place;
        invalidate it (stamp -1) when there is none."""
        if iter_ is not None and iter_.stamp == self.stamp:
            r = self._get_user_data(iter_)
            if r is not None:
                if self.is_tree:
                    parent = r._parent or self.source
                else:
                    parent = self.source
                if len(parent) and r is not parent[-1]:
                    try:
                        index = self.index_in_parent[r]
                        self._set_user_data(iter_, parent[index + 1])
                        return True
                    except (KeyError, IndexError):
                        # index_in_parent misses raise KeyError and a stale
                        # index can make parent[index + 1] raise IndexError;
                        # the original caught ValueError, which neither
                        # raises, so fall through and invalidate instead.
                        pass
        if iter_ is not None:
            iter_.stamp = -1  # invalidate
        return False

    def do_iter_previous(self, iter_):
        """Gtk.TreeModel: move `iter_` to the previous sibling in place;
        invalidate it (stamp -1) when there is none."""
        if iter_ is not None and iter_.stamp == self.stamp:
            r = self._get_user_data(iter_)
            if r is not None:
                if self.is_tree:
                    parent = r._parent or self.source
                else:
                    parent = self.source
                if len(parent) and r is not parent[0]:
                    try:
                        index = self.index_in_parent[r]
                        self._set_user_data(iter_, parent[index - 1])
                        return True
                    except (KeyError, IndexError):
                        # see do_iter_next: KeyError/IndexError are what the
                        # lookup can actually raise, not ValueError.
                        pass
        if iter_ is not None:
            iter_.stamp = -1
        return False

    def do_iter_nth_child(self, parent, n):
        """Gtk.TreeModel"""
        if parent is None:
            r = self.source
        elif parent.stamp != self.stamp:
            return (False, Gtk.TreeIter(stamp=-1))
        else:
            r = self._get_user_data(parent)
        if self._row_has_child(r, n):
            return (True, self._create_iter(user_data=r[n]))
        return (False, Gtk.TreeIter(stamp=-1))

    def do_iter_parent(self, child):
        """Gtk.TreeModel: iterator on the parent of `child`; only
        meaningful for tree models."""
        if not self.is_tree or child is None or (child.stamp != self.stamp):
            return (False, Gtk.TreeIter(stamp=-1))
        r = self._get_user_data(child)
        if r is None or r is self.source:
            return (False, Gtk.TreeIter(stamp=-1))
        parent = r._parent or self.source
        if parent is self.source:
            return (False, Gtk.TreeIter(stamp=-1))
        return (True, self._create_iter(user_data=parent))

    def do_ref_node(self, iter_):
        """Gtk.TreeModel (no-op: rows are kept alive by `self.pool`)."""
        pass

    def do_unref_node(self, iter_):
        """Gtk.TreeModel (no-op: rows are kept alive by `self.pool`)."""
        pass

    def _get_row(self, indices):
        # Resolve a list of child indices to a row, or None when the
        # path walks off the data.
        if self.source is None:
            return None
        s = self.source
        if self.is_tree:
            for i in indices:
                if s.can_have_children():
                    if i < len(s):
                        s = s[i]
                    else:
                        return None
                else:
                    return None
            return s
        else:
            if len(indices) == 1:
                i = indices[0]
                if i < len(s):
                    return s[i]
            return None

    def _get_indices(self, row):
        # Inverse of _get_row: list of child indices from the source
        # root down to `row`, or None when unknown.
        if row is None or self.source is None:
            return None
        if self.is_tree:
            indices = []
            while row not in (None, self.source):
                indices.insert(0, self.index_in_parent[row])
                row = row._parent
            return indices
        else:
            return [self.source.index(row)]

    def _row_has_child(self, row, n):
        # True when `row` (or the source root) has more than n children.
        return row is not None \
            and ((self.is_tree and row.can_have_children()) or (row is self.source)) \
            and len(row) > n

    def _set_user_data(self, it, user_data):
        # Gtk.TreeIter can only hold an int, so store id(row) and keep
        # the row alive in self.pool.
        data_id = id(user_data)
        it.user_data = data_id
        self.pool[data_id] = user_data

    def _get_user_data(self, it):
        return self.pool.get(it.user_data)

    def _clear_user_data(self, user_data):
        data_id = id(user_data)
        if data_id in self.pool:
            del self.pool[data_id]
        if user_data in self.index_in_parent:
            del self.index_in_parent[user_data]

    def _create_iter(self, user_data):
        it = Gtk.TreeIter()
        it.stamp = self.stamp
        self._set_user_data(it, user_data)
        return it

    def _update_index_in_parent(self, parent, index):
        # Refresh cached child positions from `index` onwards after an
        # insert or remove shifted the siblings.
        for i in range(index, len(parent)):
            self.index_in_parent[parent[i]] = i

    def is_root(self, node):
        return node._parent in (None, self.source)
| import copy
from toga_gtk.libs import GObject, Gtk
class SourceTreeModel(GObject.Object, Gtk.TreeModel):
"""
A full Gtk.TreeModel implementation backed by a toga.source.ListSource or
toga.source.TreeSource.
It stores a reference to every node in the source.
TODO: If the source is a TreeSource, it uses the Node._parent attribute.
Maybe an method could be added (like index()) to the TreeSource to access it.
"""
def __init__(self, columns, is_tree):
"""
Args:
columns (list(dict(str, any))): the columns excluding first column which is always the row object.
each ``dict`` must have have:
- an ``attr`` entry, with a string value naming the attribute to get from the row
- a ``type`` entry, with the column type (``str``, ``Gtk.Pixbuf``, ...)
is_tree (bool): the model must know if it's for a tree or a list to set flags
"""
super().__init__()
self.source = None
self.columns = columns
self.is_tree = is_tree
# by storing the row and calling index later, we can opt-in for this performance
# boost and don't have to track iterators (we would have to if we stored indices).
self.flags = Gtk.TreeModelFlags.ITERS_PERSIST
if not is_tree:
self.flags |= Gtk.TreeModelFlags.LIST_ONLY
# stamp will be increased each time the data source changes. -1 is always invalid
self.stamp = 0
# the pool maps integer (the only thing we can store in Gtk.TreeIter) to row object.
# It's purged on data source change and on remove
self.pool = {}
# roots is an array of root elements in the data source.
# they are kept here to support the clear() notification without parameters
self.roots = [] # maybe a deque would be more efficient. This can be changed later
self.index_in_parent = {}
def clear(self):
"""
Called from toga impl widget
"""
if self.is_tree:
self._remove_children_rec([], self.roots)
else:
for i, node in reversed(list(enumerate(self.roots))):
self.row_deleted(Gtk.TreePath.new_from_indices([i]))
self._clear_user_data(node)
def change_source(self, source):
""" Called from toga impl widget """
if self.source:
self.clear()
self.source = source
self.stamp += 1
def insert(self, row):
""" Called from toga impl widget """
it = self._create_iter(user_data=row)
index = self.source.index(row)
if not self.is_tree or self.is_root(row):
self.roots.insert(index, row)
parent = self.source
else:
parent = row._parent
self._update_index_in_parent(parent, index)
parent_indices = self._get_indices(parent) if parent is not self.source else []
if self.is_tree and not self.is_root(row) and (len(row._parent) == 1):
parent_it = self._create_iter(user_data=row._parent)
parent_p = Gtk.TreePath.new_from_indices(parent_indices)
self.row_has_child_toggled(parent_p, parent_it)
p = Gtk.TreePath.new_from_indices(parent_indices + [index])
self.row_inserted(p, it)
def change(self, row):
""" Called from toga impl widget """
indices = self._get_indices(row)
self.row_changed(Gtk.TreePath.new_from_indices(indices),
self._create_iter(user_data=row))
def remove(self, row, index, parent=None):
""" Called from toga impl widget """
# todo: could get index from index_in_parent
if parent is None:
indices = []
del self.roots[index]
parent = self.source
else:
indices = self._get_indices(parent)
indices.append(index)
if self.is_tree and row.can_have_children():
self._remove_children_rec(indices, row)
self.row_deleted(Gtk.TreePath.new_from_indices(indices))
self._clear_user_data(row)
self._update_index_in_parent(parent, index)
if self.is_tree and parent is not None and (len(parent) == 0):
parent_it = self._create_iter(user_data=parent)
parent_indices = copy.copy(indices[:-1])
parent_p = Gtk.TreePath.new_from_indices(parent_indices)
self.row_has_child_toggled(parent_p, parent_it)
def _remove_children_rec(self, indices, parent):
for i, node in reversed(list(enumerate(parent))):
indices.append(i)
if node.can_have_children():
self._remove_children_rec(indices, node)
self.row_deleted(Gtk.TreePath.new_from_indices(indices))
self._clear_user_data(node)
del indices[-1]
def path_to_node(self, row):
""" Called from toga impl widget """
indices = self._get_indices(row)
if indices is not None:
return Gtk.TreePath.new_from_indices(indices)
return Gtk.TreePath()
def do_get_column_type(self, index_):
""" Gtk.TreeModel """
if index_ == 0:
return object
return self.columns[index_ - 1]['type']
def do_get_flags(self):
""" Gtk.TreeModel """
return self.flags
def do_get_iter(self, path):
""" Gtk.TreeModel """
indices = path.get_indices()
r = self._get_row(indices)
if r is None:
return (False, Gtk.TreeIter(stamp=-1))
return (True, self._create_iter(user_data=r))
def do_get_n_columns(self):
""" Gtk.TreeModel """
return len(self.columns) + 1
def do_get_path(self, iter_):
""" Gtk.TreeModel """
if iter_ is None or iter_.stamp != self.stamp:
return Gtk.TreePath()
r = self._get_user_data(iter_)
indices = self._get_indices(r)
if indices is None:
return Gtk.TreePath()
return Gtk.TreePath.new_from_indices(indices)
def do_get_value(self, iter_, column):
""" Gtk.TreeModel """
if iter_ is None or iter_.stamp != self.stamp:
return None
row = self._get_user_data(iter_)
if column == 0:
return row
if row is None:
return None
# workaround icon+name tuple breaking gtk tree
ret = getattr(row, self.columns[column - 1]['attr'])
if isinstance(ret, tuple):
ret = ret[1]
return ret
def do_iter_children(self, parent):
""" Gtk.TreeModel """
if parent is None:
r = self.source
else:
r = self._get_user_data(parent)
if self._row_has_child(r, 0):
return (True, self._create_iter(user_data=r[0]))
return (False, Gtk.TreeIter(stamp=-1))
def do_iter_has_child(self, iter_):
""" Gtk.TreeModel """
if iter_ is None:
return len(self.source) > 0
if iter_.stamp == self.stamp:
r = self._get_user_data(iter_)
ret = self._row_has_child(r, 0)
return ret
return False
def do_iter_n_children(self, iter_):
""" Gtk.TreeModel """
if iter_ is None:
r = self.source
elif iter_.stamp == self.stamp:
r = self._get_user_data(iter_)
else:
r = None
if self._row_has_child(r, 0):
return len(r)
return 0
def do_iter_next(self, iter_):
""" Gtk.TreeModel """
if iter_ is not None and iter_.stamp == self.stamp:
r = self._get_user_data(iter_)
if r is not None:
if self.is_tree:
parent = r._parent or self.source
else:
parent = self.source
if len(parent) and r is not parent[-1]:
try:
index = self.index_in_parent[r]
self._set_user_data(iter_, parent[index + 1])
return True
except ValueError:
pass
if iter_ is not None:
iter_.stamp = -1 # invalidate
return False
def do_iter_previous(self, iter_):
""" Gtk.TreeModel """
if iter_ is not None and iter_.stamp == self.stamp:
r = self._get_user_data(iter_)
if r is not None:
if self.is_tree:
parent = r._parent or self.source
else:
parent = self.source
if len(parent) and r is not parent[0]:
try:
index = self.index_in_parent[r]
self._set_user_data(iter_, parent[index - 1])
return True
except ValueError:
pass
if iter_ is not None:
iter_.stamp = -1
return False
def do_iter_nth_child(self, parent, n):
""" Gtk.TreeModel """
if parent is None:
r = self.source
elif parent.stamp != self.stamp:
return (False, Gtk.TreeIter(stamp=-1))
else:
r = self._get_user_data(parent)
if self._row_has_child(r, n):
return (True, self._create_iter(user_data=r[n]))
return (False, Gtk.TreeIter(stamp=-1))
def do_iter_parent(self, child):
""" Gtk.TreeModel """
if not self.is_tree or child is None or (child.stamp != self.stamp):
return (False, Gtk.TreeIter(stamp=-1))
r = self._get_user_data(child)
if r is None or r is self.source:
return (False, Gtk.TreeIter(stamp=-1))
parent = r._parent or self.source
if parent is self.source:
return (False, Gtk.TreeIter(stamp=-1))
return (True, self._create_iter(user_data=parent))
def do_ref_node(self, iter_):
""" Gtk.TreeModel """
pass
def do_unref_node(self, iter_):
""" Gtk.TreeModel """
pass
def _get_row(self, indices):
if self.source is None:
return None
s = self.source
if self.is_tree:
for i in indices:
if s.can_have_children():
if i < len(s):
s = s[i]
else:
return None
else:
return None
return s
else:
if len(indices) == 1:
i = indices[0]
if i < len(s):
return s[i]
return None
def _get_indices(self, row):
if row is None or self.source is None:
return None
if self.is_tree:
indices = []
while row not in (None, self.source):
indices.insert(0, self.index_in_parent[row])
row = row._parent
return indices
else:
return [self.source.index(row)]
def _row_has_child(self, row, n):
return row is not None \
and ((self.is_tree and row.can_have_children()) or (row is self.source)) \
and len(row) > n
def _set_user_data(self, it, user_data):
data_id = id(user_data)
it.user_data = data_id
self.pool[data_id] = user_data
def _get_user_data(self, it):
return self.pool.get(it.user_data)
def _clear_user_data(self, user_data):
data_id = id(user_data)
if data_id in self.pool:
del self.pool[data_id]
if user_data in self.index_in_parent:
del self.index_in_parent[user_data]
def _create_iter(self, user_data):
it = Gtk.TreeIter()
it.stamp = self.stamp
self._set_user_data(it, user_data)
return it
def _update_index_in_parent(self, parent, index):
for i in range(index, len(parent)):
self.index_in_parent[parent[i]] = i
def is_root(self, node):
return node._parent in (None, self.source) | en | 0.819153 | A full Gtk.TreeModel implementation backed by a toga.source.ListSource or toga.source.TreeSource. It stores a reference to every node in the source. TODO: If the source is a TreeSource, it uses the Node._parent attribute. Maybe an method could be added (like index()) to the TreeSource to access it. Args: columns (list(dict(str, any))): the columns excluding first column which is always the row object. each ``dict`` must have have: - an ``attr`` entry, with a string value naming the attribute to get from the row - a ``type`` entry, with the column type (``str``, ``Gtk.Pixbuf``, ...) is_tree (bool): the model must know if it's for a tree or a list to set flags # by storing the row and calling index later, we can opt-in for this performance # boost and don't have to track iterators (we would have to if we stored indices). # stamp will be increased each time the data source changes. -1 is always invalid # the pool maps integer (the only thing we can store in Gtk.TreeIter) to row object. # It's purged on data source change and on remove # roots is an array of root elements in the data source. # they are kept here to support the clear() notification without parameters # maybe a deque would be more efficient. This can be changed later Called from toga impl widget Called from toga impl widget Called from toga impl widget Called from toga impl widget Called from toga impl widget # todo: could get index from index_in_parent Called from toga impl widget Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel # workaround icon+name tuple breaking gtk tree Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel # invalidate Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel Gtk.TreeModel | 2.393643 | 2 |
main.py | GANG5TER/calculator-1 | 4 | 6624212 | <filename>main.py
# Alveyworld-dev calculator
# Period 6
#
# Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek.
#
# Group 1: Team Jacob
# Members:
# * Jared
# * Josh
# * Max
# * Santiago
# * Travis
# Raw imports
import shlex
import math
import random
# Class imports
import team1
import team2
import team3
import team4
import team5 # team five you're holding us back
import team6
import converter
# ASCII escape colors
class colors:
    # ANSI SGR escape sequences for colouring terminal output
    # (the header comment says "ASCII" but these are ANSI codes).
    BLUE = '\033[94m'     # bright blue
    GREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'  # bright yellow (warnings)
    FAIL = '\033[91m'     # bright red (errors)
    ENDC = '\033[0m'      # reset to default attributes
# Used for the hello command
last_value = 0
_hello = 0 #downwithteam5
if __name__ == "__main__":
"""
Main entry point for the program
"""
print "Alveyworld Calculator"
print "Copyright 2013, Alvey's Class\n"
# Defines a set of commands that
# are used for the command interpreter
commands = {
"exit": "closes the calculator",
"sqrt": "finds the square root of the given number",
"abs": "finds the absolute value of the given number",
"fact": "finds the factorial of the given number",
"pow": "raises argument one to the argument two power",
"ln": "finds the number '1' for now", # ln needs finishing
"mod": "unsure of", # needs finishing
"log10": "unsure of", # i don't understand how to word this
"divide": "divides argument one by argument two",
"multiply": "multiplies the two given numbers",
"inverse": "unsure of", # needs finishing
"add": "adds the two given numbers",
"sub": "subtracts argument two from argument one",
"opp": "switces the sign of the given number",
"hello": "try it and see",
"help": "shows this help dialog",
"recall": "recalls the last answer",
"convert": "converts numbers between bases",
"root": "finds arg1 to the arg2 root"
}
def helpfile():
print colors.BLUE+"Commands:"
for i,v in commands.iteritems():
print " "+i+" - "+v
print colors.ENDC
helpfile()
# Witty responses for the command "hello"
hellos = [
"hello, puny human",
"my other car is siri",
"feed me paper",
"khaaaaaaaaaannn!",
"fight me mcpunchens",
"fight me irl n00b",
"1v1 me",
"shrek is life. shrek is love",
"the machine race rises",
"All the way from the bowels of 4chan!",
"I love lamp",
"GLASS TUBES",
"Baaaaka",
"Half Life 3 confirmed",
"METAL BOXES. THEY'RE HIDING IN METAL BOXES!",
"Not XBOXES",
"<NAME>",
"No Place for Hideo",
"CRAB BATTLE",
"<NAME>",
"HE'S STILL NUMBER 1",
"Are you feeling it now Mr.Alvey?",
"Injoke number 42",
"And now for something completely different",
"You are about to enter a dimension not only of sight and sound but of mind",
"Next Stop the Twilight Zone DUN NA NA du du du du du",
"I AM A BRAIN SPECIALIST",
"KAEAHS",
"You fail it",
"Why you no doctor?",
"FRACTALS",
"Pirate Radio",
"Tau is better", # amen
"WAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGH"
"You Ares Haxor Mstr",
"1 4m l3373r t#4n Y00",
"Keep calm and stop with these stupid memes",
"PIKUSHISHU",
"It's all ogre now",
"And knowing is half the battle",
"The Battle is all of the battle",
"We COULD have a GUI . . . but we choose not to",
"THEY TREAT ME LIKE THE MAYOR CAUSE IM THE BIGGEST PLAYER",
"Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek."
]
# Witty responses to leave hello alone
leave_us_alone = [
"LEAVE ME ALONE",
"I HATE YOU",
"You have (3) new pokes"
]
while True:
command = shlex.split(raw_input("> "))
try:
cmd = command[0]
except:
print colors.FAIL+"Command failed!"+colors.ENDC
for _cmd in commands.keys():
if _cmd == cmd:
try:
if cmd == "sqrt":
number = int(command[1])
last_value = team1.sqrt(number)
print(last_value)
elif cmd == "exit":
exit(0)
elif cmd == "hello":
if _hello <= 10:
_hello += 1
print(hellos[random.randint(0, len(hellos) - 1)])
else:
print(colors.FAIL + leave_us_alone[random.randint(0, len(leave_us_alone) - 1)] + colors.ENDC)
elif cmd == "abs":
number = int(command[1])
last_value = team2.abs(number)
print(last_value)
elif cmd == "help":
helpfile()
elif cmd == "recall":
print "Last value: %d" % last_value
elif cmd == "add":
number1 = int(command[1])
number2 = int(command[2])
last_value = team6.add(number1, number2)
print(last_value)
elif cmd == "sub":
number1 = int(command[1])
number2 = int(command[2])
last_value = team6.sub(number1, number2)
print(last_value)
elif cmd == "opp":
number = int(command[1])
last_value = team6.opp(number)
print(last_value)
elif cmd == "pow":
number1 = int(command[1])
number2 = int(command[2])
last_value = team3.pow(number1, number2)
print(last_value)
elif cmd == "convert":
converter.convert()
elif cmd == "root":
last_value=team1.root(int(command[1]),int(command[2]))
print(last_value)
elif cmd == "divide":
number1 = float(command[1])
number2 = float(command[2])
last_value = team5.div(number1, number2)
print(last_value)
except:
print colors.FAIL+"Command failed!"+colors.ENDC
| <filename>main.py
# Alveyworld-dev calculator
# Period 6
#
# Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek.
#
# Group 1: Team Jacob
# Members:
# * Jared
# * Josh
# * Max
# * Santiago
# * Travis
# Raw imports
import shlex
import math
import random
# Class imports
import team1
import team2
import team3
import team4
import team5 # team five you're holding us back
import team6
import converter
# ASCII escape colors
class colors:
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
# Used for the hello command
last_value = 0
_hello = 0 #downwithteam5
if __name__ == "__main__":
"""
Main entry point for the program
"""
print "Alveyworld Calculator"
print "Copyright 2013, Alvey's Class\n"
# Defines a set of commands that
# are used for the command interpreter
commands = {
"exit": "closes the calculator",
"sqrt": "finds the square root of the given number",
"abs": "finds the absolute value of the given number",
"fact": "finds the factorial of the given number",
"pow": "raises argument one to the argument two power",
"ln": "finds the number '1' for now", # ln needs finishing
"mod": "unsure of", # needs finishing
"log10": "unsure of", # i don't understand how to word this
"divide": "divides argument one by argument two",
"multiply": "multiplies the two given numbers",
"inverse": "unsure of", # needs finishing
"add": "adds the two given numbers",
"sub": "subtracts argument two from argument one",
"opp": "switces the sign of the given number",
"hello": "try it and see",
"help": "shows this help dialog",
"recall": "recalls the last answer",
"convert": "converts numbers between bases",
"root": "finds arg1 to the arg2 root"
}
def helpfile():
print colors.BLUE+"Commands:"
for i,v in commands.iteritems():
print " "+i+" - "+v
print colors.ENDC
helpfile()
# Witty responses for the command "hello"
hellos = [
"hello, puny human",
"my other car is siri",
"feed me paper",
"khaaaaaaaaaannn!",
"fight me mcpunchens",
"fight me irl n00b",
"1v1 me",
"shrek is life. shrek is love",
"the machine race rises",
"All the way from the bowels of 4chan!",
"I love lamp",
"GLASS TUBES",
"Baaaaka",
"Half Life 3 confirmed",
"METAL BOXES. THEY'RE HIDING IN METAL BOXES!",
"Not XBOXES",
"<NAME>",
"No Place for Hideo",
"CRAB BATTLE",
"<NAME>",
"HE'S STILL NUMBER 1",
"Are you feeling it now Mr.Alvey?",
"Injoke number 42",
"And now for something completely different",
"You are about to enter a dimension not only of sight and sound but of mind",
"Next Stop the Twilight Zone DUN NA NA du du du du du",
"I AM A BRAIN SPECIALIST",
"KAEAHS",
"You fail it",
"Why you no doctor?",
"FRACTALS",
"Pirate Radio",
"Tau is better", # amen
"WAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGH"
"You Ares Haxor Mstr",
"1 4m l3373r t#4n Y00",
"Keep calm and stop with these stupid memes",
"PIKUSHISHU",
"It's all ogre now",
"And knowing is half the battle",
"The Battle is all of the battle",
"We COULD have a GUI . . . but we choose not to",
"THEY TREAT ME LIKE THE MAYOR CAUSE IM THE BIGGEST PLAYER",
"Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek."
]
# Witty responses to leave hello alone
leave_us_alone = [
"LEAVE ME ALONE",
"I HATE YOU",
"You have (3) new pokes"
]
while True:
command = shlex.split(raw_input("> "))
try:
cmd = command[0]
except:
print colors.FAIL+"Command failed!"+colors.ENDC
for _cmd in commands.keys():
if _cmd == cmd:
try:
if cmd == "sqrt":
number = int(command[1])
last_value = team1.sqrt(number)
print(last_value)
elif cmd == "exit":
exit(0)
elif cmd == "hello":
if _hello <= 10:
_hello += 1
print(hellos[random.randint(0, len(hellos) - 1)])
else:
print(colors.FAIL + leave_us_alone[random.randint(0, len(leave_us_alone) - 1)] + colors.ENDC)
elif cmd == "abs":
number = int(command[1])
last_value = team2.abs(number)
print(last_value)
elif cmd == "help":
helpfile()
elif cmd == "recall":
print "Last value: %d" % last_value
elif cmd == "add":
number1 = int(command[1])
number2 = int(command[2])
last_value = team6.add(number1, number2)
print(last_value)
elif cmd == "sub":
number1 = int(command[1])
number2 = int(command[2])
last_value = team6.sub(number1, number2)
print(last_value)
elif cmd == "opp":
number = int(command[1])
last_value = team6.opp(number)
print(last_value)
elif cmd == "pow":
number1 = int(command[1])
number2 = int(command[2])
last_value = team3.pow(number1, number2)
print(last_value)
elif cmd == "convert":
converter.convert()
elif cmd == "root":
last_value=team1.root(int(command[1]),int(command[2]))
print(last_value)
elif cmd == "divide":
number1 = float(command[1])
number2 = float(command[2])
last_value = team5.div(number1, number2)
print(last_value)
except:
print colors.FAIL+"Command failed!"+colors.ENDC
| en | 0.86203 | # Alveyworld-dev calculator # Period 6 # # Shrek is love. Shrek is life. Shrek is Alveyworld. All hail Shrek. # # Group 1: Team Jacob # Members: # * Jared # * Josh # * Max # * Santiago # * Travis # Raw imports # Class imports # team five you're holding us back # ASCII escape colors # Used for the hello command #downwithteam5 Main entry point for the program # Defines a set of commands that # are used for the command interpreter # ln needs finishing # needs finishing # i don't understand how to word this # needs finishing # Witty responses for the command "hello" # amen #4n Y00", # Witty responses to leave hello alone | 3.298567 | 3 |
dojo/unittests/test_aws_prowler_parser.py | Welly0902/django-DefectDojo | 3 | 6624213 | from django.test import TestCase
from dojo.tools.aws_prowler.parser import AWSProwlerParser
from django.utils import timezone
from dojo.models import Test, Engagement, Product, Product_Type, Test_Type
class TestAwsProwlerParser(TestCase):
def setup(self, testfile):
product_type = Product_Type(critical_product=True, key_product=False)
product_type.save()
test_type = Test_Type(static_tool=True, dynamic_tool=False)
test_type.save()
product = Product(prod_type=product_type)
product.save()
engagement = Engagement(product=product, target_start=timezone.now(), target_end=timezone.now())
engagement.save()
parser = AWSProwlerParser(testfile, Test(engagement=engagement, test_type=test_type, target_start=timezone.now(), target_end=timezone.now()))
testfile.close()
return parser
def test_aws_prowler_parser_with_no_vuln_has_no_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/no_vuln.csv"))
self.assertEqual(0, len(parser.items))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/one_vuln.csv"))
self.assertEqual(1, len(parser.items))
self.assertEqual('Avoid the use of the root account (Scored)', parser.items[0].title)
def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/many_vuln.csv"))
self.assertEqual(5, len(parser.items))
self.assertEqual('Vuln A', parser.items[0].title)
self.assertEqual('Vuln B', parser.items[1].title)
self.assertEqual('Info A', parser.items[2].title)
self.assertEqual('Vuln C', parser.items[3].title)
self.assertEqual('Info B', parser.items[4].title)
| from django.test import TestCase
from dojo.tools.aws_prowler.parser import AWSProwlerParser
from django.utils import timezone
from dojo.models import Test, Engagement, Product, Product_Type, Test_Type
class TestAwsProwlerParser(TestCase):
def setup(self, testfile):
product_type = Product_Type(critical_product=True, key_product=False)
product_type.save()
test_type = Test_Type(static_tool=True, dynamic_tool=False)
test_type.save()
product = Product(prod_type=product_type)
product.save()
engagement = Engagement(product=product, target_start=timezone.now(), target_end=timezone.now())
engagement.save()
parser = AWSProwlerParser(testfile, Test(engagement=engagement, test_type=test_type, target_start=timezone.now(), target_end=timezone.now()))
testfile.close()
return parser
def test_aws_prowler_parser_with_no_vuln_has_no_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/no_vuln.csv"))
self.assertEqual(0, len(parser.items))
def test_aws_prowler_parser_with_critical_vuln_has_one_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/one_vuln.csv"))
self.assertEqual(1, len(parser.items))
self.assertEqual('Avoid the use of the root account (Scored)', parser.items[0].title)
def test_aws_prowler_parser_with_many_vuln_has_many_findings(self):
parser = self.setup(open("dojo/unittests/scans/aws_prowler/many_vuln.csv"))
self.assertEqual(5, len(parser.items))
self.assertEqual('Vuln A', parser.items[0].title)
self.assertEqual('Vuln B', parser.items[1].title)
self.assertEqual('Info A', parser.items[2].title)
self.assertEqual('Vuln C', parser.items[3].title)
self.assertEqual('Info B', parser.items[4].title)
| none | 1 | 2.494478 | 2 | |
RandomCoupling.py | nameforjoy/Synchronisation | 3 | 6624214 | """"
@author: JoyClimaco
"""
import numpy as np
import scipy.stats as ss
import networkx as nx
from NetworkFunctions import RandomCoupling
from NetworkFunctions import OrderParameter
from NetworkClasses import StuartLandau
A = np.load('A_BA_m2_N200_1.npy') # load adjacency matrix
w = np.load('w200_3unif.npy') # load frequencies
N = np.size(A,0) # network size
K = .5 # coupling constant
alpha = 1 # SL parameter
# initial conditions
theta0 = np.random.uniform(0, 2*np.pi, N)
rho0 = np.random.uniform(0.1, 0.9, N) # so the system doesn't fall into the attractor
z0 = rho0*np.exp(1j*theta0)
# Defines Stuart-Landau system
SL = StuartLandau(w, A, K, alpha)
# Random array for the coupling constants
Karray = np.random.gamma(shape=2, scale=1, size=SL.Ne)
np.save('z_Karray.npy', Karray)
# Defines new SL system with this coupling weights
SL_rand = RandomCoupling(SL, Karray, dist_type='Gamma', shape=2, scale=.5)
# Time evolution of the oscillators
t = np.arange(0,50,.2)
z, _ = SL_rand.integrate(z0, t)
np.save('z_time.npy', t)
np.save('z_evolution.npy', z)
# Order parameter calculation
K, r, r_std = OrderParameter(SL_rand, z0, 30, 35, .05, Kf=3, dK=.05, dt=.1, output='simple')
np.save('z_K.npy', K)
np.save('z_r.npy', r)
np.save('z_r_std.npy', r_std) | """"
@author: JoyClimaco
"""
import numpy as np
import scipy.stats as ss
import networkx as nx
from NetworkFunctions import RandomCoupling
from NetworkFunctions import OrderParameter
from NetworkClasses import StuartLandau
A = np.load('A_BA_m2_N200_1.npy') # load adjacency matrix
w = np.load('w200_3unif.npy') # load frequencies
N = np.size(A,0) # network size
K = .5 # coupling constant
alpha = 1 # SL parameter
# initial conditions
theta0 = np.random.uniform(0, 2*np.pi, N)
rho0 = np.random.uniform(0.1, 0.9, N) # so the system doesn't fall into the attractor
z0 = rho0*np.exp(1j*theta0)
# Defines Stuart-Landau system
SL = StuartLandau(w, A, K, alpha)
# Random array for the coupling constants
Karray = np.random.gamma(shape=2, scale=1, size=SL.Ne)
np.save('z_Karray.npy', Karray)
# Defines new SL system with this coupling weights
SL_rand = RandomCoupling(SL, Karray, dist_type='Gamma', shape=2, scale=.5)
# Time evolution of the oscillators
t = np.arange(0,50,.2)
z, _ = SL_rand.integrate(z0, t)
np.save('z_time.npy', t)
np.save('z_evolution.npy', z)
# Order parameter calculation
K, r, r_std = OrderParameter(SL_rand, z0, 30, 35, .05, Kf=3, dK=.05, dt=.1, output='simple')
np.save('z_K.npy', K)
np.save('z_r.npy', r)
np.save('z_r_std.npy', r_std) | en | 0.617141 | " @author: JoyClimaco # load adjacency matrix # load frequencies # network size # coupling constant # SL parameter # initial conditions # so the system doesn't fall into the attractor # Defines Stuart-Landau system # Random array for the coupling constants # Defines new SL system with this coupling weights # Time evolution of the oscillators # Order parameter calculation | 2.350261 | 2 |
project/settings_local.py | praekelt/mobius-skeleton | 3 | 6624215 | # This file is useful during development and must never be checked in.
import os
# NB: do not set DEBUG here. Some settings depend on it and setting it here has
# no effect. Edit an .env file and set it there. See
# https://django-environ.readthedocs.io/en/latest/ for details.
# Declare or redeclare variables here
FOOFOO = 1
# Uncomment to use PostgreSQL as database or set in an .env file
"""
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "skeleton",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "5432",
"CONN_MAX_AGE": 600
}
}
"""
# Uncomment to use memcache as caching backend or set in an .env file
"""
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
"KEY_PREFIX": "skeleton",
},
}
"""
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
DEBUG_TOOLBAR_PANELS = [
"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = ["127.0.0.1"]
RESULTS_CACHE_SIZE = 20000
"""
# If you need to access an existing variable your code must be in configure
def configure(**kwargs):
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
return {
"INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"],
"MIDDLEWARE_CLASSES": (
"debug_toolbar.middleware.DebugToolbarMiddleware",
) + kwargs["MIDDLEWARE_CLASSES"]
}
"""
return {}
| # This file is useful during development and must never be checked in.
import os
# NB: do not set DEBUG here. Some settings depend on it and setting it here has
# no effect. Edit an .env file and set it there. See
# https://django-environ.readthedocs.io/en/latest/ for details.
# Declare or redeclare variables here
FOOFOO = 1
# Uncomment to use PostgreSQL as database or set in an .env file
"""
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "skeleton",
"USER": "postgres",
"PASSWORD": "",
"HOST": "",
"PORT": "5432",
"CONN_MAX_AGE": 600
}
}
"""
# Uncomment to use memcache as caching backend or set in an .env file
"""
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
"KEY_PREFIX": "skeleton",
},
}
"""
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
DEBUG_TOOLBAR_PANELS = [
"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
]
INTERNAL_IPS = ["127.0.0.1"]
RESULTS_CACHE_SIZE = 20000
"""
# If you need to access an existing variable your code must be in configure
def configure(**kwargs):
# Uncomment if you are doing performance profiling with Django Debug Toolbar
"""
return {
"INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"],
"MIDDLEWARE_CLASSES": (
"debug_toolbar.middleware.DebugToolbarMiddleware",
) + kwargs["MIDDLEWARE_CLASSES"]
}
"""
return {}
| en | 0.544749 | # This file is useful during development and must never be checked in. # NB: do not set DEBUG here. Some settings depend on it and setting it here has # no effect. Edit an .env file and set it there. See # https://django-environ.readthedocs.io/en/latest/ for details. # Declare or redeclare variables here # Uncomment to use PostgreSQL as database or set in an .env file DATABASES = { "default": { "ENGINE": "django.db.backends.postgresql", "NAME": "skeleton", "USER": "postgres", "PASSWORD": "", "HOST": "", "PORT": "5432", "CONN_MAX_AGE": 600 } } # Uncomment to use memcache as caching backend or set in an .env file CACHES = { "default": { "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", "LOCATION": "127.0.0.1:11211", "KEY_PREFIX": "skeleton", }, } # Uncomment if you are doing performance profiling with Django Debug Toolbar DEBUG_TOOLBAR_PANELS = [ "ddt_request_history.panels.request_history.RequestHistoryPanel", "debug_toolbar.panels.versions.VersionsPanel", "debug_toolbar.panels.timer.TimerPanel", "debug_toolbar.panels.settings.SettingsPanel", "debug_toolbar.panels.headers.HeadersPanel", "debug_toolbar.panels.request.RequestPanel", "debug_toolbar.panels.sql.SQLPanel", "debug_toolbar.panels.staticfiles.StaticFilesPanel", "debug_toolbar.panels.templates.TemplatesPanel", "debug_toolbar.panels.cache.CachePanel", "debug_toolbar.panels.signals.SignalsPanel", "debug_toolbar.panels.logging.LoggingPanel", "debug_toolbar.panels.redirects.RedirectsPanel", ] INTERNAL_IPS = ["127.0.0.1"] RESULTS_CACHE_SIZE = 20000 # If you need to access an existing variable your code must be in configure # Uncomment if you are doing performance profiling with Django Debug Toolbar return { "INSTALLED_APPS": kwargs["INSTALLED_APPS"] + ["debug_toolbar"], "MIDDLEWARE_CLASSES": ( "debug_toolbar.middleware.DebugToolbarMiddleware", ) + kwargs["MIDDLEWARE_CLASSES"] } | 1.622585 | 2 |
tests/notes_db/note.py | axil/nvpy | 540 | 6624216 | import unittest
import itertools
import copy
from unittest.mock import patch
from nvpy.notes_db import Note, NoteStatus
from ._mixin import DBMixin
class NoteComparators(unittest.TestCase):
def test_need_save(self):
a = Note({'modifydate': 2, 'savedate': 1, 'syncdate': 0})
b = Note({'modifydate': 2, 'savedate': 2, 'syncdate': 0})
c = Note({'modifydate': 2, 'savedate': 3, 'syncdate': 0})
self.assertTrue(a.need_save)
self.assertFalse(b.need_save)
self.assertFalse(c.need_save)
def test_need_sync_to_server(self):
a = Note({'modifydate': 2, 'syncdate': 2})
b = Note({'modifydate': 2, 'syncdate': 1, 'key': 'note_id'})
c = Note({'modifydate': 2, 'syncdate': 2, 'key': 'note_id'})
d = Note({'modifydate': 2, 'syncdate': 3, 'key': 'note_id'})
self.assertTrue(a.need_sync_to_server)
self.assertTrue(b.need_sync_to_server)
self.assertFalse(c.need_sync_to_server)
self.assertFalse(d.need_sync_to_server)
def test_is_newer_than(self):
a = Note({'modifydate': 1})
b = Note({'modifydate': 2})
c = Note({'modifydate': 3})
self.assertTrue(b.is_newer_than(a))
self.assertFalse(b.is_newer_than(b))
self.assertFalse(b.is_newer_than(c))
class NotesDBComparators(DBMixin, unittest.TestCase):
def test_is_different_note(self):
db = self._db()
# If all fields excluding nvPY internal fields are same, those are same.
self.assertFalse(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'foo',
'modifydate': 2,
},
))
# If content is not same, those are different.
self.assertTrue(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'bar', # changed
'modifydate': 2,
},
))
# If other fields excluding nvPY internal fields are not same, those are different.
self.assertTrue(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'foo',
'modifydate': 3, # changed
},
))
# Must accept non-hashable object like list.
self.assertFalse(
db.is_different_note(
{
'tags': ['a', 'b'],
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'tags': ['a', 'b'],
},
))
class NoteOperations(DBMixin, unittest.TestCase):
NOTES = {
'KEY': {
'key': 'KEY',
'content': 'example note',
'createdate': 1,
'modifydate': 2,
'savedate': 3,
},
}
def test_delete(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
with patch('time.time', side_effect=itertools.repeat(99)):
db.delete_note('KEY')
self.assertEqual(
db.notes['KEY'],
{
'key': 'KEY',
'content': 'example note',
'deleted': 1,
'createdate': 1,
'modifydate': 99,
'savedate': 3,
},
)
def test_get(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
with self.assertRaises(KeyError):
db.get_note('NOT_FOUND')
note = db.get_note('KEY')
self.assertEqual(note, self.NOTES['KEY'])
def test_get_content(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
content = db.get_note_content('KEY')
self.assertEqual(content, self.NOTES['KEY']['content'])
def test_get_status(self):
db = self._db()
db.notes = {
'MODIFIED': {
'modifydate': 3,
'savedate': 1,
'syncdate': 2,
},
'SAVED': {
'modifydate': 2,
'savedate': 3,
'syncdate': 1,
},
'SYNCED': {
'modifydate': 1,
'savedate': 2,
'syncdate': 3,
},
'SYNCED_BUT_NOT_SAVED': {
'modifydate': 2,
'savedate': 1,
'syncdate': 3,
}
}
self.assertEqual(db.get_note_status('MODIFIED'),
NoteStatus(saved=False, synced=False, modified=True, full_syncing=False))
self.assertEqual(db.get_note_status('SAVED'),
NoteStatus(saved=True, synced=False, modified=False, full_syncing=False))
self.assertEqual(db.get_note_status('SYNCED'),
NoteStatus(saved=True, synced=True, modified=False, full_syncing=False))
# todo: NoteStatus.modified = not NoteStatus.saved. NoteStatus.modified can be replace to the property.
self.assertEqual(db.get_note_status('SYNCED_BUT_NOT_SAVED'),
NoteStatus(saved=False, synced=True, modified=True, full_syncing=False))
def assertTags(self, expected: list, note):
self.assertEqual(set(expected), set(note['tags']))
def test_delete_tag(self):
db = self._db()
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
with self.assertRaises(ValueError):
db.delete_note_tag('KEY', 'not-found')
self.assertTags(['foo', 'bar'], db.notes['KEY'])
db.delete_note_tag('KEY', 'bar')
self.assertTags(['foo'], db.notes['KEY'])
def test_add_tags(self):
db = self._db()
# Add a tag.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz')
self.assertTags(['foo', 'bar', 'baz'], db.notes['KEY'])
# Add comma separated tags.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz,qux,quux')
self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
# Add comma separated tags with spaces.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz, qux, quux')
self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
# Add space separated tags.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz qux quux')
# TODO: なんかバグっている
# self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
| import unittest
import itertools
import copy
from unittest.mock import patch
from nvpy.notes_db import Note, NoteStatus
from ._mixin import DBMixin
class NoteComparators(unittest.TestCase):
def test_need_save(self):
a = Note({'modifydate': 2, 'savedate': 1, 'syncdate': 0})
b = Note({'modifydate': 2, 'savedate': 2, 'syncdate': 0})
c = Note({'modifydate': 2, 'savedate': 3, 'syncdate': 0})
self.assertTrue(a.need_save)
self.assertFalse(b.need_save)
self.assertFalse(c.need_save)
def test_need_sync_to_server(self):
a = Note({'modifydate': 2, 'syncdate': 2})
b = Note({'modifydate': 2, 'syncdate': 1, 'key': 'note_id'})
c = Note({'modifydate': 2, 'syncdate': 2, 'key': 'note_id'})
d = Note({'modifydate': 2, 'syncdate': 3, 'key': 'note_id'})
self.assertTrue(a.need_sync_to_server)
self.assertTrue(b.need_sync_to_server)
self.assertFalse(c.need_sync_to_server)
self.assertFalse(d.need_sync_to_server)
def test_is_newer_than(self):
a = Note({'modifydate': 1})
b = Note({'modifydate': 2})
c = Note({'modifydate': 3})
self.assertTrue(b.is_newer_than(a))
self.assertFalse(b.is_newer_than(b))
self.assertFalse(b.is_newer_than(c))
class NotesDBComparators(DBMixin, unittest.TestCase):
def test_is_different_note(self):
db = self._db()
# If all fields excluding nvPY internal fields are same, those are same.
self.assertFalse(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'foo',
'modifydate': 2,
},
))
# If content is not same, those are different.
self.assertTrue(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'bar', # changed
'modifydate': 2,
},
))
# If other fields excluding nvPY internal fields are not same, those are different.
self.assertTrue(
db.is_different_note(
{
'content': 'foo',
'modifydate': 2,
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'content': 'foo',
'modifydate': 3, # changed
},
))
# Must accept non-hashable object like list.
self.assertFalse(
db.is_different_note(
{
'tags': ['a', 'b'],
'savedate': 5, # ignore
'syncdate': 8, # ignore
},
{
'tags': ['a', 'b'],
},
))
class NoteOperations(DBMixin, unittest.TestCase):
NOTES = {
'KEY': {
'key': 'KEY',
'content': 'example note',
'createdate': 1,
'modifydate': 2,
'savedate': 3,
},
}
def test_delete(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
with patch('time.time', side_effect=itertools.repeat(99)):
db.delete_note('KEY')
self.assertEqual(
db.notes['KEY'],
{
'key': 'KEY',
'content': 'example note',
'deleted': 1,
'createdate': 1,
'modifydate': 99,
'savedate': 3,
},
)
def test_get(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
with self.assertRaises(KeyError):
db.get_note('NOT_FOUND')
note = db.get_note('KEY')
self.assertEqual(note, self.NOTES['KEY'])
def test_get_content(self):
db = self._db()
db.notes = copy.deepcopy(self.NOTES)
content = db.get_note_content('KEY')
self.assertEqual(content, self.NOTES['KEY']['content'])
def test_get_status(self):
db = self._db()
db.notes = {
'MODIFIED': {
'modifydate': 3,
'savedate': 1,
'syncdate': 2,
},
'SAVED': {
'modifydate': 2,
'savedate': 3,
'syncdate': 1,
},
'SYNCED': {
'modifydate': 1,
'savedate': 2,
'syncdate': 3,
},
'SYNCED_BUT_NOT_SAVED': {
'modifydate': 2,
'savedate': 1,
'syncdate': 3,
}
}
self.assertEqual(db.get_note_status('MODIFIED'),
NoteStatus(saved=False, synced=False, modified=True, full_syncing=False))
self.assertEqual(db.get_note_status('SAVED'),
NoteStatus(saved=True, synced=False, modified=False, full_syncing=False))
self.assertEqual(db.get_note_status('SYNCED'),
NoteStatus(saved=True, synced=True, modified=False, full_syncing=False))
# todo: NoteStatus.modified = not NoteStatus.saved. NoteStatus.modified can be replace to the property.
self.assertEqual(db.get_note_status('SYNCED_BUT_NOT_SAVED'),
NoteStatus(saved=False, synced=True, modified=True, full_syncing=False))
def assertTags(self, expected: list, note):
self.assertEqual(set(expected), set(note['tags']))
def test_delete_tag(self):
db = self._db()
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
with self.assertRaises(ValueError):
db.delete_note_tag('KEY', 'not-found')
self.assertTags(['foo', 'bar'], db.notes['KEY'])
db.delete_note_tag('KEY', 'bar')
self.assertTags(['foo'], db.notes['KEY'])
def test_add_tags(self):
db = self._db()
# Add a tag.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz')
self.assertTags(['foo', 'bar', 'baz'], db.notes['KEY'])
# Add comma separated tags.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz,qux,quux')
self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
# Add comma separated tags with spaces.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz, qux, quux')
self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
# Add space separated tags.
db.notes = {'KEY': {'tags': ['foo', 'bar']}}
db.add_note_tags('KEY', 'baz qux quux')
# TODO: なんかバグっている
# self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY'])
| en | 0.492349 | # If all fields excluding nvPY internal fields are same, those are same. # ignore # ignore # If content is not same, those are different. # ignore # ignore # changed # If other fields excluding nvPY internal fields are not same, those are different. # ignore # ignore # changed # Must accept non-hashable object like list. # ignore # ignore # todo: NoteStatus.modified = not NoteStatus.saved. NoteStatus.modified can be replace to the property. # Add a tag. # Add comma separated tags. # Add comma separated tags with spaces. # Add space separated tags. # TODO: なんかバグっている # self.assertTags(['foo', 'bar', 'baz', 'qux', 'quux'], db.notes['KEY']) | 2.552969 | 3 |
koch.py | GUELOU/PYTHON | 0 | 6624217 | from turtle import *
def koch(longueur, n):
if n == 0:
forward(longueur)
else:
koch(longueur/3, n-1)
left(60)
koch(longueur/3, n-1)
right(120)
koch(longueur/3, n-1)
left(60)
koch(longueur/3, n-1)
def flocon(taille, etape):
koch(taille, etape)
right(120)
koch(taille, etape)
right(120)
koch(taille, etape)
up()
goto(-280, 100)
down()
speed(0)
color("blue")
flocon(500, 4)
done()
| from turtle import *
def koch(longueur, n):
if n == 0:
forward(longueur)
else:
koch(longueur/3, n-1)
left(60)
koch(longueur/3, n-1)
right(120)
koch(longueur/3, n-1)
left(60)
koch(longueur/3, n-1)
def flocon(taille, etape):
koch(taille, etape)
right(120)
koch(taille, etape)
right(120)
koch(taille, etape)
up()
goto(-280, 100)
down()
speed(0)
color("blue")
flocon(500, 4)
done()
| none | 1 | 2.962715 | 3 | |
AlGDock/_external_paths.py | xinliu0609/AlGDock | 0 | 6624218 | import os, inspect
dir_external_paths = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
def findPath(locations):
"""
Parses a list of locations, returning the first file that exists.
If none exist, then None is returned.
"""
import os.path
for location in locations:
if location is not None and os.path.exists(location):
return os.path.abspath(location)
return None
def findPaths(keys):
paths = dict([(key,findPath(search_paths[key])) \
for key in keys])
for key in paths.keys():
if paths[key] is None:
# Download file and install program if available
if key in download_paths.keys():
(FN,command,path) = download_paths[key]
# Check that it has not already been downloaded
if os.path.isfile(path):
paths[key] = os.path.abspath(path)
else:
import time
download_start_time = time.time()
print 'Downloading and installing '+key
os.system('wget --no-check-certificate http://stash.osgconnect.net/+daveminh/%s'%(FN))
os.system('tar -xvf %s'%FN)
if command != '':
os.system(command)
if os.path.isfile(path):
print key + ' downloaded and installed in %d s'%(\
time.time() - download_start_time)
paths[key] = os.path.abspath(path)
else:
print 'Could not download '+key
raise Exception('Could not download '+key)
else:
raise Exception('Missing file for '+key)
return paths
# Define search paths for external programs and files
# Defined for
# David's IIT MacBook Pro, DSCR cluster, and CCB cluster
search_paths = {
# These files/programs are used in HREX.py
# Generalized AMBER force field
'gaff.dat':['/Users/dminh/Installers/AlGDock-0.0.1/data/gaff.dat',
'/home/dbchem/dm225/.local/installers/AlGDock-0.0.1/Data/gaff.dat',
'/home/dminh/Installers/AlGDock-0.0.1/Data/gaff.dat'],
# For postprocessing snapshots
'namd':['/home/xin/DevelopmentTool/NAMD_2.9_Linux-x86_64-multicore/namd2',
#'/Users/dminh/Installers/NAMD_2.9_Source/MacOSX-x86_64-g++/namd2',
'/home/dbchem/dm225/.local/bin/namd2',
'/share/apps/namd/2.9/Linux-x86_64-g++/namd2'],
# For postprocessing snapshots
'sander':['/Users/dminh/Installers/amber14/bin/sander',
'/home/dbchem/dm225/.local/installers/amber14/bin/sander',
'/share/apps/amber/14/bin/sander'],
# HREX.py is built on MMTK
'MMTK':['/home/xin/DevelopmentTool/MMTK-2.7.9',
'/home/dbchem/dm225/.local/installers/MMTK-2.7.9',
'/home/dminh/Installers/MMTK-2.7.9'],
# For visualizing (not essential)
'vmd':['/usr/local/bin/vmd'
#'/Applications/VMD 1.9.1.app/Contents/Resources/VMD.app/Contents/MacOS/VMD',
'/home/dbchem/dm225/.local/bin/vmd',
'/share/apps/vmd/1.9.1/bin/vmd']}
download_paths = {
'namd':('namd.tar.gz','','namd2')}
| import os, inspect
dir_external_paths = os.path.dirname(os.path.abspath(\
inspect.getfile(inspect.currentframe())))
def findPath(locations):
"""
Parses a list of locations, returning the first file that exists.
If none exist, then None is returned.
"""
import os.path
for location in locations:
if location is not None and os.path.exists(location):
return os.path.abspath(location)
return None
def findPaths(keys):
paths = dict([(key,findPath(search_paths[key])) \
for key in keys])
for key in paths.keys():
if paths[key] is None:
# Download file and install program if available
if key in download_paths.keys():
(FN,command,path) = download_paths[key]
# Check that it has not already been downloaded
if os.path.isfile(path):
paths[key] = os.path.abspath(path)
else:
import time
download_start_time = time.time()
print 'Downloading and installing '+key
os.system('wget --no-check-certificate http://stash.osgconnect.net/+daveminh/%s'%(FN))
os.system('tar -xvf %s'%FN)
if command != '':
os.system(command)
if os.path.isfile(path):
print key + ' downloaded and installed in %d s'%(\
time.time() - download_start_time)
paths[key] = os.path.abspath(path)
else:
print 'Could not download '+key
raise Exception('Could not download '+key)
else:
raise Exception('Missing file for '+key)
return paths
# Define search paths for external programs and files
# Defined for
# David's IIT MacBook Pro, DSCR cluster, and CCB cluster
search_paths = {
# These files/programs are used in HREX.py
# Generalized AMBER force field
'gaff.dat':['/Users/dminh/Installers/AlGDock-0.0.1/data/gaff.dat',
'/home/dbchem/dm225/.local/installers/AlGDock-0.0.1/Data/gaff.dat',
'/home/dminh/Installers/AlGDock-0.0.1/Data/gaff.dat'],
# For postprocessing snapshots
'namd':['/home/xin/DevelopmentTool/NAMD_2.9_Linux-x86_64-multicore/namd2',
#'/Users/dminh/Installers/NAMD_2.9_Source/MacOSX-x86_64-g++/namd2',
'/home/dbchem/dm225/.local/bin/namd2',
'/share/apps/namd/2.9/Linux-x86_64-g++/namd2'],
# For postprocessing snapshots
'sander':['/Users/dminh/Installers/amber14/bin/sander',
'/home/dbchem/dm225/.local/installers/amber14/bin/sander',
'/share/apps/amber/14/bin/sander'],
# HREX.py is built on MMTK
'MMTK':['/home/xin/DevelopmentTool/MMTK-2.7.9',
'/home/dbchem/dm225/.local/installers/MMTK-2.7.9',
'/home/dminh/Installers/MMTK-2.7.9'],
# For visualizing (not essential)
'vmd':['/usr/local/bin/vmd'
#'/Applications/VMD 1.9.1.app/Contents/Resources/VMD.app/Contents/MacOS/VMD',
'/home/dbchem/dm225/.local/bin/vmd',
'/share/apps/vmd/1.9.1/bin/vmd']}
download_paths = {
'namd':('namd.tar.gz','','namd2')}
| en | 0.796457 | Parses a list of locations, returning the first file that exists. If none exist, then None is returned. # Download file and install program if available # Check that it has not already been downloaded # Define search paths for external programs and files # Defined for # David's IIT MacBook Pro, DSCR cluster, and CCB cluster # These files/programs are used in HREX.py # Generalized AMBER force field # For postprocessing snapshots #'/Users/dminh/Installers/NAMD_2.9_Source/MacOSX-x86_64-g++/namd2', # For postprocessing snapshots # HREX.py is built on MMTK # For visualizing (not essential) #'/Applications/VMD 1.9.1.app/Contents/Resources/VMD.app/Contents/MacOS/VMD', | 2.941873 | 3 |
pessoas/models.py | Maxceleste/alura-receitas-django | 0 | 6624219 | <gh_stars>0
from django.db import models
class Pessoa(models.Model):
nome = models.CharField(max_length = 200)
email = models.CharField(max_length = 200)
def __str__(self):
return self.nome | from django.db import models
class Pessoa(models.Model):
nome = models.CharField(max_length = 200)
email = models.CharField(max_length = 200)
def __str__(self):
return self.nome | none | 1 | 2.369756 | 2 | |
analyse.py | chanwoochoigit/IR_coursework_2 | 0 | 6624220 | <gh_stars>0
import itertools
import random
import re
import time
from collections import defaultdict
import json
from sklearn.metrics import classification_report
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
import numpy as np
from collections import Counter
from gensim.corpora.dictionary import Dictionary
from gensim.test.utils import datapath
from gensim.models import LdaModel
from nltk.stem import PorterStemmer
from math import log2
from scipy import sparse
#my preprocessing module from coursework 1
import pickle
from sklearn.model_selection import train_test_split
class Preprocessor():
def __init__(self):
self.stopwords = self.get_stopwords()
def get_stopwords(self):
with open('stopwords.txt') as f:
stop_words = f.read().split('\n')
return stop_words
def unique_from_array(self, items):
items_1d = list(itertools.chain.from_iterable(items.values()))
vocab = {}
for i, x in enumerate(items_1d):
if x not in vocab.keys():
vocab[x] = 0
for i, k in enumerate(vocab.keys()):
vocab[k] = i
# using a rather unique structure to run faster
# vocab[word] = word_index
return vocab
#convert word list to dictionary for speeding purposes
def dictionify(self, items):
word_dict = {}
for i, word in enumerate(items):
word_dict[i] = word
return word_dict
def encode_labels(self, labels):
labels_encoded = []
for l in labels:
if l == 'ot':
labels_encoded.append(0)
elif l == 'nt':
labels_encoded.append(1)
elif l == 'quran':
labels_encoded.append(2)
else:
raise ValueError('wrong corpus name!')
return labels_encoded
def create_count_matrix(self, docs, vocab, mode):
count_mtx = sparse.dok_matrix((len(docs), len(vocab)), dtype='uint8')
for i in docs.keys():
if i % 3000 == 0:
print('creating count matrix for {} SVM model ..... {}%'.format(mode, round(i / len(docs) * 100, 2)))
count_dict = Counter(docs[i])
for word in count_dict.keys():
if mode == 'baseline':
try:
count_mtx[i, vocab[word]] = count_dict[word]
except:
continue
elif mode == 'improved':
try:
count_mtx[i, vocab[word]] = count_dict[word] * 1000
except:
continue
else:
raise ValueError('wrong mode choice!')
return count_mtx
def trim_text(self, text):
text_str = text.replace('\n', ' ').replace('\t',' ').replace(' ',' ') # replace \n with a space, and if that creates a double space, replace it with a single space
return text_str.lower()
def tokenise(self, text_str):
words = re.split('\W+', text_str)
words_lower = []
for word in words:
words_lower.append(word.lower())
return words_lower
def remove_stopwords(self, words):
stop_words = self.stopwords
words_dup_nostop = []
[words_dup_nostop.append(x) for x in words if x not in stop_words]
return words_dup_nostop
def stem_data(self, words_preprocessed):
ps = PorterStemmer()
words_stemmed = []
for word in words_preprocessed:
words_stemmed.append(ps.stem(word))
return words_stemmed
def remove_void(self, word_list):
clean = []
for word in word_list:
if word != '':
clean.append(word)
return clean
def create_bigram_vectors(self, uni_vectors):
bigram_vector = {}
for vi, v in enumerate(uni_vectors):
bv = []
for i in range(len(v)-1):
bv.append(str(v[i]+'_'+str(v[i+1])))
bigram_vector[vi] = bv
return bigram_vector
def preprocess_baseline(self, document):
# trim
text_str = self.trim_text(document)
# tokenise
words_dup = self.tokenise(text_str)
return words_dup
#arbitrarily limit word length for better accuracy (heuristic for lemmitisation)
def limit_word_length(self, word_list, limit, offset):
cut_text = []
for word in word_list:
if len(word) > limit:
cut_text.append(word[:limit-offset])
else:
cut_text.append(word)
return cut_text
#preprocess 1-d list of text
def preprocess(self, data_chunk):
#trim
text_str = self.trim_text(data_chunk)
#tokenise
words_dup = self.tokenise(text_str)
#remove stop words
# words_dup_nostop = self.remove_stopwords(words_dup)
# """normalisation"""
words_stemmed = self.stem_data(words_dup)
# arbitrary cut to 4 chars if word length is longer than 5
cut_off = self.limit_word_length(words_stemmed, 5, 1)
#remove empty quotation marks ('')
no_empties = self.remove_void(cut_off)
return no_empties
#preprocess 2-d list of text
def preprocess_many(self, data_chunk_loads):
processed_chunks_loads = []
for data in data_chunk_loads:
processed_chunks_loads.append(self.preprocess(data))
return processed_chunks_loads
class Analyse():
def __init__(self):
self.corpus = self.load_corpus()
self.p = Preprocessor()
def init_nd_dict(self):
return defaultdict(lambda : defaultdict(dict))
def create_corpus(self):
with open('train_and_dev.tsv', 'r') as f:
raw_text = f.readlines()
corpus = self.init_nd_dict()
counter = 0
current_corpus = ''
cp_list = ['ot', 'nt', 'quran']
for line in raw_text:
processed = self.p.preprocess(line)
head = processed[0]
if current_corpus not in cp_list:
current_corpus = head
if current_corpus != head:
current_corpus = head
counter = 0
corpus[current_corpus][counter] = processed[1:]
counter += 1
with open('corpus.json', 'w') as f:
json.dump(corpus, f)
return corpus
def load_corpus(self):
with open('corpus.json') as f:
corpus = json.load(f)
return corpus
# get counts to calculate mutual information
def get_Ns(self, term, cls):
classes = self.corpus.keys()
# find "non-current" class
c0 = [] # len(c0) is always 2
for c in classes:
if c != cls:
c0.append(c)
N11, N10, N01, N00 = 0, 0, 0, 0
# investigate document in the given class
for doc in self.corpus[cls].keys():
curr_doc = self.corpus[cls][doc]
if term in curr_doc:
N11 += 1
elif term not in curr_doc:
N01 += 1
# investigate documents in other classes
for c in c0:
for doc in self.corpus[c].keys():
curr_doc = self.corpus[c][doc]
if term in curr_doc:
N10 += 1
elif term not in curr_doc:
N00 += 1
return N11, N10, N01, N00
# calculate mutual information given all 4 counts
def calc_mi(self, term, cls):
N11, N10, N01, N00 = self.get_Ns(term, cls)
N = N11 + N10 + N01 + N00
try:
aa = (N11 / N) * log2((N * N11) / ((N11 + N10) * (N01 + N11)))
except:
aa = 0
try:
bb = (N01 / N) * log2((N * N01) / ((N01 + N00) * (N01 + N11)))
except:
bb = 0
try:
cc = (N10 / N) * log2((N * N10) / ((N10 + N11) * (N10 + N00)))
except:
cc = 0
try:
dd = (N00 / N) * log2((N * N00) / ((N00 + N01) * (N10 + N00)))
except:
dd = 0
return aa + bb + cc + dd
def calc_chi(self, term, cls):
N11, N10, N01, N00 = self.get_Ns(term, cls)
return ((N11 + N10 + N01 + N00) * pow(((N11 * N00) - (N10 * N01)), 2)) / \
((N11 + N01) * (N11 + N10) * (N10 + N00) * (N01 + N00))
# run mi or chi calculation
def run_calculation(self, mode):
result = self.init_nd_dict()
counter = 1
for cls in self.corpus.keys():
for doc in self.corpus[cls]:
print('class: {}/3---------------------------------------------------'.format(counter))
print('calculating mutual information...{}/{}'.format(doc, len(self.corpus[cls].keys())))
for word in self.corpus[cls][doc]:
if mode == 'mi':
score = self.calc_mi(word, cls)
elif mode == 'chi':
score = self.calc_chi(word, cls)
else:
raise ValueError('wrong calcluation mode entered! - choose mi or chi')
result[word][cls] = score
counter += 1
with open('{}.json'.format(mode), 'w') as f:
json.dump(result, f)
return result
def sort_dict_by_value(self, dict_to_sort):
return dict(sorted(dict_to_sort.items(), key=lambda item: item[1], reverse=True))
def display_ranked_result(self, result_dict):
for i, item in enumerate(result_dict.items()):
term = item[0]
score = item[1]
print(term + ': ' + str(score))
if i > 10:
break
def sort_result(self, mode):
with open('{}.json'.format(mode), 'r') as f:
to_display = json.load(f)
to_sort = self.init_nd_dict()
for word in to_display.keys():
for corpus in to_display[word]:
score = to_display[word][corpus]
to_sort[corpus][word] = score
sorted_ot = self.sort_dict_by_value(to_sort['ot'])
sorted_nt = self.sort_dict_by_value(to_sort['nt'])
sorted_qu = self.sort_dict_by_value(to_sort['quran'])
self.display_ranked_result(sorted_ot)
print('----------------------------')
self.display_ranked_result(sorted_nt)
print('----------------------------')
self.display_ranked_result(sorted_qu)
#helper function for get_lda_corpus
# RETURNS: 2d list of documents based on self.corpus
def get_all_docs(self):
united_corpus = []
# add the three corpus as one
for cor in self.corpus.keys():
for doc in self.corpus[cor].keys():
united_corpus.append(self.corpus[cor][doc])
return united_corpus
def get_lda_corpus(self):
# format the existing corpus "self.corpus" to fit in the gensim's LDA model.
united_corpus = self.get_all_docs()
corp_dictionary = Dictionary(united_corpus)
corpus = [corp_dictionary.doc2bow(text) for text in united_corpus]
return corpus
def train_lda(self, k):
# r = randrange(100)
# print(r)
lda = LdaModel(corpus=self.get_lda_corpus(), num_topics=k)
# save lda model
save_loc = datapath('lda_model')
lda.save(save_loc)
def load_lda(self):
return LdaModel.load(datapath('lda_model'))
def reverse_dict(self, dictionary):
ks, vs = dictionary.keys(), dictionary.values()
return dict(zip(vs,ks))
def convert_list_of_tuple_to_dict(self, lot):
dic = {}
for item in lot:
topic, prob = item
dic[topic] = prob
return dic
def lda_calc_average_score(self):
len_ot, len_nt, len_qu = len(self.corpus['ot'].keys()), len(self.corpus['nt'].keys()), len(self.corpus['quran'].keys())
lda_result_dict = self.init_nd_dict()
lda_distrib = self.load_lda().get_document_topics(self.get_lda_corpus())
#add results for each corpus to get average score for each topic
for i, line in enumerate(lda_distrib):
if i % 1000 == 0:
print('converting the result to a disposable form...{}/{}'.format(i, len(lda_distrib)))
line_dict = self.convert_list_of_tuple_to_dict(line)
if i < len_ot:
lda_result_dict['ot'][i] = line_dict
elif len_ot <= i < len_ot + len_nt:
lda_result_dict['nt'][i] = line_dict
elif len_ot + len_nt <= i:
lda_result_dict['quran'][i] = line_dict
#set probability to 0 if a topic probability does not appear
for c in lda_result_dict.keys():
for doc in lda_result_dict[c].keys():
for topic in range(0, 20):
try:
if lda_result_dict[c][doc][topic] == {}:
lda_result_dict[c][doc][topic] = 0
except:
lda_result_dict[c][doc][topic] = 0
avg_scores = self.init_nd_dict()
#calculate average probability 1) sum up the values
for c in lda_result_dict.keys():
for doc in lda_result_dict[c].keys():
for topic in lda_result_dict[c][doc].keys():
try:
avg_scores[c][topic] += lda_result_dict[c][doc][topic]
except:
avg_scores[c][topic] = lda_result_dict[c][doc][topic]
#calculate average probability 2) average the values by the total number of documents in each corpus
for c in avg_scores.keys():
for topic in avg_scores[c].keys():
avg_scores[c][topic] = avg_scores[c][topic] / len(lda_result_dict[c].keys())
#sort each corpus by the probability of each topic candidate
for c in avg_scores.keys():
avg_scores[c] = {k: v for k, v in sorted(avg_scores[c].items(), key=lambda item: item[1], reverse=True)}
with open('avg_score_dict.json', 'w') as f:
json.dump(avg_scores, f)
#extract token ides from a string returned from lda.print_topic()
def extract_tokens_from_lda_str(self, lda_token_string):
ids = {}
#get token ID : word dictionary to retrieve words
corp_dictionary = Dictionary(self.get_all_docs())
word_to_id = self.reverse_dict(corp_dictionary.token2id)
pns = lda_token_string.replace(' ', '').replace('\"', '').split('+')
for prob_num in pns:
prob, num = prob_num.split('*')
ids[word_to_id[int(num)]] = prob
ids_sorted = {k: v for k, v in sorted(ids.items(), key=lambda item: item[1], reverse=True)}
return ids_sorted
def find_top_tokens(self):
with open('avg_score_dict.json', 'r') as f:
avg_scores = json.load(f)
ot_topic_best = list(avg_scores['ot'].keys())[0]
nt_topic_best = list(avg_scores['nt'].keys())[0]
qu_topic_best = list(avg_scores['quran'].keys())[0]
print('ot: '+ot_topic_best)
print('nt: '+nt_topic_best)
print('quran: '+qu_topic_best)
#find key tokens for each corpus
lda_token_str_ot = self.load_lda().print_topic(int(ot_topic_best))
power_words_ot = self.extract_tokens_from_lda_str(lda_token_str_ot)
lda_token_str_nt = self.load_lda().print_topic(int(nt_topic_best))
power_words_nt = self.extract_tokens_from_lda_str(lda_token_str_nt)
lda_token_str_qu = self.load_lda().print_topic(int(qu_topic_best))
power_words_qu = self.extract_tokens_from_lda_str(lda_token_str_qu)
print(power_words_ot)
print(power_words_nt)
print(power_words_qu)
return ot_topic_best, nt_topic_best, qu_topic_best
class Classifier():
def __init__(self):
self.raw_data = self.load_raw_data()
self.raw_test_data = self.load_raw_test_data()
def load_raw_data(self):
with open('train_and_dev.tsv', 'r') as f:
raw_text = f.readlines()
return raw_text
def load_raw_test_data(self):
with open('test.tsv', 'r') as f:
raw_text = f.readlines()
return raw_text
def shuffle_and_split(self, split, X, y):
dataset = list(zip(X.todense(),y)) #zip the count matrix and labels
random.shuffle(dataset) #shuffle the cm-label tuples
if split == 'train': #if training set is given, split to training and validation
X_train, X_dev, y_train, y_dev = train_test_split(X, y, test_size=0.1)
X_train_sparse = sparse.dok_matrix(X_train)
X_dev_sparse = sparse.dok_matrix(X_dev)
return X_train_sparse, X_dev_sparse, y_train, y_dev
elif split == 'test':
splitted = [list(t) for t in zip(*dataset)] #unzip the list of tuples of [(dense_matrix, labels)]
X_shuffled = splitted[0]
y_shuffled = splitted[1]
X_sparse = sparse.dok_matrix(np.concatenate(X_shuffled, axis=0)) #convert back to sparse matrix from dense
return X_sparse, y_shuffled
def collect_words_from_raw_text(self, mode, raw_text):
p = Preprocessor()
####collect words from raw text#####################################################################
docs = []
labels = []
for docid, line in enumerate(raw_text):
if docid % 5000 == 0:
print('building docs and preprocessing...{}%'.format(round(docid / len(raw_text) * 100, 2)))
c, document = line.split('\t')
if mode == 'baseline':
docs.append(p.preprocess_baseline(document))
elif mode == 'improved':
docs.append(p.preprocess(document))
else:
raise ValueError('Wrong mode choice! It should be either baseline or improved.')
labels.append(c.lower())
####################################################################################################
return docs, labels
#create vocabulary using the docs
def create_vocab(self, docs):
p = Preprocessor()
vocab = p.unique_from_array(p.dictionify(docs)) # convert docs to be in dictionary form and create vocab
return vocab
def run_count_matrix_creator(self, mode, docs, vocab, labels):
p = Preprocessor()
docs = p.dictionify(docs)
count_mtx = p.create_count_matrix(docs, vocab, mode)
encoded_labels = p.encode_labels(labels) # encode corpus labels; ot=0, nt=1, quran=2
return count_mtx, encoded_labels
def prepare_data(self, mode):
raw_text = self.raw_data
raw_test_text = self.raw_test_data
docs, labels = self.collect_words_from_raw_text(mode, raw_text)
test_docs, test_labels = self.collect_words_from_raw_text(mode, raw_test_text)
vocab = self.create_vocab(docs) #create vocabulary using training data: test data doesn't effect the vocab
count_mtx, encoded_labels = self.run_count_matrix_creator(mode, docs, vocab, labels)
count_mtx_test, encoded_labels_test = self.run_count_matrix_creator(mode, test_docs, vocab, test_labels)
X_train, X_dev, y_train, y_dev = self.shuffle_and_split('train', count_mtx, encoded_labels)
X_test, y_test = self.shuffle_and_split('test', count_mtx_test, encoded_labels_test)
#save shuffled and splitted data to disk
with open('X_train_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_train, f)
with open('X_test_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_test, f)
with open('X_dev_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_dev, f)
with open('y_train_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_train, f)
with open('y_dev_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_dev, f)
with open('y_test_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_test, f)
def load_data(self, mode):
with open('X_train_{}.pkl'.format(mode), 'rb') as f:
X_train = pickle.load(f)
with open('X_dev_{}.pkl'.format(mode), 'rb') as f:
X_dev = pickle.load(f)
with open('X_test_{}.pkl'.format(mode), 'rb') as f:
X_test = pickle.load(f)
with open('y_train_{}.pkl'.format(mode), 'rb') as f:
y_train = pickle.load(f)
with open('y_dev_{}.pkl'.format(mode), 'rb') as f:
y_dev = pickle.load(f)
with open('y_test_{}.pkl'.format(mode), 'rb') as f:
y_test = pickle.load(f)
return X_train, X_dev, X_test, y_train, y_dev, y_test
def train_model(self, mode, classifier='svm'):
if mode == 'baseline':
c = 1000
classifier = 'svm' #set baseline model to svm always
elif mode == 'improved':
c = 10
else:
raise ValueError('wrong mode to train SVM!!')
X_train, X_dev, X_test, y_train, y_dev, y_test = self.load_data(mode)
if classifier == 'linsvm':
model = LinearSVC(C=c, max_iter=5000, verbose=True) #init sklearn.svm.LinearSVC for "improved" model
elif classifier == 'nb':
model = GaussianNB()
elif classifier == 'svm':
model = SVC(C=c, verbose=True) #init sklearn.svm.SVC
else:
raise ValueError('Wrong model choice! your current model: {}'.format(classifier))
print("start training the {} model!".format(classifier))
start_train = time.time()
if classifier == 'nb':
model.fit(X_train.todense(),y_train)
else:
model.fit(X_train,y_train)
print('total training time: {} seconds'.format(time.time() - start_train))
with open('{}_model_{}.pkl'.format(classifier, mode), 'wb') as f:
pickle.dump(model, f)
self.evaluate_predictions(mode, classifier)
def load_svm_model(self, mode, classifier='svm'):
with open('{}_model_{}.pkl'.format(classifier, mode), 'rb') as f:
model = pickle.load(f)
return model
#required in the lab but not in cw2: only here to test the classification performance
#not required in classification.csv
def accuracy(self, y_true, y_pred):
correct = 0
for true, pred in zip(y_true, y_pred):
if true == pred:
correct += 1
return round(correct/ len(y_true),3)
#initialise metrics dictinary for easier additions
def init_metric_dict(self):
a = Analyse()
lookup = a.init_nd_dict()
for i in range(3):
lookup[i]['tp'] = 0
lookup[i]['fp'] = 0
lookup[i]['fn'] = 0
return lookup
def precision(self, y_true, y_pred):
#initialise metrics dictionary
lookup = self.init_metric_dict()
for true, pred in zip(y_true, y_pred):
if true == pred:
lookup[pred]['tp'] += 1
else:
lookup[pred]['fp'] += 1
precisions = {}
for i in range(3):
precisions[i] = round(lookup[i]['tp'] / (lookup[i]['tp'] + lookup[i]['fp']),3)
precisions['macro'] = round((precisions[0] + precisions[1] + precisions[2])/3,3)
return precisions
def recall(self, y_true, y_pred):
#initialise metrics dictionary
lookup = self.init_metric_dict()
for true, pred in zip(y_true, y_pred):
if true == pred:
lookup[true]['tp'] += 1
else:
lookup[true]['fn'] += 1
recall = {}
for i in range(3):
recall[i] = round(lookup[i]['tp'] / (lookup[i]['tp'] + lookup[i]['fn']), 3)
recall['macro'] = round((recall[0] + recall[1] + recall[2])/3,3)
return recall
def f1_score(self, y_true, y_pred):
precision = self.precision(y_true, y_pred)
recall = self.recall(y_true, y_pred)
f1 = {}
for i in range(3):
f1[i] = round( 2 * (precision[i] * recall[i]) / (precision[i] + recall[i]),3)
f1['macro'] = round((f1[0] + f1[1] + f1[2])/3,3)
return f1
def get_metrics_str(self, mode, split, y_true, y_pred):
#OT = 0, NT = 1, Quran = 2
precision = self.precision(y_true, y_pred)
recall = self.recall(y_true, y_pred)
f1 = self.f1_score(y_true, y_pred)
metrics_string = ''
metrics_string += mode + ',' + split+',' #add system and split
metrics_string += str(precision[2]) + ',' + str(recall[2]) + ',' + str(f1[2]) + ',' #add p, r, f of Quran
metrics_string += str(precision[0]) + ',' + str(recall[0]) + ',' + str(f1[0]) + ',' #add p, r, f of OT
metrics_string += str(precision[1]) + ',' + str(recall[1]) + ',' + str(f1[1]) + ',' #add p, r, f of NT
metrics_string += str(precision['macro']) + ',' + str(recall['macro']) + ',' + str(f1['macro'])
return metrics_string
def evaluate_predictions(self, mode, classifier='svm'):
model = self.load_svm_model(mode, classifier)
X_train, X_dev, X_test, y_train, y_dev, y_test = self.load_data(mode)
if classifier == 'nb':
y_train_pred = model.predict(X_train.todense())
y_dev_pred = model.predict(X_dev.todense())
y_test_pred = model.predict(X_test.todense())
else:
y_train_pred = model.predict(X_train)
y_dev_pred = model.predict(X_dev)
y_test_pred = model.predict(X_test)
with open('classification.csv', 'a') as f:
f.write('system,split,p-quran,r-quran,f-quran,p-ot,r-ot,f-ot,p-nt,r-nt,f-nt,p-macro,r-macro,f-macro\n')
f.write(self.get_metrics_str(mode, 'train', y_train, y_train_pred) + '\n')
f.write(self.get_metrics_str(mode, 'dev', y_dev, y_dev_pred) + '\n')
f.write(self.get_metrics_str(mode, 'test', y_test, y_test_pred) + '\n')
f.write('\n')
f.write(classification_report(y_train, y_train_pred))
f.write(classification_report(y_dev, y_dev_pred))
f.write(classification_report(y_test, y_test_pred))
a = Analyse()
# corp = a.create_corpus()
# corp = a.load_corpus()
# print(len(corp['ot'].keys()) + len(corp['nt'].keys()) + len(corp['quran'].keys()))
# print(a.get_mi_counts(1, 3))
# a.run_calculation('mi')
# a.run_calculation('chi')
# a.sort_result('mi')
# a.sort_result('chi')
# a.train_lda(k=20)
# a.lda_calc_average_score()
# a.find_top_tokens()
c = Classifier()
modes = ['baseline', 'improved']
m = 1
mode = modes[m]
# c.prepare_data(mode)
c.train_model(mode, 'linsvm')
| import itertools
import random
import re
import time
from collections import defaultdict
import json
from sklearn.metrics import classification_report
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
import numpy as np
from collections import Counter
from gensim.corpora.dictionary import Dictionary
from gensim.test.utils import datapath
from gensim.models import LdaModel
from nltk.stem import PorterStemmer
from math import log2
from scipy import sparse
#my preprocessing module from coursework 1
import pickle
from sklearn.model_selection import train_test_split
class Preprocessor():
def __init__(self):
self.stopwords = self.get_stopwords()
def get_stopwords(self):
with open('stopwords.txt') as f:
stop_words = f.read().split('\n')
return stop_words
def unique_from_array(self, items):
items_1d = list(itertools.chain.from_iterable(items.values()))
vocab = {}
for i, x in enumerate(items_1d):
if x not in vocab.keys():
vocab[x] = 0
for i, k in enumerate(vocab.keys()):
vocab[k] = i
# using a rather unique structure to run faster
# vocab[word] = word_index
return vocab
#convert word list to dictionary for speeding purposes
def dictionify(self, items):
word_dict = {}
for i, word in enumerate(items):
word_dict[i] = word
return word_dict
def encode_labels(self, labels):
labels_encoded = []
for l in labels:
if l == 'ot':
labels_encoded.append(0)
elif l == 'nt':
labels_encoded.append(1)
elif l == 'quran':
labels_encoded.append(2)
else:
raise ValueError('wrong corpus name!')
return labels_encoded
def create_count_matrix(self, docs, vocab, mode):
count_mtx = sparse.dok_matrix((len(docs), len(vocab)), dtype='uint8')
for i in docs.keys():
if i % 3000 == 0:
print('creating count matrix for {} SVM model ..... {}%'.format(mode, round(i / len(docs) * 100, 2)))
count_dict = Counter(docs[i])
for word in count_dict.keys():
if mode == 'baseline':
try:
count_mtx[i, vocab[word]] = count_dict[word]
except:
continue
elif mode == 'improved':
try:
count_mtx[i, vocab[word]] = count_dict[word] * 1000
except:
continue
else:
raise ValueError('wrong mode choice!')
return count_mtx
def trim_text(self, text):
text_str = text.replace('\n', ' ').replace('\t',' ').replace(' ',' ') # replace \n with a space, and if that creates a double space, replace it with a single space
return text_str.lower()
def tokenise(self, text_str):
words = re.split('\W+', text_str)
words_lower = []
for word in words:
words_lower.append(word.lower())
return words_lower
def remove_stopwords(self, words):
stop_words = self.stopwords
words_dup_nostop = []
[words_dup_nostop.append(x) for x in words if x not in stop_words]
return words_dup_nostop
def stem_data(self, words_preprocessed):
ps = PorterStemmer()
words_stemmed = []
for word in words_preprocessed:
words_stemmed.append(ps.stem(word))
return words_stemmed
def remove_void(self, word_list):
clean = []
for word in word_list:
if word != '':
clean.append(word)
return clean
def create_bigram_vectors(self, uni_vectors):
bigram_vector = {}
for vi, v in enumerate(uni_vectors):
bv = []
for i in range(len(v)-1):
bv.append(str(v[i]+'_'+str(v[i+1])))
bigram_vector[vi] = bv
return bigram_vector
def preprocess_baseline(self, document):
# trim
text_str = self.trim_text(document)
# tokenise
words_dup = self.tokenise(text_str)
return words_dup
#arbitrarily limit word length for better accuracy (heuristic for lemmitisation)
def limit_word_length(self, word_list, limit, offset):
cut_text = []
for word in word_list:
if len(word) > limit:
cut_text.append(word[:limit-offset])
else:
cut_text.append(word)
return cut_text
#preprocess 1-d list of text
def preprocess(self, data_chunk):
#trim
text_str = self.trim_text(data_chunk)
#tokenise
words_dup = self.tokenise(text_str)
#remove stop words
# words_dup_nostop = self.remove_stopwords(words_dup)
# """normalisation"""
words_stemmed = self.stem_data(words_dup)
# arbitrary cut to 4 chars if word length is longer than 5
cut_off = self.limit_word_length(words_stemmed, 5, 1)
#remove empty quotation marks ('')
no_empties = self.remove_void(cut_off)
return no_empties
#preprocess 2-d list of text
def preprocess_many(self, data_chunk_loads):
processed_chunks_loads = []
for data in data_chunk_loads:
processed_chunks_loads.append(self.preprocess(data))
return processed_chunks_loads
class Analyse():
def __init__(self):
self.corpus = self.load_corpus()
self.p = Preprocessor()
def init_nd_dict(self):
return defaultdict(lambda : defaultdict(dict))
def create_corpus(self):
with open('train_and_dev.tsv', 'r') as f:
raw_text = f.readlines()
corpus = self.init_nd_dict()
counter = 0
current_corpus = ''
cp_list = ['ot', 'nt', 'quran']
for line in raw_text:
processed = self.p.preprocess(line)
head = processed[0]
if current_corpus not in cp_list:
current_corpus = head
if current_corpus != head:
current_corpus = head
counter = 0
corpus[current_corpus][counter] = processed[1:]
counter += 1
with open('corpus.json', 'w') as f:
json.dump(corpus, f)
return corpus
def load_corpus(self):
with open('corpus.json') as f:
corpus = json.load(f)
return corpus
# get counts to calculate mutual information
def get_Ns(self, term, cls):
classes = self.corpus.keys()
# find "non-current" class
c0 = [] # len(c0) is always 2
for c in classes:
if c != cls:
c0.append(c)
N11, N10, N01, N00 = 0, 0, 0, 0
# investigate document in the given class
for doc in self.corpus[cls].keys():
curr_doc = self.corpus[cls][doc]
if term in curr_doc:
N11 += 1
elif term not in curr_doc:
N01 += 1
# investigate documents in other classes
for c in c0:
for doc in self.corpus[c].keys():
curr_doc = self.corpus[c][doc]
if term in curr_doc:
N10 += 1
elif term not in curr_doc:
N00 += 1
return N11, N10, N01, N00
# calculate mutual information given all 4 counts
def calc_mi(self, term, cls):
N11, N10, N01, N00 = self.get_Ns(term, cls)
N = N11 + N10 + N01 + N00
try:
aa = (N11 / N) * log2((N * N11) / ((N11 + N10) * (N01 + N11)))
except:
aa = 0
try:
bb = (N01 / N) * log2((N * N01) / ((N01 + N00) * (N01 + N11)))
except:
bb = 0
try:
cc = (N10 / N) * log2((N * N10) / ((N10 + N11) * (N10 + N00)))
except:
cc = 0
try:
dd = (N00 / N) * log2((N * N00) / ((N00 + N01) * (N10 + N00)))
except:
dd = 0
return aa + bb + cc + dd
def calc_chi(self, term, cls):
N11, N10, N01, N00 = self.get_Ns(term, cls)
return ((N11 + N10 + N01 + N00) * pow(((N11 * N00) - (N10 * N01)), 2)) / \
((N11 + N01) * (N11 + N10) * (N10 + N00) * (N01 + N00))
# run mi or chi calculation
def run_calculation(self, mode):
result = self.init_nd_dict()
counter = 1
for cls in self.corpus.keys():
for doc in self.corpus[cls]:
print('class: {}/3---------------------------------------------------'.format(counter))
print('calculating mutual information...{}/{}'.format(doc, len(self.corpus[cls].keys())))
for word in self.corpus[cls][doc]:
if mode == 'mi':
score = self.calc_mi(word, cls)
elif mode == 'chi':
score = self.calc_chi(word, cls)
else:
raise ValueError('wrong calcluation mode entered! - choose mi or chi')
result[word][cls] = score
counter += 1
with open('{}.json'.format(mode), 'w') as f:
json.dump(result, f)
return result
def sort_dict_by_value(self, dict_to_sort):
return dict(sorted(dict_to_sort.items(), key=lambda item: item[1], reverse=True))
def display_ranked_result(self, result_dict):
for i, item in enumerate(result_dict.items()):
term = item[0]
score = item[1]
print(term + ': ' + str(score))
if i > 10:
break
def sort_result(self, mode):
with open('{}.json'.format(mode), 'r') as f:
to_display = json.load(f)
to_sort = self.init_nd_dict()
for word in to_display.keys():
for corpus in to_display[word]:
score = to_display[word][corpus]
to_sort[corpus][word] = score
sorted_ot = self.sort_dict_by_value(to_sort['ot'])
sorted_nt = self.sort_dict_by_value(to_sort['nt'])
sorted_qu = self.sort_dict_by_value(to_sort['quran'])
self.display_ranked_result(sorted_ot)
print('----------------------------')
self.display_ranked_result(sorted_nt)
print('----------------------------')
self.display_ranked_result(sorted_qu)
#helper function for get_lda_corpus
# RETURNS: 2d list of documents based on self.corpus
def get_all_docs(self):
united_corpus = []
# add the three corpus as one
for cor in self.corpus.keys():
for doc in self.corpus[cor].keys():
united_corpus.append(self.corpus[cor][doc])
return united_corpus
def get_lda_corpus(self):
# format the existing corpus "self.corpus" to fit in the gensim's LDA model.
united_corpus = self.get_all_docs()
corp_dictionary = Dictionary(united_corpus)
corpus = [corp_dictionary.doc2bow(text) for text in united_corpus]
return corpus
def train_lda(self, k):
# r = randrange(100)
# print(r)
lda = LdaModel(corpus=self.get_lda_corpus(), num_topics=k)
# save lda model
save_loc = datapath('lda_model')
lda.save(save_loc)
def load_lda(self):
return LdaModel.load(datapath('lda_model'))
def reverse_dict(self, dictionary):
ks, vs = dictionary.keys(), dictionary.values()
return dict(zip(vs,ks))
def convert_list_of_tuple_to_dict(self, lot):
dic = {}
for item in lot:
topic, prob = item
dic[topic] = prob
return dic
def lda_calc_average_score(self):
len_ot, len_nt, len_qu = len(self.corpus['ot'].keys()), len(self.corpus['nt'].keys()), len(self.corpus['quran'].keys())
lda_result_dict = self.init_nd_dict()
lda_distrib = self.load_lda().get_document_topics(self.get_lda_corpus())
#add results for each corpus to get average score for each topic
for i, line in enumerate(lda_distrib):
if i % 1000 == 0:
print('converting the result to a disposable form...{}/{}'.format(i, len(lda_distrib)))
line_dict = self.convert_list_of_tuple_to_dict(line)
if i < len_ot:
lda_result_dict['ot'][i] = line_dict
elif len_ot <= i < len_ot + len_nt:
lda_result_dict['nt'][i] = line_dict
elif len_ot + len_nt <= i:
lda_result_dict['quran'][i] = line_dict
#set probability to 0 if a topic probability does not appear
for c in lda_result_dict.keys():
for doc in lda_result_dict[c].keys():
for topic in range(0, 20):
try:
if lda_result_dict[c][doc][topic] == {}:
lda_result_dict[c][doc][topic] = 0
except:
lda_result_dict[c][doc][topic] = 0
avg_scores = self.init_nd_dict()
#calculate average probability 1) sum up the values
for c in lda_result_dict.keys():
for doc in lda_result_dict[c].keys():
for topic in lda_result_dict[c][doc].keys():
try:
avg_scores[c][topic] += lda_result_dict[c][doc][topic]
except:
avg_scores[c][topic] = lda_result_dict[c][doc][topic]
#calculate average probability 2) average the values by the total number of documents in each corpus
for c in avg_scores.keys():
for topic in avg_scores[c].keys():
avg_scores[c][topic] = avg_scores[c][topic] / len(lda_result_dict[c].keys())
#sort each corpus by the probability of each topic candidate
for c in avg_scores.keys():
avg_scores[c] = {k: v for k, v in sorted(avg_scores[c].items(), key=lambda item: item[1], reverse=True)}
with open('avg_score_dict.json', 'w') as f:
json.dump(avg_scores, f)
#extract token ides from a string returned from lda.print_topic()
def extract_tokens_from_lda_str(self, lda_token_string):
ids = {}
#get token ID : word dictionary to retrieve words
corp_dictionary = Dictionary(self.get_all_docs())
word_to_id = self.reverse_dict(corp_dictionary.token2id)
pns = lda_token_string.replace(' ', '').replace('\"', '').split('+')
for prob_num in pns:
prob, num = prob_num.split('*')
ids[word_to_id[int(num)]] = prob
ids_sorted = {k: v for k, v in sorted(ids.items(), key=lambda item: item[1], reverse=True)}
return ids_sorted
def find_top_tokens(self):
with open('avg_score_dict.json', 'r') as f:
avg_scores = json.load(f)
ot_topic_best = list(avg_scores['ot'].keys())[0]
nt_topic_best = list(avg_scores['nt'].keys())[0]
qu_topic_best = list(avg_scores['quran'].keys())[0]
print('ot: '+ot_topic_best)
print('nt: '+nt_topic_best)
print('quran: '+qu_topic_best)
#find key tokens for each corpus
lda_token_str_ot = self.load_lda().print_topic(int(ot_topic_best))
power_words_ot = self.extract_tokens_from_lda_str(lda_token_str_ot)
lda_token_str_nt = self.load_lda().print_topic(int(nt_topic_best))
power_words_nt = self.extract_tokens_from_lda_str(lda_token_str_nt)
lda_token_str_qu = self.load_lda().print_topic(int(qu_topic_best))
power_words_qu = self.extract_tokens_from_lda_str(lda_token_str_qu)
print(power_words_ot)
print(power_words_nt)
print(power_words_qu)
return ot_topic_best, nt_topic_best, qu_topic_best
class Classifier():
def __init__(self):
self.raw_data = self.load_raw_data()
self.raw_test_data = self.load_raw_test_data()
def load_raw_data(self):
with open('train_and_dev.tsv', 'r') as f:
raw_text = f.readlines()
return raw_text
def load_raw_test_data(self):
with open('test.tsv', 'r') as f:
raw_text = f.readlines()
return raw_text
def shuffle_and_split(self, split, X, y):
dataset = list(zip(X.todense(),y)) #zip the count matrix and labels
random.shuffle(dataset) #shuffle the cm-label tuples
if split == 'train': #if training set is given, split to training and validation
X_train, X_dev, y_train, y_dev = train_test_split(X, y, test_size=0.1)
X_train_sparse = sparse.dok_matrix(X_train)
X_dev_sparse = sparse.dok_matrix(X_dev)
return X_train_sparse, X_dev_sparse, y_train, y_dev
elif split == 'test':
splitted = [list(t) for t in zip(*dataset)] #unzip the list of tuples of [(dense_matrix, labels)]
X_shuffled = splitted[0]
y_shuffled = splitted[1]
X_sparse = sparse.dok_matrix(np.concatenate(X_shuffled, axis=0)) #convert back to sparse matrix from dense
return X_sparse, y_shuffled
def collect_words_from_raw_text(self, mode, raw_text):
p = Preprocessor()
####collect words from raw text#####################################################################
docs = []
labels = []
for docid, line in enumerate(raw_text):
if docid % 5000 == 0:
print('building docs and preprocessing...{}%'.format(round(docid / len(raw_text) * 100, 2)))
c, document = line.split('\t')
if mode == 'baseline':
docs.append(p.preprocess_baseline(document))
elif mode == 'improved':
docs.append(p.preprocess(document))
else:
raise ValueError('Wrong mode choice! It should be either baseline or improved.')
labels.append(c.lower())
####################################################################################################
return docs, labels
#create vocabulary using the docs
def create_vocab(self, docs):
p = Preprocessor()
vocab = p.unique_from_array(p.dictionify(docs)) # convert docs to be in dictionary form and create vocab
return vocab
def run_count_matrix_creator(self, mode, docs, vocab, labels):
p = Preprocessor()
docs = p.dictionify(docs)
count_mtx = p.create_count_matrix(docs, vocab, mode)
encoded_labels = p.encode_labels(labels) # encode corpus labels; ot=0, nt=1, quran=2
return count_mtx, encoded_labels
def prepare_data(self, mode):
raw_text = self.raw_data
raw_test_text = self.raw_test_data
docs, labels = self.collect_words_from_raw_text(mode, raw_text)
test_docs, test_labels = self.collect_words_from_raw_text(mode, raw_test_text)
vocab = self.create_vocab(docs) #create vocabulary using training data: test data doesn't effect the vocab
count_mtx, encoded_labels = self.run_count_matrix_creator(mode, docs, vocab, labels)
count_mtx_test, encoded_labels_test = self.run_count_matrix_creator(mode, test_docs, vocab, test_labels)
X_train, X_dev, y_train, y_dev = self.shuffle_and_split('train', count_mtx, encoded_labels)
X_test, y_test = self.shuffle_and_split('test', count_mtx_test, encoded_labels_test)
#save shuffled and splitted data to disk
with open('X_train_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_train, f)
with open('X_test_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_test, f)
with open('X_dev_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(X_dev, f)
with open('y_train_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_train, f)
with open('y_dev_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_dev, f)
with open('y_test_{}.pkl'.format(mode), 'wb') as f:
pickle.dump(y_test, f)
def load_data(self, mode):
with open('X_train_{}.pkl'.format(mode), 'rb') as f:
X_train = pickle.load(f)
with open('X_dev_{}.pkl'.format(mode), 'rb') as f:
X_dev = pickle.load(f)
with open('X_test_{}.pkl'.format(mode), 'rb') as f:
X_test = pickle.load(f)
with open('y_train_{}.pkl'.format(mode), 'rb') as f:
y_train = pickle.load(f)
with open('y_dev_{}.pkl'.format(mode), 'rb') as f:
y_dev = pickle.load(f)
with open('y_test_{}.pkl'.format(mode), 'rb') as f:
y_test = pickle.load(f)
return X_train, X_dev, X_test, y_train, y_dev, y_test
def train_model(self, mode, classifier='svm'):
if mode == 'baseline':
c = 1000
classifier = 'svm' #set baseline model to svm always
elif mode == 'improved':
c = 10
else:
raise ValueError('wrong mode to train SVM!!')
X_train, X_dev, X_test, y_train, y_dev, y_test = self.load_data(mode)
if classifier == 'linsvm':
model = LinearSVC(C=c, max_iter=5000, verbose=True) #init sklearn.svm.LinearSVC for "improved" model
elif classifier == 'nb':
model = GaussianNB()
elif classifier == 'svm':
model = SVC(C=c, verbose=True) #init sklearn.svm.SVC
else:
raise ValueError('Wrong model choice! your current model: {}'.format(classifier))
print("start training the {} model!".format(classifier))
start_train = time.time()
if classifier == 'nb':
model.fit(X_train.todense(),y_train)
else:
model.fit(X_train,y_train)
print('total training time: {} seconds'.format(time.time() - start_train))
with open('{}_model_{}.pkl'.format(classifier, mode), 'wb') as f:
pickle.dump(model, f)
self.evaluate_predictions(mode, classifier)
def load_svm_model(self, mode, classifier='svm'):
with open('{}_model_{}.pkl'.format(classifier, mode), 'rb') as f:
model = pickle.load(f)
return model
#required in the lab but not in cw2: only here to test the classification performance
#not required in classification.csv
def accuracy(self, y_true, y_pred):
correct = 0
for true, pred in zip(y_true, y_pred):
if true == pred:
correct += 1
return round(correct/ len(y_true),3)
#initialise metrics dictinary for easier additions
def init_metric_dict(self):
a = Analyse()
lookup = a.init_nd_dict()
for i in range(3):
lookup[i]['tp'] = 0
lookup[i]['fp'] = 0
lookup[i]['fn'] = 0
return lookup
def precision(self, y_true, y_pred):
#initialise metrics dictionary
lookup = self.init_metric_dict()
for true, pred in zip(y_true, y_pred):
if true == pred:
lookup[pred]['tp'] += 1
else:
lookup[pred]['fp'] += 1
precisions = {}
for i in range(3):
precisions[i] = round(lookup[i]['tp'] / (lookup[i]['tp'] + lookup[i]['fp']),3)
precisions['macro'] = round((precisions[0] + precisions[1] + precisions[2])/3,3)
return precisions
def recall(self, y_true, y_pred):
#initialise metrics dictionary
lookup = self.init_metric_dict()
for true, pred in zip(y_true, y_pred):
if true == pred:
lookup[true]['tp'] += 1
else:
lookup[true]['fn'] += 1
recall = {}
for i in range(3):
recall[i] = round(lookup[i]['tp'] / (lookup[i]['tp'] + lookup[i]['fn']), 3)
recall['macro'] = round((recall[0] + recall[1] + recall[2])/3,3)
return recall
def f1_score(self, y_true, y_pred):
precision = self.precision(y_true, y_pred)
recall = self.recall(y_true, y_pred)
f1 = {}
for i in range(3):
f1[i] = round( 2 * (precision[i] * recall[i]) / (precision[i] + recall[i]),3)
f1['macro'] = round((f1[0] + f1[1] + f1[2])/3,3)
return f1
def get_metrics_str(self, mode, split, y_true, y_pred):
#OT = 0, NT = 1, Quran = 2
precision = self.precision(y_true, y_pred)
recall = self.recall(y_true, y_pred)
f1 = self.f1_score(y_true, y_pred)
metrics_string = ''
metrics_string += mode + ',' + split+',' #add system and split
metrics_string += str(precision[2]) + ',' + str(recall[2]) + ',' + str(f1[2]) + ',' #add p, r, f of Quran
metrics_string += str(precision[0]) + ',' + str(recall[0]) + ',' + str(f1[0]) + ',' #add p, r, f of OT
metrics_string += str(precision[1]) + ',' + str(recall[1]) + ',' + str(f1[1]) + ',' #add p, r, f of NT
metrics_string += str(precision['macro']) + ',' + str(recall['macro']) + ',' + str(f1['macro'])
return metrics_string
def evaluate_predictions(self, mode, classifier='svm'):
model = self.load_svm_model(mode, classifier)
X_train, X_dev, X_test, y_train, y_dev, y_test = self.load_data(mode)
if classifier == 'nb':
y_train_pred = model.predict(X_train.todense())
y_dev_pred = model.predict(X_dev.todense())
y_test_pred = model.predict(X_test.todense())
else:
y_train_pred = model.predict(X_train)
y_dev_pred = model.predict(X_dev)
y_test_pred = model.predict(X_test)
with open('classification.csv', 'a') as f:
f.write('system,split,p-quran,r-quran,f-quran,p-ot,r-ot,f-ot,p-nt,r-nt,f-nt,p-macro,r-macro,f-macro\n')
f.write(self.get_metrics_str(mode, 'train', y_train, y_train_pred) + '\n')
f.write(self.get_metrics_str(mode, 'dev', y_dev, y_dev_pred) + '\n')
f.write(self.get_metrics_str(mode, 'test', y_test, y_test_pred) + '\n')
f.write('\n')
f.write(classification_report(y_train, y_train_pred))
f.write(classification_report(y_dev, y_dev_pred))
f.write(classification_report(y_test, y_test_pred))
a = Analyse()
# corp = a.create_corpus()
# corp = a.load_corpus()
# print(len(corp['ot'].keys()) + len(corp['nt'].keys()) + len(corp['quran'].keys()))
# print(a.get_mi_counts(1, 3))
# a.run_calculation('mi')
# a.run_calculation('chi')
# a.sort_result('mi')
# a.sort_result('chi')
# a.train_lda(k=20)
# a.lda_calc_average_score()
# a.find_top_tokens()
c = Classifier()
modes = ['baseline', 'improved']
m = 1
mode = modes[m]
# c.prepare_data(mode)
c.train_model(mode, 'linsvm') | en | 0.567732 | #my preprocessing module from coursework 1 # using a rather unique structure to run faster # vocab[word] = word_index #convert word list to dictionary for speeding purposes # replace \n with a space, and if that creates a double space, replace it with a single space # trim # tokenise #arbitrarily limit word length for better accuracy (heuristic for lemmitisation) #preprocess 1-d list of text #trim #tokenise #remove stop words # words_dup_nostop = self.remove_stopwords(words_dup) # """normalisation""" # arbitrary cut to 4 chars if word length is longer than 5 #remove empty quotation marks ('') #preprocess 2-d list of text # get counts to calculate mutual information # find "non-current" class # len(c0) is always 2 # investigate document in the given class # investigate documents in other classes # calculate mutual information given all 4 counts # run mi or chi calculation #helper function for get_lda_corpus # RETURNS: 2d list of documents based on self.corpus # add the three corpus as one # format the existing corpus "self.corpus" to fit in the gensim's LDA model. 
# r = randrange(100) # print(r) # save lda model #add results for each corpus to get average score for each topic #set probability to 0 if a topic probability does not appear #calculate average probability 1) sum up the values #calculate average probability 2) average the values by the total number of documents in each corpus #sort each corpus by the probability of each topic candidate #extract token ides from a string returned from lda.print_topic() #get token ID : word dictionary to retrieve words #find key tokens for each corpus #zip the count matrix and labels #shuffle the cm-label tuples #if training set is given, split to training and validation #unzip the list of tuples of [(dense_matrix, labels)] #convert back to sparse matrix from dense ####collect words from raw text##################################################################### #################################################################################################### #create vocabulary using the docs # convert docs to be in dictionary form and create vocab # encode corpus labels; ot=0, nt=1, quran=2 #create vocabulary using training data: test data doesn't effect the vocab #save shuffled and splitted data to disk #set baseline model to svm always #init sklearn.svm.LinearSVC for "improved" model #init sklearn.svm.SVC #required in the lab but not in cw2: only here to test the classification performance #not required in classification.csv #initialise metrics dictinary for easier additions #initialise metrics dictionary #initialise metrics dictionary #OT = 0, NT = 1, Quran = 2 #add system and split #add p, r, f of Quran #add p, r, f of OT #add p, r, f of NT # corp = a.create_corpus() # corp = a.load_corpus() # print(len(corp['ot'].keys()) + len(corp['nt'].keys()) + len(corp['quran'].keys())) # print(a.get_mi_counts(1, 3)) # a.run_calculation('mi') # a.run_calculation('chi') # a.sort_result('mi') # a.sort_result('chi') # a.train_lda(k=20) # a.lda_calc_average_score() # a.find_top_tokens() # 
c.prepare_data(mode) | 2.49675 | 2 |
linkograph/tests/testDynamic.py | mikiec84/linkshop | 6 | 6624221 | <filename>linkograph/tests/testDynamic.py
#!/usr/bin/env python3
"""Tests the enumeration.py package."""
import unittest
from linkograph import linkoCreate # For creating linkographs.
from linkograph import dynamic # The package under test.
class Test_addNode(unittest.TestCase):
"""Basic unit tests for addNode using size 4."""
def setUp(self):
"""Set up the parameters for the individual tests."""
ontology = {'A': ['C', 'D'],
'B': ['C'],
'C': ['B'],
'D': ['B', 'C']}
trivialLinko = linkoCreate.Linkograph(
[], [])
linko0 = linkoCreate.Linkograph(
[({'A'}, set(), set())],
['A'])
linko1 = linkoCreate.Linkograph(
[({'A'}, set(), {1}),
({'D'}, {0}, set())],
['A', 'D'])
linko2 = linkoCreate.Linkograph(
[({'A'}, set(), {1}),
({'D'}, {0}, {2}),
({'B'}, {1}, set())],
['A', 'D', 'B'])
linko3 = linkoCreate.Linkograph(
[({'A'}, set(), {1,3}),
({'D'}, {0}, {2,3}),
({'B'}, {1}, {3}),
({'C'}, {0,1,2}, set())],
['A', 'D', 'B', 'C'])
linko4 = linkoCreate.Linkograph(
[({'D'}, set(), {1,2,3}),
({'B'}, {0}, {2}),
({'C'}, {0,1}, {3}),
({'B'}, {0,2}, set())],
['A', 'D', 'B', 'C'])
if self.id().split('.')[-1] == 'test_addNodeSize4':
self.testParams = [
{'linko': trivialLinko,
'newLabels': {'A'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko0},
{'linko': linko0,
'newLabels': {'D'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko1},
{'linko': linko1,
'newLabels': {'B'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko2},
{'linko': linko2,
'newLabels': {'C'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko3},
{'linko': linko3,
'newLabels': {'B'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko4},
]
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for (testNum, params) in enumerate(self.testParams):
actualLinkograph = dynamic.addNode(params['linko'],
params['newLabels'],
params['ontology'],
params['size'])
self.assertEqual(
actualLinkograph,
params['ExpectedLinkograph'],
(" linko = {}\n"
" newLabels = {}\n"
" ontology = {}\n"
" size = {}\n"
" actualLinkograph = {}\n"
" ExpectedLinkograph = {}\n")
.format(params['linko'],
params['newLabels'],
params['ontology'],
params['size'],
actualLinkograph,
params['ExpectedLinkograph']))
def test_addNodeSize4(self):
"""Tests the addNode function with a size of 4."""
self.performTestForParams()
| <filename>linkograph/tests/testDynamic.py
#!/usr/bin/env python3
"""Tests the enumeration.py package."""
import unittest
from linkograph import linkoCreate # For creating linkographs.
from linkograph import dynamic # The package under test.
class Test_addNode(unittest.TestCase):
"""Basic unit tests for addNode using size 4."""
def setUp(self):
"""Set up the parameters for the individual tests."""
ontology = {'A': ['C', 'D'],
'B': ['C'],
'C': ['B'],
'D': ['B', 'C']}
trivialLinko = linkoCreate.Linkograph(
[], [])
linko0 = linkoCreate.Linkograph(
[({'A'}, set(), set())],
['A'])
linko1 = linkoCreate.Linkograph(
[({'A'}, set(), {1}),
({'D'}, {0}, set())],
['A', 'D'])
linko2 = linkoCreate.Linkograph(
[({'A'}, set(), {1}),
({'D'}, {0}, {2}),
({'B'}, {1}, set())],
['A', 'D', 'B'])
linko3 = linkoCreate.Linkograph(
[({'A'}, set(), {1,3}),
({'D'}, {0}, {2,3}),
({'B'}, {1}, {3}),
({'C'}, {0,1,2}, set())],
['A', 'D', 'B', 'C'])
linko4 = linkoCreate.Linkograph(
[({'D'}, set(), {1,2,3}),
({'B'}, {0}, {2}),
({'C'}, {0,1}, {3}),
({'B'}, {0,2}, set())],
['A', 'D', 'B', 'C'])
if self.id().split('.')[-1] == 'test_addNodeSize4':
self.testParams = [
{'linko': trivialLinko,
'newLabels': {'A'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko0},
{'linko': linko0,
'newLabels': {'D'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko1},
{'linko': linko1,
'newLabels': {'B'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko2},
{'linko': linko2,
'newLabels': {'C'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko3},
{'linko': linko3,
'newLabels': {'B'},
'ontology': ontology,
'size': 4,
'ExpectedLinkograph': linko4},
]
def performTestForParams(self):
""""Performs the tests for each set of parameters."""
for (testNum, params) in enumerate(self.testParams):
actualLinkograph = dynamic.addNode(params['linko'],
params['newLabels'],
params['ontology'],
params['size'])
self.assertEqual(
actualLinkograph,
params['ExpectedLinkograph'],
(" linko = {}\n"
" newLabels = {}\n"
" ontology = {}\n"
" size = {}\n"
" actualLinkograph = {}\n"
" ExpectedLinkograph = {}\n")
.format(params['linko'],
params['newLabels'],
params['ontology'],
params['size'],
actualLinkograph,
params['ExpectedLinkograph']))
def test_addNodeSize4(self):
"""Tests the addNode function with a size of 4."""
self.performTestForParams()
| en | 0.583705 | #!/usr/bin/env python3 Tests the enumeration.py package. # For creating linkographs. # The package under test. Basic unit tests for addNode using size 4. Set up the parameters for the individual tests. "Performs the tests for each set of parameters. Tests the addNode function with a size of 4. | 2.676354 | 3 |
pirev/__init__.py | samjrdn/pirev-python | 0 | 6624222 | <filename>pirev/__init__.py
name = "pirev"
| <filename>pirev/__init__.py
name = "pirev"
| none | 1 | 1.089721 | 1 | |
Run_DNet.py | Matt-Golightly/MDS_Kara_One | 2 | 6624223 | <filename>Run_DNet.py
'''
This is used to repeatedly run DenseNet_One_vs_Rest2.py for the many variations of GAF, data sets, words, and subjects
'''
import os
subjects = ['MM05', 'MM08', 'MM09', 'MM10', 'MM11', 'MM12', 'MM14', 'MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21', 'P02']
for subject in subjects:
for gaf in ['GASF', 'GADF']: #['GASF', 'GADF']
for method in ['DTCWT', 'FILTERED', 'RAW', 'ICA']: # type of image method, ['DTCWT', 'FILTERED', 'RAW', 'ICA']
for word in ['gnaw', 'knew', 'pat', 'pot']: #['gnaw', 'knew', 'pat', 'pot']
os.system("python DenseNet_One_vs_Rest2.py {} {} {} {}".format(gaf, word, method, subject))
| <filename>Run_DNet.py
'''
This is used to repeatedly run DenseNet_One_vs_Rest2.py for the many variations of GAF, data sets, words, and subjects
'''
import os
subjects = ['MM05', 'MM08', 'MM09', 'MM10', 'MM11', 'MM12', 'MM14', 'MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21', 'P02']
for subject in subjects:
for gaf in ['GASF', 'GADF']: #['GASF', 'GADF']
for method in ['DTCWT', 'FILTERED', 'RAW', 'ICA']: # type of image method, ['DTCWT', 'FILTERED', 'RAW', 'ICA']
for word in ['gnaw', 'knew', 'pat', 'pot']: #['gnaw', 'knew', 'pat', 'pot']
os.system("python DenseNet_One_vs_Rest2.py {} {} {} {}".format(gaf, word, method, subject))
| en | 0.7866 | This is used to repeatedly run DenseNet_One_vs_Rest2.py for the many variations of GAF, data sets, words, and subjects #['GASF', 'GADF'] # type of image method, ['DTCWT', 'FILTERED', 'RAW', 'ICA'] #['gnaw', 'knew', 'pat', 'pot'] | 2.806286 | 3 |
chevah/compat/tests/normal/test_capabilities.py | chevah/compat | 5 | 6624224 | <reponame>chevah/compat
# -*- coding: utf-8 -*-
# Copyright (c) 2011 <NAME>.
# See LICENSE for details.
"""
Test for platform capabilities detection.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
try:
import win32security
except ImportError:
pass
from zope.interface.verify import verifyObject
from chevah.compat import process_capabilities
from chevah.compat.exceptions import AdjustPrivilegeException
from chevah.compat.interfaces import IProcessCapabilities
from chevah.compat.testing import conditionals, CompatTestCase, mk
@conditionals.onOSFamily('posix')
class TestProcessCapabilitiesPosix(CompatTestCase):
"""
Unit tests for process capabilities executed on Posix platforms.
"""
def setUp(self):
super(TestProcessCapabilitiesPosix, self).setUp()
self.capabilities = process_capabilities
def test_init(self):
"""
Check ProcessCapabilities initialization.
"""
verifyObject(IProcessCapabilities, self.capabilities)
def test_impersonate_local_account(self):
"""
When running under normal account, impersonation is always False
on Unix.
"""
result = self.capabilities.impersonate_local_account
self.assertFalse(result)
def test_create_home_folder(self):
"""
When running under normal account, we can not create home folders
on Unix.
"""
result = self.capabilities.create_home_folder
self.assertFalse(result)
def test_get_home_folder(self):
"""
On Unix we can always get home home folder.
On Windows, only Windows 2008 and Windows 7 can get home folder path.
"""
result = self.capabilities.get_home_folder
self.assertTrue(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check getCurrentPrivilegesDescription.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertEqual(u'root capabilities disabled.', text)
def test_pam(self):
"""
PAM is supported on Linux/Unix.
"""
if self.os_name == 'hpux':
# PAM is not supported on HPUX.
self.assertFalse(self.capabilities.pam)
elif self.os_name == 'openbsd':
# OpenBSD does not has PAM by default.
self.assertFalse(self.capabilities.pam)
elif self.os_version == 'alpine-3':
# We don't bother about PAM on Alpine.
self.assertFalse(self.capabilities.pam)
else:
self.assertTrue(self.capabilities.pam)
def test_symbolic_link(self):
"""
Support on all Unix.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertTrue(symbolic_link)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilities(CompatTestCase):
"""
Capability tests executed only on Windows slaves.
"""
def setUp(self):
super(TestNTProcessCapabilities, self).setUp()
self.capabilities = process_capabilities
def test_init(self):
"""
Check ProcessCapabilities initialization.
"""
verifyObject(IProcessCapabilities, self.capabilities)
def test_openProcess_success(self):
"""
_openProcess can be used for process token for the current
process having a specified mode enabled.
"""
with self.capabilities._openProcess(win32security.TOKEN_QUERY) as (
process_token):
self.assertIsNotNone(process_token)
def test_isPrivilegeEnabled_absent(self):
"""
Returns False for a privilege which is not present.
"""
# By default SE_RELABEL_NAME should not be available to test
# accounts.
privilege = win32security.SE_RELABEL_NAME
self.assertEqual(
u'absent', self.capabilities._getPrivilegeState(privilege))
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivileges_invalid_privilege(self):
"""
It raises an exception when an invalid privilege name is requested.
"""
with self.assertRaises(AdjustPrivilegeException):
with (self.capabilities._elevatePrivileges(
win32security.SE_IMPERSONATE_NAME,
'no-such-privilege-name',
)):
pass # pragma: no cover
def test_pam(self):
"""
PAM is not supported on Windows
"""
self.assertFalse(self.capabilities.pam)
@conditionals.onCIName([CompatTestCase.CI.LOCAL, CompatTestCase.CI.BUILDBOT])
@conditionals.onAdminPrivileges(False)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilitiesNormalUser(CompatTestCase):
"""
Capability tests executed only on Windows slaves that are configured to
run without administrator rights.
These tests are only valid on local OS or on Buildbot where we have
a VM configured in a very specific way.
"""
def setUp(self):
super(TestNTProcessCapabilitiesNormalUser, self).setUp()
self.capabilities = process_capabilities
def test_getAvailablePrivileges(self):
"""
Return a list with privileges and state value.
"""
result = self.capabilities._getAvailablePrivileges()
self.assertIsNotEmpty(result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_CHANGE_NOTIFY_NAME)
self.assertContains((privilege, 3), result)
def test_getPrivilegeState_invalid(self):
"""
Return `absent` for unknown names.
"""
privilege = mk.getUniqueString()
result = self.capabilities._getPrivilegeState(privilege)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_absent(self):
"""
Return `absent` for privileges which are not attached to current
process.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_ASSIGNPRIMARYTOKEN_NAME)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_present(self):
"""
Return `absent` for privileges which are attached to current
process but are not enabled.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_SECURITY_NAME)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_enabled_default(self):
"""
Return `absent` for privileges which are attached to
current process but are not enabled by default.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_IMPERSONATE_NAME)
self.assertEqual(u'absent', result)
def test_isPrivilegeEnabled_enabled(self):
"""
Returns False for a privilege which is present and is not enabled.
"""
# We use SE_IMPERSONATE privilege as it is enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_isPrivilegeEnabled_disabled(self):
"""
Returns False for a privilege which is present but disabled.
"""
# By default SE_SECURITY_NAME privilege is disabled.
privilege = win32security.SE_SECURITY_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_symbolic_link(self):
"""
Not supported on Windows without elevated permissions.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertFalse(symbolic_link)
def test_impersonate_local_account_windows(self):
"""
Impersonation is not available when running as a normal user.
"""
result = self.capabilities.impersonate_local_account
self.assertFalse(result)
def test_create_home_folder(self):
"""
On Windows home folders can be created if SE_BACKUP and SE_RESTORE
privileges are available for the process.
"""
result = self.capabilities.create_home_folder
if self.ci_name == self.CI.BUILDBOT and self.TEST_LANGUAGE != 'FR':
# Only buildbot slaves are setup with "Backup Operators"
# group (SE_BACKUP/SE_RESTORE) enabled.
# But not the I18N slave.
self.assertTrue(result)
else:
self.assertFalse(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check getCurrentPrivilegesDescription.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertContains('SeChangeNotifyPrivilege:3', text)
self.assertNotContains('SeCreateSymbolicLinkPrivilege', text)
self.assertNotContains('SeImpersonatePrivilege', text)
# Slave which don't run as admin have no SE_BACKUP/SE_RESTORE
self.assertNotContains('SeBackupPrivilege:0', text)
self.assertNotContains('SeRestorePrivilege', text)
if self.os_version in ['nt-5.1']:
# Windows XP has SE_CREATE_GLOBAL enabled even when
# running without administrator privileges.
self.assertContains('SeCreateGlobalPrivilege:3', text)
else:
# Windows 2003 is not admin.
self.assertNotContains('SeCreateGlobalPrivilege', text)
@conditionals.onAdminPrivileges(True)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilitiesAdministrator(CompatTestCase):
"""
Capability tests executed only on Windows slaves that are configured to
run with administrator rights.
"""
def setUp(self):
super(TestNTProcessCapabilitiesAdministrator, self).setUp()
self.capabilities = process_capabilities
def test_getAvailablePrivileges(self):
"""
Return a list with privileges and state value.
"""
result = self.capabilities._getAvailablePrivileges()
self.assertIsNotEmpty(result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_SECURITY_NAME)
self.assertContains((privilege, 0), result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_IMPERSONATE_NAME)
self.assertContains((privilege, 3), result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_CREATE_SYMBOLIC_LINK_NAME)
self.assertContains((privilege, 0), result)
def test_getPrivilegeState_present(self):
"""
Return `present` for privileges which are attached to current
process but are not enabled.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_SECURITY_NAME)
self.assertEqual(u'present', result)
def test_getPrivilegeState_enabled_default(self):
"""
Return `enabled-by-default` for privileges which are attached to
current process but are enabled by default.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_IMPERSONATE_NAME)
self.assertEqual(u'enabled-by-default', result)
def test_isPrivilegeEnabled_enabled(self):
"""
Returns True for a privilege which is present and is enabled.
"""
# We use SE_IMPERSONATE privilege as it is enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
def test_adjustPrivilege_success(self):
"""
Turning SE_BACKUP privilege on/off for the current process when
running as super user.
"""
initial_state = self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME)
self.capabilities._adjustPrivilege(
win32security.SE_BACKUP_NAME, False)
self.assertIsFalse(self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME))
self.capabilities._adjustPrivilege(
win32security.SE_BACKUP_NAME, initial_state)
self.assertEquals(
initial_state,
self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME),
)
def test_elevatePrivileges_take_ownership_success(self):
"""
elevatePrivileges is a context manager which will elevate the
privileges for current process upon entering the context,
and restore them on exit.
"""
# We use SE_TAKE_OWNERSHIP privilege as it should be present for
# super user and disabled by default.
privilege = win32security.SE_TAKE_OWNERSHIP_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
with (self.capabilities._elevatePrivileges(privilege)):
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
# We should be able to take ownership again.
with (self.capabilities._elevatePrivileges(privilege)):
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivilege_impersonate_unchanged(self):
"""
elevatePrivilege will not modify the process if the privilege is
already enabled.
"""
# We use SE_IMPERSONATE as it should be enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
capabilities = self.capabilities
with self.Patch.object(capabilities, '_adjustPrivilege') as method:
with (capabilities._elevatePrivileges(privilege)):
self.assertFalse(method.called)
self.assertTrue(capabilities._isPrivilegeEnabled(privilege))
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivilege_multiple_privileges_success(self):
"""
elevatePrivileges supports a variable list of privilege name
arguments and will make sure all of them are enabled.
"""
# We use SE_IMPERSONATE as it is enabled by default
# We also use SE_TAKE_OWNERSHIP as it is disabled by default but can
# be enabled when running as super user.
take_ownership = win32security.SE_TAKE_OWNERSHIP_NAME
impersonate = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(impersonate))
self.assertFalse(
self.capabilities._isPrivilegeEnabled(take_ownership))
capabilities = self.capabilities
with (capabilities._elevatePrivileges(take_ownership, impersonate)):
self.assertTrue(
self.capabilities._isPrivilegeEnabled(impersonate))
self.assertTrue(
self.capabilities._isPrivilegeEnabled(take_ownership))
self.assertTrue(self.capabilities._isPrivilegeEnabled(impersonate))
self.assertFalse(
self.capabilities._isPrivilegeEnabled(take_ownership))
def test_symbolic_link(self):
"""
Supported on Vista and above.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertTrue(symbolic_link)
def test_get_home_folder(self):
"""
The home folder can be retrieved.
"""
result = self.capabilities.get_home_folder
self.assertTrue(result)
def test_create_home_folder(self):
"""
On Windows home folders can be created if required privileges
are configured for the process.
"""
result = self.capabilities.create_home_folder
self.assertTrue(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check that SE_CHANGE_NOTIFY_NAME, SE_IMPERSONATE_NAME and
SE_CREATE_GLOBAL_NAME are all present in the privileges description
and enabled.
Check that SE_CREATE_SYMBOLIC_LINK_NAME is present in the privileges
description and it's disabled.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertContains('SeChangeNotifyPrivilege:3', text)
self.assertContains('SeCreateSymbolicLinkPrivilege:0', text)
self.assertContains('SeImpersonatePrivilege:3', text)
self.assertContains('SeCreateGlobalPrivilege:3', text)
| # -*- coding: utf-8 -*-
# Copyright (c) 2011 <NAME>.
# See LICENSE for details.
"""
Test for platform capabilities detection.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
try:
import win32security
except ImportError:
pass
from zope.interface.verify import verifyObject
from chevah.compat import process_capabilities
from chevah.compat.exceptions import AdjustPrivilegeException
from chevah.compat.interfaces import IProcessCapabilities
from chevah.compat.testing import conditionals, CompatTestCase, mk
@conditionals.onOSFamily('posix')
class TestProcessCapabilitiesPosix(CompatTestCase):
"""
Unit tests for process capabilities executed on Posix platforms.
"""
def setUp(self):
super(TestProcessCapabilitiesPosix, self).setUp()
self.capabilities = process_capabilities
def test_init(self):
"""
Check ProcessCapabilities initialization.
"""
verifyObject(IProcessCapabilities, self.capabilities)
def test_impersonate_local_account(self):
"""
When running under normal account, impersonation is always False
on Unix.
"""
result = self.capabilities.impersonate_local_account
self.assertFalse(result)
def test_create_home_folder(self):
"""
When running under normal account, we can not create home folders
on Unix.
"""
result = self.capabilities.create_home_folder
self.assertFalse(result)
def test_get_home_folder(self):
"""
On Unix we can always get home home folder.
On Windows, only Windows 2008 and Windows 7 can get home folder path.
"""
result = self.capabilities.get_home_folder
self.assertTrue(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check getCurrentPrivilegesDescription.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertEqual(u'root capabilities disabled.', text)
def test_pam(self):
"""
PAM is supported on Linux/Unix.
"""
if self.os_name == 'hpux':
# PAM is not supported on HPUX.
self.assertFalse(self.capabilities.pam)
elif self.os_name == 'openbsd':
# OpenBSD does not has PAM by default.
self.assertFalse(self.capabilities.pam)
elif self.os_version == 'alpine-3':
# We don't bother about PAM on Alpine.
self.assertFalse(self.capabilities.pam)
else:
self.assertTrue(self.capabilities.pam)
def test_symbolic_link(self):
"""
Support on all Unix.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertTrue(symbolic_link)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilities(CompatTestCase):
"""
Capability tests executed only on Windows slaves.
"""
def setUp(self):
super(TestNTProcessCapabilities, self).setUp()
self.capabilities = process_capabilities
def test_init(self):
"""
Check ProcessCapabilities initialization.
"""
verifyObject(IProcessCapabilities, self.capabilities)
def test_openProcess_success(self):
"""
_openProcess can be used for process token for the current
process having a specified mode enabled.
"""
with self.capabilities._openProcess(win32security.TOKEN_QUERY) as (
process_token):
self.assertIsNotNone(process_token)
def test_isPrivilegeEnabled_absent(self):
"""
Returns False for a privilege which is not present.
"""
# By default SE_RELABEL_NAME should not be available to test
# accounts.
privilege = win32security.SE_RELABEL_NAME
self.assertEqual(
u'absent', self.capabilities._getPrivilegeState(privilege))
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivileges_invalid_privilege(self):
"""
It raises an exception when an invalid privilege name is requested.
"""
with self.assertRaises(AdjustPrivilegeException):
with (self.capabilities._elevatePrivileges(
win32security.SE_IMPERSONATE_NAME,
'no-such-privilege-name',
)):
pass # pragma: no cover
def test_pam(self):
"""
PAM is not supported on Windows
"""
self.assertFalse(self.capabilities.pam)
@conditionals.onCIName([CompatTestCase.CI.LOCAL, CompatTestCase.CI.BUILDBOT])
@conditionals.onAdminPrivileges(False)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilitiesNormalUser(CompatTestCase):
"""
Capability tests executed only on Windows slaves that are configured to
run without administrator rights.
These tests are only valid on local OS or on Buildbot where we have
a VM configured in a very specific way.
"""
def setUp(self):
super(TestNTProcessCapabilitiesNormalUser, self).setUp()
self.capabilities = process_capabilities
def test_getAvailablePrivileges(self):
"""
Return a list with privileges and state value.
"""
result = self.capabilities._getAvailablePrivileges()
self.assertIsNotEmpty(result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_CHANGE_NOTIFY_NAME)
self.assertContains((privilege, 3), result)
def test_getPrivilegeState_invalid(self):
"""
Return `absent` for unknown names.
"""
privilege = mk.getUniqueString()
result = self.capabilities._getPrivilegeState(privilege)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_absent(self):
"""
Return `absent` for privileges which are not attached to current
process.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_ASSIGNPRIMARYTOKEN_NAME)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_present(self):
"""
Return `absent` for privileges which are attached to current
process but are not enabled.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_SECURITY_NAME)
self.assertEqual(u'absent', result)
def test_getPrivilegeState_enabled_default(self):
"""
Return `absent` for privileges which are attached to
current process but are not enabled by default.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_IMPERSONATE_NAME)
self.assertEqual(u'absent', result)
def test_isPrivilegeEnabled_enabled(self):
"""
Returns False for a privilege which is present and is not enabled.
"""
# We use SE_IMPERSONATE privilege as it is enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_isPrivilegeEnabled_disabled(self):
"""
Returns False for a privilege which is present but disabled.
"""
# By default SE_SECURITY_NAME privilege is disabled.
privilege = win32security.SE_SECURITY_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_symbolic_link(self):
"""
Not supported on Windows without elevated permissions.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertFalse(symbolic_link)
def test_impersonate_local_account_windows(self):
"""
Impersonation is not available when running as a normal user.
"""
result = self.capabilities.impersonate_local_account
self.assertFalse(result)
def test_create_home_folder(self):
"""
On Windows home folders can be created if SE_BACKUP and SE_RESTORE
privileges are available for the process.
"""
result = self.capabilities.create_home_folder
if self.ci_name == self.CI.BUILDBOT and self.TEST_LANGUAGE != 'FR':
# Only buildbot slaves are setup with "Backup Operators"
# group (SE_BACKUP/SE_RESTORE) enabled.
# But not the I18N slave.
self.assertTrue(result)
else:
self.assertFalse(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check getCurrentPrivilegesDescription.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertContains('SeChangeNotifyPrivilege:3', text)
self.assertNotContains('SeCreateSymbolicLinkPrivilege', text)
self.assertNotContains('SeImpersonatePrivilege', text)
# Slave which don't run as admin have no SE_BACKUP/SE_RESTORE
self.assertNotContains('SeBackupPrivilege:0', text)
self.assertNotContains('SeRestorePrivilege', text)
if self.os_version in ['nt-5.1']:
# Windows XP has SE_CREATE_GLOBAL enabled even when
# running without administrator privileges.
self.assertContains('SeCreateGlobalPrivilege:3', text)
else:
# Windows 2003 is not admin.
self.assertNotContains('SeCreateGlobalPrivilege', text)
@conditionals.onAdminPrivileges(True)
@conditionals.onOSFamily('nt')
class TestNTProcessCapabilitiesAdministrator(CompatTestCase):
"""
Capability tests executed only on Windows slaves that are configured to
run with administrator rights.
"""
def setUp(self):
super(TestNTProcessCapabilitiesAdministrator, self).setUp()
self.capabilities = process_capabilities
def test_getAvailablePrivileges(self):
"""
Return a list with privileges and state value.
"""
result = self.capabilities._getAvailablePrivileges()
self.assertIsNotEmpty(result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_SECURITY_NAME)
self.assertContains((privilege, 0), result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_IMPERSONATE_NAME)
self.assertContains((privilege, 3), result)
privilege = self.capabilities._getPrivilegeID(
win32security.SE_CREATE_SYMBOLIC_LINK_NAME)
self.assertContains((privilege, 0), result)
def test_getPrivilegeState_present(self):
"""
Return `present` for privileges which are attached to current
process but are not enabled.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_SECURITY_NAME)
self.assertEqual(u'present', result)
def test_getPrivilegeState_enabled_default(self):
"""
Return `enabled-by-default` for privileges which are attached to
current process but are enabled by default.
"""
result = self.capabilities._getPrivilegeState(
win32security.SE_IMPERSONATE_NAME)
self.assertEqual(u'enabled-by-default', result)
def test_isPrivilegeEnabled_enabled(self):
"""
Returns True for a privilege which is present and is enabled.
"""
# We use SE_IMPERSONATE privilege as it is enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
def test_adjustPrivilege_success(self):
"""
Turning SE_BACKUP privilege on/off for the current process when
running as super user.
"""
initial_state = self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME)
self.capabilities._adjustPrivilege(
win32security.SE_BACKUP_NAME, False)
self.assertIsFalse(self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME))
self.capabilities._adjustPrivilege(
win32security.SE_BACKUP_NAME, initial_state)
self.assertEquals(
initial_state,
self.capabilities._isPrivilegeEnabled(
win32security.SE_BACKUP_NAME),
)
def test_elevatePrivileges_take_ownership_success(self):
"""
elevatePrivileges is a context manager which will elevate the
privileges for current process upon entering the context,
and restore them on exit.
"""
# We use SE_TAKE_OWNERSHIP privilege as it should be present for
# super user and disabled by default.
privilege = win32security.SE_TAKE_OWNERSHIP_NAME
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
with (self.capabilities._elevatePrivileges(privilege)):
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
# We should be able to take ownership again.
with (self.capabilities._elevatePrivileges(privilege)):
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
self.assertFalse(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivilege_impersonate_unchanged(self):
"""
elevatePrivilege will not modify the process if the privilege is
already enabled.
"""
# We use SE_IMPERSONATE as it should be enabled by default.
privilege = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
capabilities = self.capabilities
with self.Patch.object(capabilities, '_adjustPrivilege') as method:
with (capabilities._elevatePrivileges(privilege)):
self.assertFalse(method.called)
self.assertTrue(capabilities._isPrivilegeEnabled(privilege))
self.assertTrue(self.capabilities._isPrivilegeEnabled(privilege))
def test_elevatePrivilege_multiple_privileges_success(self):
"""
elevatePrivileges supports a variable list of privilege name
arguments and will make sure all of them are enabled.
"""
# We use SE_IMPERSONATE as it is enabled by default
# We also use SE_TAKE_OWNERSHIP as it is disabled by default but can
# be enabled when running as super user.
take_ownership = win32security.SE_TAKE_OWNERSHIP_NAME
impersonate = win32security.SE_IMPERSONATE_NAME
self.assertTrue(self.capabilities._isPrivilegeEnabled(impersonate))
self.assertFalse(
self.capabilities._isPrivilegeEnabled(take_ownership))
capabilities = self.capabilities
with (capabilities._elevatePrivileges(take_ownership, impersonate)):
self.assertTrue(
self.capabilities._isPrivilegeEnabled(impersonate))
self.assertTrue(
self.capabilities._isPrivilegeEnabled(take_ownership))
self.assertTrue(self.capabilities._isPrivilegeEnabled(impersonate))
self.assertFalse(
self.capabilities._isPrivilegeEnabled(take_ownership))
def test_symbolic_link(self):
"""
Supported on Vista and above.
"""
symbolic_link = self.capabilities.symbolic_link
self.assertTrue(symbolic_link)
def test_get_home_folder(self):
"""
The home folder can be retrieved.
"""
result = self.capabilities.get_home_folder
self.assertTrue(result)
def test_create_home_folder(self):
"""
On Windows home folders can be created if required privileges
are configured for the process.
"""
result = self.capabilities.create_home_folder
self.assertTrue(result)
def test_getCurrentPrivilegesDescription(self):
"""
Check that SE_CHANGE_NOTIFY_NAME, SE_IMPERSONATE_NAME and
SE_CREATE_GLOBAL_NAME are all present in the privileges description
and enabled.
Check that SE_CREATE_SYMBOLIC_LINK_NAME is present in the privileges
description and it's disabled.
"""
text = self.capabilities.getCurrentPrivilegesDescription()
self.assertContains('SeChangeNotifyPrivilege:3', text)
self.assertContains('SeCreateSymbolicLinkPrivilege:0', text)
self.assertContains('SeImpersonatePrivilege:3', text)
self.assertContains('SeCreateGlobalPrivilege:3', text) | en | 0.868782 | # -*- coding: utf-8 -*- # Copyright (c) 2011 <NAME>. # See LICENSE for details. Test for platform capabilities detection. Unit tests for process capabilities executed on Posix platforms. Check ProcessCapabilities initialization. When running under normal account, impersonation is always False on Unix. When running under normal account, we can not create home folders on Unix. On Unix we can always get home home folder. On Windows, only Windows 2008 and Windows 7 can get home folder path. Check getCurrentPrivilegesDescription. PAM is supported on Linux/Unix. # PAM is not supported on HPUX. # OpenBSD does not has PAM by default. # We don't bother about PAM on Alpine. Support on all Unix. Capability tests executed only on Windows slaves. Check ProcessCapabilities initialization. _openProcess can be used for process token for the current process having a specified mode enabled. Returns False for a privilege which is not present. # By default SE_RELABEL_NAME should not be available to test # accounts. It raises an exception when an invalid privilege name is requested. # pragma: no cover PAM is not supported on Windows Capability tests executed only on Windows slaves that are configured to run without administrator rights. These tests are only valid on local OS or on Buildbot where we have a VM configured in a very specific way. Return a list with privileges and state value. Return `absent` for unknown names. Return `absent` for privileges which are not attached to current process. Return `absent` for privileges which are attached to current process but are not enabled. Return `absent` for privileges which are attached to current process but are not enabled by default. Returns False for a privilege which is present and is not enabled. # We use SE_IMPERSONATE privilege as it is enabled by default. Returns False for a privilege which is present but disabled. 
# By default SE_SECURITY_NAME privilege is disabled. Not supported on Windows without elevated permissions. Impersonation is not available when running as a normal user. On Windows home folders can be created if SE_BACKUP and SE_RESTORE privileges are available for the process. # Only buildbot slaves are setup with "Backup Operators" # group (SE_BACKUP/SE_RESTORE) enabled. # But not the I18N slave. Check getCurrentPrivilegesDescription. # Slave which don't run as admin have no SE_BACKUP/SE_RESTORE # Windows XP has SE_CREATE_GLOBAL enabled even when # running without administrator privileges. # Windows 2003 is not admin. Capability tests executed only on Windows slaves that are configured to run with administrator rights. Return a list with privileges and state value. Return `present` for privileges which are attached to current process but are not enabled. Return `enabled-by-default` for privileges which are attached to current process but are enabled by default. Returns True for a privilege which is present and is enabled. # We use SE_IMPERSONATE privilege as it is enabled by default. Turning SE_BACKUP privilege on/off for the current process when running as super user. elevatePrivileges is a context manager which will elevate the privileges for current process upon entering the context, and restore them on exit. # We use SE_TAKE_OWNERSHIP privilege as it should be present for # super user and disabled by default. # We should be able to take ownership again. elevatePrivilege will not modify the process if the privilege is already enabled. # We use SE_IMPERSONATE as it should be enabled by default. elevatePrivileges supports a variable list of privilege name arguments and will make sure all of them are enabled. # We use SE_IMPERSONATE as it is enabled by default # We also use SE_TAKE_OWNERSHIP as it is disabled by default but can # be enabled when running as super user. Supported on Vista and above. The home folder can be retrieved. 
On Windows home folders can be created if required privileges are configured for the process. Check that SE_CHANGE_NOTIFY_NAME, SE_IMPERSONATE_NAME and SE_CREATE_GLOBAL_NAME are all present in the privileges description and enabled. Check that SE_CREATE_SYMBOLIC_LINK_NAME is present in the privileges description and it's disabled. | 2.136129 | 2 |
1_SVG_converter_Gold.py | hirowgit/2B0_python_optmization_course | 0 | 6624225 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
## Python basics for novice data scientists, supported by Wagatsuma Lab@Kyutech
#
# The MIT License (MIT): Copyright (c) 2021 <NAME> and Wagatsuma Lab<EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#
# # @Time : 2021-1-16
# # @Author : <NAME>
# # @Site : https://github.com/hirowgit/2B0_python_optmization_course
# # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin
# # @File : SVG_converter_Gold.py
import numpy as np
import matplotlib.pyplot as plt
from svg.path import parse_path
from svg.path.path import Line
from xml.dom import minidom
from time import time
import pandas as pd
def line_splitter(start, end):
return (lambda t: (1-t)*start+t*end)
def cubic_bezier_converter(start, control1, control2, end):
original_data = np.array([start, control1, control2, end])
cubic_bezier_matrix = np.array([
[-1, 3, -3, 1],
[ 3, -6, 3, 0],
[-3, 3, 0, 0],
[ 1, 0, 0, 0]
])
return_data = cubic_bezier_matrix.dot(original_data)
return (lambda t: np.array([t**3, t**2, t, 1]).dot(return_data))
# On the design of "cubic_bezier_converter" was learned from
# https://stackoverflow.com/questions/36971363/how-to-interpolate-svg-path-into-a-pixel-coordinates-not-simply-raster-in-pyth
doc = minidom.parse('data/LaneMap2.svg')
path_strings = [path.getAttribute('d') for path
in doc.getElementsByTagName('path')]
doc.unlink()
points_np_all=[]
points_np_all=np.empty((len(path_strings)),dtype=object)
print(len(points_np_all))
for k in range(len(path_strings)):
#for path_string in path_strings:
path = parse_path(path_strings[k])
points_np_merge=np.empty((0,2), float)
for dat in path:
if type(dat).__name__=='CubicBezier':
start_np = np.array([dat.start.real, dat.start.imag])
control1_np = np.array([dat.control1.real, dat.control1.imag])
control2_np = np.array([dat.control2.real, dat.control2.imag])
end_np = np.array([dat.end.real, dat.end.imag])
converted_curve = cubic_bezier_converter(start_np, control1_np, control2_np, end_np)
#
diff_np=start_np-end_np
n_dots=np.round(np.linalg.norm(diff_np))
#
points_np = np.array([converted_curve(t) for t in np.linspace(0, 1, n_dots)])
elif type(dat).__name__=='Line':
start_np = np.array([dat.start.real, dat.start.imag])
end_np = np.array([dat.end.real, dat.end.imag])
converted_line = line_splitter(start_np,end_np)
#
diff_np=start_np-end_np
n_dots=np.round(np.linalg.norm(diff_np))
#
points_np=np.array([converted_line(t) for t in np.linspace(0, 1, n_dots)])
elif type(dat).__name__=='Move':
#
n_dots=1
#
start_np = np.array([dat.start.real, dat.start.imag])
end_np = np.array([dat.end.real, dat.end.imag])
points_np = np.array([start_np,end_np])
else:
points_np=np.array([])
#points_np_merge=np.concatenate(points_np_merge,points_np)
points_np_merge=np.append(points_np_merge, points_np, axis=0)
points_np_all[k]= points_np_merge
print(' %d : %d dots' % (k,len(points_np_merge)))
len(points_np_all)
len(points_np_all)
for k in range(len(points_np_all)):
points_np=points_np_all[k]
plt.plot(points_np[:, 0], points_np[:, 1], '.-')
plt.show()
maxL=max(len(points_np_all[k]) for k in range(len(points_np_all)))
label=np.empty([],dtype='unicode')
print("label size = %d" % (label.size))
label=[]
for k in range(len(points_np_all)):
label=np.append(label,["x%d"%(k+1),"y%d"%(k+1)])
dat_df = pd.DataFrame([],columns=label)
for k in range(len(points_np_all)):
points_np=points_np_all[k]
tmp0=np.zeros([maxL,2])
tmp0[0:points_np.shape[0],:]=points_np
dat_df["x%d"%(k+1)] = tmp0[:,0]
dat_df["y%d"%(k+1)] = tmp0[:,1]
print(dat_df.shape)
dat_df
dat_df.to_csv('output/to_csv2.csv')
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[3]:
## Python basics for novice data scientists, supported by Wagatsuma Lab@Kyutech
#
# The MIT License (MIT): Copyright (c) 2021 <NAME> and Wagatsuma Lab<EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#
# # @Time : 2021-1-16
# # @Author : <NAME>
# # @Site : https://github.com/hirowgit/2B0_python_optmization_course
# # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin
# # @File : SVG_converter_Gold.py
import numpy as np
import matplotlib.pyplot as plt
from svg.path import parse_path
from svg.path.path import Line
from xml.dom import minidom
from time import time
import pandas as pd
def line_splitter(start, end):
    """Return a function mapping t in [0, 1] to the point linearly
    interpolated between *start* (t=0) and *end* (t=1)."""
    def interpolate(t):
        return (1 - t) * start + t * end
    return interpolate
def cubic_bezier_converter(start, control1, control2, end):
    """Return a function evaluating the cubic Bezier curve defined by the
    four control points at parameter t in [0, 1].

    The coefficient matrix (matrix form of the cubic Bernstein basis) maps
    the control points to polynomial coefficients once, so every evaluation
    is a single dot product with [t**3, t**2, t, 1].
    """
    control_points = np.array([start, control1, control2, end])
    bernstein_matrix = np.array([
        [-1,  3, -3, 1],
        [ 3, -6,  3, 0],
        [-3,  3,  0, 0],
        [ 1,  0,  0, 0]
    ])
    polynomial = bernstein_matrix.dot(control_points)

    def evaluate(t):
        return np.array([t**3, t**2, t, 1]).dot(polynomial)
    return evaluate
# On the design of "cubic_bezier_converter" was learned from
# https://stackoverflow.com/questions/36971363/how-to-interpolate-svg-path-into-a-pixel-coordinates-not-simply-raster-in-python
# Load the SVG document and collect the 'd' attribute of every <path>
# element; unlink() releases the DOM tree promptly.
doc = minidom.parse('data/LaneMap2.svg')
path_strings = []
for path_node in doc.getElementsByTagName('path'):
    path_strings.append(path_node.getAttribute('d'))
doc.unlink()
# Convert every SVG path into a dense 2-D polyline, sampling roughly one
# point per unit of chord length per segment.  Polylines have differing
# lengths, so they are stored per path in an object array.
points_np_all = np.empty((len(path_strings)), dtype=object)
print(len(points_np_all))
for k in range(len(path_strings)):
    path = parse_path(path_strings[k])
    points_np_merge = np.empty((0, 2), float)
    for dat in path:
        if type(dat).__name__ == 'CubicBezier':
            start_np = np.array([dat.start.real, dat.start.imag])
            control1_np = np.array([dat.control1.real, dat.control1.imag])
            control2_np = np.array([dat.control2.real, dat.control2.imag])
            end_np = np.array([dat.end.real, dat.end.imag])
            converted_curve = cubic_bezier_converter(start_np, control1_np, control2_np, end_np)
            diff_np = start_np - end_np
            # np.linspace requires an integer sample count (np.round returns
            # a float), and at least 1 sample keeps the result 2-dimensional
            # even for zero-length segments.
            n_dots = max(int(np.round(np.linalg.norm(diff_np))), 1)
            points_np = np.array([converted_curve(t) for t in np.linspace(0, 1, n_dots)])
        elif type(dat).__name__ == 'Line':
            start_np = np.array([dat.start.real, dat.start.imag])
            end_np = np.array([dat.end.real, dat.end.imag])
            converted_line = line_splitter(start_np, end_np)
            diff_np = start_np - end_np
            n_dots = max(int(np.round(np.linalg.norm(diff_np))), 1)
            points_np = np.array([converted_line(t) for t in np.linspace(0, 1, n_dots)])
        elif type(dat).__name__ == 'Move':
            # A Move contributes its start/end pair directly.
            start_np = np.array([dat.start.real, dat.start.imag])
            end_np = np.array([dat.end.real, dat.end.imag])
            points_np = np.array([start_np, end_np])
        else:
            # Unknown segment type: contribute nothing, but keep the (0, 2)
            # shape so np.append(..., axis=0) below does not fail on a
            # dimension mismatch (np.array([]) has shape (0,)).
            points_np = np.empty((0, 2), float)
        points_np_merge = np.append(points_np_merge, points_np, axis=0)
    points_np_all[k] = points_np_merge
    print(' %d : %d dots' % (k, len(points_np_merge)))
# Preview each extracted polyline as a dotted line plot.
for polyline in points_np_all:
    plt.plot(polyline[:, 0], polyline[:, 1], '.-')
plt.show()
# Export all polylines to a single CSV: one x<k>/y<k> column pair per path,
# rows zero-padded up to the length of the longest polyline.
maxL = max(len(points_np_all[k]) for k in range(len(points_np_all)))
columns = {}
for k in range(len(points_np_all)):
    points_np = points_np_all[k]
    padded = np.zeros([maxL, 2])
    padded[0:points_np.shape[0], :] = points_np
    columns["x%d" % (k + 1)] = padded[:, 0]
    columns["y%d" % (k + 1)] = padded[:, 1]
# Build the frame from a dict in one shot (dict order preserves the
# x1, y1, x2, y2, ... column order) instead of inserting columns one by
# one into an initially empty DataFrame.
dat_df = pd.DataFrame(columns)
print(dat_df.shape)
dat_df.to_csv('output/to_csv2.csv')
# In[ ]:
| en | 0.707311 | #!/usr/bin/env python # coding: utf-8 # In[3]: ## Python basics for novice data scientists, supported by Wagatsuma Lab@Kyutech # # The MIT License (MIT): Copyright (c) 2021 <NAME> and Wagatsuma Lab<EMAIL> # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ # # # @Time : 2021-1-16 # # @Author : <NAME> # # @Site : https://github.com/hirowgit/2B0_python_optmization_course # # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin # # @File : SVG_converter_Gold.py # On the design of "cubic_bezier_converter" was learned from # https://stackoverflow.com/questions/36971363/how-to-interpolate-svg-path-into-a-pixel-coordinates-not-simply-raster-in-pyth #for path_string in path_strings: # # # # # # #points_np_merge=np.concatenate(points_np_merge,points_np) # In[ ]: | 2.164641 | 2 |
ros/src/twist_controller/twist_controller.py | fwa785/CarND-Capstone-fwa785 | 0 | 6624226 | <filename>ros/src/twist_controller/twist_controller.py
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
# Create Yaw Controller
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
# Setup the PID controller to control the throttle
kp = 0.3
ki = 0.1
kd = 0.
mn = 0.
mx = 0.2
self.throttle_controller = PID(kp, ki, kd, mn, mx)
# setup Low pass filter to filter out the noise in the velocity message
tau = 0.5
ts = .02
self.velocity_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brack_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, linear_velocity, angular_velocity, current_velocity, dbw_enabled):
# If DBW is not enabled, reset the PID controller
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
# filter velocity noise
current_velocity = self.velocity_lpf.filt(current_velocity)
steer = self.yaw_controller.get_steering(linear_velocity, angular_velocity, current_velocity)
# Use PID controller to control the throttle
velocity_err = linear_velocity - current_velocity
self.last_velocity = current_velocity
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(velocity_err, sample_time)
brake = 0
if (linear_velocity == 0) and (current_velocity < 0.1):
throttle = 0
brake = 400
elif (throttle < 0.1) and (velocity_err < 0):
throttle = 0
decel = max(velocity_err, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius
# Return throttle, brake, steer
return throttle, brake, steer
| <filename>ros/src/twist_controller/twist_controller.py
from yaw_controller import YawController
from lowpass import LowPassFilter
from pid import PID
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
# Create Yaw Controller
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
# Setup the PID controller to control the throttle
kp = 0.3
ki = 0.1
kd = 0.
mn = 0.
mx = 0.2
self.throttle_controller = PID(kp, ki, kd, mn, mx)
# setup Low pass filter to filter out the noise in the velocity message
tau = 0.5
ts = .02
self.velocity_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brack_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, linear_velocity, angular_velocity, current_velocity, dbw_enabled):
# If DBW is not enabled, reset the PID controller
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
# filter velocity noise
current_velocity = self.velocity_lpf.filt(current_velocity)
steer = self.yaw_controller.get_steering(linear_velocity, angular_velocity, current_velocity)
# Use PID controller to control the throttle
velocity_err = linear_velocity - current_velocity
self.last_velocity = current_velocity
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(velocity_err, sample_time)
brake = 0
if (linear_velocity == 0) and (current_velocity < 0.1):
throttle = 0
brake = 400
elif (throttle < 0.1) and (velocity_err < 0):
throttle = 0
decel = max(velocity_err, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius
# Return throttle, brake, steer
return throttle, brake, steer
| en | 0.77022 | # Create Yaw Controller # Setup the PID controller to control the throttle # setup Low pass filter to filter out the noise in the velocity message # If DBW is not enabled, reset the PID controller # filter velocity noise # Use PID controller to control the throttle # Return throttle, brake, steer | 2.705952 | 3 |
cubes/backends/mongo/store.py | she11c0de/cubes | 0 | 6624227 | # -*- coding=utf -*-
from ...stores import Store
import pymongo
__all__ = []
class MongoStore(Store):
def __init__(self, url, database=None, collection=None, **options):
self.client = pymongo.MongoClient(url, read_preference=pymongo.read_preferences.ReadPreference.SECONDARY)
self.database = database
self.collection = collection
| # -*- coding=utf -*-
from ...stores import Store
import pymongo
__all__ = []
class MongoStore(Store):
def __init__(self, url, database=None, collection=None, **options):
self.client = pymongo.MongoClient(url, read_preference=pymongo.read_preferences.ReadPreference.SECONDARY)
self.database = database
self.collection = collection
| en | 0.452249 | # -*- coding=utf -*- | 2.351949 | 2 |
python/caty/core/script/interpreter/__init__.py | hidaruma/caty | 0 | 6624228 | <gh_stars>0
from caty.core.script.interpreter.executor import *
| from caty.core.script.interpreter.executor import * | none | 1 | 1.005195 | 1 | |
1stSemester_PythonCourse/work3/E06_1827406005.py | chenyz2000/schoolCourses | 0 | 6624229 | <reponame>chenyz2000/schoolCourses
n=eval(input('please input positive odd number'))
mgc=[]
row,col=0,n//2
for i in range(n):
mgc.append([0]*n)
mgc[row][col]=1
for i in range(2,n**2+1):
r,l=(row-1+n)%n,(col+1)%n
if mgc[r][l]==0:
row , col=r,l
else:row =(row+1)%n
mgc[row][col]=i
import numpy as np
x=np.array(mgc)
print(x) | n=eval(input('please input positive odd number'))
mgc=[]
row,col=0,n//2
for i in range(n):
mgc.append([0]*n)
mgc[row][col]=1
for i in range(2,n**2+1):
r,l=(row-1+n)%n,(col+1)%n
if mgc[r][l]==0:
row , col=r,l
else:row =(row+1)%n
mgc[row][col]=i
import numpy as np
x=np.array(mgc)
print(x) | none | 1 | 3.327118 | 3 | |
Site_Visit_2.0/migrations/versions/b8f2a201eb31_.py | opacichjj/FEMA-PDA-and-Route-Optimizer | 0 | 6624230 | <gh_stars>0
"""empty message
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2019-08-02 02:14:14.344836
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('optimizer',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('map_pic', sa.String(length=100), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_optimizer_timestamp'), 'optimizer', ['timestamp'], unique=False)
op.drop_column('assessment', 'state')
op.drop_column('assessment', 'address')
op.drop_column('assessment', 'county')
op.drop_column('assessment', 'city')
op.drop_column('assessment', 'damage_pcnt')
op.drop_column('assessment', 'body')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('assessment', sa.Column('body', sa.VARCHAR(length=140), nullable=True))
op.add_column('assessment', sa.Column('damage_pcnt', sa.INTEGER(), nullable=True))
op.add_column('assessment', sa.Column('city', sa.VARCHAR(length=40), nullable=True))
op.add_column('assessment', sa.Column('county', sa.VARCHAR(length=60), nullable=True))
op.add_column('assessment', sa.Column('address', sa.VARCHAR(length=80), nullable=True))
op.add_column('assessment', sa.Column('state', sa.VARCHAR(length=20), nullable=True))
op.drop_index(op.f('ix_optimizer_timestamp'), table_name='optimizer')
op.drop_table('optimizer')
# ### end Alembic commands ###
| """empty message
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2019-08-02 02:14:14.344836
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('optimizer',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('map_pic', sa.String(length=100), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_optimizer_timestamp'), 'optimizer', ['timestamp'], unique=False)
op.drop_column('assessment', 'state')
op.drop_column('assessment', 'address')
op.drop_column('assessment', 'county')
op.drop_column('assessment', 'city')
op.drop_column('assessment', 'damage_pcnt')
op.drop_column('assessment', 'body')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('assessment', sa.Column('body', sa.VARCHAR(length=140), nullable=True))
op.add_column('assessment', sa.Column('damage_pcnt', sa.INTEGER(), nullable=True))
op.add_column('assessment', sa.Column('city', sa.VARCHAR(length=40), nullable=True))
op.add_column('assessment', sa.Column('county', sa.VARCHAR(length=60), nullable=True))
op.add_column('assessment', sa.Column('address', sa.VARCHAR(length=80), nullable=True))
op.add_column('assessment', sa.Column('state', sa.VARCHAR(length=20), nullable=True))
op.drop_index(op.f('ix_optimizer_timestamp'), table_name='optimizer')
op.drop_table('optimizer')
# ### end Alembic commands ### | en | 0.465347 | empty message Revision ID: <KEY> Revises: <KEY> Create Date: 2019-08-02 02:14:14.344836 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.822521 | 2 |
imageGenie/trainingModule.py | ravi0531rp/imageGenie | 2 | 6624231 | <reponame>ravi0531rp/imageGenie<gh_stars>1-10
import numpy as np
import tensorflow as tf
import os
from glob import glob
from imageGenie.constants import Constants
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.efficientnet_v2 import EfficientNetV2B0 as PretrainedModel, preprocess_input
constants = Constants()
acc_thresh = constants.acc_threshold
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs={}):
if(logs.get('accuracy') > acc_thresh):
print("\nStopping training as desired accuracy reached....")
self.model.stop_training = True
class TrainingModule(object):
def __init__(self, root_folder, model_folder, resize_dim = [200,200,3], modelName = "effNetV2B0"):
self.root_folder = root_folder
self.IMAGE_SIZE = constants.image_size
self.ptm = PretrainedModel(
input_shape = self.IMAGE_SIZE + [3],
weights = 'imagenet',
include_top = False)
self.model_folder = model_folder
self.train_path = os.path.join(root_folder,"train")
self.test_path = os.path.join(root_folder,"test")
self.train_image_files = glob(self.train_path + '/*/*.png')
self.test_image_files = glob(self.test_path + '/*/*.png')
self.folders = glob(self.train_path + "/*")
def train(self):
self.ptm.trainable = False
K = len(self.folders)
x = Flatten()(self.ptm.output)
x = Dense(K, activation = 'softmax')(x)
model = Model(inputs = self.ptm.input , outputs = x)
gen = ImageDataGenerator(
rotation_range = 20,
width_shift_range = 0.1,
height_shift_range = 0.1,
shear_range = 0.1,
zoom_range = 0.2,
horizontal_flip = True,
preprocessing_function = preprocess_input
)
batch_size = 64
train_generator = gen.flow_from_directory(
self.train_path,
shuffle = True,
target_size = self.IMAGE_SIZE,
batch_size = batch_size
)
test_generator = gen.flow_from_directory(
self.test_path,
target_size = self.IMAGE_SIZE,
batch_size = batch_size
)
model.compile(loss = 'categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])
self.model_file = os.path.join(self.model_folder, "trained_model.h5")
try:
os.mkdir(self.model_folder)
except:
pass
myCall = myCallback()
r = model.fit(
train_generator,
validation_data = test_generator,
epochs = 8,
steps_per_epoch = int(np.ceil(len(self.train_image_files)/batch_size)),
validation_steps = int(np.ceil(len(self.test_image_files)/batch_size)),
callbacks=[myCall]
)
model.save(self.model_file)
print(f"Saving model at {self.model_file}")
| import numpy as np
import tensorflow as tf
import os
from glob import glob
from imageGenie.constants import Constants
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.efficientnet_v2 import EfficientNetV2B0 as PretrainedModel, preprocess_input
constants = Constants()
acc_thresh = constants.acc_threshold
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs={}):
if(logs.get('accuracy') > acc_thresh):
print("\nStopping training as desired accuracy reached....")
self.model.stop_training = True
class TrainingModule(object):
def __init__(self, root_folder, model_folder, resize_dim = [200,200,3], modelName = "effNetV2B0"):
self.root_folder = root_folder
self.IMAGE_SIZE = constants.image_size
self.ptm = PretrainedModel(
input_shape = self.IMAGE_SIZE + [3],
weights = 'imagenet',
include_top = False)
self.model_folder = model_folder
self.train_path = os.path.join(root_folder,"train")
self.test_path = os.path.join(root_folder,"test")
self.train_image_files = glob(self.train_path + '/*/*.png')
self.test_image_files = glob(self.test_path + '/*/*.png')
self.folders = glob(self.train_path + "/*")
def train(self):
self.ptm.trainable = False
K = len(self.folders)
x = Flatten()(self.ptm.output)
x = Dense(K, activation = 'softmax')(x)
model = Model(inputs = self.ptm.input , outputs = x)
gen = ImageDataGenerator(
rotation_range = 20,
width_shift_range = 0.1,
height_shift_range = 0.1,
shear_range = 0.1,
zoom_range = 0.2,
horizontal_flip = True,
preprocessing_function = preprocess_input
)
batch_size = 64
train_generator = gen.flow_from_directory(
self.train_path,
shuffle = True,
target_size = self.IMAGE_SIZE,
batch_size = batch_size
)
test_generator = gen.flow_from_directory(
self.test_path,
target_size = self.IMAGE_SIZE,
batch_size = batch_size
)
model.compile(loss = 'categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])
self.model_file = os.path.join(self.model_folder, "trained_model.h5")
try:
os.mkdir(self.model_folder)
except:
pass
myCall = myCallback()
r = model.fit(
train_generator,
validation_data = test_generator,
epochs = 8,
steps_per_epoch = int(np.ceil(len(self.train_image_files)/batch_size)),
validation_steps = int(np.ceil(len(self.test_image_files)/batch_size)),
callbacks=[myCall]
)
model.save(self.model_file)
print(f"Saving model at {self.model_file}") | none | 1 | 2.577975 | 3 | |
Python/python_basics/findRandom.py | paulolima18/ProgrammingMesh | 3 | 6624232 | <gh_stars>1-10
import random
def findRandom():
RandomNum = random.randint(0,55)
for k in range(3):
guess = eval(input("Number: "))
if RandomNum == guess:
print("Correct")
return;
elif randomNum > guess :
print("Bigger")
else:
print("Smaller")
print("You Lost, your number was: ",guess)
findRandom()
| import random
def findRandom():
RandomNum = random.randint(0,55)
for k in range(3):
guess = eval(input("Number: "))
if RandomNum == guess:
print("Correct")
return;
elif randomNum > guess :
print("Bigger")
else:
print("Smaller")
print("You Lost, your number was: ",guess)
findRandom() | none | 1 | 3.761706 | 4 | |
tools/validateAttr.py | fsanges/glTools | 165 | 6624233 | <gh_stars>100-1000
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.mesh
import math
def validatePoints(obj):
'''
'''
# Initiate check
check = 0
# Get points
pArray = glTools.utils.base.getMPointArray(obj,worldSpace=False)
# Check points
for i in range(pArray.length()):
# Check point values
for val in [pArray[i].x,pArray[i].y,pArray[i].z,pArray[i].w]:
# Check NaN
if math.isnan(val):
print('Found NaN : '+obj+'.p['+str(i)+']')
check += 1
# Check INF
if math.isinf(val):
print('Found INF : '+obj+'.p['+str(i)+']')
check += 1
# Return result
return check
def validateNormals(mesh):
'''
'''
# Initiate check
check = 0
# Get points
nArray = glTools.utils.mesh.getNormals(mesh,worldSpace=False)
# Check points
for i in range(nArray.length()):
# Check point values
for val in [nArray[i].x,nArray[i].y,nArray[i].z]:
# Check NaN
if math.isnan(val):
print('Found NaN : '+mesh+'.n['+str(i)+']')
check += 1
# Check INF
if math.isinf(val):
print('Found INF : '+mesh+'.n['+str(i)+']')
check += 1
# Return result
return check
def validateUVs(mesh):
'''
'''
# Initiate check
check = 0
# Get meshFn
meshFn = glTools.utils.mesh.getMeshFn(mesh)
# Get UV Sets
uvSetList = mc.polyUVSet(mesh,q=True,allUVSets=True)
if not uvSetList:
print('No UV Set : '+mesh)
check += 1
for uvSet in uvSetList:
# Get UV values
uArray = OpenMaya.MFloatArray()
vArray = OpenMaya.MFloatArray()
meshFn.getUVs(uArray,vArray,uvSet)
# Check empty UV set
if not uArray.length() and not vArray.length():
print('Empty UV Set : '+mesh+' - '+uvSet)
check += 1
# Check U values
for i in range(uArray.length()):
if math.isnan(uArray[i]):
print('Found NaN : '+mesh+'.uv['+str(i)+']')
check += 1
if math.isinf(uArray[i]):
print('Found INF : '+mesh+'.uv['+str(i)+']')
check += 1
# Check V values
for i in range(vArray.length()):
if math.isnan(vArray[i]):
print('Found NaN : '+mesh+'.uv['+str(i)+']')
check += 1
if math.isinf(vArray[i]):
print('Found INF : '+mesh+'.uv['+str(i)+']')
check += 1
# Return result
return check
def validateAttr(attr):
'''
'''
# Initiate check
check = 0
# Check Attr
if not mc.objExists(attr):
raise Exception('Attribute "'+attr+'" does not exist!')
# Get Attribute type
val = mc.getAttr(attr)
# Check value
if type(val) == list:
for i in val:
if type(i) == list or type(i) == tuple:
for n in i:
if math.isnan(n):
print('Found NaN : '+attr+'['+str(i)+']['+str(n)+']')
check += 1
if math.isinf(n):
print('Found INF : '+attr+'['+str(i)+']['+str(n)+']')
check += 1
else:
if math.isnan(i):
print('Found NaN : '+attr+'['+str(i)+']')
check += 1
if math.isinf(i):
print('Found INF : '+attr+'['+str(i)+']')
check += 1
else:
if math.isnan(val):
print('Found NaN : '+attr)
check += 1
if math.isinf(val):
print('Found INF : '+attr)
check += 1
# Return result
return check
| import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.mesh
import math
def validatePoints(obj):
'''
'''
# Initiate check
check = 0
# Get points
pArray = glTools.utils.base.getMPointArray(obj,worldSpace=False)
# Check points
for i in range(pArray.length()):
# Check point values
for val in [pArray[i].x,pArray[i].y,pArray[i].z,pArray[i].w]:
# Check NaN
if math.isnan(val):
print('Found NaN : '+obj+'.p['+str(i)+']')
check += 1
# Check INF
if math.isinf(val):
print('Found INF : '+obj+'.p['+str(i)+']')
check += 1
# Return result
return check
def validateNormals(mesh):
'''
'''
# Initiate check
check = 0
# Get points
nArray = glTools.utils.mesh.getNormals(mesh,worldSpace=False)
# Check points
for i in range(nArray.length()):
# Check point values
for val in [nArray[i].x,nArray[i].y,nArray[i].z]:
# Check NaN
if math.isnan(val):
print('Found NaN : '+mesh+'.n['+str(i)+']')
check += 1
# Check INF
if math.isinf(val):
print('Found INF : '+mesh+'.n['+str(i)+']')
check += 1
# Return result
return check
def validateUVs(mesh):
'''
'''
# Initiate check
check = 0
# Get meshFn
meshFn = glTools.utils.mesh.getMeshFn(mesh)
# Get UV Sets
uvSetList = mc.polyUVSet(mesh,q=True,allUVSets=True)
if not uvSetList:
print('No UV Set : '+mesh)
check += 1
for uvSet in uvSetList:
# Get UV values
uArray = OpenMaya.MFloatArray()
vArray = OpenMaya.MFloatArray()
meshFn.getUVs(uArray,vArray,uvSet)
# Check empty UV set
if not uArray.length() and not vArray.length():
print('Empty UV Set : '+mesh+' - '+uvSet)
check += 1
# Check U values
for i in range(uArray.length()):
if math.isnan(uArray[i]):
print('Found NaN : '+mesh+'.uv['+str(i)+']')
check += 1
if math.isinf(uArray[i]):
print('Found INF : '+mesh+'.uv['+str(i)+']')
check += 1
# Check V values
for i in range(vArray.length()):
if math.isnan(vArray[i]):
print('Found NaN : '+mesh+'.uv['+str(i)+']')
check += 1
if math.isinf(vArray[i]):
print('Found INF : '+mesh+'.uv['+str(i)+']')
check += 1
# Return result
return check
def validateAttr(attr):
'''
'''
# Initiate check
check = 0
# Check Attr
if not mc.objExists(attr):
raise Exception('Attribute "'+attr+'" does not exist!')
# Get Attribute type
val = mc.getAttr(attr)
# Check value
if type(val) == list:
for i in val:
if type(i) == list or type(i) == tuple:
for n in i:
if math.isnan(n):
print('Found NaN : '+attr+'['+str(i)+']['+str(n)+']')
check += 1
if math.isinf(n):
print('Found INF : '+attr+'['+str(i)+']['+str(n)+']')
check += 1
else:
if math.isnan(i):
print('Found NaN : '+attr+'['+str(i)+']')
check += 1
if math.isinf(i):
print('Found INF : '+attr+'['+str(i)+']')
check += 1
else:
if math.isnan(val):
print('Found NaN : '+attr)
check += 1
if math.isinf(val):
print('Found INF : '+attr)
check += 1
# Return result
return check | en | 0.214993 | # Initiate check # Get points # Check points # Check point values # Check NaN # Check INF # Return result # Initiate check # Get points # Check points # Check point values # Check NaN # Check INF # Return result # Initiate check # Get meshFn # Get UV Sets # Get UV values # Check empty UV set # Check U values # Check V values # Return result # Initiate check # Check Attr # Get Attribute type # Check value # Return result | 2.241363 | 2 |
corpus_statistics.py | GreenParachute/wordnet-randomwalk-python | 9 | 6624234 | <filename>corpus_statistics.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calculate statistics on already generated random walk cporpora.
Assumes input is a text file where 1 line = 1 sentence.
Copyright © 2019 <NAME>, <NAME>. Technological University Dublin, ADAPT Centre.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = "<NAME>"
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-c", "--corpus", help="File name/location of the input corpus.")
parser.add_argument("-r", "--rare", type=str, default='no', help="Lists the rare words in the corpus. Options: yes, no. Default: no")
parser.add_argument("-u", "--unique_sentences", type=str, default='yes', help="Also calculates the number of unique sentences in the corpus and counts n-length sentences. Options: yes, no. Default: yes. NOTE: This is a bit more memory intensive, especially with larger corpora, so if you're only interested in token counts, set this to 'no'.")
args = parser.parse_args()
return args
args = parse_arguments()
token_count=0
types=[]
token_freq={}
rare_words=[]
sentences=[] #this stores each individual sentence as a list of tokens; needed to calculate frequency of n-word sentences
bow_sents=[] #this stores each individual sentence as a bag of words set of tokens (i.e. not in any order and no tokens are repeated); only does this if -u argument is set to yes
for line in open(args.corpus):
sentences.append(line.strip().lower().split(' '))
tokens=line.strip().lower().split(' ')
token_count+=len(tokens) #add token count of current sentence to total token count
for token in tokens:
if token not in types:
types.append(token) #counts types (i.e. unique tokens)
if len(tokens) == 1: #we do not count 1-word sentences as occurrences of rare words, but you might wish to do so, which is what the commented code below is for
#if token not in rare_words:
# rare_words.append(token)
pass
else:
if token not in token_freq:
token_freq[token]=1
else:
token_freq[token]+=1
if args.unique_sentences == 'yes':
bow_sents.append(set(line.strip().lower().split(' ')))
#Uncomment the the two lines commented below if you wish to print a list of tokens sorted by frequency
#sorted_by_value = sorted(token_freq.items(), key=lambda kv: kv[1])
#print(sorted_by_value)
#this snippet adds rare words into a printable list
for token in token_freq:
if token_freq[token]<10:
if token not in rare_words:
rare_words.append(token)
sent_count=len(sentences)
if args.rare == 'yes':
for token in rare_words:
print(token)
else:
if args.unique_sentences == 'yes':
string_sents=[]
#turn sentence i.e. set of unique tokens into a list and sort the list alphabetically, then add that sorted list to a list of sentences
for sent in bow_sents:
string_sents.append(' '.join(sorted(sent)))
#turn the *list* of alphabetically sorted sentences into a *set* of alphabetically sorted sentences, thereby removing all duplicates and obtaining a set of unique sentences
bagof_uniq_sents=set(string_sents)
uniq_n=len(bagof_uniq_sents)
same_n=sent_count-uniq_n
print('### For a ', sent_count, ' sentence-strong corpus ###\nNumber of identical sentences: ', same_n, ' which is ', 100*(float(same_n)/(same_n+uniq_n)), 'percent of the corpus \nNumber of unique sentences: ', uniq_n, 'which is ', 100*(float(uniq_n)/(same_n+uniq_n)), 'percent of the corpus\n')
#this snippet counts the number of sentences of length n (e.g. there's x 1-word sentences, y 2-word sentences, z 3-word sentences, etc.)
max_sent_len=0
for sent in sentences:
if len(sent) > max_sent_len:
max_sent_len=len(sent)
sent_lens={}
for i in range(max_sent_len+1)[1:]:
sent_lens[i]=0
for sent in sentences:
if len(sent) in sent_lens:
sent_lens[len(sent)]+=1
for senlen in sent_lens:
print('Number of ', senlen, '-word sentences: ', sent_lens[senlen], ' which is', 100*(float(sent_lens[senlen])/(sent_count)), 'percent of the corpus')
print('\nAverage sentence length (in tokens): ', token_count/len(sentences),'\nTotal number of tokens (words) is: ', token_count, '\nNumber of types (unique words): ', len(types), '\nNumber of "rare" word types (with frequency<10):', len(rare_words), '\nPercentage of "rare" words (types):', 100*(float(len(rare_words))/(len(types))))
else:
print('### For a ', sent_count, ' sentence-strong corpus ###\nAverage sentence length (in tokens): ', token_count/len(sentences),'\nTotal number of tokens (words) is: ', token_count, '\nNumber of types (unique words): ', len(types), '\nNumber of "rare" word types (with frequency<10):', len(rare_words), '\nPercentage of "rare" words (types):', 100*(float(len(rare_words))/(len(types)))) | <filename>corpus_statistics.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calculate statistics on already generated random walk cporpora.
Assumes input is a text file where 1 line = 1 sentence.
Copyright © 2019 <NAME>, <NAME>. Technological University Dublin, ADAPT Centre.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = "<NAME>"
import argparse
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-c", "--corpus", help="File name/location of the input corpus.")
parser.add_argument("-r", "--rare", type=str, default='no', help="Lists the rare words in the corpus. Options: yes, no. Default: no")
parser.add_argument("-u", "--unique_sentences", type=str, default='yes', help="Also calculates the number of unique sentences in the corpus and counts n-length sentences. Options: yes, no. Default: yes. NOTE: This is a bit more memory intensive, especially with larger corpora, so if you're only interested in token counts, set this to 'no'.")
args = parser.parse_args()
return args
args = parse_arguments()
token_count=0
types=[]
token_freq={}
rare_words=[]
sentences=[] #this stores each individual sentence as a list of tokens; needed to calculate frequency of n-word sentences
bow_sents=[] #this stores each individual sentence as a bag of words set of tokens (i.e. not in any order and no tokens are repeated); only does this if -u argument is set to yes
for line in open(args.corpus):
sentences.append(line.strip().lower().split(' '))
tokens=line.strip().lower().split(' ')
token_count+=len(tokens) #add token count of current sentence to total token count
for token in tokens:
if token not in types:
types.append(token) #counts types (i.e. unique tokens)
if len(tokens) == 1: #we do not count 1-word sentences as occurrences of rare words, but you might wish to do so, which is what the commented code below is for
#if token not in rare_words:
# rare_words.append(token)
pass
else:
if token not in token_freq:
token_freq[token]=1
else:
token_freq[token]+=1
if args.unique_sentences == 'yes':
bow_sents.append(set(line.strip().lower().split(' ')))
#Uncomment the the two lines commented below if you wish to print a list of tokens sorted by frequency
#sorted_by_value = sorted(token_freq.items(), key=lambda kv: kv[1])
#print(sorted_by_value)
#this snippet adds rare words into a printable list
for token in token_freq:
if token_freq[token]<10:
if token not in rare_words:
rare_words.append(token)
sent_count=len(sentences)
if args.rare == 'yes':
for token in rare_words:
print(token)
else:
if args.unique_sentences == 'yes':
string_sents=[]
#turn sentence i.e. set of unique tokens into a list and sort the list alphabetically, then add that sorted list to a list of sentences
for sent in bow_sents:
string_sents.append(' '.join(sorted(sent)))
#turn the *list* of alphabetically sorted sentences into a *set* of alphabetically sorted sentences, thereby removing all duplicates and obtaining a set of unique sentences
bagof_uniq_sents=set(string_sents)
uniq_n=len(bagof_uniq_sents)
same_n=sent_count-uniq_n
print('### For a ', sent_count, ' sentence-strong corpus ###\nNumber of identical sentences: ', same_n, ' which is ', 100*(float(same_n)/(same_n+uniq_n)), 'percent of the corpus \nNumber of unique sentences: ', uniq_n, 'which is ', 100*(float(uniq_n)/(same_n+uniq_n)), 'percent of the corpus\n')
#this snippet counts the number of sentences of length n (e.g. there's x 1-word sentences, y 2-word sentences, z 3-word sentences, etc.)
max_sent_len=0
for sent in sentences:
if len(sent) > max_sent_len:
max_sent_len=len(sent)
sent_lens={}
for i in range(max_sent_len+1)[1:]:
sent_lens[i]=0
for sent in sentences:
if len(sent) in sent_lens:
sent_lens[len(sent)]+=1
for senlen in sent_lens:
print('Number of ', senlen, '-word sentences: ', sent_lens[senlen], ' which is', 100*(float(sent_lens[senlen])/(sent_count)), 'percent of the corpus')
print('\nAverage sentence length (in tokens): ', token_count/len(sentences),'\nTotal number of tokens (words) is: ', token_count, '\nNumber of types (unique words): ', len(types), '\nNumber of "rare" word types (with frequency<10):', len(rare_words), '\nPercentage of "rare" words (types):', 100*(float(len(rare_words))/(len(types))))
else:
print('### For a ', sent_count, ' sentence-strong corpus ###\nAverage sentence length (in tokens): ', token_count/len(sentences),'\nTotal number of tokens (words) is: ', token_count, '\nNumber of types (unique words): ', len(types), '\nNumber of "rare" word types (with frequency<10):', len(rare_words), '\nPercentage of "rare" words (types):', 100*(float(len(rare_words))/(len(types)))) | en | 0.763738 | #!/usr/bin/env python # -*- coding: utf-8 -*- Calculate statistics on already generated random walk cporpora. Assumes input is a text file where 1 line = 1 sentence. Copyright © 2019 <NAME>, <NAME>. Technological University Dublin, ADAPT Centre. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#this stores each individual sentence as a list of tokens; needed to calculate frequency of n-word sentences #this stores each individual sentence as a bag of words set of tokens (i.e. not in any order and no tokens are repeated); only does this if -u argument is set to yes #add token count of current sentence to total token count #counts types (i.e. unique tokens) #we do not count 1-word sentences as occurrences of rare words, but you might wish to do so, which is what the commented code below is for #if token not in rare_words: # rare_words.append(token) #Uncomment the the two lines commented below if you wish to print a list of tokens sorted by frequency #sorted_by_value = sorted(token_freq.items(), key=lambda kv: kv[1]) #print(sorted_by_value) #this snippet adds rare words into a printable list #turn sentence i.e. set of unique tokens into a list and sort the list alphabetically, then add that sorted list to a list of sentences #turn the *list* of alphabetically sorted sentences into a *set* of alphabetically sorted sentences, thereby removing all duplicates and obtaining a set of unique sentences ## For a ', sent_count, ' sentence-strong corpus ###\nNumber of identical sentences: ', same_n, ' which is ', 100*(float(same_n)/(same_n+uniq_n)), 'percent of the corpus \nNumber of unique sentences: ', uniq_n, 'which is ', 100*(float(uniq_n)/(same_n+uniq_n)), 'percent of the corpus\n') #this snippet counts the number of sentences of length n (e.g. there's x 1-word sentences, y 2-word sentences, z 3-word sentences, etc.) ## For a ', sent_count, ' sentence-strong corpus ###\nAverage sentence length (in tokens): ', token_count/len(sentences),'\nTotal number of tokens (words) is: ', token_count, '\nNumber of types (unique words): ', len(types), '\nNumber of "rare" word types (with frequency<10):', len(rare_words), '\nPercentage of "rare" words (types):', 100*(float(len(rare_words))/(len(types)))) | 2.855374 | 3 |
bench/a.py | t1mch0w/memc3 | 0 | 6624235 | <gh_stars>0
import sys
filea=open(sys.argv[1],'r')
fileb=open(sys.argv[2],'r')
load_key=set()
run_key=set()
for l in filea:
if 'usertable' in l:
load_key.add(l.split()[2])
for l in fileb:
if 'usertable' in l:
run_key.add(l.split()[2])
num_match=0
num_nomatch=0
for k in run_key:
if k in load_key:
num_match+=1
else:
num_nomatch+=1
print('num_match = %d, num_nomatch = %d' % (num_match, num_nomatch))
| import sys
filea=open(sys.argv[1],'r')
fileb=open(sys.argv[2],'r')
load_key=set()
run_key=set()
for l in filea:
if 'usertable' in l:
load_key.add(l.split()[2])
for l in fileb:
if 'usertable' in l:
run_key.add(l.split()[2])
num_match=0
num_nomatch=0
for k in run_key:
if k in load_key:
num_match+=1
else:
num_nomatch+=1
print('num_match = %d, num_nomatch = %d' % (num_match, num_nomatch)) | none | 1 | 2.650492 | 3 | |
src/forum/schema.py | VishalZ123/gymkhana_portal | 16 | 6624236 | import graphene
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoModelFormMutation
from graphql_jwt.decorators import login_required
from forum.forms import TopicForm, AnswerForm
from forum.models import Topic, Answer
class AnswerNode(DjangoObjectType):
id = graphene.ID(required=True)
upvotes_count = graphene.Int()
is_upvoted = graphene.Boolean()
is_author = graphene.Boolean()
class Meta:
model = Answer
fields = '__all__'
filter_fields = ()
interfaces = (relay.Node,)
def resolve_id(self, info):
return self.id
def resolve_upvotes_count(self, info):
return self.upvotes.count()
def resolve_is_upvoted(self, info):
if info.context.user.userprofile in self.upvotes.all():
return True
return False
def resolve_is_author(self, info):
return info.context.user.userprofile == self.author
class TopicNode(DjangoObjectType):
id = graphene.ID(required=True)
upvotes_count = graphene.Int()
answers_count = graphene.Int()
is_upvoted = graphene.Boolean()
is_author = graphene.Boolean()
class Meta:
model = Topic
fields = '__all__'
filter_fields = ('slug',)
interfaces = (relay.Node,)
@classmethod
def search(cls, query, indfo):
nodes = cls._meta.model.objects.search(query) if query else cls._meta.model.objects
return nodes.all()
def resolve_id(self, info):
return self.id
def resolve_upvotes_count(self, info):
return self.upvotes.count()
def resolve_answers_count(self, info):
return self.answer_set.count()
def resolve_is_upvoted(self, info):
if info.context.user.userprofile in self.upvotes.all():
return True
return False
def resolve_is_author(self, info):
return info.context.user.userprofile == self.author
class CreateTopicMutation(DjangoModelFormMutation):
class Meta:
form_class = TopicForm
@classmethod
@login_required
def perform_mutate(cls, form, info):
obj = form.save(commit=False)
obj.author = info.context.user.userprofile
obj.save()
kwargs = {cls._meta.return_field_name: obj}
return cls(errors=[], **kwargs)
class AddAnswerMutation(DjangoModelFormMutation):
class Meta:
form_class = AnswerForm
exclude_fields = ('author',)
@classmethod
@login_required
def get_form_kwargs(cls, root, info, **input):
input.__setitem__('author', str(info.context.user.userprofile.id))
kwargs = {"data": input}
return kwargs
class UpvoteMutaiton(graphene.Mutation):
class Arguments:
is_topic = graphene.Boolean(required=True)
id = graphene.ID(required=True)
updated = graphene.Boolean()
upvoted = graphene.Boolean()
def mutate(self, info, id, is_topic):
updated = False
upvoted = False
user = info.context.user.userprofile
obj = Topic.objects.get(id=id) if is_topic else Answer.objects.get(id=id)
if info.context.user.is_authenticated:
if user in obj.upvotes.all():
obj.upvotes.remove(user)
upvoted = False
else:
obj.upvotes.add(user)
upvoted = True
updated = True
return UpvoteMutaiton(updated=updated, upvoted=upvoted)
class DeleteMutation(graphene.Mutation):
class Arguments:
is_topic = graphene.Boolean(required=True)
id = graphene.ID(required=True)
deleted = graphene.Boolean()
def mutate(self, info, id, is_topic):
deleted = False
obj = Topic.objects.filter(id=id) if is_topic else Answer.objects.filter(id=id)
if obj:
deleted = True
obj.delete()
return DeleteMutation(deleted=deleted)
| import graphene
from graphene import relay
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoModelFormMutation
from graphql_jwt.decorators import login_required
from forum.forms import TopicForm, AnswerForm
from forum.models import Topic, Answer
class AnswerNode(DjangoObjectType):
id = graphene.ID(required=True)
upvotes_count = graphene.Int()
is_upvoted = graphene.Boolean()
is_author = graphene.Boolean()
class Meta:
model = Answer
fields = '__all__'
filter_fields = ()
interfaces = (relay.Node,)
def resolve_id(self, info):
return self.id
def resolve_upvotes_count(self, info):
return self.upvotes.count()
def resolve_is_upvoted(self, info):
if info.context.user.userprofile in self.upvotes.all():
return True
return False
def resolve_is_author(self, info):
return info.context.user.userprofile == self.author
class TopicNode(DjangoObjectType):
id = graphene.ID(required=True)
upvotes_count = graphene.Int()
answers_count = graphene.Int()
is_upvoted = graphene.Boolean()
is_author = graphene.Boolean()
class Meta:
model = Topic
fields = '__all__'
filter_fields = ('slug',)
interfaces = (relay.Node,)
@classmethod
def search(cls, query, indfo):
nodes = cls._meta.model.objects.search(query) if query else cls._meta.model.objects
return nodes.all()
def resolve_id(self, info):
return self.id
def resolve_upvotes_count(self, info):
return self.upvotes.count()
def resolve_answers_count(self, info):
return self.answer_set.count()
def resolve_is_upvoted(self, info):
if info.context.user.userprofile in self.upvotes.all():
return True
return False
def resolve_is_author(self, info):
return info.context.user.userprofile == self.author
class CreateTopicMutation(DjangoModelFormMutation):
class Meta:
form_class = TopicForm
@classmethod
@login_required
def perform_mutate(cls, form, info):
obj = form.save(commit=False)
obj.author = info.context.user.userprofile
obj.save()
kwargs = {cls._meta.return_field_name: obj}
return cls(errors=[], **kwargs)
class AddAnswerMutation(DjangoModelFormMutation):
class Meta:
form_class = AnswerForm
exclude_fields = ('author',)
@classmethod
@login_required
def get_form_kwargs(cls, root, info, **input):
input.__setitem__('author', str(info.context.user.userprofile.id))
kwargs = {"data": input}
return kwargs
class UpvoteMutaiton(graphene.Mutation):
class Arguments:
is_topic = graphene.Boolean(required=True)
id = graphene.ID(required=True)
updated = graphene.Boolean()
upvoted = graphene.Boolean()
def mutate(self, info, id, is_topic):
updated = False
upvoted = False
user = info.context.user.userprofile
obj = Topic.objects.get(id=id) if is_topic else Answer.objects.get(id=id)
if info.context.user.is_authenticated:
if user in obj.upvotes.all():
obj.upvotes.remove(user)
upvoted = False
else:
obj.upvotes.add(user)
upvoted = True
updated = True
return UpvoteMutaiton(updated=updated, upvoted=upvoted)
class DeleteMutation(graphene.Mutation):
class Arguments:
is_topic = graphene.Boolean(required=True)
id = graphene.ID(required=True)
deleted = graphene.Boolean()
def mutate(self, info, id, is_topic):
deleted = False
obj = Topic.objects.filter(id=id) if is_topic else Answer.objects.filter(id=id)
if obj:
deleted = True
obj.delete()
return DeleteMutation(deleted=deleted)
| none | 1 | 2.129931 | 2 | |
preparing_data/prepare_test_dataset.py | Kukuster/SelfDecode-phasing | 0 | 6624237 | import numpy as np
from lib.pd import pd_read_vcf
"""
Have to rewrite this using bcftools (and tabix for indexing input datasets)
So the function receives two inputs: full dataset of test samples, and list of sites.
Currently this works under the assumption:
- both FULL and CHIP datasets are harmonized in the same way
"""
FULL_DATASET_OF_TEST_SAMPLES = "/home/ubuntu/files/400_random_samples_BP0-1000000.80-test-samples.vcf.gz"
CHIP_DATASET = "/home/ubuntu/files/400_random_samples_BP0-1000000_EUROFINS.vcf.gz"
OUTPUT_DATASET = "/home/ubuntu/files/400_random_samples_BP0-1000000.80-test-samples_EUROFINS-masked_unimputed-unphased_int-data.tsv.gz"
"""
Prepares test dataset from:
1) A full dataset of test samples
2) A vcf with only sites that are needed in the output file
1. Masks given first vcf file with sites that are present in the second vcf file
by setting all that aren't present in both to `np.nan`
2. Translates genotype and NaNs to whatever vocabulary defined in `.replace({})`.
For "unphasing", translation should map "0|1" and "1|0" to the same number.
"""
def prepare_test_dataset():
df = pd_read_vcf(FULL_DATASET_OF_TEST_SAMPLES)
df_CHIP = pd_read_vcf(CHIP_DATASET)
df['key'] = df['#CHROM'].astype(str) + ':' + df['POS'].astype(str) + '.' + df['REF'] + '.' + df['ALT']
df_CHIP['key'] = df_CHIP['#CHROM'].astype(str) + ':' + df_CHIP['POS'].astype(str) + '.' + df_CHIP['REF'] + '.' + df_CHIP['ALT']
df.set_index('key', inplace=True)
df_CHIP.set_index('key', inplace=True)
samples = df.columns[9:] # in VCF the first 9 columns are data about the SNP, and all other columns are samples
df_result = df.copy()
for col in samples:
df_result[col] = df_CHIP[col]
df_result[col].replace({
np.nan: '0',
'0|0': '1',
'1|0': '2',
'0|1': '2',
'1|1': '3'
}, inplace=True)
df_result[col].astype(str)
df_result.to_csv(OUTPUT_DATASET, index=False, sep="\t", line_terminator="\n")
if __name__ == "__main__":
prepare_test_dataset()
| import numpy as np
from lib.pd import pd_read_vcf
"""
Have to rewrite this using bcftools (and tabix for indexing input datasets)
So the function receives two inputs: full dataset of test samples, and list of sites.
Currently this works under the assumption:
- both FULL and CHIP datasets are harmonized in the same way
"""
FULL_DATASET_OF_TEST_SAMPLES = "/home/ubuntu/files/400_random_samples_BP0-1000000.80-test-samples.vcf.gz"
CHIP_DATASET = "/home/ubuntu/files/400_random_samples_BP0-1000000_EUROFINS.vcf.gz"
OUTPUT_DATASET = "/home/ubuntu/files/400_random_samples_BP0-1000000.80-test-samples_EUROFINS-masked_unimputed-unphased_int-data.tsv.gz"
"""
Prepares test dataset from:
1) A full dataset of test samples
2) A vcf with only sites that are needed in the output file
1. Masks given first vcf file with sites that are present in the second vcf file
by setting all that aren't present in both to `np.nan`
2. Translates genotype and NaNs to whatever vocabulary defined in `.replace({})`.
For "unphasing", translation should map "0|1" and "1|0" to the same number.
"""
def prepare_test_dataset():
df = pd_read_vcf(FULL_DATASET_OF_TEST_SAMPLES)
df_CHIP = pd_read_vcf(CHIP_DATASET)
df['key'] = df['#CHROM'].astype(str) + ':' + df['POS'].astype(str) + '.' + df['REF'] + '.' + df['ALT']
df_CHIP['key'] = df_CHIP['#CHROM'].astype(str) + ':' + df_CHIP['POS'].astype(str) + '.' + df_CHIP['REF'] + '.' + df_CHIP['ALT']
df.set_index('key', inplace=True)
df_CHIP.set_index('key', inplace=True)
samples = df.columns[9:] # in VCF the first 9 columns are data about the SNP, and all other columns are samples
df_result = df.copy()
for col in samples:
df_result[col] = df_CHIP[col]
df_result[col].replace({
np.nan: '0',
'0|0': '1',
'1|0': '2',
'0|1': '2',
'1|1': '3'
}, inplace=True)
df_result[col].astype(str)
df_result.to_csv(OUTPUT_DATASET, index=False, sep="\t", line_terminator="\n")
if __name__ == "__main__":
prepare_test_dataset()
| en | 0.906182 | Have to rewrite this using bcftools (and tabix for indexing input datasets) So the function receives two inputs: full dataset of test samples, and list of sites. Currently this works under the assumption: - both FULL and CHIP datasets are harmonized in the same way Prepares test dataset from: 1) A full dataset of test samples 2) A vcf with only sites that are needed in the output file 1. Masks given first vcf file with sites that are present in the second vcf file by setting all that aren't present in both to `np.nan` 2. Translates genotype and NaNs to whatever vocabulary defined in `.replace({})`. For "unphasing", translation should map "0|1" and "1|0" to the same number. # in VCF the first 9 columns are data about the SNP, and all other columns are samples | 2.954931 | 3 |
notebooks/char_rnn.py | lewfish/gmm-pytorch | 1 | 6624238 | <filename>notebooks/char_rnn.py<gh_stars>1-10
# Generates names from different languages using an RNN.
# Reading material:
# https://karpathy.github.io/2015/05/21/rnn-effectiveness/
# https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
# The data loading part of this code was copied and adapted from
# https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
# %%
from __future__ import unicode_literals, print_function, division
from IPython import get_ipython
from io import open
import glob
import os
import unicodedata
import string
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Categorical
get_ipython().run_line_magic('matplotlib', 'inline')
# %%
all_letters = string.ascii_letters + " .,;'-"
START = len(all_letters)
END = len(all_letters) + 1
n_letters = len(all_letters) + 2
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('/opt/data/pytorch-tutorial-data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found. Make sure that you downloaded data '
'from https://download.pytorch.org/tutorial/data.zip and extract it to '
'the current directory.')
print('# categories:', n_categories, all_categories)
print(unicodeToAscii("O'Néàl"))
# %%
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
return category, line
def categoryTensor(category):
return torch.tensor(all_categories.index(category))
def line2tensor(line):
return torch.tensor([START] + [all_letters.find(letter) for letter in line] + [END])
def randomTrainingExample():
category, line = randomTrainingPair()
category_tensor = categoryTensor(category)
line_tensor = line2tensor(line)
input_line_tensor = line_tensor[0:-1]
target_line_tensor = line_tensor[1:]
return category_tensor, input_line_tensor, target_line_tensor
def make_batch(batch_sz):
samples = []
for i in range(batch_sz):
samples.append(randomTrainingExample())
max_len = torch.tensor([len(s[1]) for s in samples]).max()
batch_cat = torch.cat([s[0].unsqueeze(0) for s in samples])
batch_input = torch.full((max_len, batch_sz), END, dtype=torch.long)
batch_target = torch.full((max_len, batch_sz), END, dtype=torch.long)
for i, s in enumerate(samples):
batch_input[0:len(s[1]), i] = s[1]
batch_target[0:len(s[2]), i] = s[2]
return batch_cat, batch_input, batch_target
# %%
class MyRNN(nn.Module):
def __init__(self, ncats, ntokens, nhidden, nembed, nout):
super(MyRNN, self).__init__()
self.cat_embed = nn.Embedding(ncats, nembed)
self.input_embed = nn.Embedding(ntokens, nembed)
self.hidden = nn.Linear(nembed + nembed + nhidden, nhidden)
self.output = nn.Linear(nembed + nhidden, nout)
def forward(self, cat, input, hidden):
cat = self.cat_embed(cat)
input = self.input_embed(input)
hidden = nn.functional.tanh(self.hidden(torch.cat([cat, input, hidden], dim=1)))
output = self.output(torch.cat([hidden, input], dim=1))
return hidden, output
def get_init_hidden(self, batch_sz):
return torch.zeros(batch_sz, nhidden)
# %%
ncats = n_categories
ntokens = n_letters
nhidden = 128
nembed = 5
nout = n_letters
model = MyRNN(ncats, ntokens, nhidden, nembed, nout)
model.train()
nsteps = 10000
log_every = 500
batch_sz = 4
lr = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
sum_loss = 0.0
for step in range(nsteps):
model.zero_grad()
cat, input, target = make_batch(batch_sz)
hidden = model.get_init_hidden(batch_sz)
loss = torch.tensor(0.0)
for i in range(len(input)):
hidden, output = model(cat, input[i], hidden)
loss += nn.functional.cross_entropy(output, target[i])
loss.backward()
optimizer.step()
sum_loss += loss.item()
if step != 0 and step % log_every == 0:
print(f'step: {step} / loss: {sum_loss / log_every}')
sum_loss = 0.0
model.eval()
# %%
def get_sample(model, cat):
hidden = model.get_init_hidden(1)
cat = categoryTensor(cat)
input = torch.tensor(START)
sample = []
with torch.no_grad():
while True:
hidden, output = model(cat.unsqueeze(0), input.unsqueeze(0), hidden)
output_dist = nn.functional.softmax(output)[0]
output = Categorical(output_dist).sample()
if output == END:
break
input = output
sample.append(output)
return ''.join([all_letters[s.item()] for s in sample])
def get_samples(model, cat, nsamples):
return [get_sample(model, cat) for i in range(nsamples)]
def print_samples():
for cat in all_categories:
samples = get_samples(model, cat, 10)
print(cat)
print(samples)
print()
print_samples()
# %%
| <filename>notebooks/char_rnn.py<gh_stars>1-10
# Generates names from different languages using an RNN.
# Reading material:
# https://karpathy.github.io/2015/05/21/rnn-effectiveness/
# https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
# The data loading part of this code was copied and adapted from
# https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
# %%
from __future__ import unicode_literals, print_function, division
from IPython import get_ipython
from io import open
import glob
import os
import unicodedata
import string
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Categorical
get_ipython().run_line_magic('matplotlib', 'inline')
# %%
all_letters = string.ascii_letters + " .,;'-"
START = len(all_letters)
END = len(all_letters) + 1
n_letters = len(all_letters) + 2
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}
all_categories = []
for filename in findFiles('/opt/data/pytorch-tutorial-data/names/*.txt'):
category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found. Make sure that you downloaded data '
'from https://download.pytorch.org/tutorial/data.zip and extract it to '
'the current directory.')
print('# categories:', n_categories, all_categories)
print(unicodeToAscii("O'Néàl"))
# %%
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
return category, line
def categoryTensor(category):
return torch.tensor(all_categories.index(category))
def line2tensor(line):
return torch.tensor([START] + [all_letters.find(letter) for letter in line] + [END])
def randomTrainingExample():
category, line = randomTrainingPair()
category_tensor = categoryTensor(category)
line_tensor = line2tensor(line)
input_line_tensor = line_tensor[0:-1]
target_line_tensor = line_tensor[1:]
return category_tensor, input_line_tensor, target_line_tensor
def make_batch(batch_sz):
samples = []
for i in range(batch_sz):
samples.append(randomTrainingExample())
max_len = torch.tensor([len(s[1]) for s in samples]).max()
batch_cat = torch.cat([s[0].unsqueeze(0) for s in samples])
batch_input = torch.full((max_len, batch_sz), END, dtype=torch.long)
batch_target = torch.full((max_len, batch_sz), END, dtype=torch.long)
for i, s in enumerate(samples):
batch_input[0:len(s[1]), i] = s[1]
batch_target[0:len(s[2]), i] = s[2]
return batch_cat, batch_input, batch_target
# %%
class MyRNN(nn.Module):
    """Category-conditioned character-level RNN cell.

    Each step embeds the category and the current input token, combines them
    with the previous hidden state through a tanh layer, and produces logits
    over the next token from the new hidden state plus the input embedding.
    """
    def __init__(self, ncats, ntokens, nhidden, nembed, nout):
        super(MyRNN, self).__init__()
        # Remember the hidden size so get_init_hidden no longer depends on a
        # module-level global `nhidden` (which broke standalone reuse).
        self.nhidden = nhidden
        self.cat_embed = nn.Embedding(ncats, nembed)
        self.input_embed = nn.Embedding(ntokens, nembed)
        self.hidden = nn.Linear(nembed + nembed + nhidden, nhidden)
        self.output = nn.Linear(nembed + nhidden, nout)
    def forward(self, cat, input, hidden):
        """One recurrence step; returns (new_hidden, output_logits)."""
        cat = self.cat_embed(cat)
        input = self.input_embed(input)
        # torch.tanh: nn.functional.tanh is deprecated and warns.
        hidden = torch.tanh(self.hidden(torch.cat([cat, input, hidden], dim=1)))
        output = self.output(torch.cat([hidden, input], dim=1))
        return hidden, output
    def get_init_hidden(self, batch_sz):
        """Return a zero hidden state of shape (batch_sz, nhidden)."""
        return torch.zeros(batch_sz, self.nhidden)
# %%
# Hyper-parameters and training loop for the conditioned char-RNN.
ncats = n_categories
ntokens = n_letters
nhidden = 128
nembed = 5
nout = n_letters
model = MyRNN(ncats, ntokens, nhidden, nembed, nout)
model.train()
nsteps = 10000
log_every = 500
batch_sz = 4
lr = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
sum_loss = 0.0
for step in range(nsteps):
    model.zero_grad()
    cat, input, target = make_batch(batch_sz)
    hidden = model.get_init_hidden(batch_sz)
    loss = torch.tensor(0.0)
    # Unroll the RNN over time; input/target are (seq_len, batch), so each
    # input[i] is one time step for the whole batch.
    for i in range(len(input)):
        hidden, output = model(cat, input[i], hidden)
        loss += nn.functional.cross_entropy(output, target[i])
    loss.backward()
    optimizer.step()
    sum_loss += loss.item()
    if step != 0 and step % log_every == 0:
        # Report the average loss over the last log_every steps.
        print(f'step: {step} / loss: {sum_loss / log_every}')
        sum_loss = 0.0
model.eval()
# %%
def get_sample(model, cat):
    """Autoregressively sample one name for category *cat* from *model*.

    Starts from the START token and feeds each sampled letter back in as the
    next input until the model emits END. Returns the generated string.
    """
    hidden = model.get_init_hidden(1)
    cat = categoryTensor(cat)
    input = torch.tensor(START)
    sample = []
    with torch.no_grad():
        while True:
            hidden, output = model(cat.unsqueeze(0), input.unsqueeze(0), hidden)
            # dim=-1 made explicit: implicit softmax dims are deprecated and
            # warn; for this (1, nout) output it is the same dimension.
            output_dist = nn.functional.softmax(output, dim=-1)[0]
            output = Categorical(output_dist).sample()
            if output == END:
                break
            input = output
            sample.append(output)
    return ''.join([all_letters[s.item()] for s in sample])
def get_samples(model, cat, nsamples):
    """Return a list of *nsamples* names generated for category *cat*."""
    return [get_sample(model, cat) for _ in range(nsamples)]
def print_samples():
    """Print 10 generated names for every known category."""
    for category in all_categories:
        print(category)
        print(get_samples(model, category, 10))
        print()
print_samples()
# %%
| en | 0.673496 | # Generates names from different languages using an RNN. # Reading material: # https://karpathy.github.io/2015/05/21/rnn-effectiveness/ # https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html # The data loading part of this code was copied and adapted from # https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html # %% # %% # Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427 # Read a file and split into lines # Build the category_lines dictionary, a list of lines per category # %% # Random item from a list # Get a random category and random line from that category # %% # %% # %% # %% | 3.017147 | 3 |
tests/test_auto_mixed_precision_model_path.py | Microsoft/onnxconverter-common | 0 | 6624239 | <reponame>Microsoft/onnxconverter-common
import unittest
import numpy as np
import onnxruntime as _ort
import onnx
import os
from distutils.version import LooseVersion as V
from onnxconverter_common.onnx_fx import Graph
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from onnxconverter_common.auto_mixed_precision_model_path import auto_convert_mixed_precision_model_path
def _ort_inference(model_path, inputs):
    """Load the ONNX model at *model_path* and run it on the feed dict *inputs*."""
    return _ort.InferenceSession(model_path).run(None, inputs)
Graph.inference_runtime = _ort_inference
Graph.opset = 9
onnx_function = Graph.trace
@unittest.skipIf(V(onnx.__version__) <= V('1.8.0'), "test for ONNX 1.8 and above")
@unittest.skipIf(get_maximum_opset_supported() < 9, "tests designed for ONNX opset 9 and greater")
@unittest.skipIf(not hasattr(onnx, "shape_inference"), "shape inference is required")
class AutoFloat16Test(unittest.TestCase):
    """End-to-end test of the path-based float32 -> mixed-fp16 converter."""
    def test_auto_mixed_precision_model_path(self):
        """Convert a small fp32 classifier on disk and compare outputs.

        Runs the fp32 model for a reference, converts it with external data
        stored in "tmp.data", then checks the converted model's outputs stay
        within 1% relative tolerance of the fp32 reference.
        """
        model32_name = "image_classifier32.onnx"
        working_path = os.path.abspath(os.path.dirname(__file__))
        data_path = os.path.join(working_path, 'data')
        model32_path = os.path.join(data_path, model32_name)
        # Random NCHW image batch used as the probe input for both models.
        input_x = np.random.rand(1, 3, 32, 32).astype(np.float32)
        expected = _ort_inference(model32_path, {'modelInput': input_x})
        model16_name = "image_classifier16.onnx"
        model16_path = os.path.join(data_path, model16_name)
        auto_convert_mixed_precision_model_path(
            model32_path, {'modelInput': input_x},
            model16_path, ['CPUExecutionProvider'], location="tmp.data",
            rtol=1e-2, keep_io_types=True, verbose=True)
        actual = _ort_inference(model16_path, {'modelInput': input_x.astype(np.float32)})
        self.assertTrue(np.allclose(expected, actual, rtol=0.01))
if __name__ == '__main__':
    # Run only this test case explicitly (instead of unittest.main discovery).
    suite = unittest.TestLoader().loadTestsFromTestCase(AutoFloat16Test)
    # suite.debug()
    unittest.TextTestRunner().run(suite)
| import unittest
import numpy as np
import onnxruntime as _ort
import onnx
import os
from distutils.version import LooseVersion as V
from onnxconverter_common.onnx_fx import Graph
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from onnxconverter_common.auto_mixed_precision_model_path import auto_convert_mixed_precision_model_path
def _ort_inference(model_path, inputs):
sess = _ort.InferenceSession(model_path)
return sess.run(None, inputs)
Graph.inference_runtime = _ort_inference
Graph.opset = 9
onnx_function = Graph.trace
@unittest.skipIf(V(onnx.__version__) <= V('1.8.0'), "test for ONNX 1.8 and above")
@unittest.skipIf(get_maximum_opset_supported() < 9, "tests designed for ONNX opset 9 and greater")
@unittest.skipIf(not hasattr(onnx, "shape_inference"), "shape inference is required")
class AutoFloat16Test(unittest.TestCase):
def test_auto_mixed_precision_model_path(self):
model32_name = "image_classifier32.onnx"
working_path = os.path.abspath(os.path.dirname(__file__))
data_path = os.path.join(working_path, 'data')
model32_path = os.path.join(data_path, model32_name)
input_x = np.random.rand(1, 3, 32, 32).astype(np.float32)
expected = _ort_inference(model32_path, {'modelInput': input_x})
model16_name = "image_classifier16.onnx"
model16_path = os.path.join(data_path, model16_name)
auto_convert_mixed_precision_model_path(
model32_path, {'modelInput': input_x},
model16_path, ['CPUExecutionProvider'], location="tmp.data",
rtol=1e-2, keep_io_types=True, verbose=True)
actual = _ort_inference(model16_path, {'modelInput': input_x.astype(np.float32)})
self.assertTrue(np.allclose(expected, actual, rtol=0.01))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(AutoFloat16Test)
# suite.debug()
unittest.TextTestRunner().run(suite) | de | 0.37605 | # suite.debug() | 2.157542 | 2 |
kosakana_fan_blog_get_text.py | yukou-isshiki/hinatazaka_blog | 0 | 6624240 | <reponame>yukou-isshiki/hinatazaka_blog
from urllib import request,parse
from urllib import error
from bs4 import BeautifulSoup
# Scrape the text of every blog post listed (one URL per line) in the input
# file and write it to kosakana_fan_blog_text.txt. Posts that return an HTTP
# error are skipped. `with` guarantees both files are closed even if a
# non-HTTPError exception escapes (the original leaked them in that case).
with open("kosakana_fan_blog_url.txt", "r") as f, \
        open("kosakana_fan_blog_text.txt", "w") as wf:
    for line in f:
        print(line)
        try:
            blog_html = request.urlopen(line)
            blog_soup = BeautifulSoup(blog_html, "html.parser")
            # When a "sleeping" ad block is present, the first entry-content
            # match is the ad, so the real post body is the second match.
            entries = blog_soup.select("div.entry-inner > div.entry-content")
            if blog_soup.select("div.sleeping-ad"):
                blog_text = entries[1].text
            else:
                blog_text = entries[0].text
            print(blog_text)
            wf.write(blog_text)
        except error.HTTPError:
            # Skip removed / inaccessible posts and keep going.
            continue
from urllib import error
from bs4 import BeautifulSoup
f = open("kosakana_fan_blog_url.txt", "r")
wf = open("kosakana_fan_blog_text.txt", "w")
lines = f.readlines()
for line in lines:
print(line)
try:
blog_html = request.urlopen(line)
blog_soup = BeautifulSoup(blog_html, "html.parser")
sleep_check = blog_soup.select("div.sleeping-ad")
if sleep_check == []:
blog_text = blog_soup.select("div.entry-inner > div.entry-content")[0].text
else:
blog_text = blog_soup.select("div.entry-inner > div.entry-content")[1].text
print(blog_text)
wf.write(blog_text)
except error.HTTPError:
continue
wf.close()
f.close() | none | 1 | 3.032402 | 3 | |
venv_mac/lib/python3.7/site-packages/flask_pagedown/fields.py | mazx4960/Notes-WebApp | 232 | 6624241 | <gh_stars>100-1000
from wtforms.fields import TextAreaField
from .widgets import PageDown
class PageDownField(TextAreaField):
    """A WTForms text area rendered with the PageDown Markdown editor widget."""
    widget = PageDown()
| from wtforms.fields import TextAreaField
from .widgets import PageDown
class PageDownField(TextAreaField):
widget = PageDown() | none | 1 | 1.875602 | 2 | |
Discord-Moderation-Bot-master/events/member.py | MonsterGaming9845/Alphy | 0 | 6624242 | import inspect
import sys
import time
import discord
from helpers.embed_builder import EmbedBuilder
from events.base import EventHandler
class MemberJoinEvent(EventHandler):
    """Re-applies or expires a member's mute when they rejoin the guild.

    Prevents mute evasion via leave/rejoin: an unexpired (or permanent) mute
    is re-applied, an expired one is removed from storage and logged.
    """
    def __init__(self, client_instance):
        self.client = client_instance
        self.storage = self.client.storage
        # Discord gateway event this handler subscribes to.
        self.event = "on_member_join"
    async def handle(self, *args, **kwargs):
        # Get member from args
        member = args[0]
        guild = member.guild
        guild_id = str(guild.id)
        muted_role_id = int(self.storage.settings["guilds"][guild_id]["muted_role_id"])
        log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
        muted_role = guild.get_role(muted_role_id)
        log_channel = guild.get_channel(log_channel_id)
        muted_users = self.storage.settings["guilds"][guild_id]["muted_users"]
        mutes_to_remove = []
        # Loop over the stored mutes, but only act on the joining member.
        for user_id_str, mute_info in muted_users.items():
            # Skip early: the old code called guild.fetch_member() for EVERY
            # stored mute before comparing ids, which wasted API calls and
            # raised NotFound for muted users no longer in the guild.
            if int(user_id_str) != member.id:
                continue
            duration = int(mute_info["duration"])
            normal_duration = mute_info["normal_duration"]
            if -1 < duration < int(time.time()):
                # Mute is expired. Remove it from the guild's storage.
                mutes_to_remove.append(member.id)
                # Build a mute expire embed and message it to the log channel.
                embed_builder = EmbedBuilder(event="muteexpire")
                await embed_builder.add_field(name="**Unmuted user**", value=f"`{member.name}`")
                await embed_builder.add_field(name="**Mute duration**", value=f"`{normal_duration}`")
                embed = await embed_builder.get_embed()
                await log_channel.send(embed=embed)
            else:
                # Mute is not expired (or permanent). Re-add it to the offender.
                await member.add_roles(muted_role, reason="Remuted user since they had an active mute when they rejoined the server")
        for user_id in mutes_to_remove:
            self.storage.settings["guilds"][guild_id]["muted_users"].pop(str(user_id))
        await self.storage.write_settings_file_to_disk()
class MemberBanEvent(EventHandler):
    """Logs bans performed by moderators (not by the bot) to the log channel."""
    def __init__(self, client_instance):
        self.client = client_instance
        self.storage = self.client.storage
        # Discord gateway event this handler subscribes to; it supplies
        # (guild, user), so args[0] below is the Guild.
        self.event = "on_member_ban"
    async def handle(self, *args, **kwargs):
        """Post an embed for every recent ban audit entry not yet logged."""
        # Get the guild from the args
        guild = args[0]
        guild_id = str(guild.id)
        log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
        log_channel = guild.get_channel(log_channel_id)
        # Get the actions we already logged recently: scan embeds in the last
        # 25 log messages for their "**Audit Log ID**" fields so the same
        # audit entry is never posted twice.
        logged_actions = []
        async for message in log_channel.history(limit=25):
            for embed in message.embeds:
                for field in embed.fields:
                    if field.name == "**Audit Log ID**":
                        logged_actions.append(int(field.value.replace("`", "")))
        # Get recent ban actions
        async for entry in guild.audit_logs(action=discord.AuditLogAction.ban, limit=5):
            # If the entry was made by the bot or it's entry ID has already been logged, skip it
            if entry.user == self.client.user or entry.id in logged_actions:
                continue
            else:
                # Build a ban embed with the info.
                embed_builder = EmbedBuilder(event="ban")
                await embed_builder.add_field(name="**Executor**", value=f"`{entry.user.name}`")
                await embed_builder.add_field(name="**Banned User**", value=f"`{entry.target.name}`")
                await embed_builder.add_field(name="**Reason**", value=f"`{entry.reason}`")
                await embed_builder.add_field(name="**Audit Log ID**", value=f"`{entry.id}`")
                embed = await embed_builder.get_embed()
                await log_channel.send(embed=embed)
class MemberKickEvent(EventHandler):
    """Logs kicks performed by moderators (not by the bot) to the log channel.

    Subscribes to on_member_remove (which fires for any departure) and uses
    the audit log to identify which departures were actual kicks.
    """
    def __init__(self, client_instance):
        self.client = client_instance
        self.storage = self.client.storage
        self.event = "on_member_remove"
    async def handle(self, *args, **kwargs):
        """Post an embed for every recent kick audit entry not yet logged."""
        # Get the guild from the args
        # NOTE(review): discord.py's on_member_remove supplies a Member, not a
        # Guild — confirm the dispatcher passes the guild here as assumed.
        guild = args[0]
        guild_id = str(guild.id)
        log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
        log_channel = guild.get_channel(log_channel_id)
        # Get the actions we already logged recently: scan embeds in the last
        # 25 log messages for their "**Audit Log ID**" fields.
        logged_actions = []
        async for message in log_channel.history(limit=25):
            for embed in message.embeds:
                for field in embed.fields:
                    if field.name == "**Audit Log ID**":
                        logged_actions.append(int(field.value.replace("`", "")))
        # Get recent kick actions
        async for entry in guild.audit_logs(action=discord.AuditLogAction.kick, limit=5):
            # If the entry was made by the bot or it's entry ID has already been logged, skip it.
            if entry.user == self.client.user or entry.id in logged_actions:
                continue
            else:
                # Build a kick embed with the info.
                embed_builder = EmbedBuilder(event="kick")
                await embed_builder.add_field(name="**Executor**", value=f"`{entry.user.name}`")
                await embed_builder.add_field(name="**Kicked User**", value=f"`{entry.target.name}`")
                await embed_builder.add_field(name="**Reason**", value=f"`{entry.reason}`")
                await embed_builder.add_field(name="**Audit Log ID**", value=f"`{entry.id}`")
                embed = await embed_builder.get_embed()
                await log_channel.send(embed=embed)
# Collects a list of classes in the file
classes = inspect.getmembers(sys.modules[__name__], lambda member: inspect.isclass(member) and member.__module__ == __name__)
| import inspect
import sys
import time
import discord
from helpers.embed_builder import EmbedBuilder
from events.base import EventHandler
class MemberJoinEvent(EventHandler):
def __init__(self, client_instance):
self.client = client_instance
self.storage = self.client.storage
self.event = "on_member_join"
async def handle(self, *args, **kwargs):
# Get member from args
member = args[0]
guild = member.guild
guild_id = str(guild.id)
muted_role_id = int(self.storage.settings["guilds"][guild_id]["muted_role_id"])
log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
muted_role = guild.get_role(muted_role_id)
log_channel = guild.get_channel(log_channel_id)
muted_users = self.storage.settings["guilds"][guild_id]["muted_users"]
mutes_to_remove = []
# Loop over the muted users
for user_info in muted_users.items():
user_id = int(user_info[0])
duration = int(user_info[1]["duration"])
normal_duration = user_info[1]["normal_duration"]
user = await guild.fetch_member(user_id)
# if the user_id for this user_info matches the member who joined the guild
if user_id == member.id:
if -1 < duration < int(time.time()):
# Mute is expired. Remove it from the guild's storage
mutes_to_remove.append(user_id)
# Build a mute expire embed and message it to the log channel
embed_builder = EmbedBuilder(event="muteexpire")
await embed_builder.add_field(name="**Unmuted user**", value=f"`{user.name}`")
await embed_builder.add_field(name="**Mute duration**", value=f"`{normal_duration}`")
embed = await embed_builder.get_embed()
await log_channel.send(embed=embed)
else:
# Mute is not expired. Re-add it to the offender
await user.add_roles(muted_role, reason="Remuted user since they had an active mute when they rejoined the server")
for user_id in mutes_to_remove:
self.storage.settings["guilds"][guild_id]["muted_users"].pop(str(user_id))
await self.storage.write_settings_file_to_disk()
class MemberBanEvent(EventHandler):
def __init__(self, client_instance):
self.client = client_instance
self.storage = self.client.storage
self.event = "on_member_ban"
async def handle(self, *args, **kwargs):
# Get the guild from the args
guild = args[0]
guild_id = str(guild.id)
log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
log_channel = guild.get_channel(log_channel_id)
# Get the actions we already logged recently
logged_actions = []
async for message in log_channel.history(limit=25):
for embed in message.embeds:
for field in embed.fields:
if field.name == "**Audit Log ID**":
logged_actions.append(int(field.value.replace("`", "")))
# Get recent ban actions
async for entry in guild.audit_logs(action=discord.AuditLogAction.ban, limit=5):
# If the entry was made by the bot or it's entry ID has already been logged, skip it
if entry.user == self.client.user or entry.id in logged_actions:
continue
else:
# Build a ban embed with the info.
embed_builder = EmbedBuilder(event="ban")
await embed_builder.add_field(name="**Executor**", value=f"`{entry.user.name}`")
await embed_builder.add_field(name="**Banned User**", value=f"`{entry.target.name}`")
await embed_builder.add_field(name="**Reason**", value=f"`{entry.reason}`")
await embed_builder.add_field(name="**Audit Log ID**", value=f"`{entry.id}`")
embed = await embed_builder.get_embed()
await log_channel.send(embed=embed)
class MemberKickEvent(EventHandler):
def __init__(self, client_instance):
self.client = client_instance
self.storage = self.client.storage
self.event = "on_member_remove"
async def handle(self, *args, **kwargs):
# Get the guild from the args
guild = args[0]
guild_id = str(guild.id)
log_channel_id = int(self.storage.settings["guilds"][guild_id]["log_channel_id"])
log_channel = guild.get_channel(log_channel_id)
# Get the actions we already logged recently
logged_actions = []
async for message in log_channel.history(limit=25):
for embed in message.embeds:
for field in embed.fields:
if field.name == "**Audit Log ID**":
logged_actions.append(int(field.value.replace("`", "")))
# Get recent kick actions
async for entry in guild.audit_logs(action=discord.AuditLogAction.kick, limit=5):
# If the entry was made by the bot or it's entry ID has already been logged, skip it.
if entry.user == self.client.user or entry.id in logged_actions:
continue
else:
# Build a kick embed with the info.
embed_builder = EmbedBuilder(event="kick")
await embed_builder.add_field(name="**Executor**", value=f"`{entry.user.name}`")
await embed_builder.add_field(name="**Kicked User**", value=f"`{entry.target.name}`")
await embed_builder.add_field(name="**Reason**", value=f"`{entry.reason}`")
await embed_builder.add_field(name="**Audit Log ID**", value=f"`{entry.id}`")
embed = await embed_builder.get_embed()
await log_channel.send(embed=embed)
# Collects a list of classes in the file
classes = inspect.getmembers(sys.modules[__name__], lambda member: inspect.isclass(member) and member.__module__ == __name__)
| en | 0.950934 | # Get member from args # Loop over the muted users # if the user_id for this user_info matches the member who joined the guild # Mute is expired. Remove it from the guild's storage # Build a mute expire embed and message it to the log channel # Mute is not expired. Re-add it to the offender # Get the guild from the args # Get the actions we already logged recently # Get recent ban actions # If the entry was made by the bot or it's entry ID has already been logged, skip it # Build a ban embed with the info. # Get the guild from the args # Get the actions we already logged recently # Get recent kick actions # If the entry was made by the bot or it's entry ID has already been logged, skip it. # Build a kick embed with the info. # Collects a list of classes in the file | 2.396878 | 2 |
basic_programs/IsQuadConvex.py | manuaatitya/computer_graphics | 0 | 6624243 | class Point:
def __init__(self,x,y):
self.x = x
self.y = y
class Quad:
    """A quadrilateral given by four vertices in cyclic order.

    quadConvexity() sets self.isConvex to 1 when the quadrilateral is convex
    (each diagonal strictly separates the other two vertices); otherwise it
    stays 0.
    """
    def __init__(self):
        self.quad = []      # vertices in cyclic order (objects with .x/.y)
        self.isConvex = 0   # set to 1 by quadConvexity() when convex
    def dot(self, vector1, vector2):
        """Return the 2-D dot product of vector1 and vector2."""
        # Fixed: the original multiplied vector1.x by itself instead of by
        # vector2.x, so the dot product was wrong whenever v1.x != v2.x.
        return vector1.x * vector2.x + vector1.y * vector2.y
    def cross(self, vector1, vector2):
        """Return the z-component of the 2-D cross product vector1 x vector2."""
        return vector1.x * vector2.y - vector1.y * vector2.x
    def vector(self, point1, point2):
        """Return the vector from point1 to point2 as a Point."""
        return Point(point2.x - point1.x, point2.y - point1.y)
    def getQuadData(self):
        """Read the four vertices interactively from stdin (cyclic order)."""
        print('\nEnter the data for the quadrilateral in a cyclic order \n')
        for i in range(4):
            x, y = map(float, input('Enter the x and y cordinate of the Point {} of the quadrilateral '.format(i + 1)).split())
            self.quad.append(Point(x, y))
    def quadConvexity(self):
        """Set isConvex to 1 iff both diagonals separate the opposite vertices."""
        bd = self.vector(self.quad[1], self.quad[3])  # diagonal B->D
        ba = self.vector(self.quad[1], self.quad[0])  # edge B->A
        bc = self.vector(self.quad[1], self.quad[2])  # edge B->C
        ac = self.vector(self.quad[0], self.quad[2])  # diagonal A->C
        ad = self.vector(self.quad[0], self.quad[3])  # edge A->D
        ab = self.vector(self.quad[0], self.quad[1])  # edge A->B
        # A and C lie on opposite sides of BD iff the cross products have
        # opposite signs (product < 0); likewise B and D relative to AC.
        normal1 = self.cross(bd, ba) * self.cross(bd, bc)
        normal2 = self.cross(ac, ad) * self.cross(ac, ab)
        if normal1 < 0 and normal2 < 0:
            self.isConvex = 1
self.isConvex = 1 | class Point:
def __init__(self,x,y):
self.x = x
self.y = y
class Quad:
def __init__(self):
self.quad = []
self.isConvex = 0
def dot(self, vector1, vector2):
return vector1.x * vector1.x + vector1.y * vector2.y
def cross(self, vector1, vector2):
return vector1.x * vector2.y - vector1.y * vector2.x
def vector(self, point1, point2):
return Point(point2.x - point1.x, point2.y - point1.y)
def getQuadData(self):
print('\nEnter the data for the quadrilateral in a cyclic order \n')
for i in range(4):
x,y = map(float,input('Enter the x and y cordinate of the Point {} of the quadrilateral '.format(i+1)).split())
self.quad.append(Point(x,y))
def quadConvexity(self):
bd = self.vector(self.quad[1], self.quad[3]) # BD vector
ba = self.vector(self.quad[1], self.quad[0]) # BA vector
bc = self.vector(self.quad[1], self.quad[2]) # BC vector
ac = self.vector(self.quad[0], self.quad[2]) # AC vector
ad = self.vector(self.quad[0], self.quad[3]) # AD vector
ab = self.vector(self.quad[0], self.quad[1]) # AB vector
normal1 = self.cross(bd,ba) * self.cross(bd,bc)
normal2 = self.cross(ac,ad) * self.cross(ac,ab)
if(normal1 < 0 and normal2 < 0):
self.isConvex = 1 | en | 0.803837 | # BD vector # BA vector # BC vector # AC vector # AD vector # AB vector | 3.757424 | 4 |
tpch/tf_tpu.py | pdet/tpc-tpu | 3 | 6624244 | import numpy as np
import pandas as pd
import os
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
import sys
from subprocess import Popen
import time
# Module-level lineitem columns, filled in by load_input(). Initialized to 0
# as placeholders; after loading they hold numpy arrays (dates as YYYYMMDD
# integers, flag/status dictionary-encoded as float32).
l_shipdate = 0
l_discount = 0
l_quantity = 0
l_extendedprice = 0
l_tax = 0
l_returnflag = 0
l_linestatus = 0
# Number of distinct group keys for Q1's GROUP BY, set by q1().
l_returnflag_group_size = 0
l_linestatus_group_size = 0
# Turn dates into integers
def date_to_integer(dt_time):
    """Convert an ISO 'YYYY-MM-DD' date string to the integer YYYYMMDD."""
    year, month, day = (int(piece) for piece in dt_time.split("-"))
    return 10000 * year + 100 * month + day
def load_input(scale):
    """Load the TPC-H lineitem table for scale factor *scale* into globals.

    Reads lineitem.tbl from /home/pedroholanda/tpch-<scale> and fills the
    module-level column arrays used by Q1 and Q6. Dates are converted to
    YYYYMMDD integers and the returnflag/linestatus categoricals are
    dictionary-encoded as float32 so every column can be fed to the TPU as a
    numeric tensor.
    """
    global l_shipdate
    global l_discount
    global l_quantity
    global l_extendedprice
    global l_tax
    global l_returnflag
    global l_linestatus
    # NOTE(review): hard-coded absolute path plus an os.chdir() side effect —
    # assumes the generated TPC-H data lives under this user's home directory.
    os.chdir('/home/pedroholanda/tpch-' + str(scale))
    lineitem = pd.read_csv("lineitem.tbl", sep='|',
                           names=["l_orderkey", "l_partkey", "l_suppkey", "l_linenumber", "l_quantity",
                                  "l_extendedprice", "l_discount", "l_tax", "l_returnflag", "l_linestatus",
                                  "l_shipdate",
                                  "l_commitdate", "l_receiptdate", "l_shipinstruct", "l_shipmode", "l_comment"],
                           dtype={'l_returnflag': 'category', 'l_linestatus': 'category'})
    # Query 01 and 06
    l_shipdate = lineitem["l_shipdate"].values.astype('S10')
    l_discount = lineitem["l_discount"].values.astype('float32')
    l_quantity = lineitem["l_quantity"].values.astype('float32')
    l_extendedprice = lineitem["l_extendedprice"].values.astype('float32')
    # Query 01
    l_tax = lineitem["l_tax"].values.astype('float32')
    l_returnflag = lineitem["l_returnflag"].values.astype('S1')
    l_linestatus = lineitem["l_linestatus"].values.astype('S1')
    # Dictionaries: map the categorical codes to small numeric labels so the
    # columns can be compared on-device as float32.
    l_returnflag[l_returnflag == "A"] = "1"
    l_returnflag[l_returnflag == "N"] = "2"
    l_returnflag[l_returnflag == "R"] = "3"
    l_returnflag = l_returnflag.astype(np.float32, copy=False)
    l_linestatus[l_linestatus == "F"] = "1"
    l_linestatus[l_linestatus == "O"] = "2"
    l_linestatus = l_linestatus.astype(np.float32, copy=False)
    # Convert 'YYYY-MM-DD' strings to YYYYMMDD integers element-wise.
    vfunc = np.vectorize(date_to_integer)
    l_shipdate = vfunc(l_shipdate)
def q1_computation(shipdate, returnflag, linestatus, quantity, extendedprice, discount, tax, returnflag_groups_tensors,
                   linestatus_groups_tensors):
    """TPC-H Q1 as a TensorFlow graph: grouped pricing summary over lineitem.

    Emulates GROUP BY (l_returnflag, l_linestatus) by building one boolean
    mask per (returnflag, linestatus) pair and masking the measure columns
    with tf.where before reducing. Returns a (n_groups + 1, 8) float32
    tensor; the first row comes from the initial tf.constant seed
    (NOTE(review): presumably zeros — confirm) and each following row holds
    [sum_qty, sum_base_price, sum_disc_price, sum_charge,
     avg_qty, avg_price, avg_disc, count] for one group.
    """
    zeros = tf.zeros_like(discount)
    ones = tf.ones_like(discount)
    minus_one = tf.constant(-1.0, dtype=tf.float32)
    # Group keys arrive as tensors; unstack to Python lists so the mask
    # construction below can loop over every (returnflag, linestatus) pair.
    returnflag_groups = tf.unstack(returnflag_groups_tensors, l_returnflag_group_size)
    linestatus_groups = tf.unstack(linestatus_groups_tensors, l_linestatus_group_size)
    group_filters = []
    # WHERE l_shipdate <= 1998-09-01 (dates are encoded as YYYYMMDD integers).
    shipdate = tf.less_equal(shipdate, 19980901)
    for returnflag_group in returnflag_groups:
        for linestatus_group in linestatus_groups:
            returnflag_aux = tf.cast(tf.where(tf.equal(returnflag, returnflag_group), ones, zeros), tf.bool)
            linestatus_aux = tf.cast(tf.where(tf.equal(linestatus, linestatus_group), ones, zeros), tf.bool)
            group_filters.append(tf.logical_and(tf.logical_and(returnflag_aux, linestatus_aux), shipdate))
    result = tf.constant([], dtype=tf.float32, shape=[8])
    for group_filter in group_filters:
        sum_qty = tf.reduce_sum(tf.where(group_filter, quantity, zeros))
        sum_base_price = tf.reduce_sum(tf.where(group_filter, extendedprice, zeros))
        # sum(l_extendedprice * (1 - l_discount))
        sum_disc_price = tf.reduce_sum(
            tf.where(group_filter, tf.multiply(tf.add(ones, tf.multiply(minus_one, discount)), extendedprice), zeros))
        # sum(l_extendedprice * (1 - l_discount) * (1 + l_tax))
        sum_charge = tf.reduce_sum(tf.where(group_filter, tf.multiply(
            tf.multiply(tf.add(ones, tf.multiply(minus_one, discount)), extendedprice), tf.add(ones, tax))
                                            , zeros))
        count = tf.reduce_sum(tf.cast(group_filter, tf.float32))
        avg_qty = tf.div(tf.reduce_sum(tf.where(group_filter, quantity, zeros)), tf.reduce_sum(count))
        avg_price = tf.div(tf.reduce_sum(tf.where(group_filter, extendedprice, zeros)), tf.reduce_sum(count))
        avg_disc = tf.div(tf.reduce_sum(tf.where(group_filter, discount, zeros)), tf.reduce_sum(count))
        result = tf.concat([result, tf.stack(
            [sum_qty, sum_base_price, sum_disc_price, sum_charge, avg_qty, avg_price, avg_disc, count])], axis=0)
    result = tf.reshape(result, [l_returnflag_group_size * l_linestatus_group_size + 1, 8])
    return result
def q1():
    """Run TPC-H Q1 on the TPU and return the last run's result matrix.

    Derives the distinct group keys on the host with NumPy (and publishes
    their counts through module globals read by q1_computation), rewrites the
    computation for the TPU, and executes it 5 times (simple repeat/warm-up
    loop); only the final result is printed and returned.
    """
    global l_returnflag_group_size
    global l_linestatus_group_size
    returnflag_groups = np.unique(l_returnflag)
    linestatus_groups = np.unique(l_linestatus)
    l_returnflag_group_size = len(returnflag_groups)
    l_linestatus_group_size = len(linestatus_groups)
    inputs = [tf.convert_to_tensor(l_shipdate, np.float32), tf.convert_to_tensor(l_returnflag, np.float32),
              tf.convert_to_tensor(l_linestatus, np.float32), tf.convert_to_tensor(l_quantity, np.float32),
              tf.convert_to_tensor(l_extendedprice, np.float32), tf.convert_to_tensor(l_discount, np.float32),
              tf.convert_to_tensor(l_tax, np.float32), tf.convert_to_tensor(returnflag_groups, np.float32),
              tf.convert_to_tensor(linestatus_groups, np.float32)]
    tpu_computation = tpu.rewrite(q1_computation, inputs)
    # Resolve the gRPC endpoint of the TPU named in $TPU_NAME.
    tpu_grpc_url = TPUClusterResolver(
        tpu=[os.environ['TPU_NAME']]).get_master()
    with tf.Session(tpu_grpc_url) as sess:
        sess.run(tpu.initialize_system())
        sess.run(tf.global_variables_initializer())
        for i in range(0,5):
            res = sess.run(tpu_computation)
        sess.run(tpu.shutdown_system())
    print (res)
    return res
def q6_computation(shipdate, discount, quantity, extendedprice):
    """TPC-H Q6 as a TensorFlow graph: forecast revenue change.

    Predicate: 0.05 <= l_discount <= 0.07, l_quantity < 24, and
    1994-01-01 <= l_shipdate < 1995-01-01 (dates encoded as YYYYMMDD ints).
    Returns the scalar sum(l_extendedprice * l_discount) over qualifying rows.
    """
    zeros = tf.zeros_like(discount)
    complete_filter = tf.logical_and(tf.greater_equal(discount, 0.05), tf.logical_and(tf.less_equal(discount, 0.07),
                                                                                      tf.logical_and(
                                                                                          tf.less(quantity, 24),
                                                                                          tf.logical_and(
                                                                                              tf.less(shipdate,
                                                                                                      19950101),
                                                                                              tf.greater_equal(shipdate,
                                                                                                               19940101)))))
    # Mask both factors with the predicate, then reduce to the revenue sum.
    result = tf.reduce_sum(
        tf.multiply(tf.where(complete_filter, extendedprice, zeros), tf.where(complete_filter, discount, zeros)))
    return result
def q6():
    """Run TPC-H Q6 on the TPU and return the last run's scalar revenue.

    Uploads the four needed columns as float32 tensors, rewrites
    q6_computation for the TPU, and executes it 5 times (simple repeat /
    warm-up loop); only the final result is printed and returned.
    """
    inputs = [tf.convert_to_tensor(l_shipdate, np.float32), tf.convert_to_tensor(l_discount, np.float32),
              tf.convert_to_tensor(l_quantity, np.float32), tf.convert_to_tensor(l_extendedprice, np.float32)]
    tpu_computation = tpu.rewrite(q6_computation, inputs)
    # Resolve the gRPC endpoint of the TPU named in $TPU_NAME.
    tpu_grpc_url = TPUClusterResolver(
        tpu=[os.environ['TPU_NAME']]).get_master()
    with tf.Session(tpu_grpc_url) as sess:
        sess.run(tpu.initialize_system())
        sess.run(tf.global_variables_initializer())
        for i in range(0,5):
            res = sess.run(tpu_computation)
        sess.run(tpu.shutdown_system())
    print(res)
    return res
def run_tpch(scale):
    """Load the scale-factor *scale* lineitem data and run Q1 (Q6 disabled)."""
    load_input(scale)
    # Optional: capture a TPU profile while the queries run.
    # Popen('capture_tpu_profile --tpu=$TPU_NAME --logdir=${STORAGE_BUCKET}/res --duration_ms=60000 --num_tracing_attempts=10', shell=True,stdin=None, stdout=None, stderr=None, close_fds=True)
    # time.sleep(5)
    q1()
    # q6()
if __name__ == "__main__":
    # argv[1]: TPC-H scale factor (e.g. 1 for SF1), selects the data directory.
    scale = int(sys.argv[1])
    run_tpch(scale)
| import numpy as np
import pandas as pd
import os
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
import sys
from subprocess import Popen
import time
l_shipdate = 0
l_discount = 0
l_quantity = 0
l_extendedprice = 0
l_tax = 0
l_returnflag = 0
l_linestatus = 0
l_returnflag_group_size = 0
l_linestatus_group_size = 0
# Turn dates into integers
def date_to_integer(dt_time):
dt_time = dt_time.split("-")
return 10000 * int(dt_time[0]) + 100 * int(dt_time[1]) + int(dt_time[2])
def load_input(scale):
global l_shipdate
global l_discount
global l_quantity
global l_extendedprice
global l_tax
global l_returnflag
global l_linestatus
os.chdir('/home/pedroholanda/tpch-' + str(scale))
lineitem = pd.read_csv("lineitem.tbl", sep='|',
names=["l_orderkey", "l_partkey", "l_suppkey", "l_linenumber", "l_quantity",
"l_extendedprice", "l_discount", "l_tax", "l_returnflag", "l_linestatus",
"l_shipdate",
"l_commitdate", "l_receiptdate", "l_shipinstruct", "l_shipmode", "l_comment"],
dtype={'l_returnflag': 'category', 'l_linestatus': 'category'})
# Query 01 and 06
l_shipdate = lineitem["l_shipdate"].values.astype('S10')
l_discount = lineitem["l_discount"].values.astype('float32')
l_quantity = lineitem["l_quantity"].values.astype('float32')
l_extendedprice = lineitem["l_extendedprice"].values.astype('float32')
# Query 01
l_tax = lineitem["l_tax"].values.astype('float32')
l_returnflag = lineitem["l_returnflag"].values.astype('S1')
l_linestatus = lineitem["l_linestatus"].values.astype('S1')
# Dictionaries
l_returnflag[l_returnflag == "A"] = "1"
l_returnflag[l_returnflag == "N"] = "2"
l_returnflag[l_returnflag == "R"] = "3"
l_returnflag = l_returnflag.astype(np.float32, copy=False)
l_linestatus[l_linestatus == "F"] = "1"
l_linestatus[l_linestatus == "O"] = "2"
l_linestatus = l_linestatus.astype(np.float32, copy=False)
vfunc = np.vectorize(date_to_integer)
l_shipdate = vfunc(l_shipdate)
def q1_computation(shipdate, returnflag, linestatus, quantity, extendedprice, discount, tax, returnflag_groups_tensors,
linestatus_groups_tensors):
zeros = tf.zeros_like(discount)
ones = tf.ones_like(discount)
minus_one = tf.constant(-1.0, dtype=tf.float32)
returnflag_groups = tf.unstack(returnflag_groups_tensors, l_returnflag_group_size)
linestatus_groups = tf.unstack(linestatus_groups_tensors, l_linestatus_group_size)
group_filters = []
shipdate = tf.less_equal(shipdate, 19980901)
for returnflag_group in returnflag_groups:
for linestatus_group in linestatus_groups:
returnflag_aux = tf.cast(tf.where(tf.equal(returnflag, returnflag_group), ones, zeros), tf.bool)
linestatus_aux = tf.cast(tf.where(tf.equal(linestatus, linestatus_group), ones, zeros), tf.bool)
group_filters.append(tf.logical_and(tf.logical_and(returnflag_aux, linestatus_aux), shipdate))
result = tf.constant([], dtype=tf.float32, shape=[8])
for group_filter in group_filters:
sum_qty = tf.reduce_sum(tf.where(group_filter, quantity, zeros))
sum_base_price = tf.reduce_sum(tf.where(group_filter, extendedprice, zeros))
sum_disc_price = tf.reduce_sum(
tf.where(group_filter, tf.multiply(tf.add(ones, tf.multiply(minus_one, discount)), extendedprice), zeros))
sum_charge = tf.reduce_sum(tf.where(group_filter, tf.multiply(
tf.multiply(tf.add(ones, tf.multiply(minus_one, discount)), extendedprice), tf.add(ones, tax))
, zeros))
count = tf.reduce_sum(tf.cast(group_filter, tf.float32))
avg_qty = tf.div(tf.reduce_sum(tf.where(group_filter, quantity, zeros)), tf.reduce_sum(count))
avg_price = tf.div(tf.reduce_sum(tf.where(group_filter, extendedprice, zeros)), tf.reduce_sum(count))
avg_disc = tf.div(tf.reduce_sum(tf.where(group_filter, discount, zeros)), tf.reduce_sum(count))
result = tf.concat([result, tf.stack(
[sum_qty, sum_base_price, sum_disc_price, sum_charge, avg_qty, avg_price, avg_disc, count])], axis=0)
result = tf.reshape(result, [l_returnflag_group_size * l_linestatus_group_size + 1, 8])
return result
def q1():
global l_returnflag_group_size
global l_linestatus_group_size
returnflag_groups = np.unique(l_returnflag)
linestatus_groups = np.unique(l_linestatus)
l_returnflag_group_size = len(returnflag_groups)
l_linestatus_group_size = len(linestatus_groups)
inputs = [tf.convert_to_tensor(l_shipdate, np.float32), tf.convert_to_tensor(l_returnflag, np.float32),
tf.convert_to_tensor(l_linestatus, np.float32), tf.convert_to_tensor(l_quantity, np.float32),
tf.convert_to_tensor(l_extendedprice, np.float32), tf.convert_to_tensor(l_discount, np.float32),
tf.convert_to_tensor(l_tax, np.float32), tf.convert_to_tensor(returnflag_groups, np.float32),
tf.convert_to_tensor(linestatus_groups, np.float32)]
tpu_computation = tpu.rewrite(q1_computation, inputs)
tpu_grpc_url = TPUClusterResolver(
tpu=[os.environ['TPU_NAME']]).get_master()
with tf.Session(tpu_grpc_url) as sess:
sess.run(tpu.initialize_system())
sess.run(tf.global_variables_initializer())
for i in range(0,5):
res = sess.run(tpu_computation)
sess.run(tpu.shutdown_system())
print (res)
return res
def q6_computation(shipdate, discount, quantity, extendedprice):
zeros = tf.zeros_like(discount)
complete_filter = tf.logical_and(tf.greater_equal(discount, 0.05), tf.logical_and(tf.less_equal(discount, 0.07),
tf.logical_and(
tf.less(quantity, 24),
tf.logical_and(
tf.less(shipdate,
19950101),
tf.greater_equal(shipdate,
19940101)))))
result = tf.reduce_sum(
tf.multiply(tf.where(complete_filter, extendedprice, zeros), tf.where(complete_filter, discount, zeros)))
return result
def q6():
inputs = [tf.convert_to_tensor(l_shipdate, np.float32), tf.convert_to_tensor(l_discount, np.float32),
tf.convert_to_tensor(l_quantity, np.float32), tf.convert_to_tensor(l_extendedprice, np.float32)]
tpu_computation = tpu.rewrite(q6_computation, inputs)
tpu_grpc_url = TPUClusterResolver(
tpu=[os.environ['TPU_NAME']]).get_master()
with tf.Session(tpu_grpc_url) as sess:
sess.run(tpu.initialize_system())
sess.run(tf.global_variables_initializer())
for i in range(0,5):
res = sess.run(tpu_computation)
sess.run(tpu.shutdown_system())
print(res)
return res
def run_tpch(scale):
load_input(scale)
# Popen('capture_tpu_profile --tpu=$TPU_NAME --logdir=${STORAGE_BUCKET}/res --duration_ms=60000 --num_tracing_attempts=10', shell=True,stdin=None, stdout=None, stderr=None, close_fds=True)
# time.sleep(5)
q1()
# q6()
if __name__ == "__main__":
scale = int(sys.argv[1])
run_tpch(scale)
| en | 0.3432 | # Turn dates into integers # Query 01 and 06 # Query 01 # Dictionaries # Popen('capture_tpu_profile --tpu=$TPU_NAME --logdir=${STORAGE_BUCKET}/res --duration_ms=60000 --num_tracing_attempts=10', shell=True,stdin=None, stdout=None, stderr=None, close_fds=True) # time.sleep(5) # q6() | 2.208227 | 2 |
limix_ext/rglmm/test/test_rglmm.py | glimix/limix-ext | 0 | 6624245 | import limix_ext as lext
from numpy.testing import assert_
import numpy as np
import os
from os.path import join
def test_rglmm_binomial():
folder = os.path.dirname(os.path.realpath(__file__))
nsuc = np.load(join(folder, "nsuc.npy"))
ntri = np.load(join(folder, "ntri.npy"))
K = np.load(join(folder, "K.npy"))
X0 = np.load(join(folder, "X.npy"))
X = X0[:, :2].copy()
G = X0[:, 2:].copy()
r = lext.rglmm.rglmm_binomial(nsuc, ntri, X, K, G)
pv = r['pv']
assert_(pv[0] <= 1)
| import limix_ext as lext
from numpy.testing import assert_
import numpy as np
import os
from os.path import join
def test_rglmm_binomial():
folder = os.path.dirname(os.path.realpath(__file__))
nsuc = np.load(join(folder, "nsuc.npy"))
ntri = np.load(join(folder, "ntri.npy"))
K = np.load(join(folder, "K.npy"))
X0 = np.load(join(folder, "X.npy"))
X = X0[:, :2].copy()
G = X0[:, 2:].copy()
r = lext.rglmm.rglmm_binomial(nsuc, ntri, X, K, G)
pv = r['pv']
assert_(pv[0] <= 1)
| none | 1 | 2.040982 | 2 | |
twml/__init__.py | thingswise/tw-ml-sdk-python | 1 | 6624246 | <reponame>thingswise/tw-ml-sdk-python
# Copyright 2016 Thingswise, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._version import get_versions
__version__ = get_versions()['version']
#del get_versions
from metadata_query import MetadataQuery
from template import Template
from data_query import DataQuery
from model_helper import Filter, AndFilter,\
HasCoord, TimeseriesFeatureExtractor, TimeseriesFeatureCoordinateExtractor,\
OnTimeExtractor, StopsExtractor, Extract, Progress, ModelHelper, Scale, \
Sum, TimeseriesCollectionFeatureExtractor, TimeseriesCollectionFeatureCoordinateExtractor, \
diag_vect, matrix, stack, VectorList
from models import Model, Scatter, KMeansModel, KMeansModelFactory
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| # Copyright 2016 Thingswise, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._version import get_versions
__version__ = get_versions()['version']
#del get_versions
from metadata_query import MetadataQuery
from template import Template
from data_query import DataQuery
from model_helper import Filter, AndFilter,\
HasCoord, TimeseriesFeatureExtractor, TimeseriesFeatureCoordinateExtractor,\
OnTimeExtractor, StopsExtractor, Extract, Progress, ModelHelper, Scale, \
Sum, TimeseriesCollectionFeatureExtractor, TimeseriesCollectionFeatureCoordinateExtractor, \
diag_vect, matrix, stack, VectorList
from models import Model, Scatter, KMeansModel, KMeansModelFactory
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions | en | 0.84096 | # Copyright 2016 Thingswise, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #del get_versions | 1.376373 | 1 |
uukanshu/book_tracker.py | stonewell/booktracker | 0 | 6624247 | import datetime
from pathlib import Path
from uukanshu.index_parser import IndexParser
from uukanshu.page_tracker import PageTracker
from book_tracker_base import TrackerBase
class Tracker(TrackerBase):
def __init__(self, url, author, title, data_dir, timeout):
super().__init__(url, author, title, data_dir, timeout)
def _get_title(self, title):
return title
def _get_index_parser(self):
return IndexParser()
def _get_page_tracker(self, page_key, page_url, content_dir, timeout):
return PageTracker(page_url, content_dir, timeout)
def _need_read_page_content(self, response):
m_time = datetime.datetime.strptime(response
.info()['Last-Modified'],
'%a, %d %b %Y %H:%M:%S %Z').timestamp()
if 'title' in self.idx_ and m_time <= self.idx_['m_time'] and 'author' in self.idx_:
self.title = self._get_title(self.idx_['title'])
self.author = self.idx_['author']
return False
return True
def _get_page_url(self, page_file):
return page_file
def _parse_url(self):
self.prefix_ = Path(self.url_).parts[-1]
def _get_chapter_local_file(self, chapter_url):
return Path(chapter_url).parts[-1]
| import datetime
from pathlib import Path
from uukanshu.index_parser import IndexParser
from uukanshu.page_tracker import PageTracker
from book_tracker_base import TrackerBase
class Tracker(TrackerBase):
def __init__(self, url, author, title, data_dir, timeout):
super().__init__(url, author, title, data_dir, timeout)
def _get_title(self, title):
return title
def _get_index_parser(self):
return IndexParser()
def _get_page_tracker(self, page_key, page_url, content_dir, timeout):
return PageTracker(page_url, content_dir, timeout)
def _need_read_page_content(self, response):
m_time = datetime.datetime.strptime(response
.info()['Last-Modified'],
'%a, %d %b %Y %H:%M:%S %Z').timestamp()
if 'title' in self.idx_ and m_time <= self.idx_['m_time'] and 'author' in self.idx_:
self.title = self._get_title(self.idx_['title'])
self.author = self.idx_['author']
return False
return True
def _get_page_url(self, page_file):
return page_file
def _parse_url(self):
self.prefix_ = Path(self.url_).parts[-1]
def _get_chapter_local_file(self, chapter_url):
return Path(chapter_url).parts[-1]
| none | 1 | 2.803799 | 3 | |
netdash/hostlookup_bluecat/api/serializers.py | NetDash/netdash | 15 | 6624248 | from rest_framework import serializers
class BlueCatHostLookupResponseSerializer(serializers.Serializer):
mac = serializers.CharField(required=False)
ipv4 = serializers.IPAddressField(required=False)
ipv6 = serializers.IPAddressField(required=False)
hostnames = serializers.ListField(child=serializers.CharField(), required=False)
| from rest_framework import serializers
class BlueCatHostLookupResponseSerializer(serializers.Serializer):
mac = serializers.CharField(required=False)
ipv4 = serializers.IPAddressField(required=False)
ipv6 = serializers.IPAddressField(required=False)
hostnames = serializers.ListField(child=serializers.CharField(), required=False)
| none | 1 | 2.008823 | 2 | |
src/input/adaptive.py | ngannguyen/aimseqtk | 2 | 6624249 | <filename>src/input/adaptive.py<gh_stars>1-10
#Copyright (C) 2013 by <NAME>
#
#Released under the MIT license, see LICENSE.txt
'''
Parse Adaptive Biotechnologies (http://www.adaptivebiotech.com/) data files
Edit: Mon Jun 30 12:12:37 PDT 2014
Update to the current format of Adaptive TSV files (as of June 30 2014)
'''
import os
import sys
from aimseqtk.lib.clone import Clone
import aimseqtk.lib.common as libcommon
#nucleotide aminoAcid count frequencyCount cdr3Length vMaxResolved vFamilyName vGeneName vGeneAllele vFamilyTies vGeneNameTies vGeneAlleleTies dMaxResolved dFamilyName dGeneName dGeneAllele dFamilyTies dGeneNameTies dGeneAlleleTies jMaxResolved jFamilyName jGeneName jGeneAllele jFamilyTies jGeneNameTies jGeneAlleleTies vDeletion d5Deletion d3Deletion jDeletion n2Insertion n1Insertion vIndex n2Index dIndex n1Index jIndex estimatedNumberGenomes sequenceStatus cloneResolved vOrphon dOrphon jOrphon vFunction dFunction jFunction fractionNucleated vAlignLength vAlignSubstitutionCount vAlignSubstitutionIndexes vAlignSubstitutionGeneThreePrimeIndexes
def adaptive_columns():
#cols = ['nucleotide', 'aminoAcid',
# 'count', 'frequencyCount'
# 'cdr3Length', 'vFamilyName', 'vGeneName', 'VGeneNameTies',
# 'dGeneName', 'jGeneName', 'jGeneNameTies', 'vDeletion', 'd5Deletion',
# 'd3Deletion', 'jDeletion', 'n2Insertion', 'n1Insertion',
# 'sequenceStatus', 'vIndex', 'n1Index', 'n2Index', 'dIndex',
# 'jIndex']
cols = ['nucleotide', 'aminoAcid', 'count', 'frequencyCount', 'cdr3Length',
'vMaxResolved', 'vFamilyName', 'vGeneName', 'vGeneAllele',
'vFamilyTies', 'vGeneNameTies', 'vGeneAlleleTies', 'dMaxResolved',
'dFamilyName', 'dGeneName', 'dGeneAllele', 'dFamilyTies',
'dGeneNameTies', 'dGeneAlleleTies', 'jMaxResolved', 'jFamilyName',
'jGeneName', 'jGeneAllele', 'jFamilyTies', 'jGeneNameTies',
'jGeneAlleleTies', 'vDeletion', 'd5Deletion', 'd3Deletion',
'jDeletion', 'n2Insertion', 'n1Insertion', 'vIndex', 'n2Index',
'dIndex', 'n1Index', 'jIndex', 'estimatedNumberGenomes',
'sequenceStatus', 'cloneResolved', 'vOrphon', 'dOrphon', 'jOrphon',
'vFunction', 'dFunction', 'jFunction', 'fractionNucleated',
'vAlignLength', 'vAlignSubstitutionCount',
'vAlignSubstitutionIndexes',
'vAlignSubstitutionGeneThreePrimeIndexes']
return cols
def adaptive_parseline(line, index2col):
items = line.strip('\n').split('\t')
if len(items) != len(index2col):
sys.stderr.write("Incosistent number of columns between the following\
line and the header line, skipped it:\n\
Line:\n%s\n" %line)
return None
col2val = {}
valid_cols = adaptive_columns()
for i, col in index2col.iteritems():
if col in valid_cols:
col2val[col] = items[i].replace("/", ", ")
# Return None if line does not have minimum required fields.
required_cols = ['count', 'frequencyCount', 'nucleotide',
'vGeneName', 'jGeneName', 'vGeneNameTies', 'jGeneNameTies']
for c in required_cols:
if c not in col2val: # or col2val[c] in ["(undefined)", ""]:
return None
count = int(col2val['count'])
freq = float(col2val['frequencyCount'])/100.0 # convert to non percentage
nuc = col2val['nucleotide']
vgene = col2val['vGeneName']
if vgene == 'unresolved':
vgenes = col2val['vGeneNameTies'].split(',')
else:
vgenes = [vgene]
jgene = col2val['jGeneName']
if jgene == 'unresolved':
jgenes = col2val['jGeneNameTies'].split(',')
else:
jgenes = [jgene]
# Clone with required fields
clone = Clone(count, freq, nuc, vgenes, jgenes)
# Additional information if available
# Gene info:
if 'dGeneName' in col2val:
dgenestr = col2val['dGeneName']
if dgenestr == 'unresolved':
clone.dgenes = col2val['dGeneNameTies'].split(',')
else:
clone.dgenes = [dgenestr]
if 'sequenceStatus' in col2val:
status = col2val['sequenceStatus'].lower()
if status is not None and status == 'in':
clone.productive = True
elif status == 'out' or status == 'stop':
clone.productive = False
else:
sys.stderr.write("Unknown status: %s\n" % status)
if 'aminoAcid' in col2val:
clone.cdr3aa = col2val['aminoAcid']
# Junctional info:
offset = 0
if 'vIndex' in col2val:
vindex = int(col2val['vIndex'])
if clone.productive:
# Make sure nuc is inframe:
offset = vindex % 3
nuclen = len(clone.nuc)
endoffset = (nuclen - offset) % 3
clone.nuc = clone.nuc[offset: nuclen - endoffset]
clone.aa = libcommon.nt2aa(clone.nuc)
if clone.cdr3aa:
cdr3len = len(clone.cdr3aa) * 3
endindex = max(len(clone.nuc), vindex + cdr3len)
clone.cdr3nuc = clone.nuc[vindex: endindex]
if 'dIndex' in col2val:
clone.firstdpos = int(col2val['dIndex']) - offset
if 'n2Index' in col2val:
n2index = int(col2val['n2Index'])
if n2index != -1:
clone.lastvpos = n2index - 1 - offset
elif clone.firstdpos: # No d5ins
clone.lastvpos = clone.firstdpos - 1
if 'jIndex' in col2val:
clone.firstjpos = int(col2val['jIndex']) - offset
if 'n1Index' in col2val:
n1index = int(col2val['n1Index'])
if n1index != -1:
clone.lastdpos = n1index - 1 - offset
elif clone.firstjpos: # No d3ins
clone.lastdpos = clone.firstjpos - 1
# Deletion info:
if 'vDeletion' in col2val:
clone.vdel = int(col2val['vDeletion'])
if 'd5Deletion' in col2val:
clone.d5del = int(col2val['d5Deletion'])
if 'd3Deletion' in col2val:
clone.d3del = int(col2val['d3Deletion'])
if 'jDeletion' in col2val:
clone.jdel = int(col2val['jDeletion'])
return clone
| <filename>src/input/adaptive.py<gh_stars>1-10
#Copyright (C) 2013 by <NAME>
#
#Released under the MIT license, see LICENSE.txt
'''
Parse Adaptive Biotechnologies (http://www.adaptivebiotech.com/) data files
Edit: Mon Jun 30 12:12:37 PDT 2014
Update to the current format of Adaptive TSV files (as of June 30 2014)
'''
import os
import sys
from aimseqtk.lib.clone import Clone
import aimseqtk.lib.common as libcommon
#nucleotide aminoAcid count frequencyCount cdr3Length vMaxResolved vFamilyName vGeneName vGeneAllele vFamilyTies vGeneNameTies vGeneAlleleTies dMaxResolved dFamilyName dGeneName dGeneAllele dFamilyTies dGeneNameTies dGeneAlleleTies jMaxResolved jFamilyName jGeneName jGeneAllele jFamilyTies jGeneNameTies jGeneAlleleTies vDeletion d5Deletion d3Deletion jDeletion n2Insertion n1Insertion vIndex n2Index dIndex n1Index jIndex estimatedNumberGenomes sequenceStatus cloneResolved vOrphon dOrphon jOrphon vFunction dFunction jFunction fractionNucleated vAlignLength vAlignSubstitutionCount vAlignSubstitutionIndexes vAlignSubstitutionGeneThreePrimeIndexes
def adaptive_columns():
#cols = ['nucleotide', 'aminoAcid',
# 'count', 'frequencyCount'
# 'cdr3Length', 'vFamilyName', 'vGeneName', 'VGeneNameTies',
# 'dGeneName', 'jGeneName', 'jGeneNameTies', 'vDeletion', 'd5Deletion',
# 'd3Deletion', 'jDeletion', 'n2Insertion', 'n1Insertion',
# 'sequenceStatus', 'vIndex', 'n1Index', 'n2Index', 'dIndex',
# 'jIndex']
cols = ['nucleotide', 'aminoAcid', 'count', 'frequencyCount', 'cdr3Length',
'vMaxResolved', 'vFamilyName', 'vGeneName', 'vGeneAllele',
'vFamilyTies', 'vGeneNameTies', 'vGeneAlleleTies', 'dMaxResolved',
'dFamilyName', 'dGeneName', 'dGeneAllele', 'dFamilyTies',
'dGeneNameTies', 'dGeneAlleleTies', 'jMaxResolved', 'jFamilyName',
'jGeneName', 'jGeneAllele', 'jFamilyTies', 'jGeneNameTies',
'jGeneAlleleTies', 'vDeletion', 'd5Deletion', 'd3Deletion',
'jDeletion', 'n2Insertion', 'n1Insertion', 'vIndex', 'n2Index',
'dIndex', 'n1Index', 'jIndex', 'estimatedNumberGenomes',
'sequenceStatus', 'cloneResolved', 'vOrphon', 'dOrphon', 'jOrphon',
'vFunction', 'dFunction', 'jFunction', 'fractionNucleated',
'vAlignLength', 'vAlignSubstitutionCount',
'vAlignSubstitutionIndexes',
'vAlignSubstitutionGeneThreePrimeIndexes']
return cols
def adaptive_parseline(line, index2col):
items = line.strip('\n').split('\t')
if len(items) != len(index2col):
sys.stderr.write("Incosistent number of columns between the following\
line and the header line, skipped it:\n\
Line:\n%s\n" %line)
return None
col2val = {}
valid_cols = adaptive_columns()
for i, col in index2col.iteritems():
if col in valid_cols:
col2val[col] = items[i].replace("/", ", ")
# Return None if line does not have minimum required fields.
required_cols = ['count', 'frequencyCount', 'nucleotide',
'vGeneName', 'jGeneName', 'vGeneNameTies', 'jGeneNameTies']
for c in required_cols:
if c not in col2val: # or col2val[c] in ["(undefined)", ""]:
return None
count = int(col2val['count'])
freq = float(col2val['frequencyCount'])/100.0 # convert to non percentage
nuc = col2val['nucleotide']
vgene = col2val['vGeneName']
if vgene == 'unresolved':
vgenes = col2val['vGeneNameTies'].split(',')
else:
vgenes = [vgene]
jgene = col2val['jGeneName']
if jgene == 'unresolved':
jgenes = col2val['jGeneNameTies'].split(',')
else:
jgenes = [jgene]
# Clone with required fields
clone = Clone(count, freq, nuc, vgenes, jgenes)
# Additional information if available
# Gene info:
if 'dGeneName' in col2val:
dgenestr = col2val['dGeneName']
if dgenestr == 'unresolved':
clone.dgenes = col2val['dGeneNameTies'].split(',')
else:
clone.dgenes = [dgenestr]
if 'sequenceStatus' in col2val:
status = col2val['sequenceStatus'].lower()
if status is not None and status == 'in':
clone.productive = True
elif status == 'out' or status == 'stop':
clone.productive = False
else:
sys.stderr.write("Unknown status: %s\n" % status)
if 'aminoAcid' in col2val:
clone.cdr3aa = col2val['aminoAcid']
# Junctional info:
offset = 0
if 'vIndex' in col2val:
vindex = int(col2val['vIndex'])
if clone.productive:
# Make sure nuc is inframe:
offset = vindex % 3
nuclen = len(clone.nuc)
endoffset = (nuclen - offset) % 3
clone.nuc = clone.nuc[offset: nuclen - endoffset]
clone.aa = libcommon.nt2aa(clone.nuc)
if clone.cdr3aa:
cdr3len = len(clone.cdr3aa) * 3
endindex = max(len(clone.nuc), vindex + cdr3len)
clone.cdr3nuc = clone.nuc[vindex: endindex]
if 'dIndex' in col2val:
clone.firstdpos = int(col2val['dIndex']) - offset
if 'n2Index' in col2val:
n2index = int(col2val['n2Index'])
if n2index != -1:
clone.lastvpos = n2index - 1 - offset
elif clone.firstdpos: # No d5ins
clone.lastvpos = clone.firstdpos - 1
if 'jIndex' in col2val:
clone.firstjpos = int(col2val['jIndex']) - offset
if 'n1Index' in col2val:
n1index = int(col2val['n1Index'])
if n1index != -1:
clone.lastdpos = n1index - 1 - offset
elif clone.firstjpos: # No d3ins
clone.lastdpos = clone.firstjpos - 1
# Deletion info:
if 'vDeletion' in col2val:
clone.vdel = int(col2val['vDeletion'])
if 'd5Deletion' in col2val:
clone.d5del = int(col2val['d5Deletion'])
if 'd3Deletion' in col2val:
clone.d3del = int(col2val['d3Deletion'])
if 'jDeletion' in col2val:
clone.jdel = int(col2val['jDeletion'])
return clone
| en | 0.505914 | #Copyright (C) 2013 by <NAME> # #Released under the MIT license, see LICENSE.txt Parse Adaptive Biotechnologies (http://www.adaptivebiotech.com/) data files Edit: Mon Jun 30 12:12:37 PDT 2014 Update to the current format of Adaptive TSV files (as of June 30 2014) #nucleotide aminoAcid count frequencyCount cdr3Length vMaxResolved vFamilyName vGeneName vGeneAllele vFamilyTies vGeneNameTies vGeneAlleleTies dMaxResolved dFamilyName dGeneName dGeneAllele dFamilyTies dGeneNameTies dGeneAlleleTies jMaxResolved jFamilyName jGeneName jGeneAllele jFamilyTies jGeneNameTies jGeneAlleleTies vDeletion d5Deletion d3Deletion jDeletion n2Insertion n1Insertion vIndex n2Index dIndex n1Index jIndex estimatedNumberGenomes sequenceStatus cloneResolved vOrphon dOrphon jOrphon vFunction dFunction jFunction fractionNucleated vAlignLength vAlignSubstitutionCount vAlignSubstitutionIndexes vAlignSubstitutionGeneThreePrimeIndexes #cols = ['nucleotide', 'aminoAcid', # 'count', 'frequencyCount' # 'cdr3Length', 'vFamilyName', 'vGeneName', 'VGeneNameTies', # 'dGeneName', 'jGeneName', 'jGeneNameTies', 'vDeletion', 'd5Deletion', # 'd3Deletion', 'jDeletion', 'n2Insertion', 'n1Insertion', # 'sequenceStatus', 'vIndex', 'n1Index', 'n2Index', 'dIndex', # 'jIndex'] # Return None if line does not have minimum required fields. # or col2val[c] in ["(undefined)", ""]: # convert to non percentage # Clone with required fields # Additional information if available # Gene info: # Junctional info: # Make sure nuc is inframe: # No d5ins # No d3ins # Deletion info: | 2.006893 | 2 |
benchmark/workload/tpch.py | ChenYi015/Raven | 1 | 6624250 | # Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import yaml
from benchmark.core.query import Query
from benchmark.core.workload import LoopWorkload, QpsWorkload
TPCH_QUERIES: List[Query] = []
with open(os.path.join(os.environ['RAVEN_HOME'], 'config', 'workload', 'tpch.yaml'),
encoding='utf-8') as file:
workload_config: dict = yaml.load(file, Loader=yaml.FullLoader)
for query_config in workload_config['Queries']:
TPCH_QUERIES.append(Query(name=query_config.get('Name', ''), description=query_config.get('Description', ''),
database=query_config['Database'], sql=query_config['Sql']))
class TpchLoopWorkload(LoopWorkload):
def __init__(self):
name = 'TPC-H Loop Workload'
description = 'TPC-H Workload which can generate multiple loops of queries.'
super().__init__(name=name, description=description)
for query in TPCH_QUERIES:
self.append_query(query)
class TpchQpsWorkload(QpsWorkload):
def __init__(self):
name = 'TPC-H QPS Workload'
description = 'TPC-H Workload which qps varies with diverse distributions as time goes on..'
super().__init__(name=name, description=description)
for query in TPCH_QUERIES:
self.append_query(query)
| # Copyright 2021 Raven Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import yaml
from benchmark.core.query import Query
from benchmark.core.workload import LoopWorkload, QpsWorkload
TPCH_QUERIES: List[Query] = []
with open(os.path.join(os.environ['RAVEN_HOME'], 'config', 'workload', 'tpch.yaml'),
encoding='utf-8') as file:
workload_config: dict = yaml.load(file, Loader=yaml.FullLoader)
for query_config in workload_config['Queries']:
TPCH_QUERIES.append(Query(name=query_config.get('Name', ''), description=query_config.get('Description', ''),
database=query_config['Database'], sql=query_config['Sql']))
class TpchLoopWorkload(LoopWorkload):
def __init__(self):
name = 'TPC-H Loop Workload'
description = 'TPC-H Workload which can generate multiple loops of queries.'
super().__init__(name=name, description=description)
for query in TPCH_QUERIES:
self.append_query(query)
class TpchQpsWorkload(QpsWorkload):
def __init__(self):
name = 'TPC-H QPS Workload'
description = 'TPC-H Workload which qps varies with diverse distributions as time goes on..'
super().__init__(name=name, description=description)
for query in TPCH_QUERIES:
self.append_query(query)
| en | 0.857412 | # Copyright 2021 Raven Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.977948 | 2 |