Dataset columns and dtypes:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
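Each row below pairs a source file's `content` with its repository metadata and the numeric quality signals listed above. As a quick orientation, here is a minimal sketch for inspecting such records with pandas, assuming the rows are available locally as Parquet (the file name is hypothetical):

import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("code_quality_signals.parquet")

# One record per source file, keyed by content hash.
print(df[["hexsha", "size", "lang", "max_stars_repo_name"]].head())

# Quality signals are ordinary numeric columns, so standard filters apply,
# e.g. keeping files whose comment-character fraction exceeds 10%.
well_commented = df[df["qsc_code_frac_chars_comments_quality_signal"] > 0.10]
print(len(well_commented))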
86a8e1ed877d30bb9fe2c31cbcb8f214021f1ba6
| 2,006
|
py
|
Python
|
setup.py
|
pasinskim/mender-python-client
|
d6f3dc86ec46b0b249a112c5037bea579266e649
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
pasinskim/mender-python-client
|
d6f3dc86ec46b0b249a112c5037bea579266e649
|
[
"Apache-2.0"
] | 71
|
2020-12-21T05:08:13.000Z
|
2022-01-31T02:04:26.000Z
|
setup.py
|
pasinskim/mender-python-client
|
d6f3dc86ec46b0b249a112c5037bea579266e649
|
[
"Apache-2.0"
] | 11
|
2020-12-02T14:46:58.000Z
|
2021-12-02T06:43:25.000Z
|
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import re
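# Extract __version__ from the version file via regex, so the version is defined in one place.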
VERSIONFILE = "src/mender/_version.py"
version_string_line = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(VSRE, version_string_line, re.M)
if match:
    version_string = match.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="mender-python-client-mendersoftware",
    version=version_string,
    license="Apache 2.0",
    author="Mendersoftware",
    author_email="contact@mender.io",
    description="A Python implementation of the Mender client interface",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mendersoftware/mender-python-client",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    keywords=["mender", "OTA", "updater"],
    packages=setuptools.find_packages(where="src"),
    install_requires=["cryptography", "requests", "msgpack", "websockets"],
    entry_points={"console_scripts": ["mender-python-client=mender.mender:main"]},
    package_dir={"": "src"},
    python_requires=">=3.6",
    zip_safe=False,
    include_package_data=True,
)
| 37.849057
| 82
| 0.698903
| 254
| 2,006
| 5.413386
| 0.590551
| 0.043636
| 0.039273
| 0.023273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009014
| 0.170489
| 2,006
| 52
| 83
| 38.576923
| 0.817308
| 0.289133
| 0
| 0
| 0
| 0
| 0.378187
| 0.067989
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a985b6e0366a5f31612b64e590684791f59ced
| 740
|
py
|
Python
|
Q295-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q295-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q295-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
"""
295
find median from data stream
hard
"""
from heapq import *
class MedianFinder:
# max heap and min heap
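    # Invariant: self.hi is a min-heap holding the larger half of the stream;
    # self.lo is a max-heap (values stored negated) holding the smaller half;
    # len(self.hi) always equals len(self.lo) or len(self.lo) + 1.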
    def __init__(self):
        """
        initialize your data structure here.
        """
        self.hi = []
        self.lo = []

    def addNum(self, num: int) -> None:
        heappush(self.lo, -heappushpop(self.hi, num))
        while len(self.lo) > len(self.hi):
            heappush(self.hi, -heappop(self.lo))

    def findMedian(self) -> float:
        if len(self.hi) > len(self.lo):
            return self.hi[0]
        if len(self.hi) == len(self.lo):
            return (self.hi[0] - self.lo[0]) / 2.0


sol = MedianFinder()
sol.addNum(1)
print(sol.findMedian())
sol.addNum(2)
print(sol.findMedian())
| 18.5
| 53
| 0.558108
| 100
| 740
| 4.09
| 0.42
| 0.117359
| 0.066015
| 0.05379
| 0.161369
| 0.161369
| 0.161369
| 0.161369
| 0.161369
| 0.161369
| 0
| 0.018975
| 0.287838
| 740
| 40
| 54
| 18.5
| 0.757116
| 0.131081
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.052632
| 0
| 0.368421
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86a988c6aa7f35cfd3902d0931e8d87597572497
| 3,445
|
py
|
Python
|
raisimPy/examples/newtonsCradle.py
|
mstoelzle/raisimLib
|
81f33a1b82f296e9622f950bc292f61bee2d2c2f
|
[
"Apache-2.0"
] | null | null | null |
raisimPy/examples/newtonsCradle.py
|
mstoelzle/raisimLib
|
81f33a1b82f296e9622f950bc292f61bee2d2c2f
|
[
"Apache-2.0"
] | null | null | null |
raisimPy/examples/newtonsCradle.py
|
mstoelzle/raisimLib
|
81f33a1b82f296e9622f950bc292f61bee2d2c2f
|
[
"Apache-2.0"
] | null | null | null |
import os
import numpy as np
import raisimpy as raisim
import math
import time
raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/activation.raisim")
world = raisim.World()
ground = world.addGround()
world.setTimeStep(0.001)
world.setMaterialPairProp("steel", "steel", 0.1, 1.0, 0.0)
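# Steel-on-steel contact pair: low friction and a restitution of 1.0 give the
# near-elastic collisions a Newton's cradle needs (argument order assumed from the raisim API).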
pin1 = world.addSphere(0.1, 0.8)
pin1.setAppearance("1,0,0,0.3")
pin1.setPosition(0.0, 0.0, 3.0)
pin1.setBodyType(raisim.BodyType.STATIC)
pin2 = world.addSphere(0.1, 0.8)
pin2.setAppearance("0,1,0,0.3")
pin2.setPosition(0.3, 0.0, 3.0)
pin2.setBodyType(raisim.BodyType.STATIC)
pin3 = world.addSphere(0.1, 0.8)
pin3.setAppearance("0,0,1,0.3")
pin3.setPosition(0.6, 0.0, 3.0)
pin3.setBodyType(raisim.BodyType.STATIC)
pin4 = world.addSphere(0.1, 0.8)
pin4.setAppearance("1,0,0,0.3")
pin4.setPosition(0.9, 0.0, 3.0)
pin4.setBodyType(raisim.BodyType.STATIC)
pin5 = world.addSphere(0.1, 0.8)
pin5.setPosition(0.9, 0.0, 6.0)
pin5.setBodyType(raisim.BodyType.STATIC)
pin6 = world.addSphere(0.1, 0.8)
pin6.setPosition(-3., 0.0, 7.0)
pin6.setBodyType(raisim.BodyType.STATIC)
pin7 = world.addSphere(0.1, 0.8)
pin7.setPosition(-4., 0.0, 7.0)
pin7.setBodyType(raisim.BodyType.STATIC)
anymalB_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal/urdf/anymal.urdf"
anymalC_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal_c/urdf/anymal.urdf"
anymalC = world.addArticulatedSystem(anymalC_urdf_file)
anymalB = world.addArticulatedSystem(anymalB_urdf_file)
jointNominalConfig = np.array([-3, 0, 4.54, 1.0, 0.0, 0.0, 0.0, 0.03, 0.4, -0.8, -0.03, 0.4, -0.8, 0.03, -0.4, 0.8, -0.03, -0.4, 0.8])
jointVelocityTarget = np.zeros([anymalC.getDOF()])
jointPgain = np.ones(anymalC.getDOF()) * 100.0
jointDgain = np.ones(anymalC.getDOF()) * 1.0
anymalC.setGeneralizedCoordinate(jointNominalConfig)
anymalC.setPdGains(jointPgain, jointDgain)
anymalC.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalC.setName("anymalC")
jointNominalConfig[0] = -4
anymalB.setGeneralizedCoordinate(jointNominalConfig)
anymalB.setPdGains(jointPgain, jointDgain)
anymalB.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalB.setName("anymalB")
ball1 = world.addSphere(0.1498, 0.8, "steel")
ball1.setPosition(0, 0.0, 1.0)
ball2 = world.addSphere(0.1499, 0.8, "steel")
ball2.setPosition(0.3, 0.0, 1.0)
ball3 = world.addSphere(0.1499, 0.8, "steel")
ball3.setPosition(0.6, 0.0, 1.0)
ball4 = world.addSphere(0.1499, 0.8, "steel")
ball4.setPosition(2.9, 0.0, 3.0)
box = world.addBox(.1, .1, .1, 1)
box.setPosition(0.9, 0.0, 4.2)
world.addStiffWire(pin1, 0, np.zeros(3), ball1, 0, np.zeros(3), 2.0)
world.addStiffWire(pin2, 0, np.zeros(3), ball2, 0, np.zeros(3), 2.0)
world.addStiffWire(pin3, 0, np.zeros(3), ball3, 0, np.zeros(3), 2.0)
world.addStiffWire(pin4, 0, np.zeros(3), ball4, 0, np.zeros(3), 2.0)
wire5 = world.addCompliantWire(pin5, 0, np.zeros(3), box, 0, np.zeros(3), 2.0, 200)
wire5.setStretchType(raisim.StretchType.BOTH)
wire6 = world.addCompliantWire(pin6, 0, np.zeros(3), anymalC, 0, np.zeros(3), 2.0, 1000)
wire6.setStretchType(raisim.StretchType.BOTH)
wire7 = world.addCustomWire(pin7, 0, np.zeros(3), anymalB, 0, np.zeros(3), 2.0)
wire7.setTension(310)
server = raisim.RaisimServer(world)
server.launchServer(8080)
for i in range(500000):
    time.sleep(0.001)
    server.integrateWorldThreadSafe()
    if i == 5000:
        world.removeObject(wire7)

server.killServer()
| 32.196262
| 134
| 0.722787
| 565
| 3,445
| 4.369912
| 0.180531
| 0.024301
| 0.045362
| 0.051033
| 0.260024
| 0.214662
| 0.131227
| 0.0968
| 0.049413
| 0.049413
| 0
| 0.105179
| 0.092017
| 3,445
| 106
| 135
| 32.5
| 0.684143
| 0
| 0
| 0
| 0
| 0
| 0.051669
| 0.028447
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86aa12779a6111083d5f447b8a7b523841c60e96
| 15,132
|
py
|
Python
|
nova/virt/hyperv/volumeops.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 1
|
2019-07-29T10:30:24.000Z
|
2019-07-29T10:30:24.000Z
|
nova/virt/hyperv/volumeops.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 11
|
2017-06-19T01:28:55.000Z
|
2017-06-23T02:01:47.000Z
|
nova/virt/hyperv/volumeops.py
|
viveknandavanam/nova
|
556377b6915936467436c9d5bb33bc0e22244e1e
|
[
"Apache-2.0"
] | 3
|
2018-04-04T15:15:01.000Z
|
2018-04-19T18:14:25.000Z
|
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from os_brick.initiator import connector
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import strutils
import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt import driver
from nova.virt.hyperv import constants
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class VolumeOps(object):
    """Management class for Volume-related tasks
    """

    def __init__(self):
        self._vmutils = utilsfactory.get_vmutils()
        self._default_root_device = 'vda'
        self.volume_drivers = {
            constants.STORAGE_PROTOCOL_SMBFS: SMBFSVolumeDriver(),
            constants.STORAGE_PROTOCOL_ISCSI: ISCSIVolumeDriver(),
            constants.STORAGE_PROTOCOL_FC: FCVolumeDriver()}

    def _get_volume_driver(self, connection_info):
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        return self.volume_drivers[driver_type]

    def attach_volumes(self, volumes, instance_name):
        for vol in volumes:
            self.attach_volume(vol['connection_info'], instance_name)

    def disconnect_volumes(self, block_device_info):
        mapping = driver.block_device_info_get_mapping(block_device_info)
        for vol in mapping:
            self.disconnect_volume(vol['connection_info'])

    def attach_volume(self, connection_info, instance_name,
                      disk_bus=constants.CTRL_TYPE_SCSI):
        tries_left = CONF.hyperv.volume_attach_retry_count + 1

        while tries_left:
            try:
                self._attach_volume(connection_info,
                                    instance_name,
                                    disk_bus)
                break
            except Exception as ex:
                tries_left -= 1
                if not tries_left:
                    LOG.exception(
                        _LE("Failed to attach volume %(connection_info)s "
                            "to instance %(instance_name)s. "),
                        {'connection_info': strutils.mask_dict_password(
                            connection_info),
                         'instance_name': instance_name})

                    self.disconnect_volume(connection_info)
                    raise exception.VolumeAttachFailed(
                        volume_id=connection_info['serial'],
                        reason=ex)
                else:
                    LOG.warning(
                        _LW("Failed to attach volume %(connection_info)s "
                            "to instance %(instance_name)s. "
                            "Tries left: %(tries_left)s."),
                        {'connection_info': strutils.mask_dict_password(
                            connection_info),
                         'instance_name': instance_name,
                         'tries_left': tries_left})

                    time.sleep(CONF.hyperv.volume_attach_retry_interval)

    def _attach_volume(self, connection_info, instance_name,
                       disk_bus=constants.CTRL_TYPE_SCSI):
        LOG.debug(
            "Attaching volume: %(connection_info)s to %(instance_name)s",
            {'connection_info': strutils.mask_dict_password(connection_info),
             'instance_name': instance_name})
        volume_driver = self._get_volume_driver(connection_info)
        volume_driver.attach_volume(connection_info,
                                    instance_name,
                                    disk_bus)

        qos_specs = connection_info['data'].get('qos_specs') or {}
        if qos_specs:
            volume_driver.set_disk_qos_specs(connection_info,
                                             qos_specs)

    def disconnect_volume(self, connection_info):
        volume_driver = self._get_volume_driver(connection_info)
        volume_driver.disconnect_volume(connection_info)

    def detach_volume(self, connection_info, instance_name):
        LOG.debug("Detaching volume: %(connection_info)s "
                  "from %(instance_name)s",
                  {'connection_info': strutils.mask_dict_password(
                      connection_info),
                   'instance_name': instance_name})
        volume_driver = self._get_volume_driver(connection_info)
        volume_driver.detach_volume(connection_info, instance_name)
        volume_driver.disconnect_volume(connection_info)

    def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
        # Mapping containing the current disk paths for each volume.
        actual_disk_mapping = self.get_disk_path_mapping(block_device_info)
        if not actual_disk_mapping:
            return

        # Mapping containing virtual disk resource path and the physical
        # disk path for each volume serial number. The physical path
        # associated with this resource may not be the right one,
        # as physical disk paths can get swapped after host reboots.
        vm_disk_mapping = self._vmutils.get_vm_physical_disk_mapping(
            instance_name)

        for serial, vm_disk in vm_disk_mapping.items():
            actual_disk_path = actual_disk_mapping[serial]
            if vm_disk['mounted_disk_path'] != actual_disk_path:
                self._vmutils.set_disk_host_res(vm_disk['resource_path'],
                                                actual_disk_path)

    def get_volume_connector(self):
        # NOTE(lpetrut): the Windows os-brick connectors
        # do not use a root helper.
        conn = connector.get_connector_properties(
            root_helper=None,
            my_ip=CONF.my_block_storage_ip,
            multipath=CONF.hyperv.use_multipath_io,
            enforce_multipath=True,
            host=CONF.host)
        return conn

    def connect_volumes(self, block_device_info):
        mapping = driver.block_device_info_get_mapping(block_device_info)
        for vol in mapping:
            connection_info = vol['connection_info']
            volume_driver = self._get_volume_driver(connection_info)
            volume_driver.connect_volume(connection_info)

    def get_disk_path_mapping(self, block_device_info):
        block_mapping = driver.block_device_info_get_mapping(block_device_info)
        disk_path_mapping = {}
        for vol in block_mapping:
            connection_info = vol['connection_info']
            disk_serial = connection_info['serial']

            disk_path = self.get_disk_resource_path(connection_info)
            disk_path_mapping[disk_serial] = disk_path
        return disk_path_mapping

    def get_disk_resource_path(self, connection_info):
        volume_driver = self._get_volume_driver(connection_info)
        return volume_driver.get_disk_resource_path(connection_info)

    @staticmethod
    def bytes_per_sec_to_iops(no_bytes):
        # Hyper-v uses normalized IOPS (8 KB increments)
        # as IOPS allocation units.
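        # Ceiling division: any remainder still counts as one full
        # normalized IOPS allocation unit.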
        return (
            (no_bytes + constants.IOPS_BASE_SIZE - 1) //
            constants.IOPS_BASE_SIZE)

    @staticmethod
    def validate_qos_specs(qos_specs, supported_qos_specs):
        unsupported_specs = set(qos_specs.keys()).difference(
            supported_qos_specs)
        if unsupported_specs:
            msg = (_LW('Got unsupported QoS specs: '
                       '%(unsupported_specs)s. '
                       'Supported qos specs: %(supported_qos_specs)s') %
                   {'unsupported_specs': unsupported_specs,
                    'supported_qos_specs': supported_qos_specs})
            LOG.warning(msg)


class BaseVolumeDriver(object):
    _is_block_dev = True
    _protocol = None
    _extra_connector_args = {}

    def __init__(self):
        self._conn = None
        self._diskutils = utilsfactory.get_diskutils()
        self._vmutils = utilsfactory.get_vmutils()

    @property
    def _connector(self):
        if not self._conn:
            scan_attempts = CONF.hyperv.mounted_disk_query_retry_count
            scan_interval = CONF.hyperv.mounted_disk_query_retry_interval

            self._conn = connector.InitiatorConnector.factory(
                protocol=self._protocol,
                root_helper=None,
                use_multipath=CONF.hyperv.use_multipath_io,
                device_scan_attempts=scan_attempts,
                device_scan_interval=scan_interval,
                **self._extra_connector_args)
        return self._conn

    def connect_volume(self, connection_info):
        return self._connector.connect_volume(connection_info['data'])

    def disconnect_volume(self, connection_info):
        self._connector.disconnect_volume(connection_info['data'])

    def get_disk_resource_path(self, connection_info):
        disk_paths = self._connector.get_volume_paths(connection_info['data'])
        if not disk_paths:
            vol_id = connection_info['serial']
            err_msg = _("Could not find disk path. Volume id: %s")
            raise exception.DiskNotFound(err_msg % vol_id)

        return self._get_disk_res_path(disk_paths[0])

    def _get_disk_res_path(self, disk_path):
        if self._is_block_dev:
            # We need the Msvm_DiskDrive resource path as this
            # will be used when the disk is attached to an instance.
            disk_number = self._diskutils.get_device_number_from_device_name(
                disk_path)
            disk_res_path = self._vmutils.get_mounted_disk_by_drive_number(
                disk_number)
        else:
            disk_res_path = disk_path
        return disk_res_path

    def attach_volume(self, connection_info, instance_name,
                      disk_bus=constants.CTRL_TYPE_SCSI):
        dev_info = self.connect_volume(connection_info)

        serial = connection_info['serial']
        disk_path = self._get_disk_res_path(dev_info['path'])
        ctrller_path, slot = self._get_disk_ctrl_and_slot(instance_name,
                                                          disk_bus)
        if self._is_block_dev:
            # We need to tag physical disk resources with the volume
            # serial number, in order to be able to retrieve them
            # during live migration.
            self._vmutils.attach_volume_to_controller(instance_name,
                                                      ctrller_path,
                                                      slot,
                                                      disk_path,
                                                      serial=serial)
        else:
            self._vmutils.attach_drive(instance_name,
                                       disk_path,
                                       ctrller_path,
                                       slot)

    def detach_volume(self, connection_info, instance_name):
        disk_path = self.get_disk_resource_path(connection_info)

        LOG.debug("Detaching disk %(disk_path)s "
                  "from instance: %(instance_name)s",
                  dict(disk_path=disk_path,
                       instance_name=instance_name))
        self._vmutils.detach_vm_disk(instance_name, disk_path,
                                     is_physical=self._is_block_dev)

    def _get_disk_ctrl_and_slot(self, instance_name, disk_bus):
        if disk_bus == constants.CTRL_TYPE_IDE:
            # Find the IDE controller for the vm.
            ctrller_path = self._vmutils.get_vm_ide_controller(
                instance_name, 0)
            # Attaching to the first slot
            slot = 0
        else:
            # Find the SCSI controller for the vm
            ctrller_path = self._vmutils.get_vm_scsi_controller(
                instance_name)
            slot = self._vmutils.get_free_controller_slot(ctrller_path)
        return ctrller_path, slot

    def set_disk_qos_specs(self, connection_info, disk_qos_specs):
        LOG.info(_LI("The %(protocol)s Hyper-V volume driver "
                     "does not support QoS. Ignoring QoS specs."),
                 dict(protocol=self._protocol))


class ISCSIVolumeDriver(BaseVolumeDriver):
    _is_block_dev = True
    _protocol = constants.STORAGE_PROTOCOL_ISCSI

    def __init__(self, *args, **kwargs):
        self._extra_connector_args = dict(
            initiator_list=CONF.hyperv.iscsi_initiator_list)

        super(ISCSIVolumeDriver, self).__init__(*args, **kwargs)


class SMBFSVolumeDriver(BaseVolumeDriver):
    _is_block_dev = False
    _protocol = constants.STORAGE_PROTOCOL_SMBFS
    _extra_connector_args = dict(local_path_for_loopback=True)
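    # Decorator: serializes the wrapped call per SMB export path, so a mount
    # and an unmount of the same share cannot interleave.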
    def export_path_synchronized(f):
        def wrapper(inst, connection_info, *args, **kwargs):
            export_path = inst._get_export_path(connection_info)

            @utils.synchronized(export_path)
            def inner():
                return f(inst, connection_info, *args, **kwargs)
            return inner()
        return wrapper

    def _get_export_path(self, connection_info):
        return connection_info['data']['export'].replace('/', '\\')

    @export_path_synchronized
    def attach_volume(self, *args, **kwargs):
        super(SMBFSVolumeDriver, self).attach_volume(*args, **kwargs)

    @export_path_synchronized
    def disconnect_volume(self, *args, **kwargs):
        # We synchronize those operations based on the share path in order to
        # avoid the situation when a SMB share is unmounted while a volume
        # exported by it is about to be attached to an instance.
        super(SMBFSVolumeDriver, self).disconnect_volume(*args, **kwargs)

    def set_disk_qos_specs(self, connection_info, qos_specs):
        supported_qos_specs = ['total_iops_sec', 'total_bytes_sec']
        VolumeOps.validate_qos_specs(qos_specs, supported_qos_specs)

        total_bytes_sec = int(qos_specs.get('total_bytes_sec') or 0)
        total_iops_sec = int(qos_specs.get('total_iops_sec') or
                             VolumeOps.bytes_per_sec_to_iops(
                                 total_bytes_sec))
        if total_iops_sec:
            disk_path = self.get_disk_resource_path(connection_info)
            self._vmutils.set_disk_qos_specs(disk_path, total_iops_sec)


class FCVolumeDriver(BaseVolumeDriver):
    _is_block_dev = True
    _protocol = constants.STORAGE_PROTOCOL_FC
| 41.231608
| 79
| 0.630849
| 1,727
| 15,132
| 5.161552
| 0.177186
| 0.098945
| 0.02827
| 0.037918
| 0.345075
| 0.292461
| 0.242876
| 0.229527
| 0.185102
| 0.148082
| 0
| 0.002074
| 0.298837
| 15,132
| 366
| 80
| 41.344262
| 0.838077
| 0.114063
| 0
| 0.249057
| 0
| 0
| 0.072011
| 0.003369
| 0
| 0
| 0
| 0
| 0
| 1
| 0.124528
| false
| 0.015094
| 0.041509
| 0.015094
| 0.279245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86aa70a303cf42efa31de488c8f84aac08996583
| 1,326
|
py
|
Python
|
-Loan-Approval-Analysis/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
-Loan-Approval-Analysis/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
-Loan-Approval-Analysis/code.py
|
lakshit-sharma/greyatom-python-for-data-science
|
55a6e5a4c54a4f7135cc09fb287d2f2fa1d36413
|
[
"MIT"
] | null | null | null |
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
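# NOTE: `path` (the CSV location) is expected to be defined by the surrounding
# execution environment; it is not set in this script.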
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
banks = bank.drop(columns=['Loan_ID'])
bank_mode = banks.mode()
banks = banks.fillna(bank_mode.iloc[0])
print(banks.isnull().sum())
avg_loan_amount = pd.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'], values='LoanAmount', aggfunc = 'mean')
print(avg_loan_amount)
loan_approved_se = banks[ (banks['Self_Employed'] == "Yes") & (banks['Loan_Status'] == "Y") ]
loan_approved_nse = banks[ (banks['Self_Employed'] == "No") & (banks['Loan_Status'] == "Y") ]
percentage_se = (len(loan_approved_se) / 614) * 100
percentage_nse = (len(loan_approved_nse) / 614) * 100
# loan amount term
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12)
big_loan_term = len(loan_term[loan_term >= 25])
print(big_loan_term)

columns_to_show = ['ApplicantIncome', 'Credit_History']
loan_groupby = banks.groupby(['Loan_Status'])[columns_to_show]

# Check the mean value
mean_values = loan_groupby.agg([np.mean])
print(mean_values)
# code ends here
| 24.109091
| 125
| 0.69457
| 185
| 1,326
| 4.713514
| 0.443243
| 0.045872
| 0.029817
| 0.043578
| 0.059633
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015071
| 0.149321
| 1,326
| 54
| 126
| 24.555556
| 0.757979
| 0.082202
| 0
| 0
| 0
| 0
| 0.146932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0.24
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86aa77866191f8899234ee88d0a38f765c6e8d3e
| 7,673
|
py
|
Python
|
others/train_RNN.py
|
jacobswan1/Video2Commonsense
|
4dcef76360a29702fd90b7030a39a123da6db19e
|
[
"MIT"
] | 31
|
2021-01-07T00:42:05.000Z
|
2022-01-18T16:44:09.000Z
|
others/train_RNN.py
|
jacobswan1/Video2Commonsense
|
4dcef76360a29702fd90b7030a39a123da6db19e
|
[
"MIT"
] | 7
|
2021-01-07T00:41:28.000Z
|
2021-12-01T09:29:49.000Z
|
others/train_RNN.py
|
jacobswan1/Video2Commonsense
|
4dcef76360a29702fd90b7030a39a123da6db19e
|
[
"MIT"
] | 4
|
2021-02-04T04:55:20.000Z
|
2021-07-25T06:50:44.000Z
|
''' Training Script for V2C captioning task. '''
__author__ = 'Jacob Zhiyuan Fang'
import os
import numpy as np
from opts import *
from utils.utils import *
import torch.optim as optim
from model.Model import Model
from torch.utils.data import DataLoader
from utils.dataloader import VideoDataset
from model.transformer.Optim import ScheduledOptim
def train(loader, model, optimizer, opt, cap_vocab, cms_vocab):
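    # One joint optimization step per batch: the caption decoder and the
    # commonsense (CMS) decoder are trained together with label-smoothed cross-entropy.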
    model.train()

    for epoch in range(opt['epochs']):
        iteration = 0

        for data in loader:
            torch.cuda.synchronize()
            if opt['cms'] == 'int':
                cms_labels = data['int_labels']
            elif opt['cms'] == 'eff':
                cms_labels = data['eff_labels']
            else:
                cms_labels = data['att_labels']

            if opt['cuda']:
                fc_feats = data['fc_feats'].cuda()
                cap_labels = data['cap_labels'].cuda()
                cms_labels = cms_labels.cuda()

            optimizer.zero_grad()

            # cap_probs, cms_probs = model(fc_feats, cap_labels, cap_pos, cms_labels, cms_pos)
            cap_probs, _, cms_probs, _ = model(fc_feats, cap_labels, cms_labels)

            # note: currently we just used most naive cross-entropy as training objective,
            # advanced loss func. like SELF-CRIT, different loss weights or stronger video feature
            # may lead performance boost, however is not the goal of this work.
            cap_loss, cap_n_correct = cal_performance(cap_probs.view(-1, cap_probs.shape[-1]),
                                                      cap_labels[:, 1:], smoothing=True)
            cms_loss, cms_n_correct = cal_performance(cms_probs.view(-1, cms_probs.shape[-1]),
                                                      cms_labels[:, 1:], smoothing=True)

            # compute the token prediction Acc.
            non_pad_mask = cap_labels[:, 1:].ne(Constants.PAD)
            n_word = non_pad_mask.sum().item()
            cms_non_pad_mask = cms_labels[:, 1:].ne(Constants.PAD)
            cms_n_word = cms_non_pad_mask.sum().item()

            cap_loss /= n_word
            cms_loss /= n_word
            loss = cms_loss + cap_loss
            loss.backward()
            optimizer.step_and_update_lr()
            torch.nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), 1)

            # update parameters
            cap_train_loss = cap_loss.item()
            cms_train_loss = cms_loss.item()

            # multi-gpu case, not necessary in newer PyTorch version or on single GPU.
            if opt['cuda']: torch.cuda.synchronize()

            iteration += 1

            if iteration % opt['print_loss_every'] == 0:
                print('iter %d (epoch %d), cap_train_loss = %.6f, cms_train_loss = %.6f,'
                      ' current step = %d, current lr = %.3E, cap_acc = %.3f, cms_acc = %.3f'
                      % (iteration, epoch, cap_train_loss, cms_train_loss, optimizer.n_current_steps,
                         optimizer._optimizer.param_groups[0]['lr'],
                         cap_n_correct/n_word, cms_n_correct/cms_n_word))

                # show the intermediate generations
                if opt['show_predict']:
                    cap_pr, cap_gt = show_prediction(cap_probs, cap_labels[:, :-1], cap_vocab, caption=True)
                    cms_pr, cms_gt = show_prediction(cms_probs, cms_labels[:, :-1], cms_vocab, caption=False)
                    print(' \n')

                    with open(opt['info_path'], 'a') as f:
                        f.write('model_%d, cap_loss: %.6f, cms_loss: %.6f\n'
                                % (epoch, cap_train_loss, cms_train_loss))
                        f.write('\n %s \n %s' % (cap_pr, cap_gt))
                        f.write('\n %s \n %s' % (cms_pr, cms_gt))
                        f.write('\n')

        if epoch % opt['save_checkpoint_every'] == 0:
            # save the checkpoint
            model_path = os.path.join(opt['output_dir'],
                                      'CMS_CAP_MODEL_{}_lr_{}_BS_{}_Layer_{}_ATTHEAD_{}_HID_{}_RNNLayer_{}_epoch_{}.pth'
                                      .format(opt['cms'], opt['init_lr'], opt['batch_size'], opt['num_layer'],
                                              opt['num_head'], opt['dim_model'], opt['rnn_layer'], epoch))
            torch.save(model.state_dict(), model_path)
            print('model saved to %s' % model_path)

            with open(opt['model_info_path'], 'a') as f:
                f.write('model_%d, cap_loss: %.6f, cms_loss: %.6f\n'
                        % (epoch, cap_train_loss/n_word, cms_train_loss/n_word))
def main(opt):
    # load and define dataloader
    dataset = VideoDataset(opt, 'train')
    dataloader = DataLoader(dataset, batch_size=opt['batch_size'], shuffle=True)

    opt['cms_vocab_size'] = dataset.get_cms_vocab_size()
    opt['cap_vocab_size'] = dataset.get_cap_vocab_size()

    if opt['cms'] == 'int':
        cms_text_length = opt['int_max_len']
    elif opt['cms'] == 'eff':
        cms_text_length = opt['eff_max_len']
    else:
        cms_text_length = opt['att_max_len']

    # model initialization.
    from model.S2VTModel import S2VTModel
    model = S2VTModel(
        dataset.get_cap_vocab_size(),
        dataset.get_cms_vocab_size(),
        opt['cap_max_len'],
        cms_text_length,
        opt["dim_model"],
        opt["dim_word"],
        opt['dim_vis_feat'],
        n_layers=opt['rnn_layer'])

    # number of parameters
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print('number of learnable parameters are {}'.format(params))

    if opt['cuda']: model = model.cuda()

    # resume from previous checkpoint if indicated
    if opt['load_checkpoint'] and opt['resume']:
        cap_state_dict = torch.load(opt['load_checkpoint'])
        model_dict = model.state_dict()
        model_dict.update(cap_state_dict)
        model.load_state_dict(model_dict)

    optimizer = ScheduledOptim(optim.Adam(filter(lambda x: x.requires_grad, model.parameters()),
                                          betas=(0.9, 0.98), eps=1e-09), 512, opt['warm_up_steps'])

    # note: though we set the init learning rate as np.power(d_model, -0.5),
    # grid search indicates different LR may improve the results.
    opt['init_lr'] = round(optimizer.init_lr, 3)

    # create checkpoint output directory
    dir = os.path.join(opt['checkpoint_path'], 'S2VT_CMS_CAP_MODEL_{}_lr_{}_BS_{}_Layer_{}_ATTHEAD_{}_HID_{}_RNNLayer_{}'
                       .format(opt['cms'], opt['init_lr'], opt['batch_size'], opt['num_layer'],
                               opt['num_head'], opt['dim_model'], opt['rnn_layer']))

    if not os.path.exists(dir): os.makedirs(dir)

    # save the model snapshot to local
    info_path = os.path.join(dir, 'iteration_info_log.log')
    print('model architecture saved to {} \n {}'.format(info_path, str(model)))
    with open(info_path, 'a') as f:
        f.write(str(model))
        f.write('\n')
        f.write(str(params))
        f.write('\n')

    # log file directory
    opt['output_dir'] = dir
    opt['info_path'] = info_path
    opt['model_info_path'] = os.path.join(opt['output_dir'], 'checkpoint_loss_log.log')

    train(dataloader, model, optimizer, opt, dataset.get_cap_vocab(), dataset.get_cms_vocab())


if __name__ == '__main__':
    opt = parse_opt()
    opt = vars(opt)
    main(opt)
| 41.475676
| 121
| 0.571224
| 975
| 7,673
| 4.208205
| 0.248205
| 0.021935
| 0.014623
| 0.015598
| 0.222033
| 0.1728
| 0.167926
| 0.140873
| 0.120887
| 0.066293
| 0
| 0.008246
| 0.304574
| 7,673
| 185
| 122
| 41.475676
| 0.760682
| 0.112472
| 0
| 0.104839
| 0
| 0.008065
| 0.160601
| 0.03212
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.080645
| 0
| 0.096774
| 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ab2a7a0d57050e80f3f20e1f2f61131ca45a9a
| 487
|
py
|
Python
|
new-influx-client.py
|
benlamonica/energy-monitor
|
86714a365c91cc05c265de81bce191ff4ab585f8
|
[
"MIT"
] | null | null | null |
new-influx-client.py
|
benlamonica/energy-monitor
|
86714a365c91cc05c265de81bce191ff4ab585f8
|
[
"MIT"
] | null | null | null |
new-influx-client.py
|
benlamonica/energy-monitor
|
86714a365c91cc05c265de81bce191ff4ab585f8
|
[
"MIT"
] | null | null | null |
import influxdb_client
from influxdb_client import InfluxDBClient
bucket = "python-client-sandbox"
org = "Energy Monitor"
token = "miQdAvNXHiNDVVzPzV5FpkCaR_8qdQ-L1FlPCOXQPI325Kbrh1fgfhkcDUZ4FepaebDdpZ-A1gmtnnjU0_hViA=="
url = "http://localhost:9999"
client = InfluxDBClient(url=url, token=token, org=org)
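# Obtain a write API handle from the client, then write a single point.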
write_api = client.write_api()
write_api.write("my-bucket", "my-org", [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"}, "fields": {"water_level": 1}, "time": 1}])
| 40.583333
| 148
| 0.755647
| 57
| 487
| 6.298246
| 0.649123
| 0.077994
| 0.072423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038117
| 0.084189
| 487
| 11
| 149
| 44.272727
| 0.766816
| 0
| 0
| 0
| 0
| 0
| 0.457906
| 0.223819
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86acd0c8a74d48d7a1cf116cc0a40300ec411cd2
| 16,459
|
py
|
Python
|
utils/thin.py
|
BnF-jadis/projet
|
212b1e7b179a564650fb959d9c2565648178f6b6
|
[
"CC-BY-3.0"
] | 5
|
2021-06-17T12:48:45.000Z
|
2022-01-22T22:23:44.000Z
|
utils/thin.py
|
BnF-jadis/projet
|
212b1e7b179a564650fb959d9c2565648178f6b6
|
[
"CC-BY-3.0"
] | 7
|
2020-11-13T18:42:14.000Z
|
2022-02-10T01:31:07.000Z
|
utils/thin.py
|
BnF-jadis/projet
|
212b1e7b179a564650fb959d9c2565648178f6b6
|
[
"CC-BY-3.0"
] | 1
|
2021-10-17T10:49:45.000Z
|
2021-10-17T10:49:45.000Z
|
# 2020, BackThen Maps
# Coded by Remi Petitpierre https://github.com/RPetitpierre
# For Bibliothèque nationale de France (BnF)
import cv2, thinning, os
import numpy as np
import pandas as pd
import shapefile as shp
from skimage.measure import approximate_polygon
from PIL import Image, ImageDraw
from utils.utils import *
from utils.match import toLatLon
Image.MAX_IMAGE_PIXELS = 500000000
def skeletonize(road_network: np.ndarray, path: str = "workshop/vectorized.png", largest_component: bool = False):
    ''' Thinning/skeletonization of the road network image to a wired model.

    Input(s):
        road_network: black and white image of the road network (streets in white)
        path: path where the skeletonized image should be saved
        largest_component: if True, only the largest road network component will be kept
    Output(s):
        vectorized: skeletonized image
    '''
    assert len(road_network.shape) == 2, 'ERROR: road_network must be grayscale image'

    img = cv2.resize(road_network, (road_network.shape[1]//2, road_network.shape[0]//2))
    vectorized = thinning.guo_hall_thinning(img)
    vectorized[vectorized > 100] = 255
    vectorized[vectorized <= 100] = 0

    if largest_component:
        try:
            _, labels, stats, _ = cv2.connectedComponentsWithStats(vectorized.copy(), connectivity=8, stats=cv2.CC_STAT_AREA)
            stats = stats[1:]
            main_component = (np.argmax(stats[:,4])+1).astype('int32')
            vectorized = (labels == main_component).astype('uint8')*255
        except:
            print('Warning: Skeletonization failed to apply largest_component = True param. Skipping.')

    cv2.imwrite(path, vectorized)

    return vectorized
def findNodes(image: np.ndarray):
    ''' Find the nodes in the road network skeleton image.

    Input(s):
        image: skeletonized image
    Output(s):
        nodes: array of nodes coordinates (x, y)
        degree: degrees of the nodes (2=endpoint, 4=crossroads of 3 streets, 5=crossroads of 4 streets, etc.)
        addresses: directions of the crossing roads, with regard to the node
    '''
    img = image.copy()

    # Find row and column locations that are non-zero
    (rows, cols) = np.nonzero(img)
    nodes, degree, addresses = [], [], []

    for (r, c) in zip(rows, cols):
        if r > 0 and c > 0 and r < image.shape[0]-1 and c < image.shape[1]-1:
            # Extract an 8-connected neighbourhood
            (col_neigh, row_neigh) = np.meshgrid(np.array([c-1, c, c+1]), np.array([r-1, r, r+1]))

            # Cast to int to index into image
            col_neigh = col_neigh.astype('int')
            row_neigh = row_neigh.astype('int')

            # Convert into a single 1D array and check for non-zero locations
            pix_neighbourhood = img[row_neigh, col_neigh].ravel() != 0

            # If the number of non-zero locations equals 2, add this to our list of coordinates
            n_neighbours = np.sum(pix_neighbourhood)
            if (n_neighbours == 2) or (n_neighbours >= 4):
                nodes.append((r, c))
                degree.append(n_neighbours)
                direction_set = np.where(pix_neighbourhood == True)[0]
                direction_set = direction_set[direction_set != 4]
                addresses.append(direction_set)

    nodes = np.asarray(nodes)

    return nodes, degree, addresses
def cleanNodesEdges(df_nodes: pd.DataFrame):
    df = df_nodes.copy()
    new_addresses, new_degree = [], []

    for ind, address in df['address'].iteritems():
        new_address = avoidDiagonalEdges(address)
        new_addresses.append(new_address)
        new_degree.append(len(new_address) + 1)

    df['address'] = new_addresses
    df['degree'] = new_degree

    return df


def avoidDiagonalEdges(address: list, direction: int = None):
    right, diagonal = [1, 3, 5, 7], {0: [1, 3], 2: [1, 5], 6: [3, 7], 8: [5, 7]}
    new_address = []

    for r in right:
        if r in address:
            new_address.append(r)

    for d in diagonal.keys():
        if d in address:
            if not(diagonal[d][0] in address) and not(diagonal[d][1] in address):
                if direction != None:
                    if not((8-direction) in diagonal[d]):
                        new_address.append(d)
                else:
                    new_address.append(d)

    return new_address
def explorePath(start_x: int, start_y: int, start_dir: int, image: np.ndarray, nodes_grid: np.ndarray):
    ''' Follow the path from one given start node and direction until the next node, and store the pixels
    on the way.

    Input(s):
        start_x: start node x-coordinate
        start_y: start node y-coordinate
        start_dir: starting direction ({0, 1, 2,
                                        3, -, 5,
                                        6, 7, 8})
        image: skeletonized image of the road network
        nodes_grid: grid of the nodes of the skeletonized image
    Output(s):
        way: list of pixel coordinates on the way
        direction: last direction to reach the 2nd node
        nodes_grid[x, y]: degree of the arrival node
    '''

    def absoluteWay(x: int, y: int, way: int):
        if way == 0:
            x_, y_ = x-1, y-1
        elif way == 1:
            x_, y_ = x-1, y
        elif way == 2:
            x_, y_ = x-1, y+1
        elif way == 3:
            x_, y_ = x, y-1
        elif way == 5:
            x_, y_ = x, y+1
        elif way == 6:
            x_, y_ = x+1, y-1
        elif way == 7:
            x_, y_ = x+1, y
        elif way == 8:
            x_, y_ = x+1, y+1
        else:
            raise AttributeError('Parameters invalid: (' + str(x) + ',' + str(y) + ',' + str(way) + '), way \
should be comprised between 0 and 8, and != 4. x, y and way should be of type int.')
        return x_, y_

    def noTurnBack(direction: int):
        wrong_paths = []
        if direction == 0:
            wrong_paths = [5, 7]
        elif direction == 1:
            wrong_paths = [6, 8]
        elif direction == 2:
            wrong_paths = [3, 7]
        elif direction == 3:
            wrong_paths = [2, 8]
        elif direction == 5:
            wrong_paths = [0, 6]
        elif direction == 6:
            wrong_paths = [1, 5]
        elif direction == 7:
            wrong_paths = [0, 2]
        elif direction == 8:
            wrong_paths = [1, 3]
        return wrong_paths

    direction = start_dir
    x, y = start_x, start_y
    assert image[x, y] != 0, 'ERROR: start point is not white'
    end = False
    way = [(x, y)]

    # First iteration
    new_x, new_y = absoluteWay(x, y, direction)
    assert image[new_x, new_y] != 0, 'ERROR: 2nd point is not white'
    way.append((new_x, new_y))
    x, y = new_x, new_y
    wrong_paths = noTurnBack(direction)
    wrong_paths_active = True

    if nodes_grid[x, y]:
        end = True
        direction = 8-start_dir

    while not(end):
        if x > 0 and y > 0 and x < image.shape[0]-1 and y < image.shape[1]-1:
            # Extract an 8-connected neighbourhood
            (row_neigh, col_neigh) = np.meshgrid(np.array([x-1, x, x+1]), np.array([y-1, y, y+1]))

            # Cast to int to index into image
            col_neigh, row_neigh = col_neigh.astype('int'), row_neigh.astype('int')

            # Convert into a single 1D array and check for non-zero locations
            try:
                pix_neighbourhood = image[row_neigh, col_neigh].transpose().ravel() != 0
            except:
                print(x, y, image.shape)
                raise AssertionError()

            # If the number of non-zero locations equals 2, add this to our list of coordinates
            n_neighbours = np.sum(pix_neighbourhood)
            direction_set = np.where(pix_neighbourhood == True)[0]
            last_ds = [wrong_paths]
            last_ds.append(direction_set)
            direction_set = direction_set[direction_set != 4]
            last_ds.append(direction_set)
            direction_set = direction_set[direction_set != (8-direction)]
            last_ds.append(direction_set)
            direction_set = np.asarray(avoidDiagonalEdges(direction_set, direction))
            last_ds.append(direction_set)

            if wrong_paths_active:
                for wrong_path in wrong_paths:
                    direction_set = direction_set[direction_set != wrong_path]
                wrong_paths_active = False

            if len(direction_set) != 1:
                end = True
                break

            direction = direction_set[0]
            new_x, new_y = absoluteWay(x, y, direction)
            way.append((new_x, new_y))
            x, y = new_x, new_y

            if nodes_grid[x, y]:
                end = True
        else:
            end = True

    return way, direction, nodes_grid[x, y]
def findSegments(df_nodes: pd.DataFrame, image: np.ndarray, min_length: int = 30, return_simple_ways: bool = True):
    ''' Find all the road segments in the network. Keep the ones that are longer than a given length or non-terminal.
    Optionally, compute the Douglas-Peucker simple itinerary of each segment and return it.

    Input(s):
        df_nodes: list of nodes
        image: skeletonized image of the road network
        min_length: min segment length if the segment is terminal
        return_simple_ways: if True, compute the Douglas-Peucker simple itinerary of each segment and return it
    Output(s):
        (Optional)(simple_ways: the Douglas-Peucker simple itinerary of each segment)
        ways: list of segments, containing all the pixels on the way between each couple of nodes
        nodes_grid: image containing all the nodes found in the image and their degree
    '''
    img = image.copy()
    done, ways = [], []
    df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)
    nodes_grid = np.zeros(image.shape)

    for ind, row in df_nodes[['x', 'y', 'degree']].iterrows():
        nodes_grid[row['x'], row['y']] = row['degree']
    nodes_grid = nodes_grid.astype('int')

    for ind, node in df_nodes.iterrows():
        for direct in node['address']:
            code = str(node['x']) + '_' + str(node['y']) + '_' + str(direct)
            if not(code in done):
                way, last_direct, degree = explorePath(start_x=node['x'], start_y=node['y'],
                                                       start_dir=direct, image=img, nodes_grid=nodes_grid)
                if not((len(way) <= min_length) and ((node['degree'] == 2) or (degree == 2))):
                    done.append(str(way[-1][0]) + '_' + str(way[-1][1]) + '_' + str(8-last_direct))
                    ways.append(way)

    if return_simple_ways:
        simple_ways = []
        for way in ways:
            inv_way = np.asarray([np.asarray(way)[:,1], image.shape[0]-np.asarray(way)[:,0]]).transpose()
            simple_ways.append(approximate_polygon(np.asarray(inv_way), tolerance=1.6).tolist())
        return simple_ways, ways, nodes_grid
    else:
        return ways, nodes_grid
def thinImage(image: np.ndarray, image_name: str, export_file_path: str, exportPNG: bool = False,
              exportJSON: bool = False, exportSVG: bool = False, exportSHP: bool = False, geoloc: bool = False):

    assert (exportPNG or exportJSON or exportSVG or exportSHP)

    # Convert to B&W
    road_network = image.copy()
    road_network[road_network < 254] = 0
    road_network[road_network < 255/2] = 0
    road_network[road_network >= 255/2] = 255

    vectorized = skeletonize(road_network, largest_component = True)
    nodes, degree, addresses = findNodes(vectorized)
    if len(degree) == 0:
        return [], [], np.zeros((image.shape[1], image.shape[0]))

    df_nodes = pd.DataFrame({'x': nodes[:,0], 'y': nodes[:,1], 'degree': degree, 'address': addresses })
    df_nodes = df_nodes.sort_values(by='degree').reset_index(drop=True)
    df_nodes = cleanNodesEdges(df_nodes)
    df_nodes = df_nodes[df_nodes['degree'] != 3]

    if (exportJSON or exportSHP):
        simple_segments, full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15,
                                                                  return_simple_ways = True)
    else:
        full_segments, nodes_grid = findSegments(df_nodes, vectorized, min_length = 15,
                                                 return_simple_ways = False)
        simple_segments = []

    if exportPNG:
        toPNG(full_segments, vectorized, export_file_path)
    elif exportSVG:
        toPNG(full_segments, vectorized, os.path.join('workshop', 'thin.png'))

    if geoloc:
        if exportJSON:
            project_name = getProjectName()
            try:
                with open(os.path.join('save', project_name, 'match' , 'primary', image_name + '.json')) as data:
                    data = json.load(data)
                M = np.asarray(data['M'])
                simple_segments_JSON = []
                for segment in simple_segments:
                    s = np.asarray([2*np.asarray(segment)[:,0], image.shape[0]-(2*np.asarray(segment)[:,1])]).T
                    simple_segments_JSON.append(toLatLon((s@M[:, :2]) + M[:, 2:3].transpose()).tolist())
            except:
                print("The geolocation of image {} has not been computed yet. Therefore, the geolocation \
of its wire network cannot be computed either.".format(image_name))
                simple_segments_JSON = simple_segments
        else:
            print('Geolocation of the wire network currently only works for the JSON format.')
    else:
        simple_segments_JSON = simple_segments

    if exportJSON:
        with open(export_file_path.replace('png', 'json'), 'w') as outfile:
            json.dump(simple_segments_JSON, outfile)

    if exportSHP:
        os.makedirs(export_file_path.replace('.png', ''), exist_ok=True)
        toShapefile(simple_segments, os.path.join(export_file_path.replace('.png', ''), image_name))

    if exportSVG:
        print("\nWarning: if you have never used this command before, first install \
Homebrew, ImageMagick and Potrace from the terminal.\n")
        print('To install Homebrew:\n',
              ' /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"')
        print('To install ImageMagick:\n', ' brew install imagemagick')
        print('To install Potrace: \n', ' brew install potrace\n')

        if exportPNG:
            png_path = export_file_path
        else:
            png_path = os.path.join('workshop', 'thin.png')
        pnm_path = os.path.join('workshop', 'thin.pnm')
        svg_path = export_file_path.replace('png', 'svg')

        os.system('convert ' + png_path + ' ' + pnm_path)
        os.system('potrace ' + pnm_path + ' -s -o ' + svg_path)

    return simple_segments, full_segments, nodes_grid
def toPNG(segments: list, vectorized: np.ndarray, out_path: str):
    ''' Save a given set of segments as a bitmap image from the road network.

    Input(s):
        segments: list of segments, containing all the pixels on the way between each couple of nodes
        vectorized: skeletonized image of the road network
        out_path: the path where the output bitmap image should be saved
    '''
    canvas = (np.ones(vectorized.shape)*255).astype('uint8')
    cv2.imwrite('workshop/canvas.png', canvas)
    bitmap = Image.open('workshop/canvas.png')
    draw = ImageDraw.Draw(bitmap)

    for segment in segments:
        coords = []
        for point in segment:
            coords.append((point[1], point[0]))
        draw.line(coords, fill = 'black', width=0)

    bitmap.save(out_path)


def toShapefile(simple_ways, out_path):
    w = shp.Writer(out_path)
    w.field('DeletionFlag', 'C', 1, 0)
    w.field('gid', 'N', 11, 0)
    w.field('streetname', 'C', 41, 0)
    w.field('note', 'C', 32, 0)

    for i in range(len(simple_ways)):
        w.line([simple_ways[i]])
        w.record('01', i, '', '')

    w.close()
| 37.663616
| 125
| 0.584118
| 2,137
| 16,459
| 4.361722
| 0.185307
| 0.005579
| 0.027036
| 0.028323
| 0.244609
| 0.215428
| 0.186032
| 0.145585
| 0.126381
| 0.11694
| 0
| 0.020471
| 0.305486
| 16,459
| 436
| 126
| 37.75
| 0.794944
| 0.178808
| 0
| 0.176471
| 0
| 0.007353
| 0.065613
| 0.001731
| 0
| 0
| 0
| 0
| 0.018382
| 1
| 0.040441
| false
| 0
| 0.029412
| 0
| 0.110294
| 0.025735
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86acd82b514b30458fa54cefc7db6d72f32e8646
| 875
|
py
|
Python
|
easy2fa/tests/test_checkinput.py
|
lutostag/otp
|
0792548fa51c489cdc5fcb01a3c6dad1cd453154
|
[
"MIT"
] | 3
|
2018-01-22T13:45:12.000Z
|
2022-01-27T04:17:52.000Z
|
easy2fa/tests/test_checkinput.py
|
lutostag/otp
|
0792548fa51c489cdc5fcb01a3c6dad1cd453154
|
[
"MIT"
] | 1
|
2017-01-24T23:57:51.000Z
|
2017-12-11T14:33:32.000Z
|
easy2fa/tests/test_checkinput.py
|
lutostag/otp
|
0792548fa51c489cdc5fcb01a3c6dad1cd453154
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from easy2fa import cli
class TestCheckInput(TestCase):
    @patch('builtins.input')
    def test_default(self, mock_input):
        mock_input.return_value = ''
        self.assertEqual(cli.check_input('prompt', default='one'), 'one')
        mock_input.return_value = 'two'
        self.assertEqual(cli.check_input('prompt', default='one'), 'two')

    @patch('builtins.input')
    @patch('builtins.print')
    def test_assertions(self, mock_print, mock_input):
        def assertion(value):
            if value not in ['yes', 'no']:
                return 'use yes or no'
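        # Successive input() calls return these values in order; the first two
        # fail the assertion above, so check_input keeps re-prompting until 'no'.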
        mock_input.side_effect = ['input', '', 'no']
        self.assertEqual(cli.check_input('prompt', assertion=assertion),
                         'no')
        mock_print.assert_called_with('\tInvalid input: use yes or no')
| 33.653846
| 74
| 0.634286
| 106
| 875
| 5.075472
| 0.367925
| 0.083643
| 0.105948
| 0.133829
| 0.232342
| 0.232342
| 0.167286
| 0.167286
| 0
| 0
| 0
| 0.001495
| 0.235429
| 875
| 25
| 75
| 35
| 0.802691
| 0
| 0
| 0.1
| 0
| 0
| 0.150857
| 0
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.15
| false
| 0
| 0.15
| 0
| 0.4
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ad342de7b5dfdb142a5dff63b155f6c655c5c6
| 2,845
|
py
|
Python
|
bert_finetuning/data_loader.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | null | null | null |
bert_finetuning/data_loader.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | null | null | null |
bert_finetuning/data_loader.py
|
nps1ngh/adversarial-bert-german-attacks-defense
|
3cca292ec4c3c07945f4198ae81e1f671462ed90
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from bert_finetuning.data import GermanData
class GermanDataLoader:
    def __init__(
        self,
        data_paths,
        model_name,
        do_cleansing,
        max_sequence_length,
        batch_size=8,
        dataset_cls=GermanData,
    ):
        self.german_data = dataset_cls(
            data_paths,
            model_name,
            max_sequence_length=max_sequence_length,
            do_cleansing=do_cleansing,
        )

        self.batch_size = batch_size
        self.create_loaders()

    def create_loaders(self):
        """
        Create Torch dataloaders for data splits
        """
        self.german_data.text_to_tensors()
        print("creating dataloaders")

        train_data = TensorDataset(
            self.german_data.train_inputs,
            self.german_data.train_masks,
            self.german_data.train_labels,
        )
        train_sampler = RandomSampler(train_data)
        self.train_dataloader = DataLoader(
            train_data, sampler=train_sampler, batch_size=self.batch_size
        )

        validation_data = TensorDataset(
            self.german_data.validation_inputs,
            self.german_data.validation_masks,
            self.german_data.validation_labels,
        )
        validation_sampler = SequentialSampler(validation_data)
        self.validation_dataloader = DataLoader(
            validation_data, sampler=validation_sampler, batch_size=self.batch_size
        )

        test_data = TensorDataset(
            self.german_data.test_inputs,
            self.german_data.test_masks,
            self.german_data.test_labels,
        )
        test_sampler = SequentialSampler(test_data)
        self.test_dataloader = DataLoader(
            test_data, sampler=test_sampler, batch_size=self.batch_size
        )

        print("finished creating dataloaders")
"""
** FOR DEBUGGING **
if __name__ == "__main__":
## define data paths
germeval_data_paths = {
"train": "./datasets/hasoc_dataset/hasoc_german_train.csv",
"dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv",
"test": "./datasets/hasoc_dataset/hasoc_german_test.csv",
}
hasoc_german_data_paths = {
"train": "./datasets/hasoc_dataset/hasoc_german_train.csv",
"dev": "./datasets/hasoc_dataset/hasoc_german_validation.csv",
"test": "./datasets/hasoc_dataset/hasoc_german_test.csv",
}
## create dataloaders
print("creating germeval dataloaders...")
germ_eval_dataloader = GermanDataLoader(germeval_data_paths)
print("creating hasoc dataloaders...")
hasoc_german_dataloader = GermanDataLoader(hasoc_german_data_paths)
"""
| 31.966292
| 89
| 0.634798
| 284
| 2,845
| 5.96831
| 0.211268
| 0.076696
| 0.090855
| 0.088496
| 0.273746
| 0.218879
| 0.167552
| 0.167552
| 0.167552
| 0.167552
| 0
| 0.000489
| 0.281547
| 2,845
| 88
| 90
| 32.329545
| 0.828767
| 0.01406
| 0
| 0.078431
| 0
| 0
| 0.026147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.039216
| 0
| 0.098039
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ae167dd0746f0077e0b0c327435fcca99f837b
| 1,973
|
py
|
Python
|
data/dirty_mnist.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 43
|
2021-05-20T14:07:53.000Z
|
2022-03-23T12:58:26.000Z
|
data/dirty_mnist.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 3
|
2021-09-19T20:49:21.000Z
|
2022-03-07T10:25:47.000Z
|
data/dirty_mnist.py
|
Karthik-Ragunath/DDU
|
b9daae9304bdeb222857884ef8cb3b6b3d004d33
|
[
"MIT"
] | 8
|
2021-06-26T15:28:45.000Z
|
2022-02-19T02:07:05.000Z
|
import torch
import numpy as np
import torch.utils.data as data
from torch.utils.data import Subset
from data.fast_mnist import create_MNIST_dataset
from data.ambiguous_mnist.ambiguous_mnist_dataset import AmbiguousMNIST
def get_train_valid_loader(root, batch_size, val_seed=1, val_size=0.1, **kwargs):
error_msg = "[!] val_size should be in the range [0, 1]."
assert (val_size >= 0) and (val_size <= 1), error_msg
# load the dataset
mnist_train_dataset, _ = create_MNIST_dataset()
    # AmbiguousMNIST whitens the data itself
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_dataset = data.ConcatDataset(
[mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
)
valid_dataset = data.ConcatDataset(
[mnist_train_dataset, AmbiguousMNIST(root=root, train=True, device=device),]
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(val_size * num_train))
np.random.seed(val_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_subset = Subset(train_dataset, train_idx)
valid_subset = Subset(valid_dataset, valid_idx)
train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, num_workers=0, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_subset, batch_size=batch_size, num_workers=0, shuffle=False)
return train_loader, valid_loader
def get_test_loader(root, batch_size, **kwargs):
# load the dataset
_, mnist_test_dataset = create_MNIST_dataset()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
test_dataset = data.ConcatDataset(
[mnist_test_dataset, AmbiguousMNIST(root=root, train=False, device=device),]
)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
return test_loader
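# Usage sketch (illustrative, not in the original file): build loaders for
# DirtyMNIST (MNIST + AmbiguousMNIST) with a 10% validation split. Assumes
# `./data` contains the AmbiguousMNIST files.
if __name__ == "__main__":
    train_loader, valid_loader = get_train_valid_loader(
        root="./data", batch_size=128, val_seed=1, val_size=0.1
    )
    test_loader = get_test_loader(root="./data", batch_size=128)
    print(len(train_loader.dataset), len(valid_loader.dataset))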
| 34.017241
| 113
| 0.737456
| 276
| 1,973
| 5.007246
| 0.235507
| 0.052098
| 0.050651
| 0.062952
| 0.344428
| 0.254703
| 0.254703
| 0.254703
| 0.254703
| 0.193922
| 0
| 0.006035
| 0.160162
| 1,973
| 57
| 114
| 34.614035
| 0.828002
| 0.03852
| 0
| 0.111111
| 0
| 0
| 0.030111
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b032b82ee76fccb3eab7e57dd8b06b6868e592
| 2,633
|
py
|
Python
|
examples/basic_examples/aws_sns_sqs_middleware_service.py
|
tranvietanh1991/tomodachi
|
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
|
[
"MIT"
] | 1
|
2021-11-01T02:18:55.000Z
|
2021-11-01T02:18:55.000Z
|
examples/basic_examples/aws_sns_sqs_middleware_service.py
|
tranvietanh1991/tomodachi
|
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
|
[
"MIT"
] | 1
|
2020-12-28T16:16:53.000Z
|
2020-12-28T16:16:53.000Z
|
examples/basic_examples/aws_sns_sqs_middleware_service.py
|
tranvietanh1991/tomodachi
|
a815fc718b6cc42dc3fe241abb0e5a5829eba0e8
|
[
"MIT"
] | null | null | null |
import os
from typing import Any, Callable, Dict
import tomodachi
from tomodachi import aws_sns_sqs, aws_sns_sqs_publish
from tomodachi.discovery import AWSSNSRegistration
from tomodachi.envelope import JsonBase
async def middleware_function(
func: Callable, service: Any, message: Any, topic: str, context: Dict, *args: Any, **kwargs: Any
) -> Any:
# Functionality before function is called
service.log("middleware before")
return_value = await func(*args, **kwargs)
    # It is also possible to pass extra positional or keyword arguments, for example:
    # return_value = await func(*args, id='overridden', **kwargs)
    # Functionality after the function is called
service.log("middleware after")
return return_value
class ExampleAWSSNSSQSService(tomodachi.Service):
name = "example-aws-sns-sqs-service"
log_level = "INFO"
uuid = str(os.environ.get("SERVICE_UUID") or "")
# Build own "discovery" functions, to be run on start and stop
# See tomodachi/discovery/aws_sns_registration.py for example
discovery = [AWSSNSRegistration]
# The message envelope class defines how a message should be processed when sent and received
# See tomodachi/envelope/json_base.py for a basic example using JSON and transferring some metadata
message_envelope = JsonBase
# Adds a middleware function that is run on every incoming message.
# Several middlewares can be chained.
message_middleware = [middleware_function]
# Some options can be specified to define credentials, used ports, hostnames, access log, etc.
options = {
"aws_sns_sqs": {
"region_name": None, # specify AWS region (example: 'eu-west-1')
"aws_access_key_id": None, # specify AWS access key (example: 'AKIAXNTIENCJIY2STOCI')
"aws_secret_access_key": None, # specify AWS secret key (example: 'f7sha92hNotarealsecretkeyn29ShnSYQi3nzgA')
},
"aws_endpoint_urls": {
"sns": None, # For example 'http://localhost:4575' if localstack is used for testing
"sqs": None, # For example 'http://localhost:4576' if localstack is used for testing
},
}
@aws_sns_sqs("example-route1")
async def route1a(self, data: Any) -> None:
self.log('Received data (function: route1a) - "{}"'.format(data))
async def _started_service(self) -> None:
async def publish(data: Any, topic: str) -> None:
self.log('Publish data "{}"'.format(data))
await aws_sns_sqs_publish(self, data, topic=topic, wait=False)
await publish("友達", "example-route1")
| 39.298507
| 122
| 0.692366
| 331
| 2,633
| 5.401813
| 0.404834
| 0.02349
| 0.030201
| 0.017897
| 0.128635
| 0.071588
| 0
| 0
| 0
| 0
| 0
| 0.009629
| 0.211166
| 2,633
| 66
| 123
| 39.893939
| 0.851228
| 0.399544
| 0
| 0
| 0
| 0
| 0.157289
| 0.030691
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b0a422c8bc9f85b86cb962da85b578f24f06e1
| 425
|
py
|
Python
|
ex9.py
|
ThitsarAung/python-exercises
|
bca97875e25f9621fc5f58ab1d360426a21efc7f
|
[
"MIT"
] | null | null | null |
ex9.py
|
ThitsarAung/python-exercises
|
bca97875e25f9621fc5f58ab1d360426a21efc7f
|
[
"MIT"
] | null | null | null |
ex9.py
|
ThitsarAung/python-exercises
|
bca97875e25f9621fc5f58ab1d360426a21efc7f
|
[
"MIT"
] | null | null | null |
types_of_people = 10
x = f"There are {types_of_people} types of people."
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
print(x)
print(y)
print(f"I said: {x}")
print(f"I also said: '{y}'")
hilarious = False
joke_evaluation = "Isn't that joke so funny?! {}"
print(joke_evaluation.format(hilarious))
w="This is the left side of..."
e="a string with a right side."
print(w + e)
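# Illustrative addition: the same concatenation can also be done inside a
# single f-string.
print(f"{w}{e}")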
| 18.478261
| 54
| 0.672941
| 78
| 425
| 3.564103
| 0.538462
| 0.07554
| 0.140288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005634
| 0.164706
| 425
| 22
| 55
| 19.318182
| 0.777465
| 0
| 0
| 0
| 0
| 0
| 0.503529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b2f2b4446116811cbd5f27739dd93c92634c93
| 7,182
|
py
|
Python
|
mmdnn/conversion/caffe/writer.py
|
2yz/MMdnn
|
13d909e4b591a5043b74b611e412c3c0a5eba0cc
|
[
"MIT"
] | 3,442
|
2017-11-20T08:39:51.000Z
|
2019-05-06T10:51:19.000Z
|
mmdnn/conversion/caffe/writer.py
|
2yz/MMdnn
|
13d909e4b591a5043b74b611e412c3c0a5eba0cc
|
[
"MIT"
] | 430
|
2017-11-29T04:21:48.000Z
|
2019-05-06T05:37:37.000Z
|
mmdnn/conversion/caffe/writer.py
|
2yz/MMdnn
|
13d909e4b591a5043b74b611e412c3c0a5eba0cc
|
[
"MIT"
] | 683
|
2017-11-20T08:50:34.000Z
|
2019-05-04T04:25:14.000Z
|
import base64
from google.protobuf import json_format
from importlib import import_module
import json
import numpy as np
import os
import sys
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import fetch_attr_value
from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
class JsonFormatter(object):
    '''Dump a DL graph into a JSON file.'''
def __init__(self, graph):
self.graph_def = graph.as_graph_def()
def dump(self, json_path):
json_txt = json_format.MessageToJson(self.graph_def)
parsed = json.loads(json_txt)
formatted = json.dumps(parsed, indent=4, sort_keys=True)
with open(json_path, 'w') as f:
f.write(formatted)
class PyWriter(object):
    '''Dump a DL graph into a Python script.'''
def __init__(self, graph, data, target):
self.graph = graph
self.data = data
self.tab = ' ' * 4
self.prefix = ''
target = target.lower()
if target == 'tensorflow':
self.target = target
self.net = 'TensorFlowNetwork'
elif target == 'keras':
self.target = target
self.net = 'KerasNetwork'
elif target == 'caffe':
self.target = target
self.net = 'CaffeNetwork'
else:
raise ConversionError('Target %s is not supported yet.' % target)
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
return self.statement('from dlconv.%s import %s\n' % (self.target, self.net))
def emit_class_def(self, name):
return self.statement('class %s(%s):' % (name, self.net))
def emit_setup_def(self):
return self.statement('def setup(self):')
def emit_node(self, node):
'''Emits the Python source for this node.'''
def pair(key, value):
return '%s=%s' % (key, value)
args = []
for input in node.input:
input = input.strip().split(':')
name = ''.join(input[:-1])
idx = int(input[-1])
assert name in self.graph.node_dict
parent = self.graph.get_node(name)
args.append(parent.output[idx])
#FIXME:
output = [node.output[0]]
# output = node.output
for k, v in node.attr:
if k == 'cell_type':
args.append(pair(k, "'" + fetch_attr_value(v) + "'"))
else:
args.append(pair(k, fetch_attr_value(v)))
args.append(pair('name', "'" + node.name + "'")) # Set the node name
args = ', '.join(args)
return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args))
def dump(self, code_output_dir):
if not os.path.exists(code_output_dir):
os.makedirs(code_output_dir)
file_name = get_lower_case(self.graph.name)
code_output_path = os.path.join(code_output_dir, file_name + '.py')
data_output_path = os.path.join(code_output_dir, file_name + '.npy')
with open(code_output_path, 'w') as f:
f.write(self.emit())
with open(data_output_path, 'wb') as f:
np.save(f, self.data)
return code_output_path, data_output_path
def emit(self):
# Decompose DAG into chains
chains = []
for node in self.graph.topologically_sorted():
attach_to_chain = None
if len(node.input) == 1:
parent = get_real_name(node.input[0])
for chain in chains:
if chain[-1].name == parent: # Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None: # Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Generate Python code line by line
source = self.emit_imports()
source += self.emit_class_def(self.graph.name)
self.indent()
source += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
source += '\n\n'.join(blocks)
return source
class ModelSaver(object):
def __init__(self, code_output_path, data_output_path):
self.code_output_path = code_output_path
self.data_output_path = data_output_path
def dump(self, model_output_dir):
'''Return the file path containing graph in generated model files.'''
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
sys.path.append(os.path.dirname(self.code_output_path))
file_name = os.path.splitext(os.path.basename(self.code_output_path))[0]
module = import_module(file_name)
class_name = get_upper_case(file_name)
net = getattr(module, class_name)
return net.dump(self.data_output_path, model_output_dir)
class GraphDrawer(object):
def __init__(self, toolkit, meta_path):
self.toolkit = toolkit.lower()
self.meta_path = meta_path
def dump(self, graph_path):
if self.toolkit == 'tensorflow':
from dlconv.tensorflow.visualizer import TensorFlowVisualizer
if self._is_web_page(graph_path):
TensorFlowVisualizer(self.meta_path).dump_html(graph_path)
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
elif self.toolkit == 'keras':
from dlconv.keras.visualizer import KerasVisualizer
png_path, html_path = (None, None)
if graph_path.endswith('.png'):
png_path = graph_path
elif self._is_web_page(graph_path):
png_path = graph_path + ".png"
html_path = graph_path
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
KerasVisualizer(self.meta_path).dump_png(png_path)
if html_path:
self._png_to_html(png_path, html_path)
os.remove(png_path)
else:
raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit)
def _is_web_page(self, path):
return path.split('.')[-1] in ('html', 'htm')
def _png_to_html(self, png_path, html_path):
with open(png_path, "rb") as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
source = """<!DOCTYPE>
<html>
<head>
<meta charset="utf-8">
<title>Keras</title>
</head>
<body>
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" />
</body>
</html>""".format(base64_str=encoded)
with open(html_path, 'w', encoding='utf-8') as f:
f.write(source)
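# Usage sketch (illustrative, not in the original file): `graph` is assumed to
# be the common-graph object used elsewhere in MMdnn (exposing as_graph_def(),
# name, and topologically_sorted()); `weights` is the numpy weight dict.
def export_graph(graph, weights, output_dir="./generated", target="keras"):
    # Dump the graph to JSON and emit runnable Python code plus a .npy file.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    JsonFormatter(graph).dump(os.path.join(output_dir, "graph.json"))
    return PyWriter(graph, weights, target).dump(output_dir)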
| 35.731343
| 92
| 0.589112
| 911
| 7,182
| 4.446762
| 0.212953
| 0.037028
| 0.027647
| 0.017773
| 0.152555
| 0.121945
| 0.082942
| 0.071094
| 0.056282
| 0.056282
| 0
| 0.005145
| 0.296436
| 7,182
| 201
| 93
| 35.731343
| 0.796557
| 0.048733
| 0
| 0.085366
| 0
| 0
| 0.082427
| 0.005877
| 0
| 0
| 0
| 0.004975
| 0.006098
| 1
| 0.115854
| false
| 0
| 0.097561
| 0.036585
| 0.29878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b35d8336f90b1f441624f230053b48e0260a33
| 1,258
|
py
|
Python
|
week1/85-maximal-rectangle.py
|
LionTao/algo_weekend
|
d25756761d47491b8c78ecf8a857080497910c76
|
[
"Unlicense"
] | null | null | null |
week1/85-maximal-rectangle.py
|
LionTao/algo_weekend
|
d25756761d47491b8c78ecf8a857080497910c76
|
[
"Unlicense"
] | null | null | null |
week1/85-maximal-rectangle.py
|
LionTao/algo_weekend
|
d25756761d47491b8c78ecf8a857080497910c76
|
[
"Unlicense"
] | null | null | null |
"""
leetcode-85
给定一个仅包含 0 和 1 , 大小为 rows x cols 的二维二进制矩阵, 找出只包含 1 的最大矩形, 并返回其面积。
"""
from typing import List
class Solution:
def maximalRectangle(self, matrix: List[List[str]]) -> int:
"""
        Build a per-column height histogram row by row, then use a
        monotonically increasing stack on each histogram.
"""
rows = len(matrix)
if rows == 0:
return 0
columns = len(matrix[0])
res = 0
heights = [0]*columns
for r in range(rows):
for c in range(columns):
if matrix[r][c]=="1":
heights[c]+=1
else:
heights[c]=0
            res = max(res, self.largestRectangleArea(heights))
        return res
def largestRectangleArea(self, heights: List[int]) -> int:
        # monotonically increasing stack
heights = [-1] + heights + [-1]
res = 0
ascend_stack = []
for i in range(len(heights)):
while ascend_stack and heights[ascend_stack[-1]] > heights[i]:
window_L_height_min_height = heights[ascend_stack.pop(-1)]
window_L = ascend_stack[-1] + 1
window_R = i - 1
cur_area = window_L_height_min_height * (window_R - window_L + 1)
res = max(res, cur_area)
ascend_stack.append(i)
return res
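if __name__ == "__main__":
    # Quick check (added for illustration): the classic LeetCode 85 example,
    # whose largest all-ones rectangle has area 6.
    grid = [
        ["1", "0", "1", "0", "0"],
        ["1", "0", "1", "1", "1"],
        ["1", "1", "1", "1", "1"],
        ["1", "0", "0", "1", "0"],
    ]
    print(Solution().maximalRectangle(grid))  # -> 6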
| 32.25641
| 81
| 0.509539
| 150
| 1,258
| 4.14
| 0.366667
| 0.10628
| 0.028986
| 0.05153
| 0.070853
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028205
| 0.379968
| 1,258
| 39
| 82
| 32.25641
| 0.767949
| 0.075517
| 0
| 0.068966
| 0
| 0
| 0.000883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b3d8112beb6b385c29392912e1d48581db14c2
| 680
|
py
|
Python
|
cookie_refresh.py
|
guoxianru/cookie_pool_lite
|
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
|
[
"Apache-2.0"
] | null | null | null |
cookie_refresh.py
|
guoxianru/cookie_pool_lite
|
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
|
[
"Apache-2.0"
] | null | null | null |
cookie_refresh.py
|
guoxianru/cookie_pool_lite
|
02c4b2009b4c8aa3306ae1f5f7c5decde1eb5f3f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: GXR
# @CreateTime: 2022-01-20
# @UpdateTime: 2022-01-20
import redis
import config
import cookie_login
from cookie_api import app
red = redis.Redis(
host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
decode_responses=True,
)
# Top up the cookie pool until it reaches the configured count
def cookie_refresh():
while 1:
cookie_list = red.smembers(config.REDIS_KEY_COOKIE)
if len(cookie_list) >= config.COOKIE_COUNT:
break
cookie_login.run_cookie_login(1)
app.logger.info("[cookie数量正常]-[%s]" % len(cookie_list))
def run_cookie_refresh():
cookie_refresh()
if __name__ == "__main__":
run_cookie_refresh()
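# Illustrative note (added): the loop above blocks until the pool holds at
# least COOKIE_COUNT cookies. A hypothetical `config` module could look like:
#
# REDIS_HOST = "127.0.0.1"
# REDIS_PORT = 6379
# REDIS_DB = 0
# REDIS_KEY_COOKIE = "cookie:pool"
# COOKIE_COUNT = 20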
| 18.888889
| 59
| 0.679412
| 91
| 680
| 4.758242
| 0.483516
| 0.101617
| 0.036952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034862
| 0.198529
| 680
| 35
| 60
| 19.428571
| 0.759633
| 0.136765
| 0
| 0
| 0
| 0
| 0.043029
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.190476
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b6adb997cbd21ec9e8e9a5843dcd2235408ae3
| 2,997
|
py
|
Python
|
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
|
yangulei/tvm
|
d2cbdf381b68134951bfd7525c6a3a67838e5bdf
|
[
"Apache-2.0"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
|
dmlc/tvm
|
1e0e9548a6875241267481a4223b4dbf29fa1641
|
[
"Apache-2.0"
] | 2,863
|
2017-08-17T19:55:50.000Z
|
2019-11-04T17:18:41.000Z
|
python/tvm/topi/hexagon/slice_ops/add_subtract_multiply.py
|
yelite/tvm
|
7ae919292d42f5858d4db04533bca67b4b5bb44f
|
[
"Apache-2.0"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Compute and schedule for add, multiply, subtract slice op
Please note the following assumptions made by the implementation:
1) The inputs will be multiples of the crouton layout except for the axis that needs broadcasting."""
from tvm import te
from tvm import tir
from tvm import topi
from ..utils import get_layout_transform_fn
def add_broadcast_compute(input_a, input_b):
"""Call the add op from topi"""
return topi.add(input_a, input_b)
def subtract_broadcast_compute(input_a, input_b):
"""Call the subtract op from topi"""
return topi.subtract(input_a, input_b)
def multiply_broadcast_compute(input_a, input_b):
"""Call the multiply op from topi"""
return topi.multiply(input_a, input_b)
def tir_broadcast_schedule(
out_m,
input_a,
input_b,
output_layout: str,
input_a_layout: str,
input_b_layout: str,
op_name: str,
):
"""Schedule for input and output layout nhwc-8h2w32c2w-2d considering broadcast"""
func = te.create_prim_func([input_a, input_b, out_m])
s = tir.Schedule(func)
block_dict = {"add": "T_add", "subtract": "T_subtract", "multiply": "T_multiply"}
block = s.get_block(block_dict[op_name])
if input_a_layout == "nhwc-8h2w32c2w-2d":
input_a_transformed_layout = get_layout_transform_fn(input_a_layout)
s.transform_layout(block, buffer=("read", 0), index_map=input_a_transformed_layout)
if input_b_layout == "nhwc-8h2w32c2w-2d":
input_b_transformed_layout = get_layout_transform_fn(input_b_layout)
s.transform_layout(block, buffer=("read", 1), index_map=input_b_transformed_layout)
output_transformed_layout = get_layout_transform_fn(output_layout)
s.transform_layout(block, buffer=("write", 0), index_map=output_transformed_layout)
n, h, w, c = s.get_loops(block)
h_o, h_i = s.split(h, [None, 8])
w_o, w_i = s.split(w, [None, 4])
c_o, c_i = s.split(c, [None, 32])
wio, wii = s.split(w_i, [None, 2])
s.reorder(n, h_o, w_o, c_o, h_i, wio, c_i, wii)
fused = s.fuse(c_i, wii)
s.vectorize(fused)
return s
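# Usage sketch (illustrative assumptions): schedule an elementwise add of two
# nhwc-8h2w32c2w-2d tensors. Per the layout splits above, shapes must satisfy
# h % 8 == 0, w % 4 == 0, c % 32 == 0; the shape and dtype below are assumed.
def _demo_add_schedule():
    shape = (1, 8, 8, 32)
    input_a = te.placeholder(shape, name="A", dtype="float16")
    input_b = te.placeholder(shape, name="B", dtype="float16")
    out = add_broadcast_compute(input_a, input_b)
    return tir_broadcast_schedule(
        out, input_a, input_b,
        output_layout="nhwc-8h2w32c2w-2d",
        input_a_layout="nhwc-8h2w32c2w-2d",
        input_b_layout="nhwc-8h2w32c2w-2d",
        op_name="add",
    )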
| 34.056818
| 97
| 0.703704
| 461
| 2,997
| 4.362256
| 0.338395
| 0.038787
| 0.043759
| 0.047737
| 0.24366
| 0.165589
| 0.130781
| 0.052213
| 0
| 0
| 0
| 0.01296
| 0.201869
| 2,997
| 87
| 98
| 34.448276
| 0.827759
| 0.389389
| 0
| 0
| 0
| 0
| 0.053498
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b7ef11958dc926cec50bcec5a016a3d479c413
| 6,634
|
py
|
Python
|
python_modules/automation/automation/docker/dagster_docker.py
|
jrouly/dagster
|
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
|
[
"Apache-2.0"
] | null | null | null |
python_modules/automation/automation/docker/dagster_docker.py
|
jrouly/dagster
|
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
python_modules/automation/automation/docker/dagster_docker.py
|
jrouly/dagster
|
2b3104db2fc6439050f7825d4b9ebaf39ddf6c0c
|
[
"Apache-2.0"
] | null | null | null |
import contextlib
import os
from collections import namedtuple
import yaml
from dagster import __version__ as current_dagster_version
from dagster import check
from .ecr import ecr_image, get_aws_account_id, get_aws_region
from .utils import (
execute_docker_build,
execute_docker_push,
execute_docker_tag,
python_version_image_tag,
)
# Default repository prefix used for local images
DEFAULT_LOCAL_PREFIX = "dagster"
# Location of the template assets used here
IMAGES_PATH = os.path.join(os.path.dirname(__file__), "images")
@contextlib.contextmanager
def do_nothing(_cwd):
yield
class DagsterDockerImage(namedtuple("_DagsterDockerImage", "image build_cm path")):
"""Represents a Dagster image.
Properties:
image (str): Name of the image
build_cm (function): function that is a context manager for build (e.g. for populating a
build cache)
        path (Optional(str)): The path to the image's directory. Defaults to docker/images/<IMAGE NAME>
"""
def __new__(cls, image, build_cm=do_nothing, path=None):
return super(DagsterDockerImage, cls).__new__(
cls,
check.str_param(image, "image"),
check.callable_param(build_cm, "build_cm"),
check.opt_str_param(
path, "path", default=os.path.join(os.path.dirname(__file__), "images", image)
),
)
@property
def python_versions(self):
"""List of Python versions supported for this image."""
with open(os.path.join(self.path, "versions.yaml"), "r") as f:
versions = yaml.safe_load(f.read())
return list(versions.keys())
def _get_last_updated_for_python_version(self, python_version):
"""Retrieve the last_updated timestamp for a particular python_version of this image."""
check.str_param(python_version, "python_version")
with open(os.path.join(self.path, "last_updated.yaml"), "r") as f:
last_updated = yaml.safe_load(f.read())
return last_updated[python_version]
def _set_last_updated_for_python_version(self, timestamp, python_version):
"""Update the last_updated timestamp for a particular python_version of this image."""
check.str_param(timestamp, "timestamp")
check.str_param(python_version, "python_version")
last_updated = {}
last_updated_path = os.path.join(self.path, "last_updated.yaml")
if os.path.exists(last_updated_path):
with open(last_updated_path, "r") as f:
last_updated = yaml.safe_load(f.read())
last_updated[python_version] = timestamp
with open(os.path.join(self.path, "last_updated.yaml"), "w") as f:
yaml.dump(last_updated, f, default_flow_style=False)
def local_image(self, python_version):
"""Generates the local image name, like: "dagster/foo:some-tag" """
check.str_param(python_version, "python_version")
last_updated = self._get_last_updated_for_python_version(python_version)
tag = python_version_image_tag(python_version, last_updated)
return "{}/{}:{}".format(DEFAULT_LOCAL_PREFIX, self.image, tag)
def aws_image(self, python_version=None, custom_tag=None):
"""Generates the AWS ECR image name, like:
"1234567890.dkr.ecr.us-west-1.amazonaws.com/foo:some-tag"
"""
check.invariant(not (python_version and custom_tag))
check.opt_str_param(python_version, "python_version")
check.opt_str_param(custom_tag, "custom_tag")
if python_version:
last_updated = self._get_last_updated_for_python_version(python_version)
tag = python_version_image_tag(python_version, last_updated)
else:
tag = custom_tag
return ecr_image(
self.image,
tag,
aws_account_id=get_aws_account_id(),
aws_region=get_aws_region(),
)
def _get_docker_args(self, python_version):
"""Retrieve Docker arguments from this image's versions.yaml, and update with latest Dagster
version.
Also, we allow references in the image versions.yaml to another Dagster image to use as a
base image. If defined, set the BASE_IMAGE Docker arg from the full name of the parent
image.
"""
with open(os.path.join(self.path, "versions.yaml"), "r") as f:
versions = yaml.safe_load(f.read())
image_info = versions.get(python_version, {})
docker_args = image_info.get("docker_args", {})
if "base_image" in image_info:
check.invariant(
"BASE_IMAGE" not in docker_args, "Cannot override an existing BASE_IMAGE"
)
base_image = DagsterDockerImage(image_info["base_image"]["name"])
source = image_info["base_image"]["source"]
if source == "aws":
docker_args["BASE_IMAGE"] = base_image.aws_image(python_version)
elif source == "local":
docker_args["BASE_IMAGE"] = base_image.local_image(python_version)
else:
raise Exception("Unrecognized source {}".format(source))
# Set Dagster version
docker_args["DAGSTER_VERSION"] = current_dagster_version
return docker_args
def build(self, timestamp, dagster_version, python_version):
check.str_param(timestamp, "timestamp")
check.str_param(python_version, "python_version")
check.invariant(
dagster_version == current_dagster_version,
desc="Current dagster version ({}) does not match provided arg ({})".format(
current_dagster_version, dagster_version
),
)
with self.build_cm(self.path):
self._set_last_updated_for_python_version(timestamp, python_version)
execute_docker_build(
self.local_image(python_version),
docker_args=self._get_docker_args(python_version),
cwd=self.path,
)
def push(self, python_version, custom_tag=None):
"""Push this image to ECR."""
if custom_tag:
execute_docker_tag(
self.local_image(python_version),
self.aws_image(python_version=None, custom_tag=custom_tag),
)
execute_docker_push(self.aws_image(python_version=None, custom_tag=custom_tag))
else:
execute_docker_tag(self.local_image(python_version), self.aws_image(python_version))
execute_docker_push(self.aws_image(python_version))
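# Usage sketch (illustrative; the image name and timestamp are assumptions,
# and build/push require a Docker daemon and ECR credentials):
# image = DagsterDockerImage("buildkite-test")
# for python_version in image.python_versions:
#     image.build(
#         timestamp="20220101T000000",
#         dagster_version=current_dagster_version,
#         python_version=python_version,
#     )
#     image.push(python_version)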
| 38.569767
| 100
| 0.655412
| 832
| 6,634
| 4.925481
| 0.182692
| 0.149097
| 0.039531
| 0.044412
| 0.395803
| 0.351147
| 0.310639
| 0.293558
| 0.255002
| 0.245974
| 0
| 0.002207
| 0.248568
| 6,634
| 171
| 101
| 38.795322
| 0.81986
| 0.162345
| 0
| 0.219298
| 0
| 0
| 0.08967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.070175
| 0.008772
| 0.219298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b8aba13af33d7534f429cc7d5eda4e95f58299
| 13,716
|
py
|
Python
|
chrome/test/telemetry/chromeos/login_unittest.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231
|
2015-01-08T09:04:44.000Z
|
2021-12-30T03:03:10.000Z
|
chrome/test/telemetry/chromeos/login_unittest.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-02-10T21:00:08.000Z
|
2018-03-20T05:09:50.000Z
|
chrome/test/telemetry/chromeos/login_unittest.py
|
Fusion-Rom/android_external_chromium_org
|
d8b126911c6ea9753e9f526bee5654419e1d0ebd
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268
|
2015-01-21T05:53:28.000Z
|
2022-03-25T22:09:01.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import exceptions
from telemetry.core import extension_to_load
from telemetry.core import util
from telemetry.core.backends.chrome import cros_interface
from telemetry.unittest import options_for_unittests
class CrOSAutoTest(unittest.TestCase):
def setUp(self):
options = options_for_unittests.GetCopy()
self._cri = cros_interface.CrOSInterface(options.cros_remote,
options.cros_ssh_identity)
self._is_guest = options.browser_type == 'cros-chrome-guest'
self._username = '' if self._is_guest else options.browser_options.username
self._password = options.browser_options.password
def _IsCryptohomeMounted(self):
"""Returns True if cryptohome is mounted"""
cryptohomeJSON, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome',
'--action=status'])
cryptohomeStatus = json.loads(cryptohomeJSON)
return (cryptohomeStatus['mounts'] and
cryptohomeStatus['mounts'][0]['mounted'])
def _CreateBrowser(self, autotest_ext=False, auto_login=True):
"""Finds and creates a browser for tests. if autotest_ext is True,
also loads the autotest extension"""
options = options_for_unittests.GetCopy()
if autotest_ext:
extension_path = os.path.join(os.path.dirname(__file__), 'autotest_ext')
self._load_extension = extension_to_load.ExtensionToLoad(
path=extension_path,
browser_type=options.browser_type,
is_component=True)
options.extensions_to_load = [self._load_extension]
browser_to_create = browser_finder.FindBrowser(options)
self.assertTrue(browser_to_create)
options.browser_options.create_browser_with_oobe = True
options.browser_options.auto_login = auto_login
b = browser_to_create.Create()
b.Start()
return b
def _GetAutotestExtension(self, browser):
"""Returns the autotest extension instance"""
extension = browser.extensions[self._load_extension]
self.assertTrue(extension)
return extension
def _GetLoginStatus(self, browser):
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof('chrome.autotestPrivate') != 'undefined'"))
extension.ExecuteJavaScript('''
window.__login_status = null;
chrome.autotestPrivate.loginStatus(function(s) {
window.__login_status = s;
});
''')
return util.WaitFor(
lambda: extension.EvaluateJavaScript('window.__login_status'), 10)
def testCryptohomeMounted(self):
"""Verifies cryptohome mount status for regular and guest user and when
logged out"""
with self._CreateBrowser() as b:
self.assertEquals(1, len(b.tabs))
self.assertTrue(b.tabs[0].url)
self.assertTrue(self._IsCryptohomeMounted())
chronos_fs = self._cri.FilesystemMountedAt('/home/chronos/user')
self.assertTrue(chronos_fs)
if self._is_guest:
self.assertEquals(chronos_fs, 'guestfs')
else:
home, _ = self._cri.RunCmdOnDevice(['/usr/sbin/cryptohome-path',
'user', self._username])
self.assertEquals(self._cri.FilesystemMountedAt(home.rstrip()),
chronos_fs)
self.assertFalse(self._IsCryptohomeMounted())
self.assertEquals(self._cri.FilesystemMountedAt('/home/chronos/user'),
'/dev/mapper/encstateful')
def testLoginStatus(self):
"""Tests autotestPrivate.loginStatus"""
with self._CreateBrowser(autotest_ext=True) as b:
login_status = self._GetLoginStatus(b)
self.assertEquals(type(login_status), dict)
self.assertEquals(not self._is_guest, login_status['isRegularUser'])
self.assertEquals(self._is_guest, login_status['isGuest'])
self.assertEquals(login_status['email'], self._username)
self.assertFalse(login_status['isScreenLocked'])
def _IsScreenLocked(self, browser):
return self._GetLoginStatus(browser)['isScreenLocked']
def _LockScreen(self, browser):
self.assertFalse(self._IsScreenLocked(browser))
extension = self._GetAutotestExtension(browser)
self.assertTrue(extension.EvaluateJavaScript(
"typeof chrome.autotestPrivate.lockScreen == 'function'"))
logging.info('Locking screen')
extension.ExecuteJavaScript('chrome.autotestPrivate.lockScreen();')
logging.info('Waiting for the lock screen')
def ScreenLocked():
return (browser.oobe and
browser.oobe.EvaluateJavaScript("typeof Oobe == 'function'") and
browser.oobe.EvaluateJavaScript(
"typeof Oobe.authenticateForTesting == 'function'"))
util.WaitFor(ScreenLocked, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _AttemptUnlockBadPassword(self, browser):
logging.info('Trying a bad password')
def ErrorBubbleVisible():
return not browser.oobe.EvaluateJavaScript('''
document.getElementById('bubble').hidden
''')
self.assertFalse(ErrorBubbleVisible())
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', 'bad');
''' % self._username)
util.WaitFor(ErrorBubbleVisible, 10)
self.assertTrue(self._IsScreenLocked(browser))
def _UnlockScreen(self, browser):
logging.info('Unlocking')
browser.oobe.ExecuteJavaScript('''
Oobe.authenticateForTesting('%s', '%s');
''' % (self._username, self._password))
util.WaitFor(lambda: not browser.oobe, 10)
self.assertFalse(self._IsScreenLocked(browser))
def testScreenLock(self):
"""Tests autotestPrivate.screenLock"""
with self._CreateBrowser(autotest_ext=True) as browser:
self._LockScreen(browser)
self._AttemptUnlockBadPassword(browser)
self._UnlockScreen(browser)
def testLogout(self):
"""Tests autotestPrivate.logout"""
with self._CreateBrowser(autotest_ext=True) as b:
extension = self._GetAutotestExtension(b)
try:
extension.ExecuteJavaScript('chrome.autotestPrivate.logout();')
except (exceptions.BrowserConnectionGoneException,
exceptions.BrowserGoneException):
pass
util.WaitFor(lambda: not self._IsCryptohomeMounted(), 20)
def _SwitchRegion(self, region):
self._cri.RunCmdOnDevice(['stop', 'ui'])
# Change VPD (requires RW-enabled firmware).
# To save time, region and initial_timezone are not set.
vpd = {'initial_locale': region.language_code,
'keyboard_layout': region.keyboard}
for (key, value) in vpd.items():
self._cri.RunCmdOnDevice(['vpd', '-s', '"%s"="%s"' % (key, value)])
# Remove cached files to clear initial locale info and force regeneration.
self._cri.RunCmdOnDevice(['rm', '/home/chronos/Local\ State'])
self._cri.RunCmdOnDevice(['rm', '/home/chronos/.oobe_completed'])
self._cri.RunCmdOnDevice(['dump_vpd_log', '--force'])
self._cri.RunCmdOnDevice(['start', 'ui'])
def _OobeHasOption(self, browser, selectId, value):
hasOptionJs = '''
// Check that the option is present, and selected if it is the default.
(function hasOption(selectId, value, isDefault) {
var options = document.getElementById(selectId).options;
for (var i = 0; i < options.length; i++) {
if (options[i].value == value) {
// The option is present. Make sure it's selected if necessary.
return !isDefault || options.selectedIndex == i;
}
}
return false;
})("%s", "%s", %s);
'''
return browser.oobe.EvaluateJavaScript(
hasOptionJs % (selectId, value, 'true'))
def _ResolveLanguage(self, locale):
# If the locale matches a language but not the country, fall back to
# an existing locale. See ui/base/l10n/l10n_util.cc.
lang, _, region = map(str.lower, locale.partition('-'))
if not region:
return ""
# Map from other countries to a localized country
if lang == 'es' and region == 'es':
return 'es-419'
if lang == 'zh':
if region in ('hk', 'mo'):
return 'zh-TW'
return 'zh-CN'
if lang == 'en':
if region in ('au', 'ca', 'nz', 'za'):
return 'en-GB'
return 'en-US'
# No mapping found
return ""
def testOobeLocalization(self):
"""Tests different region configurations at OOBE"""
# Save the original device localization settings.
# To save time, only read initial_locale and keyboard_layout.
initial_region = self.Region('', '', '', '', '')
initial_region.language_code, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'initial_locale'])
initial_region.keyboard, _ = self._cri.RunCmdOnDevice(
['vpd', '-g', 'keyboard_layout'])
for region in self.REGIONS_LIST:
self._SwitchRegion(region)
with self._CreateBrowser(auto_login=False) as browser:
# Ensure the dropdown lists have been created.
util.WaitFor(lambda: browser.oobe.EvaluateJavaScript(
'document.getElementById("language-select") != null'),
10)
# Find the language, or an acceptable fallback value.
languageFound = self._OobeHasOption(browser,
'language-select',
region.language_code)
if not languageFound:
fallback = self._ResolveLanguage(region.language_code)
self.assertTrue(fallback and
self._OobeHasOption(browser,
'language-select',
fallback))
# Find the keyboard layout.
self.assertTrue(self._OobeHasOption(
browser, 'keyboard-select', region.keyboard))
# Test is finished. Restore original region settings.
self._SwitchRegion(initial_region)
# The Region class and region list will be available in regions.py.
class Region(object):
def __init__(self, region_code, keyboard, time_zone, language_code,
keyboard_mechanical_layout, description=None, notes=None):
self.region_code = region_code
self.keyboard = keyboard
self.time_zone = time_zone
self.language_code = language_code
self.keyboard_mechanical_layout = keyboard_mechanical_layout
self.description = description or region_code
self.notes = notes
class Enum(frozenset):
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
KeyboardMechanicalLayout = Enum(['ANSI', 'ISO', 'JIS', 'ABNT2'])
_KML = KeyboardMechanicalLayout
REGIONS_LIST = [
Region('au', 'xkb:us::eng', 'Australia/Sydney', 'en-AU', _KML.ANSI,
'Australia'),
Region('ca.ansi', 'xkb:us::eng', 'America/Toronto', 'en-CA', _KML.ANSI,
'Canada (US keyboard)',
'Canada with US (ANSI) keyboard; see http://goto/cros-canada'),
Region('ca.fr', 'xkb:ca::fra', 'America/Toronto', 'fr-CA', _KML.ISO,
'Canada (French keyboard)',
('Canadian French (ISO) keyboard. The most common configuration for '
'Canadian French SKUs. See http://goto/cros-canada')),
Region('ca.hybrid', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', _KML.ISO,
'Canada (hybrid)',
('Canada with hybrid xkb:ca:eng:eng + xkb:ca::fra keyboard (ISO), '
'defaulting to English language and keyboard. Used only if there '
'needs to be a single SKU for all of Canada. See '
'http://goto/cros-canada')),
Region('ca.multix', 'xkb:ca:multix:fra', 'America/Toronto', 'fr-CA',
_KML.ISO, 'Canada (multilingual)',
("Canadian Multilingual keyboard; you probably don't want this. See "
"http://goto/cros-canada")),
Region('de', 'xkb:de::ger', 'Europe/Berlin', 'de', _KML.ISO, 'Germany'),
Region('fi', 'xkb:fi::fin', 'Europe/Helsinki', 'fi', _KML.ISO, 'Finland'),
Region('fr', 'xkb:fr::fra', 'Europe/Paris', 'fr', _KML.ISO, 'France'),
Region('gb', 'xkb:gb:extd:eng', 'Europe/London', 'en-GB', _KML.ISO, 'UK'),
Region('ie', 'xkb:gb:extd:eng', 'Europe/Dublin', 'en-GB', _KML.ISO,
'Ireland'),
Region('in', 'xkb:us::eng', 'Asia/Calcutta', 'en-US', _KML.ANSI, 'India'),
Region('my', 'xkb:us::eng', 'Asia/Kuala_Lumpur', 'ms', _KML.ANSI,
'Malaysia'),
Region('nl', 'xkb:us:intl:eng', 'Europe/Amsterdam', 'nl', _KML.ANSI,
'Netherlands'),
Region('nordic', 'xkb:se::swe', 'Europe/Stockholm', 'en-US', _KML.ISO,
'Nordics',
('Unified SKU for Sweden, Norway, and Denmark. This defaults '
'to Swedish keyboard layout, but starts with US English language '
'for neutrality. Use if there is a single combined SKU for Nordic '
'countries.')),
Region('se', 'xkb:se::swe', 'Europe/Stockholm', 'sv', _KML.ISO, 'Sweden',
("Use this if there separate SKUs for Nordic countries (Sweden, "
"Norway, and Denmark), or the device is only shipping to Sweden. "
"If there is a single unified SKU, use 'nordic' instead.")),
Region('sg', 'xkb:us::eng', 'Asia/Singapore', 'en-GB', _KML.ANSI,
'Singapore'),
Region('us', 'xkb:us::eng', 'America/Los_Angeles', 'en-US', _KML.ANSI,
'United States'),
]
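# Quick reference (added, illustrative): _ResolveLanguage falls back to a
# broader localized variant when only the language part matches, e.g.
#   'en-au' -> 'en-GB',  'zh-hk' -> 'zh-TW',  'es-es' -> 'es-419'
# and returns "" for locales without a region or without a known mapping.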
| 42.203077
| 80
| 0.646544
| 1,523
| 13,716
| 5.680893
| 0.266579
| 0.011327
| 0.024272
| 0.010633
| 0.196486
| 0.132224
| 0.067846
| 0.043227
| 0.026583
| 0.026583
| 0
| 0.002644
| 0.227763
| 13,716
| 324
| 81
| 42.333333
| 0.814199
| 0.091936
| 0
| 0.07722
| 0
| 0
| 0.269976
| 0.044956
| 0
| 0
| 0
| 0
| 0.092664
| 1
| 0.081081
| false
| 0.023166
| 0.03861
| 0.011583
| 0.212355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86b8d88ae37a5647339fb11a5a98693e6a0c570d
| 790
|
py
|
Python
|
generator/database.py
|
Neotrinost/Neotrinost.ir
|
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
|
[
"MIT"
] | 4
|
2021-05-02T17:35:30.000Z
|
2021-11-08T12:55:14.000Z
|
generator/database.py
|
Neotrinost/Flask_Neotrinost
|
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
|
[
"MIT"
] | 4
|
2021-07-12T19:08:01.000Z
|
2021-08-13T19:37:50.000Z
|
generator/database.py
|
Neotrinost/Neotrinost.ir
|
f501b8cf410c1e6ec6cc4e5fce935147b8be1e61
|
[
"MIT"
] | 2
|
2021-08-08T15:10:07.000Z
|
2021-11-15T08:59:22.000Z
|
import sqlite3
class Database:
def get_connection(self):
return sqlite3.connect("./db.sqlite")
def add_card(self, card_title, card_text, card_link_text, card_link_url):
con = self.get_connection()
cur = con.cursor()
create_table_query = "CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR," + \
" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )"
insert_data_query = f"INSERT INTO " + \
f"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})"
try:
cur.execute(create_table_query)
cur.execute(insert_data_query)
con.commit()
except:
print("an error has been occurred !")
| 35.909091
| 106
| 0.596203
| 96
| 790
| 4.604167
| 0.458333
| 0.108597
| 0.135747
| 0.108597
| 0.180995
| 0.180995
| 0.180995
| 0.180995
| 0.180995
| 0.180995
| 0
| 0.00361
| 0.298734
| 790
| 21
| 107
| 37.619048
| 0.794224
| 0
| 0
| 0
| 0
| 0
| 0.316456
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0.058824
| 0.294118
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86babfbac8b5c2af0dd5e02e52be427fd0ffce35
| 3,688
|
py
|
Python
|
crits/backdoors/forms.py
|
frbapolkosnik/crits
|
1278c034f2238e2fe34e65e32ce241128a014df2
|
[
"MIT"
] | 22
|
2015-01-14T19:49:32.000Z
|
2022-01-26T12:18:52.000Z
|
crits/backdoors/forms.py
|
frbapolkosnik/crits
|
1278c034f2238e2fe34e65e32ce241128a014df2
|
[
"MIT"
] | null | null | null |
crits/backdoors/forms.py
|
frbapolkosnik/crits
|
1278c034f2238e2fe34e65e32ce241128a014df2
|
[
"MIT"
] | 6
|
2015-01-22T21:25:52.000Z
|
2021-04-12T23:24:14.000Z
|
from django import forms
from django.forms.utils import ErrorList
from crits.campaigns.campaign import Campaign
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names, get_source_names
from crits.core.user_tools import get_user_organization
from crits.core import form_consts
from crits.vocabulary.relationships import RelationshipTypes
relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
class AddBackdoorForm(forms.Form):
"""
Django form for adding a Backdoor to CRITs.
"""
error_css_class = 'error'
required_css_class = 'required'
name = forms.CharField(label=form_consts.Backdoor.NAME, required=True)
aliases = forms.CharField(label=form_consts.Backdoor.ALIASES,
required=False)
version = forms.CharField(label=form_consts.Backdoor.VERSION,
required=False)
description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION,
required=False)
campaign = forms.ChoiceField(widget=forms.Select,
label=form_consts.Backdoor.CAMPAIGN,
required=False)
confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE,
required=False)
source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}),
label=form_consts.Backdoor.SOURCE,
required=True)
source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD,
required=False)
source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Backdoor.SOURCE_REFERENCE,
required=False)
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, username, *args, **kwargs):
super(AddBackdoorForm, self).__init__(*args, **kwargs)
self.fields['campaign'].choices = [('', '')] + [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['confidence'].choices = [
('', ''),
('low', 'low'),
('medium', 'medium'),
('high', 'high')]
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
add_bucketlist_to_form(self)
add_ticket_to_form(self)
def clean(self):
cleaned_data = super(AddBackdoorForm, self).clean()
campaign = cleaned_data.get('campaign')
if campaign:
confidence = cleaned_data.get('confidence')
if not confidence or confidence == '':
self._errors.setdefault('confidence', ErrorList())
self._errors['confidence'].append(u'This field is required if campaign is specified.')
return cleaned_data
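# Usage sketch (illustrative; requires a configured CRITs environment, which
# populates the campaign and source choices, and a valid username):
# form = AddBackdoorForm(
#     "analyst1",
#     data={"name": "ExampleBackdoor", "source": "SomeSource"},
# )
# if form.is_valid():
#     cleaned = form.cleaned_data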
| 44.97561
| 117
| 0.629067
| 386
| 3,688
| 5.821244
| 0.240933
| 0.057855
| 0.080107
| 0.092123
| 0.274588
| 0.189141
| 0.091678
| 0.091678
| 0.091678
| 0.068536
| 0
| 0.00074
| 0.267354
| 3,688
| 81
| 118
| 45.530864
| 0.830866
| 0.011659
| 0
| 0.112903
| 0
| 0
| 0.063929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.129032
| 0
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86bb18dffc0306993885a2bc13f98c2bb5b4a5b0
| 7,471
|
py
|
Python
|
src/aprl/agents/monte_carlo.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 211
|
2019-02-22T08:07:25.000Z
|
2022-03-14T10:44:20.000Z
|
src/aprl/agents/monte_carlo.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 51
|
2019-02-08T01:39:49.000Z
|
2022-02-15T21:21:46.000Z
|
src/aprl/agents/monte_carlo.py
|
fkamrani/adversarial-policies
|
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
|
[
"MIT"
] | 41
|
2019-04-23T05:01:49.000Z
|
2022-03-16T06:51:19.000Z
|
"""Monte Carlo receding horizon control."""
from abc import ABC, abstractmethod
from multiprocessing import Pipe, Process
import gym
from stable_baselines.common.vec_env import CloudpickleWrapper
from aprl.common.mujoco import MujocoState, ResettableEnv
class MujocoResettableWrapper(ResettableEnv, gym.Wrapper):
"""Converts a MujocoEnv into a ResettableEnv.
Note all MuJoCo environments are resettable."""
def __init__(self, env):
"""Wraps a MujocoEnv, adding get_state and set_state methods.
:param env: a MujocoEnv. NOTE: it must not be wrapped in a TimeLimit."""
if hasattr(env, "_max_episode_steps"):
raise TypeError(
"Environment must not have a time limit " "(try passing in env.unwrapped instead)."
)
gym.Wrapper.__init__(self, env)
self.sim = env.unwrapped.sim
def get_state(self):
"""Serializes the qpos and qvel state of the MuJoCo emulator."""
return MujocoState.from_mjdata(self.sim.data).flatten()
def set_state(self, x):
"""Restores qpos and qvel, calling forward() to derive other values."""
state = MujocoState.from_flattened(x, self.sim)
state.set_mjdata(self.sim.data)
self.sim.forward() # put mjData in consistent state
def reset(self):
"""See base class."""
return self.env.reset()
def step(self, a):
"""See base class."""
return self.env.step(a)
class MonteCarlo(ABC):
"""Selects an action for a ResettableEnv by random search. Randomly samples
fixed-length sequences of actions. Evaluates each trajectory in the
environment, resetting the state to the original after each trajectory."""
@abstractmethod
def __init__(self, horizon, trajectories):
"""Constructs a MonteCarlo instance for env.
:param horizon: the length of the trajectories to search over.
:param trajectories: the number of trajectories to evaluate."""
self.horizon = horizon
self.trajectories = trajectories
@abstractmethod
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
pass
@abstractmethod
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
Generates self.trajectories action sequences, each of length
self.horizon. The cumulative reward of each action sequence is computed,
starting from state. The function returns the first action and the
cumulative reward of the action sequences with the largest cumulative
reward.
:param state: a value returned by env.get_state().
:return (action, reward): the best action found and associated reward."""
pass
class MonteCarloSingle(MonteCarlo):
"""Selects an action for a ResettableEnv by random search.
See base class for details. This implementation is not parallelized."""
def __init__(self, env, horizon, trajectories):
"""See base class."""
super().__init__(horizon, trajectories)
self.env = env
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
self.env.action_space.np_random.seed(seed)
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
See base class for details.
Search takes place in a single environment, which is reset to state
before evaluating each action sequence."""
res = []
for _ in range(self.trajectories):
self.env.set_state(state)
us = [self.env.action_space.sample() for _ in range(self.horizon)]
total_rew = 0
for u in us:
_ob, rew, done, _info = self.env.step(u)
total_rew += rew
if done:
break
res.append((us[0], total_rew))
self.env.set_state(state)
best = max(res, key=lambda x: x[1])
return best
def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories):
parent_remote.close()
dynamics = dynamic_fn_wrapper.var()
dynamics.reset()
mc = MonteCarloSingle(dynamics, horizon, trajectories)
try:
while True:
cmd, x = remote.recv()
if cmd == "seed":
mc.seed(x)
elif cmd == "search":
best_u, best_r = mc.best_action(x)
remote.send((best_u, best_r))
elif cmd == "close":
remote.close()
break
else:
raise NotImplementedError
except KeyboardInterrupt:
print("MonteCarloParallel worker: got KeyboardInterrupt")
finally:
dynamics.close()
class MonteCarloParallel(MonteCarlo):
"""Like MonteCarlo, but performs the random search in parallel."""
# This implementation is inspired by Baselines SubprocVecEnv.
def __init__(self, env_fns, horizon, trajectories, seed=0):
"""Launch subprocess workers and store configuration parameters.
:param env_fns (list<()->ResettableEnv>): list of thunks.
:param horizon (int): length of trajectories to search over.
:param trajectories (int): minimum number of trajectories to evaluate.
It will be rounded up to the nearest multiple of len(make_env)."""
super().__init__(horizon, trajectories)
nremotes = len(env_fns)
# Integer ceiling of self.trajectories / nworkers
traj_per_worker = (self.trajectories - 1) // nremotes + 1
pipes = [Pipe() for _ in range(nremotes)]
self.remotes, self.work_remotes = zip(*pipes)
worker_cfgs = zip(self.work_remotes, self.remotes, env_fns)
self.ps = []
for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs):
args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker)
process = Process(target=_worker, args=args)
process.daemon = True
# If the main process crashes, we should not cause things to hang
process.start()
self.ps.append(process)
for remote in self.work_remotes:
remote.close()
def seed(self, seed):
"""See base class."""
for i, remote in enumerate(self.remotes):
remote.send(("seed", seed + i))
def best_action(self, state):
"""Returns the best action out of a random search of action sequences."""
for remote in self.remotes:
remote.send(("search", state))
results = [remote.recv() for remote in self.remotes]
best = max(results, key=lambda x: x[1])
return best
def close(self):
"""Shuts down parallel workers."""
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
def receding_horizon(monte_carlo, env):
"""Receding horizon control
:param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone of env.
:param env(ResettableEnv): a resettable environment."""
while True:
state = env.get_state()
a, _seq_rew = monte_carlo.best_action(state)
ob, rew, done, info = env.step(a)
yield a, ob, rew, done, info
if done:
break
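# Usage sketch (illustrative): wrap an unwrapped MuJoCo gym env and run the
# controller. The environment id is an assumption; any MujocoEnv without a
# TimeLimit wrapper works.
def _demo_receding_horizon():
    env = MujocoResettableWrapper(gym.make("HalfCheetah-v2").unwrapped)
    env.reset()
    mc = MonteCarloSingle(env, horizon=5, trajectories=10)
    mc.seed(0)
    for action, ob, rew, done, info in receding_horizon(mc, env):
        print(rew)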
| 37.355
| 99
| 0.63191
| 923
| 7,471
| 5.010834
| 0.277356
| 0.018162
| 0.015568
| 0.012973
| 0.190054
| 0.154162
| 0.125622
| 0.111784
| 0.100973
| 0.081081
| 0
| 0.001293
| 0.275599
| 7,471
| 199
| 100
| 37.542714
| 0.853289
| 0.351492
| 0
| 0.241379
| 0
| 0
| 0.038158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146552
| false
| 0.025862
| 0.043103
| 0
| 0.267241
| 0.008621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86bb2ac534bb948d97b846d6681e205945c4c9dd
| 2,063
|
py
|
Python
|
machineLearnInAction/bayes.py
|
xuwening/tensorflowDemo
|
65687a61e16f947b7ec8a85d12213f954a71542b
|
[
"MIT"
] | null | null | null |
machineLearnInAction/bayes.py
|
xuwening/tensorflowDemo
|
65687a61e16f947b7ec8a85d12213f954a71542b
|
[
"MIT"
] | null | null | null |
machineLearnInAction/bayes.py
|
xuwening/tensorflowDemo
|
65687a61e16f947b7ec8a85d12213f954a71542b
|
[
"MIT"
] | null | null | null |
import numpy as np
def loadDataSet():
postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......]
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
classVec = [0, 1, 0, 1, 0, 1] # 1 is abusive, 0 not
return postingList, classVec
def createVocabList(dataSet):
vocabSet = set([])
for document in dataSet:
vocabSet = vocabSet | set(document)
return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
returnVec = [0] * len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else:
print('the word: %s is not in my vocabulary' % word)
return returnVec
def trainNB0(trainMatrix, trainCategory):
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
pAbusive = sum(trainCategory) / float(numTrainDocs)
p0Num = np.zeros(numWords)
p1Num = np.zeros(numWords)
p0Denom = 0.0
p1Denom = 0.0
for i in range(numTrainDocs):
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
p1Vect = p1Num / p1Denom
p0Vect = p0Num / p0Denom
return p0Vect, p1Vect, pAbusive
if __name__ == '__main__':
postinList, classVec = loadDataSet()
myVocabList = createVocabList(postinList)
# print(setOfWords2Vec(myVocabList, postinList[0]))
trainMat = []
for postinDoc in postinList:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
print(trainMat)
p0V, p1V, pAb = trainNB0(trainMat, classVec)
print(p0V, p1V, pAb)
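# Note (added): trainNB0 initializes counts with np.zeros, so any word absent
# from a class yields a zero probability that wipes out the whole product of
# likelihoods. The usual fix is Laplace smoothing with log-probabilities,
# sketched here:
def trainNB0_smoothed(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    p0Num = np.ones(numWords)   # start counts at 1
    p1Num = np.ones(numWords)
    p0Denom = 2.0               # and denominators at 2
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # log-probabilities avoid underflow when many terms are multiplied
    return np.log(p0Num / p0Denom), np.log(p1Num / p1Denom), pAbusive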
| 31.738462
| 97
| 0.573921
| 220
| 2,063
| 5.345455
| 0.413636
| 0.006803
| 0.005102
| 0.006803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031292
| 0.271934
| 2,063
| 65
| 98
| 31.738462
| 0.751664
| 0.042172
| 0
| 0.04
| 0
| 0
| 0.11404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.02
| 0
| 0.18
| 0.06
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86bd7ed417f64120a297b91ba487086bf72ccb3f
| 2,328
|
py
|
Python
|
cacheable/adapter/PeeweeAdapter.py
|
d1hotpep/cacheable
|
9ea97d6504965179f8fe495b67e466c068719445
|
[
"MIT"
] | null | null | null |
cacheable/adapter/PeeweeAdapter.py
|
d1hotpep/cacheable
|
9ea97d6504965179f8fe495b67e466c068719445
|
[
"MIT"
] | null | null | null |
cacheable/adapter/PeeweeAdapter.py
|
d1hotpep/cacheable
|
9ea97d6504965179f8fe495b67e466c068719445
|
[
"MIT"
] | null | null | null |
import peewee
import playhouse.kv
from time import time
from . import CacheableAdapter
class PeeweeAdapter(CacheableAdapter, peewee.Model):
key = peewee.CharField(max_length=256, unique=True)
value = playhouse.kv.JSONField()
mtime = peewee.IntegerField(default=time)
ttl = peewee.IntegerField(default=0)
class Meta:
database = peewee.Proxy()
def __init__(self, db_connection, table_name=None):
if table_name:
self._meta.db_table = table_name
self._meta.database.initialize(db_connection)
def multiget(self, keys):
cls = self.__class__
res = self.select(cls.key, cls.value) \
.where(cls.key << keys & self.__ttl_filter()) \
.tuples()
return { x[0] : x[1] for x in res }
@classmethod
def multiset(cls, data, ttl=None):
ts = int(time())
ttl = ttl or 0
kvs = []
for key, value in data.items():
kvs.append({
cls.key : key,
cls.value : value,
cls.mtime : ts,
cls.ttl : ttl,
})
cls.insert_many(kvs).upsert().execute()
    def delete(self, key_or_keys):
        if isinstance(key_or_keys, list):
            keys = key_or_keys
        else:
            keys = [key_or_keys]
cls = self.__class__
peewee.DeleteQuery(cls).where(cls.key << keys).execute()
def list(self, prefix=None, limit=None):
cls = self.__class__
q = self.select(cls.key, cls.value)
if prefix:
if self.__db_type() == peewee.SqliteDatabase:
wildcard = '*'
else:
wildcard = '%'
q = q.where(cls.key % ('%s%s' % (prefix, wildcard)))
q = q.where(self.__ttl_filter())
if limit:
q = q.limit(limit)
res = { x[0] : x[1] for x in q.tuples() }
if prefix:
res = { k[len(prefix):] : v for k, v in res.items() }
return res
def __ttl_filter(self):
"""
Add the TTL where clause to a query, to filter out stale results
"""
ts = int(time())
cls = self.__class__
        # parenthesise the equality: comparisons bind looser than `|` in
        # Python, so the unparenthesised form compared ttl to `0 | (...)`
        return (cls.ttl == 0) | (cls.mtime + cls.ttl > ts)
def __db_type(self):
return type(self._meta.database.obj)
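
# Usage sketch (assumptions: an on-disk SQLite file, the relative-import
# wiring of the surrounding `cacheable` package, and the peewee 2.x API this
# module targets -- insert_many(...).upsert() was removed in peewee 3):
#
#     db = peewee.SqliteDatabase('cache.db')
#     adapter = PeeweeAdapter(db, table_name='kv_cache')
#     adapter.create_table(True)                 # standard peewee Model method
#     PeeweeAdapter.multiset({'greeting': 'hello'}, ttl=60)
#     print(adapter.multiget(['greeting']))      # {'greeting': 'hello'}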
| 24.25
| 72
| 0.537371
| 287
| 2,328
| 4.170732
| 0.303136
| 0.030075
| 0.0401
| 0.028404
| 0.056809
| 0.056809
| 0.016708
| 0
| 0
| 0
| 0
| 0.006532
| 0.342354
| 2,328
| 95
| 73
| 24.505263
| 0.77531
| 0.027491
| 0
| 0.15873
| 0
| 0
| 0.002679
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.063492
| 0.015873
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86bf8dc5885e11ca632362fcec2e79f7e5e74050
| 6,006
|
py
|
Python
|
mmgen/models/architectures/arcface/helpers.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | null | null | null |
mmgen/models/architectures/arcface/helpers.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | null | null | null |
mmgen/models/architectures/arcface/helpers.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | null | null | null |
from collections import namedtuple
import torch
from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d,
Module, PReLU, ReLU, Sequential, Sigmoid)
# yapf: disable
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) # isort:skip # noqa
"""
# yapf: enable
class Flatten(Module):
"""Flatten Module."""
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
"""l2 normalization.
Args:
input (torch.Tensor): The input tensor.
axis (int, optional): Specifies which axis of input to calculate the
norm across. Defaults to 1.
Returns:
Tensor: Tensor after L2 normalization per-instance.
"""
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
"""A named tuple describing a ResNet block."""
def get_block(in_channel, depth, num_units, stride=2):
"""Get a single block config.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
num_units (int): Number of unit modules.
stride (int, optional): Conv2d stride. Defaults to 2.
Returns:
list: A list of unit modules' config.
"""
return [Bottleneck(in_channel, depth, stride)
] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
"""Get block configs of backbone.
Args:
num_layers (int): Number of ConvBlock layers in backbone.
Raises:
ValueError: `num_layers` must be one of [50, 100, 152].
Returns:
list: A list of block configs.
"""
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError(
'Invalid number of layers: {}. Must be one of [50, 100, 152]'.
format(num_layers))
return blocks
class SEModule(Module):
"""Squeeze-and-Excitation Modules.
Args:
channels (int): Input channels.
reduction (int): Intermediate channels reduction ratio.
"""
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels,
channels // reduction,
kernel_size=1,
padding=0,
bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction,
channels,
kernel_size=1,
padding=0,
bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
"""Forward Function."""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
"""Intermediate Resblock of bottleneck with SEModule.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth), SEModule(depth, 16))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
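
# Quick sanity-check sketch for the helpers above (uses only torch from the
# imports at the top of this file; the shapes are arbitrary illustrative
# choices, not values mandated by ArcFace):
if __name__ == '__main__':
    blocks = get_blocks(50)
    assert sum(len(block) for block in blocks) == 3 + 4 + 14 + 3
    unit = bottleneck_IR_SE(in_channel=64, depth=64, stride=2)
    x = torch.randn(2, 64, 56, 56)
    y = unit(x)                      # stride 2 halves H and W: [2, 64, 28, 28]
    feat = l2_norm(Flatten()(y))     # flatten, then unit L2 norm per instance
    print(y.shape, feat.shape)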
| 30.180905
| 106
| 0.585914
| 722
| 6,006
| 4.717452
| 0.202216
| 0.076629
| 0.057546
| 0.064886
| 0.534938
| 0.51145
| 0.502642
| 0.502642
| 0.469172
| 0.437463
| 0
| 0.041518
| 0.298202
| 6,006
| 198
| 107
| 30.333333
| 0.766548
| 0.241925
| 0
| 0.5
| 0
| 0
| 0.020306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.028846
| 0.009615
| 0.240385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86bfaf5a13f46371cddc52c365f2b99eb199e27e
| 1,694
|
py
|
Python
|
createplaylist.py
|
mahi0601/SpotifyPlaylist
|
55e30bb4c13f291693b892d6eeccc70b4a769805
|
[
"MIT"
] | 47
|
2020-09-21T11:35:10.000Z
|
2022-01-17T21:25:39.000Z
|
createplaylist.py
|
mahi0601/SpotifyPlaylist
|
55e30bb4c13f291693b892d6eeccc70b4a769805
|
[
"MIT"
] | 2
|
2021-03-31T17:02:24.000Z
|
2021-07-30T08:17:37.000Z
|
createplaylist.py
|
mahi0601/SpotifyPlaylist
|
55e30bb4c13f291693b892d6eeccc70b4a769805
|
[
"MIT"
] | 24
|
2020-09-21T16:45:38.000Z
|
2022-03-02T10:50:47.000Z
|
import os
from spotifyclient import SpotifyClient
def main():
spotify_client = SpotifyClient(os.getenv("SPOTIFY_AUTHORIZATION_TOKEN"),
os.getenv("SPOTIFY_USER_ID"))
# get last played tracks
num_tracks_to_visualise = int(input("How many tracks would you like to visualise? "))
last_played_tracks = spotify_client.get_last_played_tracks(num_tracks_to_visualise)
print(f"\nHere are the last {num_tracks_to_visualise} tracks you listened to on Spotify:")
for index, track in enumerate(last_played_tracks):
print(f"{index+1}- {track}")
# choose which tracks to use as a seed to generate a playlist
indexes = input("\nEnter a list of up to 5 tracks you'd like to use as seeds. Use indexes separated by a space: ")
indexes = indexes.split()
seed_tracks = [last_played_tracks[int(index)-1] for index in indexes]
# get recommended tracks based off seed tracks
recommended_tracks = spotify_client.get_track_recommendations(seed_tracks)
print("\nHere are the recommended tracks which will be included in your new playlist:")
for index, track in enumerate(recommended_tracks):
print(f"{index+1}- {track}")
# get playlist name from user and create playlist
playlist_name = input("\nWhat's the playlist name? ")
playlist = spotify_client.create_playlist(playlist_name)
print(f"\nPlaylist '{playlist.name}' was created successfully.")
# populate playlist with recommended tracks
spotify_client.populate_playlist(playlist, recommended_tracks)
print(f"\nRecommended tracks successfully uploaded to playlist '{playlist.name}'.")
if __name__ == "__main__":
main()
| 42.35
| 118
| 0.725502
| 230
| 1,694
| 5.143478
| 0.36087
| 0.086221
| 0.067625
| 0.050719
| 0.145393
| 0.104818
| 0.065934
| 0.065934
| 0
| 0
| 0
| 0.002909
| 0.188312
| 1,694
| 40
| 119
| 42.35
| 0.857455
| 0.128099
| 0
| 0.083333
| 0
| 0.041667
| 0.366168
| 0.035326
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.125
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c23c7616ed380cf3c80ae082afe689a1c8e0b9
| 7,318
|
py
|
Python
|
ConvDR/data/preprocess_cast19.py
|
blazejdolicki/CHEDAR
|
e4819775e7f6ffa2d6f1ad798ee262f01370b236
|
[
"MIT"
] | 1
|
2021-11-10T13:39:16.000Z
|
2021-11-10T13:39:16.000Z
|
ConvDR/data/preprocess_cast19.py
|
blazejdolicki/CHEDAR
|
e4819775e7f6ffa2d6f1ad798ee262f01370b236
|
[
"MIT"
] | null | null | null |
ConvDR/data/preprocess_cast19.py
|
blazejdolicki/CHEDAR
|
e4819775e7f6ffa2d6f1ad798ee262f01370b236
|
[
"MIT"
] | null | null | null |
import argparse
from trec_car import read_data
from tqdm import tqdm
import pickle
import os
import json
import copy
from utils.util import NUM_FOLD
def parse_sim_file(filename):
"""
Reads the deduplicated documents file and stores the
duplicate passage ids into a dictionary
"""
sim_dict = {}
lines = open(filename).readlines()
for line in lines:
data = line.strip().split(':')
if len(data[1]) > 0:
sim_docs = data[-1].split(',')
for docs in sim_docs:
sim_dict[docs] = 1
return sim_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--car_cbor", type=str)
parser.add_argument("--msmarco_collection", type=str)
parser.add_argument("--duplicate_file", type=str)
parser.add_argument("--cast_dir", type=str)
parser.add_argument("--out_data_dir", type=str)
parser.add_argument("--out_collection_dir", type=str)
args = parser.parse_args()
# INPUT
sim_file = args.duplicate_file
cast_topics_raw_file = os.path.join(args.cast_dir,
"evaluation_topics_v1.0.json")
cast_topics_manual_file = os.path.join(
args.cast_dir, "evaluation_topics_annotated_resolved_v1.0.tsv")
cast_qrels_file = os.path.join(args.cast_dir, "2019qrels.txt")
# OUTPUT
out_topics_file = os.path.join(args.out_data_dir, "eval_topics.jsonl")
out_raw_queries_file = os.path.join(args.out_data_dir, "queries.raw.tsv")
out_manual_queries_file = os.path.join(args.out_data_dir,
"queries.manual.tsv")
out_qrels_file = os.path.join(args.out_data_dir, "qrels.tsv")
car_id_to_idx_file = os.path.join(args.out_collection_dir,
"car_id_to_idx.pickle")
car_idx_to_id_file = os.path.join(args.out_collection_dir,
"car_idx_to_id.pickle")
out_collection_file = os.path.join(args.out_collection_dir,
"collection.tsv")
# 1. Combine TREC-CAR & MS MARCO, remove duplicate passages, assign new ids
car_id_to_idx = {}
car_idx_to_id = []
if os.path.exists(out_collection_file) and os.path.exists(
car_id_to_idx_file) and os.path.exists(car_idx_to_id_file):
print("Preprocessed collection found. Loading car_id_to_idx...")
with open(car_id_to_idx_file, "rb") as f:
car_id_to_idx = pickle.load(f)
else:
sim_dict = parse_sim_file(sim_file)
car_base_id = 10000000
i = 0
with open(out_collection_file, "w", encoding="utf-8") as f: #FIX change 'a' to 'w' in normal run
print("Processing TREC-CAR...")
for para in tqdm(
read_data.iter_paragraphs(open(args.car_cbor, 'rb'))):
car_id = "CAR_" + para.para_id
text = para.get_text()
text = text.replace("\t", " ").replace("\n",
" ").replace("\r", " ")
idx = car_base_id + i
car_id_to_idx[
car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044
car_idx_to_id.append(car_id)
f.write("{}\t{}\n".format(idx, text))
i += 1
print("Processing MS MARCO...")
removed = 0
with open(args.msmarco_collection, "r") as m:
for line in tqdm(m):
marco_id, text = line.strip().split("\t")
if ("MARCO_" + marco_id) in sim_dict:
removed += 1
continue
f.write("{}\t{}\n".format(marco_id, text))
print("Removed " + str(removed) + " passages")
print("Dumping id mappings to {} and {}...".format(car_id_to_idx_file, car_idx_to_id_file))
with open(car_id_to_idx_file, "wb") as f:
pickle.dump(car_id_to_idx, f)
with open(car_idx_to_id_file, "wb") as f:
pickle.dump(car_idx_to_id, f)
# 2. Process queries
print("Processing CAsT utterances...")
with open(cast_topics_raw_file, "r") as fin:
raw_data = json.load(fin)
with open(cast_topics_manual_file, "r") as fin:
annonated_lines = fin.readlines()
out_raw_queries = open(out_raw_queries_file, "w")
out_manual_queries = open(out_manual_queries_file, "w")
all_annonated = {}
for line in annonated_lines:
splitted = line.split('\t')
out_manual_queries.write(line)
topic_query = splitted[0]
query = splitted[1].strip()
topic_id = topic_query.split('_')[0]
query_id = topic_query.split('_')[1]
if topic_id not in all_annonated:
all_annonated[topic_id] = {}
all_annonated[topic_id][query_id] = query
out_manual_queries.close()
topic_number_dict = {}
data = []
for group in raw_data:
topic_number, description, turn, title = str(
group['number']), group.get('description',
''), group['turn'], group.get(
'title', '')
queries = []
for query in turn:
query_number, raw_utterance = str(
query['number']), query['raw_utterance']
queries.append(raw_utterance)
record = {}
record['topic_number'] = topic_number
record['query_number'] = query_number
record['description'] = description
record['title'] = title
record['input'] = copy.deepcopy(queries)
record['target'] = all_annonated[topic_number][query_number]
out_raw_queries.write("{}_{}\t{}\n".format(topic_number,
query_number,
raw_utterance))
            if topic_number not in topic_number_dict:
topic_number_dict[topic_number] = len(topic_number_dict)
data.append(record)
out_raw_queries.close()
with open(out_topics_file, 'w') as fout:
for item in data:
json_str = json.dumps(item)
fout.write(json_str + '\n')
# Split eval data into K-fold
topic_per_fold = len(topic_number_dict) // NUM_FOLD
for i in range(NUM_FOLD):
with open(out_topics_file + "." + str(i), 'w') as fout:
for item in data:
idx = topic_number_dict[item['topic_number']]
if idx // topic_per_fold == i:
json_str = json.dumps(item)
fout.write(json_str + '\n')
# 3. Process and convert qrels
print("Processing qrels...")
with open(cast_qrels_file, "r") as oq, open(out_qrels_file, "w") as nq:
for line in oq:
qid, _, pid, rel = line.strip().split()
if pid.startswith("CAR_"):
assert car_id_to_idx[pid] != -1
pid = car_id_to_idx[pid]
elif pid.startswith("MARCO_"):
pid = int(pid[6:])
else:
continue
nq.write(qid + "\t0\t" + str(pid) + "\t" + rel + "\n")
print("End")
| 39.556757
| 104
| 0.562859
| 929
| 7,318
| 4.138859
| 0.190527
| 0.020806
| 0.023667
| 0.03381
| 0.261899
| 0.173472
| 0.159428
| 0.105072
| 0.081665
| 0.041092
| 0
| 0.013696
| 0.321536
| 7,318
| 184
| 105
| 39.771739
| 0.760725
| 0.048237
| 0
| 0.064935
| 0
| 0
| 0.101903
| 0.010378
| 0
| 0
| 0
| 0
| 0.006494
| 1
| 0.006494
| false
| 0.006494
| 0.051948
| 0
| 0.064935
| 0.051948
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c253258ad8f50c39a576db2e17ac13da5ea1c7
| 15,207
|
py
|
Python
|
coord_convert/geojson_utils.py
|
brandonxiang/example-pyQGIS
|
a61d0321d223d0b82e44bb809521965858fde857
|
[
"MIT"
] | 3
|
2017-02-23T08:35:30.000Z
|
2018-12-11T05:50:54.000Z
|
coord_convert/geojson_utils.py
|
brandonxiang/example-pyQGIS
|
a61d0321d223d0b82e44bb809521965858fde857
|
[
"MIT"
] | null | null | null |
coord_convert/geojson_utils.py
|
brandonxiang/example-pyQGIS
|
a61d0321d223d0b82e44bb809521965858fde857
|
[
"MIT"
] | 2
|
2019-10-22T02:16:50.000Z
|
2020-09-28T11:37:48.000Z
|
__doc__ = 'github: https://github.com/brandonxiang/geojson-python-utils'
import math
from coordTransform_utils import wgs84togcj02
from coordTransform_utils import gcj02tobd09
def linestrings_intersect(line1, line2):
"""
    Check whether two geojson linestrings intersect each other.
    reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object
    if(line1 intersects with line2) return intersect point array else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
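# Example (worked by hand against the code above): two unit segments that
# cross at (0.5, 0.5). Internally index 1 of each coordinate pair is read
# as x, but this example is symmetric so the result is the same either way:
#
#     line_a = {'type': 'LineString', 'coordinates': [[0, 0], [1, 1]]}
#     line_b = {'type': 'LineString', 'coordinates': [[0, 1], [1, 0]]}
#     linestrings_intersect(line_a, line_b)
#     # -> [{'type': 'Point', 'coordinates': [0.5, 0.5]}]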
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
    check whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
    crossing-number (pnpoly) algorithm to judge whether the point lies inside the polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
    check whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
def point_in_multipolygon(point, multipoly):
"""
    check whether the point is located in a multipolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
"""
get a circle shape polygon based on centerPoint and radius
Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center point geojson object
    steps -- number of polygon vertices (minimum 15)
    return circle shape polygon geojson object
"""
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
# convert meters to radiant
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
"""
calculate the distance between two point on the sphere like google map
reference http://www.movable-type.co.uk/scripts/latlong.html
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
    return the distance between the two points in meters
"""
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
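# Example: one degree of latitude on the 6371 km sphere used here is about
# 111.19 km:
#
#     p1 = {'type': 'Point', 'coordinates': [0.0, 0.0]}
#     p2 = {'type': 'Point', 'coordinates': [0.0, 1.0]}
#     point_distance(p1, p2)   # ~111194.9 (meters)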
def geometry_within_radius(geometry, center, radius):
"""
    Check whether a point, linestring or polygon lies within a radius around a center
    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
pt -- polygon geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
    # materialise as a list of dicts: geojson points are dicts (not objects
    # with attributes), and len() is needed below, so a lazy Python 3 map
    # object would not do
    source_coord = [{"lng": o["coordinates"][0], "lat": o["coordinates"][1]}
                    for o in source]
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
    # check for simple cases
    count = len(source_coord)
    if count < 3:
        return source_coord  # one or two points
    # array of indexes of source points to include in the reduced line
    index = [0] * count
    # indices of start & end of working section, used as parallel stacks
    sig_start = [0] * count
    sig_end = [0] * count
    # more complex case. initialize stack
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)  # Now in degrees
    band_sqr *= band_sqr
    n_dest = 0
    sig_start[0] = 0
    sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
x12 = source[end]["lng"] - source[start]["lng"]
y12 = source[end]["lat"] - source[start]["lat"]
if math.fabs(x12) > 180.0:
x12 = 360.0 - math.fabs(x12)
x12 *= math.cos(F * (source[end]["lat"] + source[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
x13 = source[i]["lng"] - source[start]["lng"]
y13 = source[i]["lat"] - source[start]["lat"]
if math.fabs(x13) > 180.0:
x13 = 360.0 - math.fabs(x13)
x13 *= math.cos(F * (source[i]["lat"] + source[start]["lat"]))
d13 = (x13 * x13) + (y13 * y13)
x23 = source[i]["lng"] - source[end]["lng"]
y23 = source[i]["lat"] - source[end]["lat"]
if math.fabs(x23) > 180.0:
x23 = 360.0 - math.fabs(x23)
x23 *= math.cos(F * (source[i]["lat"] + source[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
    return [{"type": "Point", "coordinates": [o["lng"], o["lat"]]} for o in r]
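# Example (with the index fixes above): a three-point line whose middle
# kink is far below the 20 m default threshold collapses to its endpoints:
#
#     pts = [{'type': 'Point', 'coordinates': [0.0, 0.0]},
#            {'type': 'Point', 'coordinates': [0.5, 0.00001]},
#            {'type': 'Point', 'coordinates': [1.0, 0.0]}]
#     simplify(pts)   # keeps only the first and last point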
def wgs2gcj(geometry):
"""
convert wgs84 to gcj
referencing by https://github.com/wandergis/coordTransform_py
"""
# TODO: point linestring point
if geometry['type'] == 'MultiLineString':
coordinates = geometry['coordinates']
for lines in coordinates:
for line in lines:
line[0], line[1] = wgs84togcj02(line[0], line[1])
return geometry
def gcj2bd(geometry):
"""
convert gcj to bd
referencing by https://github.com/wandergis/coordTransform_py
"""
# TODO: point linestring point
if geometry['type'] == 'MultiLineString':
coordinates = geometry['coordinates']
for lines in coordinates:
for line in lines:
line[0], line[1] = gcj02tobd09(line[0], line[1])
return geometry
| 31.290123
| 125
| 0.572105
| 2,023
| 15,207
| 4.18784
| 0.158675
| 0.024551
| 0.008263
| 0.011213
| 0.283994
| 0.217068
| 0.180241
| 0.153447
| 0.128187
| 0.128187
| 0
| 0.046859
| 0.303939
| 15,207
| 485
| 126
| 31.354639
| 0.753519
| 0.260472
| 0
| 0.196154
| 0
| 0
| 0.069692
| 0
| 0
| 0
| 0
| 0.008247
| 0
| 1
| 0.073077
| false
| 0
| 0.011538
| 0
| 0.176923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c368ef733994c7aa8778c60fbe8e4bdf94dac9
| 347
|
py
|
Python
|
10_days_of_statistics_8_1.py
|
sercangul/HackerRank
|
e6d7056babe03baafee8d7f1cacdca7c28b72ded
|
[
"Apache-2.0"
] | null | null | null |
10_days_of_statistics_8_1.py
|
sercangul/HackerRank
|
e6d7056babe03baafee8d7f1cacdca7c28b72ded
|
[
"Apache-2.0"
] | null | null | null |
10_days_of_statistics_8_1.py
|
sercangul/HackerRank
|
e6d7056babe03baafee8d7f1cacdca7c28b72ded
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019
@author: sercangul
"""
n = 5
xy = [map(int, input().split()) for _ in range(n)]
sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x * y) for x, y in xy]))
b = (n * sxy - sx * sy) / (n * sx2 - sx**2)
a = (sy / n) - b * (sx / n)
print('{:.3f}'.format(a + b * 80))
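# The two lines above are the closed-form least-squares fit y = a + b*x:
# b = (n*Sxy - Sx*Sy) / (n*Sx2 - Sx**2) and a = ybar - b*xbar, evaluated
# here at x = 80.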
| 24.785714
| 71
| 0.501441
| 68
| 347
| 2.544118
| 0.632353
| 0.034682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078358
| 0.227666
| 347
| 14
| 72
| 24.785714
| 0.567164
| 0.285303
| 0
| 0
| 0
| 0
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c4016c71680c25695f7a5d4e332b95ab4759b0
| 450
|
py
|
Python
|
rlutils/gym/envs/reset_obs/hopper.py
|
vermouth1992/rl-util
|
4c06ab8f5c96a44e58f88cf30146bcb837057112
|
[
"Apache-2.0"
] | null | null | null |
rlutils/gym/envs/reset_obs/hopper.py
|
vermouth1992/rl-util
|
4c06ab8f5c96a44e58f88cf30146bcb837057112
|
[
"Apache-2.0"
] | null | null | null |
rlutils/gym/envs/reset_obs/hopper.py
|
vermouth1992/rl-util
|
4c06ab8f5c96a44e58f88cf30146bcb837057112
|
[
"Apache-2.0"
] | null | null | null |
import gym.envs.mujoco.hopper as hopper
import numpy as np
class HopperEnv(hopper.HopperEnv):
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
])
def reset_obs(self, obs):
state = np.insert(obs, 0, 0.)
qpos = state[:self.model.nq]
qvel = state[self.model.nq:]
self.set_state(qpos, qvel)
return self._get_obs()
| 25
| 40
| 0.591111
| 63
| 450
| 4.126984
| 0.460317
| 0.046154
| 0.084615
| 0.123077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.28
| 450
| 17
| 41
| 26.470588
| 0.79321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.071429
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c692ea321aa5d6632c79b6a92f458cad0e5a70
| 2,723
|
py
|
Python
|
ncm/api.py
|
SDhuangao/netease-cloud-music-dl
|
4a970504e1fec0a9848f3920b392aa507d6b3879
|
[
"MIT"
] | null | null | null |
ncm/api.py
|
SDhuangao/netease-cloud-music-dl
|
4a970504e1fec0a9848f3920b392aa507d6b3879
|
[
"MIT"
] | null | null | null |
ncm/api.py
|
SDhuangao/netease-cloud-music-dl
|
4a970504e1fec0a9848f3920b392aa507d6b3879
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import requests
from ncm.encrypt import encrypted_request
from ncm.constants import headers
from ncm.constants import song_download_url
from ncm.constants import get_song_url
from ncm.constants import get_album_url
from ncm.constants import get_artist_url
from ncm.constants import get_playlist_url
class CloudApi(object):
def __init__(self, timeout=30):
super().__init__()
self.session = requests.session()
self.session.headers.update(headers)
self.timeout = timeout
def get_request(self, url):
response = self.session.get(url, timeout=self.timeout)
result = response.json()
if result['code'] != 200:
print('Return {} when try to get {}'.format(result, url))
else:
return result
def post_request(self, url, params):
data = encrypted_request(params)
response = self.session.post(url, data=data, timeout=self.timeout)
result = response.json()
if result['code'] != 200:
print('Return {} when try to post {} => {}'.format(result, params, url))
else:
return result
def get_song(self, song_id):
"""
Get song info by song id
:param song_id:
:return:
"""
url = get_song_url(song_id)
result = self.get_request(url)
return result['songs'][0]
def get_album_songs(self, album_id):
"""
Get all album songs info by album id
:param album_id:
:return:
"""
url = get_album_url(album_id)
result = self.get_request(url)
return result['album']['songs']
def get_song_url(self, song_id, bit_rate=320000):
"""Get a song's download url.
:params song_id: song id<int>.
:params bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}
:return:
"""
url = song_download_url
csrf = ''
params = {'ids': [song_id], 'br': bit_rate, 'csrf_token': csrf}
result = self.post_request(url, params)
song_url = result['data'][0]['url']
return song_url
def get_hot_songs(self, artist_id):
"""
        Get an artist's 50 hot songs
:param artist_id:
:return:
"""
url = get_artist_url(artist_id)
result = self.get_request(url)
return result['hotSongs']
def get_playlist_songs(self, playlist_id):
"""
        Get all songs of a public playlist
:param playlist_id:
:return:
"""
url = get_playlist_url(playlist_id)
result = self.get_request(url)
return result['playlist']['trackIds'], result['playlist']['name']
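
# Usage sketch (network-dependent, so left commented; the song id below is
# an illustrative placeholder, not a known track):
#
#     api = CloudApi()
#     song = api.get_song(123456)
#     url = api.get_song_url(123456, bit_rate=320000)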
| 27.505051
| 84
| 0.589791
| 338
| 2,723
| 4.553254
| 0.218935
| 0.031189
| 0.062378
| 0.08577
| 0.289799
| 0.261209
| 0.188434
| 0.188434
| 0.092268
| 0.092268
| 0
| 0.019301
| 0.295997
| 2,723
| 98
| 85
| 27.785714
| 0.783516
| 0.136981
| 0
| 0.230769
| 0
| 0
| 0.066915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.461538
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c7301877ec46ff5d214d67d7d24373229e91aa
| 15,337
|
py
|
Python
|
book/trees/binary_search_tree.py
|
Web-Dev-Collaborative/algos
|
d280581d74ded382094283d931a202eb55fd8369
|
[
"CC0-1.0"
] | 153
|
2015-12-24T00:32:23.000Z
|
2022-02-24T06:00:29.000Z
|
book/trees/binary_search_tree.py
|
Web-Dev-Collaborative/algos
|
d280581d74ded382094283d931a202eb55fd8369
|
[
"CC0-1.0"
] | 78
|
2015-11-17T11:46:15.000Z
|
2021-06-28T18:37:58.000Z
|
book/trees/binary_search_tree.py
|
rhivent/algo-books-python
|
c4fa29616ca9a8a15ba40fa12d21fd8f35096d40
|
[
"CC0-1.0"
] | 66
|
2015-11-02T03:38:02.000Z
|
2022-03-05T17:36:26.000Z
|
# -*- coding: utf-8 -*-
"""
The `TreeNode` class provides many helper functions that make the work
done in the `BinarySearchTree` class methods much easier. The
constructor for a `TreeNode`, along with these helper functions, is
shown below. As you can see, many of these helper functions help to
classify a node according to its own position as a child, (left or
right) and the kind of children the node has. The `TreeNode` class will
also explicitly keep track of the parent as an attribute of each node.
You will see why this is important when we discuss the implementation
for the `del` operator.
One of the more interesting methods of `TreeNode` provides an interface
to simply iterate over all the keys in the tree in order. You already
know how to traverse a binary tree in order, using the `inorder`
traversal algorithm. However, because we want our iterator to operate
lazily, in this case we use the `yield` keyword to define our `__iter__`
method as a Python generator. Pay close attention to the `__iter__`
implementation as at first glance you might think that the code is
not recursive: in fact, because `__iter__` overrides the `for x
in` operation for iteration, it really is recursive!
Our full implementation of `TreeNode` is provided below. It includes
three further methods `find_successor`, `find_min` and `splice_out`
which you can ignore for now as we will return to them later when
discussing deletion.
"""
class TreeNode(object):
def __init__(self, key, val, left=None, right=None, parent=None):
self.key = key
self.val = val
self.left = left
self.right = right
self.parent = parent
def is_left_child(self):
return self.parent and self.parent.left == self
def is_right_child(self):
return self.parent and self.parent.right == self
def is_leaf(self):
return not (self.right or self.left)
def has_any_children(self):
return self.right or self.left
def has_both_children(self):
return self.right and self.left
def has_one_child(self):
return self.has_any_children() and not self.has_both_children()
def replace_node_data(self, key, val, left, right):
self.key = key
self.val = val
self.left = left
self.right = right
if self.left:
self.left.parent = self
if self.right:
self.right.parent = self
def __iter__(self):
if self is None:
return
if self.left:
# `in` calls `__iter__` so is recursive
for elem in self.left:
yield elem
yield self.key
if self.right:
# recurse again
for elem in self.right:
yield elem
def find_successor(self):
if self.right:
return self.right.find_min()
if self.parent is None:
return None
if self.is_left_child():
return self.parent
self.parent.right = None
successor = self.parent.find_successor()
self.parent.right = self
return successor
def find_min(self):
current = self
while current.left:
current = current.left
return current
def splice_out(self):
if self.is_leaf():
if self.is_left_child():
self.parent.left = None
else:
self.parent.right = None
else:
promoted_node = self.left or self.right
if self.is_left_child():
self.parent.left = promoted_node
else:
self.parent.right = promoted_node
promoted_node.parent = self.parent
"""
Now that we have our `TreeNode` class we can begin to write
`BinarySearchTree` itself. Recall that the core functionality of this
class will be to enable `put`ing to and `get`ing from the tree, so we
begin our implementation with the `put` functionality.
In order to enable the `tree[1] = 'foo'` style assignment interface for
our `BinarySearchTree` instances, we override the `__setitem__` magic
method. In this method we first check to see if the tree already has a
root. If there is not a root then we create a new `TreeNode` and set it
as the root of the tree. If a root node is already in place then `put`
calls the private, recursive, helper function `_put` to search the tree
according to the following algorithm:
- Starting at the root of the tree, search the binary tree comparing
the new key to the key in the current node. If the new key is less
than the current node, search the left subtree. If the new key is
greater than the current node, search the right subtree.
- When there is no left (or right) child to search, we have found the
position in the tree where the new node should be installed.
- To add a node to the tree, create a new `TreeNode` object and insert
the object at the point discovered in the previous step.
The code below shows the Python code for inserting a new
node in the tree. The `_put` function is written recursively following
the steps outlined above. Notice that when a new child is inserted into
the tree, the `node` is passed to the new tree as the parent.
One important problem with our implementation of insert is that
duplicate keys are not handled properly. As our tree is implemented a
duplicate key will create a new node with the same key value in the
right subtree of the node having the original key. The result of this is
that the node with the new key will never be found during a search. A
better way to handle the insertion of a duplicate key is for the value
associated with the new key to replace the old value. We leave fixing
this bug as an exercise for you.
"""
class BinarySearchTree(object):
TreeNodeClass = TreeNode
def __init__(self):
self.root = None
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def __setitem__(self, key, val):
if self.root:
self._put(key, val, self.root)
else:
self.root = self.TreeNodeClass(key, val)
self.size = self.size + 1
def _put(self, key, val, node):
if key < node.key:
if node.left:
self._put(key, val, node.left)
else:
node.left = self.TreeNodeClass(key, val, parent=node)
else:
if node.right:
self._put(key, val, node.right)
else:
node.right = self.TreeNodeClass(key, val, parent=node)
"""
The diagram below illustrates the process for inserting a new
node into a binary search tree. The lightly shaded nodes indicate the
nodes that were visited during the insertion process.

Once the tree is constructed, the next task is to implement the
retrieval of a value for a given key. The `get` functionality is even easier
than the `put` functionality because we simply search the tree recursively
until we get to a non-matching leaf node or find a matching key. When
a matching key is found, the value stored in the val of the node is
returned.
Again, in order to enable a `tree[1]` retrieval interface, we overload
one of Python’s magic methods—in this case `__getitem__`. Just like with
`__setitem__`, the primary purpose of this method is to handle presence
and absence of a root node, and delegates the core `get` functionality
to `_get`.
The search code in the `_get` method uses the same logic
for choosing the left or right child as the `_put` method. Notice that
the `_get` method returns a `TreeNode` to `__getitem__`, this allows `_get` to
be used as a flexible helper method for other `BinarySearchTree` methods
that may need to make use of other data from the `TreeNode` besides the
val.
"""
def __getitem__(self, key):
if self.root:
result = self._get(key, self.root)
if result:
return result.val
raise KeyError
def _get(self, key, node):
if not node:
return None
if node.key == key:
return node
if key < node.key:
return self._get(key, node.left)
return self._get(key, node.right)
"""
Using `_get`, we can implement the `in` operation by writing a
`__contains__` method for the `BinarySearchTree`. The `__contains__`
method will simply call `_get` and return `True` if `_get` returns a
value, or `False` if it returns `None`. The code for `__contains__` is
shown below.
"""
def __contains__(self, key):
return bool(self._get(key, self.root))
"""
Finally, we turn our attention to the most challenging method in the
binary search tree: the deletion of a key. The first task is
to find the node to delete by searching the tree. If the tree has more
than one node we search using the `_get` method to find the `TreeNode`
that needs to be removed. If the tree only has a single node, that means
we are removing the root of the tree, but we still must check to make
sure the key of the root matches the key that is to be deleted. In
either case if the key is not found the `del` operator raises an error.
"""
def delete(self, key):
if self.size > 1:
node_to_remove = self._get(key, self.root)
if node_to_remove:
self.remove(node_to_remove)
self.size = self.size - 1
return
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
return
raise KeyError('Error, key not in tree')
def __delitem__(self, key):
self.delete(key)
"""
Once we’ve found the node containing the key we want to delete, there
are three cases that we must consider:
1. The node to be deleted has no children
2. The node to be deleted has only one child
3. The node to be deleted has two children
The first case is straightforward. If
the current node has no children all we need to do is delete the node
and remove the reference to this node in the parent. The code for this
case is shown below.
"""
def remove(self, node):
if node.is_leaf() and node.parent is not None:
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
"""

The second case is only slightly more complicated (see below). If a node
has only a single child, then we can simply promote the child to take
the place of its parent. The code for this case is shown in the next
code sample. As you look at this code you will see that there are six
cases to consider. Since the cases are symmetric with respect to either
having a left or right child we will just discuss the case where the
current node has a left child. The decision proceeds as follows:
1. If the current node is a left child then we only need to update the
parent reference of the left child to point to the parent of the
current node, and then update the left child reference of the parent
to point to the current node’s left child.
2. If the current node is a right child then we only need to update the
parent reference of the right child to point to the parent of the
current node, and then update the right child reference of the
parent to point to the current node’s right child.
3. If the current node has no parent, it must be the root. In this case
we will just replace the `key`, `val`, `left`, and
`right` data by calling the `replace_node_data` method on
the root.
Code for this decision process may look like:
"""
elif node.has_one_child():
promoted_node = node.left or node.right
if node.is_left_child():
promoted_node.parent = node.parent
node.parent.left = promoted_node
elif node.is_right_child():
promoted_node.parent = node.parent
node.parent.right = promoted_node
else:
node.replace_node_data(
promoted_node.key,
promoted_node.val,
promoted_node.left,
promoted_node.right
)
"""

The third case is the most difficult case to handle (see below). If a
node has two children, then it is unlikely that we can simply promote
one of them to take the node’s place. We can, however, search the tree
for a node that can be used to replace the one scheduled for deletion.
What we need is a node that will preserve the binary search tree
relationships for both of the existing left and right subtrees. The node
that will do this is the node that has the next-largest key in the tree.
We call this node the **successor**, and we will look at a way to find
the successor shortly. The successor is guaranteed to have no more than
one child, so we know how to remove it using the two cases for deletion
that we have already implemented. Once the successor has been removed,
we simply put it in the tree in place of the node to be deleted.

The code to handle the third case is shown below. Notice
that we make use of the helper methods `find_successor` and `find_min` to
find the successor. To remove the successor, we make use of the method
`splice_out`. The reason we use `splice_out` is that it goes directly to
the node we want to splice out and makes the right changes. We could
call `delete` recursively, but then we would waste time re-searching for
the key node.
"""
else: # has both children
successor = node.find_successor()
if successor:
successor.splice_out()
node.key = successor.key
node.val = successor.val
"""
The code to find the successor is shown above and as you can see is a
method of the `TreeNode` class. This code makes use of the same
properties of binary search trees that cause an inorder traversal to
print out the nodes in the tree from smallest to largest. There are
three cases to consider when looking for the successor:
1. If the node has a right child, then the successor is the smallest
key in the right subtree.
2. If the node has no right child and is the left child of its parent,
then the parent is the successor.
3. If the node is the right child of its parent, and itself has no
right child, then the successor to this node is the successor of its
parent, excluding this node.
The first condition is the only one that matters for us when deleting a
node from a binary search tree.
The `find_min` method is called to find the minimum key in a subtree. You
should convince yourself that the minimum valued key in any binary
search tree is the leftmost child of the tree. Therefore the `find_min`
method simply follows the `left` references in each node of the
subtree until it reaches a node that does not have a left child.
"""
| 37.775862
| 78
| 0.684684
| 2,443
| 15,337
| 4.22718
| 0.16537
| 0.010652
| 0.016268
| 0.008909
| 0.165682
| 0.115716
| 0.071269
| 0.066428
| 0.038733
| 0.038733
| 0
| 0.002461
| 0.258134
| 15,337
| 405
| 79
| 37.869136
| 0.90508
| 0.097346
| 0
| 0.313333
| 0
| 0
| 0.004244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153333
| false
| 0
| 0
| 0.06
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86c7d4acbb62e0447380b9c4c68ef07bbf5ead1b
| 28,677
|
py
|
Python
|
fire/core.py
|
adamruth/python-fire
|
6912ccd56f50e0f4bb30a0725d95858ef29f3bde
|
[
"Apache-2.0"
] | 1
|
2020-02-05T04:43:03.000Z
|
2020-02-05T04:43:03.000Z
|
fire/core.py
|
chesnjak/python-fire
|
72604f40314008e562ba47936dcc183b51166b72
|
[
"Apache-2.0"
] | null | null | null |
fire/core.py
|
chesnjak/python-fire
|
72604f40314008e562ba47936dcc183b51166b72
|
[
"Apache-2.0"
] | 1
|
2020-07-15T22:58:25.000Z
|
2020-07-15T22:58:25.000Z
|
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Fire is a library for creating CLIs from absolutely any Python object.
You can call Fire on any Python object:
functions, classes, modules, objects, dictionaries, lists, tuples, etc.
They all work!
Python Fire turns any Python object into a command line interface.
Simply call the Fire function as your main method to create a CLI.
When using Fire to build a CLI, your main method includes a call to Fire. Eg:
def main(argv):
fire.Fire(Component)
A Fire CLI command is run by consuming the arguments in the command in order to
access a member of current component, call the current component (if it's a
function), or instantiate the current component (if it's a class). The target
component begins as Component, and at each operation the component becomes the
result of the preceding operation.
For example "command fn arg1 arg2" might access the "fn" property of the initial
target component, and then call that function with arguments 'arg1' and 'arg2'.
Additional examples are available in the examples directory.
Fire Flags, common to all Fire CLIs, must go after a separating "--". For
example, to get help for a command you might run: `command -- --help`.
The available flags for all Fire CLIs are:
-v --verbose: Include private members in help and usage information.
-h --help: Provide help and usage information for the command.
-i --interactive: Drop into a Python REPL after running the command.
--completion: Write the Bash completion script for the tool to stdout.
--separator SEPARATOR: Use SEPARATOR in place of the default separator, '-'.
--trace: Get the Fire Trace for the command.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import json
import os
import pipes
import shlex
import sys
import types
from fire import completion
from fire import decorators
from fire import helputils
from fire import inspectutils
from fire import interact
from fire import parser
from fire import trace
import six
def Fire(component=None, command=None, name=None):
"""This function, Fire, is the main entrypoint for Python Fire.
Executes a command either from the `command` argument or from sys.argv by
recursively traversing the target object `component`'s members consuming
arguments, evaluating functions, and instantiating classes as it goes.
When building a CLI with Fire, your main method should call this function.
Args:
component: The initial target component.
command: Optional. If supplied, this is the command executed. If not
supplied, then the command is taken from sys.argv instead. This can be
a string or a list of strings; a list of strings is preferred.
name: Optional. The name of the command as entered at the command line.
Used in interactive mode and for generating the completion script.
Returns:
The result of executing the Fire command. Execution begins with the initial
target component. The component is updated by using the command arguments
to either access a member of the current component, call the current
component (if it's a function), or instantiate the current component (if
it's a class). When all arguments are consumed and there's no function left
to call or class left to instantiate, the resulting current component is
the final result.
Raises:
ValueError: If the command argument is supplied, but not a string or a
sequence of arguments.
FireExit: When Fire encounters a FireError, Fire will raise a FireExit with
code 2. When used with the help or trace flags, Fire will raise a
FireExit with code 0 if successful.
"""
name = name or os.path.basename(sys.argv[0])
# Get args as a list.
if isinstance(command, six.string_types):
args = shlex.split(command)
elif isinstance(command, (list, tuple)):
args = command
elif command is None:
# Use the command line args by default if no command is specified.
args = sys.argv[1:]
else:
raise ValueError('The command argument must be a string or a sequence of '
'arguments.')
# Determine the calling context.
caller = inspect.stack()[1]
caller_frame = caller[0]
caller_globals = caller_frame.f_globals
caller_locals = caller_frame.f_locals
context = {}
context.update(caller_globals)
context.update(caller_locals)
component_trace = _Fire(component, args, context, name)
if component_trace.HasError():
for help_flag in ['-h', '--help']:
if help_flag in component_trace.elements[-1].args:
command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand())
print(('WARNING: The proper way to show help is {cmd}.\n'
'Showing help anyway.\n').format(cmd=pipes.quote(command)),
file=sys.stderr)
print('Fire trace:\n{trace}\n'.format(trace=component_trace),
file=sys.stderr)
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(2, component_trace)
elif component_trace.show_trace and component_trace.show_help:
print('Fire trace:\n{trace}\n'.format(trace=component_trace),
file=sys.stderr)
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(0, component_trace)
elif component_trace.show_trace:
print('Fire trace:\n{trace}'.format(trace=component_trace),
file=sys.stderr)
raise FireExit(0, component_trace)
elif component_trace.show_help:
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(0, component_trace)
else:
_PrintResult(component_trace, verbose=component_trace.verbose)
result = component_trace.GetResult()
return result
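# Illustrative usage of Fire() (a minimal sketch; the module, class, and
# method names below are hypothetical, not part of this file):
#
#   import fire
#
#   class Calculator(object):
#     def double(self, number):
#       return 2 * number
#
#   if __name__ == '__main__':
#     fire.Fire(Calculator)
#
# Running `python calculator.py double --number 15` instantiates Calculator,
# calls double(number=15), and prints 30.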
def CompletionScript(name, component):
"""Returns the text of the Bash completion script for a Fire CLI."""
return completion.Script(name, component)
class FireError(Exception):
"""Exception used by Fire when a Fire command cannot be executed.
These exceptions are not raised by the Fire function, but rather are caught
and added to the FireTrace.
"""
class FireExit(SystemExit):
"""An exception raised by Fire to the client in the case of a FireError.
The trace of the Fire program is available on the `trace` property.
This exception inherits from SystemExit, so clients may explicitly catch it
with `except SystemExit` or `except FireExit`. If not caught, this exception
will cause the client program to exit without a stacktrace.
"""
def __init__(self, code, component_trace):
"""Constructs a FireExit exception.
Args:
code: (int) Exit code for the Fire CLI.
component_trace: (FireTrace) The trace for the Fire command.
"""
super(FireExit, self).__init__(code)
self.trace = component_trace
def _PrintResult(component_trace, verbose=False):
"""Prints the result of the Fire call to stdout in a human readable way."""
  # TODO: Design a human-readable, deserializable serialization method
  # and move serialization to its own module.
result = component_trace.GetResult()
if isinstance(result, (list, set, types.GeneratorType)):
for i in result:
print(_OneLineResult(i))
elif inspect.isgeneratorfunction(result):
raise NotImplementedError
elif isinstance(result, dict):
print(_DictAsString(result, verbose))
elif isinstance(result, tuple):
print(_OneLineResult(result))
elif isinstance(result,
(bool, six.string_types, six.integer_types, float, complex)):
print(result)
elif result is not None:
print(helputils.HelpString(result, component_trace, verbose))
def _DictAsString(result, verbose=False):
"""Returns a dict as a string.
Args:
result: The dict to convert to a string
verbose: Whether to include 'hidden' members, those keys starting with _.
Returns:
A string representing the dict
"""
result = {key: value for key, value in result.items()
if _ComponentVisible(key, verbose)}
if not result:
return '{}'
longest_key = max(len(str(key)) for key in result.keys())
format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)
lines = []
for key, value in result.items():
line = format_string.format(key=str(key) + ':',
value=_OneLineResult(value))
lines.append(line)
return '\n'.join(lines)
def _ComponentVisible(component, verbose=False):
"""Returns whether a component should be visible in the output."""
return (
verbose
or not isinstance(component, six.string_types)
or not component.startswith('_'))
def _OneLineResult(result):
"""Returns result serialized to a single line string."""
# TODO: Ensure line is fewer than eg 120 characters.
if isinstance(result, six.string_types):
return str(result).replace('\n', ' ')
try:
# Don't force conversion to ascii.
return json.dumps(result, ensure_ascii=False)
except (TypeError, ValueError):
return str(result).replace('\n', ' ')
def _Fire(component, args, context, name=None):
"""Execute a Fire command on a target component using the args supplied.
  Arguments that come after a final isolated '--' are treated as Flags, e.g.
  for interactive mode or completion script generation.
  All other arguments are consumed by the execution of the Fire command, e.g.
  in the traversal of the members of the component, in calling a function, or
  in instantiating a class found during the traversal.
The steps performed by this method are:
1. Parse any Flag args (the args after the final --)
2. Start with component as the current component.
2a. If the current component is a class, instantiate it using args from args.
2b. If the current component is a routine, call it using args from args.
2c. Otherwise access a member from component using an arg from args.
2d. Repeat 2a-2c until no args remain.
3a. Embed into ipython REPL if interactive mode is selected.
3b. Generate a completion script if that flag is provided.
In step 2, arguments will only ever be consumed up to a separator; a single
step will never consume arguments from both sides of a separator.
The separator defaults to a hyphen (-), and can be overwritten with the
--separator Fire argument.
Args:
component: The target component for Fire.
args: A list of args to consume in Firing on the component, usually from
the command line.
context: A dict with the local and global variables available at the call
to Fire.
name: Optional. The name of the command. Used in interactive mode and in
the tab completion script.
Returns:
FireTrace of components starting with component, tracing Fire's execution
path as it consumes args.
Raises:
ValueError: If there are arguments that cannot be consumed.
ValueError: If --completion is specified but no name available.
"""
args, flag_args = parser.SeparateFlagArgs(args)
argparser = parser.CreateParser()
parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
verbose = parsed_flag_args.verbose
interactive = parsed_flag_args.interactive
separator = parsed_flag_args.separator
show_completion = parsed_flag_args.completion
show_help = parsed_flag_args.help
show_trace = parsed_flag_args.trace
# component can be a module, class, routine, object, etc.
if component is None:
component = context
initial_component = component
component_trace = trace.FireTrace(
initial_component=initial_component, name=name, separator=separator,
verbose=verbose, show_help=show_help, show_trace=show_trace)
instance = None
remaining_args = args
while True:
last_component = component
initial_args = remaining_args
if not remaining_args and (show_help or interactive or show_trace
or show_completion):
# Don't initialize the final class or call the final function unless
# there's a separator after it, and instead process the current component.
break
saved_args = []
used_separator = False
if separator in remaining_args:
# For the current component, only use arguments up to the separator.
separator_index = remaining_args.index(separator)
saved_args = remaining_args[separator_index + 1:]
remaining_args = remaining_args[:separator_index]
used_separator = True
assert separator not in remaining_args
if inspect.isclass(component) or inspect.isroutine(component):
# The component is a class or a routine; we'll try to initialize it or
# call it.
isclass = inspect.isclass(component)
try:
target = component.__name__
filename, lineno = inspectutils.GetFileAndLine(component)
component, consumed_args, remaining_args, capacity = _CallCallable(
component, remaining_args)
# Update the trace.
if isclass:
component_trace.AddInstantiatedClass(
component, target, consumed_args, filename, lineno, capacity)
else:
component_trace.AddCalledRoutine(
component, target, consumed_args, filename, lineno, capacity)
except FireError as error:
component_trace.AddError(error, initial_args)
return component_trace
if last_component is initial_component:
# If the initial component is a class, keep an instance for use with -i.
instance = component
elif isinstance(component, (list, tuple)) and remaining_args:
# The component is a tuple or list; we'll try to access a member.
arg = remaining_args[0]
try:
index = int(arg)
component = component[index]
except (ValueError, IndexError):
error = FireError(
'Unable to index into component with argument:', arg)
component_trace.AddError(error, initial_args)
return component_trace
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, index, [arg], filename, lineno)
elif isinstance(component, dict) and remaining_args:
# The component is a dict; we'll try to access a member.
target = remaining_args[0]
if target in component:
component = component[target]
elif target.replace('-', '_') in component:
component = component[target.replace('-', '_')]
else:
# The target isn't present in the dict as a string, but maybe it is as
# another type.
# TODO: Consider alternatives for accessing non-string keys.
found_target = False
for key, value in component.items():
if target == str(key):
component = value
found_target = True
break
if not found_target:
error = FireError(
'Cannot find target in dict:', target, component)
component_trace.AddError(error, initial_args)
return component_trace
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, target, [target], filename, lineno)
elif remaining_args:
# We'll try to access a member of the component.
try:
target = remaining_args[0]
component, consumed_args, remaining_args = _GetMember(
component, remaining_args)
filename, lineno = inspectutils.GetFileAndLine(component)
component_trace.AddAccessedProperty(
component, target, consumed_args, filename, lineno)
except FireError as error:
component_trace.AddError(error, initial_args)
return component_trace
if used_separator:
# Add back in the arguments from after the separator.
if remaining_args:
remaining_args = remaining_args + [separator] + saved_args
elif (inspect.isclass(last_component)
or inspect.isroutine(last_component)):
remaining_args = saved_args
component_trace.AddSeparator()
elif component is not last_component:
remaining_args = [separator] + saved_args
else:
# It was an unnecessary separator.
remaining_args = saved_args
if component is last_component and remaining_args == initial_args:
# We're making no progress.
break
if remaining_args:
component_trace.AddError(
FireError('Could not consume arguments:', remaining_args),
initial_args)
return component_trace
if show_completion:
if name is None:
raise ValueError('Cannot make completion script without command name')
script = CompletionScript(name, initial_component)
component_trace.AddCompletionScript(script)
if interactive:
variables = context.copy()
if name is not None:
variables[name] = initial_component
variables['component'] = initial_component
variables['result'] = component
variables['trace'] = component_trace
if instance is not None:
variables['self'] = instance
interact.Embed(variables, verbose)
component_trace.AddInteractiveMode()
return component_trace
def _GetMember(component, args):
"""Returns a subcomponent of component by consuming an arg from args.
Given a starting component and args, this function gets a member from that
component, consuming one arg in the process.
Args:
component: The component from which to get a member.
args: Args from which to consume in the search for the next component.
Returns:
component: The component that was found by consuming an arg.
consumed_args: The args that were consumed by getting this member.
remaining_args: The remaining args that haven't been consumed yet.
Raises:
FireError: If we cannot consume an argument to get a member.
"""
members = dict(inspect.getmembers(component))
arg = args[0]
arg_names = [
arg,
arg.replace('-', '_'), # treat '-' as '_'.
]
for arg_name in arg_names:
if arg_name in members:
return members[arg_name], [arg], args[1:]
raise FireError('Could not consume arg:', arg)
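# Example of the lookup above (a sketch): for a component with a method
# `my_method`, both spellings resolve to the same member, since '-' is
# translated to '_'; the consumed arg is returned in its original spelling:
#
#   _GetMember(component, ['my_method', 'rest'])  # -> (member, ['my_method'], ['rest'])
#   _GetMember(component, ['my-method', 'rest'])  # -> (member, ['my-method'], ['rest'])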
def _CallCallable(fn, args):
"""Calls the function fn by consuming args from args.
Args:
fn: The function to call or class to instantiate.
args: Args from which to consume for calling the function.
Returns:
component: The object that is the result of the function call.
consumed_args: The args that were consumed for the function call.
remaining_args: The remaining args that haven't been consumed yet.
capacity: Whether the call could have taken additional args.
"""
parse = _MakeParseFn(fn)
(varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)
result = fn(*varargs, **kwargs)
return result, consumed_args, remaining_args, capacity
def _MakeParseFn(fn):
"""Creates a parse function for fn.
Args:
fn: The function or class to create the parse function for.
Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns ((varargs, kwargs), consumed_args, remaining_args, capacity).
    The original function fn can then be called with fn(*varargs, **kwargs).
    The remaining_args are the leftover args from the arguments to the parse
    function.
  """
fn_spec = inspectutils.GetFullArgSpec(fn)
all_args = fn_spec.args + fn_spec.kwonlyargs
metadata = decorators.GetMetadata(fn)
# Note: num_required_args is the number of positional arguments without
# default values. All of these arguments are required.
num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)
def _ParseFn(args):
"""Parses the list of `args` into (varargs, kwargs), remaining_args."""
kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
args, all_args, fn_spec.varkw)
# Note: _ParseArgs modifies kwargs.
parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
remaining_args, metadata)
if fn_spec.varargs or fn_spec.varkw:
# If we're allowed *varargs or **kwargs, there's always capacity.
capacity = True
extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
if fn_spec.varkw is None and extra_kw:
raise FireError('Unexpected kwargs present:', extra_kw)
missing_kwonly = set(required_kwonly) - set(kwargs)
if missing_kwonly:
raise FireError('Missing required flags:', missing_kwonly)
# If we accept *varargs, then use all remaining arguments for *varargs.
if fn_spec.varargs is not None:
varargs, remaining_args = remaining_args, []
else:
varargs = []
for index, value in enumerate(varargs):
varargs[index] = _ParseValue(value, None, None, metadata)
varargs = parsed_args + varargs
remaining_args += remaining_kwargs
consumed_args = args[:len(args) - len(remaining_args)]
return (varargs, kwargs), consumed_args, remaining_args, capacity
return _ParseFn
def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
remaining_args, metadata):
"""Parses the positional and named arguments from the available supplied args.
Modifies kwargs, removing args as they are used.
Args:
fn_args: A list of argument names that the target function accepts,
including positional and named arguments, but not the varargs or kwargs
names.
fn_defaults: A list of the default values in the function argspec.
num_required_args: The number of required arguments from the function's
argspec. This is the number of arguments without a default value.
kwargs: Dict with named command line arguments and their values.
remaining_args: The remaining command line arguments, which may still be
used as positional arguments.
metadata: Metadata about the function, typically from Fire decorators.
Returns:
parsed_args: A list of values to be used as positional arguments for calling
the target function.
kwargs: The input dict kwargs modified with the used kwargs removed.
remaining_args: A list of the supplied args that have not been used yet.
capacity: Whether the call could have taken args in place of defaults.
Raises:
FireError: if additional positional arguments are expected, but none are
available.
"""
accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)
capacity = False # If we see a default get used, we'll set capacity to True
# Select unnamed args.
parsed_args = []
for index, arg in enumerate(fn_args):
value = kwargs.pop(arg, None)
if value is not None: # A value is specified at the command line.
value = _ParseValue(value, index, arg, metadata)
parsed_args.append(value)
else: # No value has been explicitly specified.
if remaining_args and accepts_positional_args:
# Use a positional arg.
value = remaining_args.pop(0)
value = _ParseValue(value, index, arg, metadata)
parsed_args.append(value)
elif index < num_required_args:
raise FireError(
'The function received no value for the required argument:', arg)
else:
# We're past the args for which there's no default value.
# There's a default value for this arg.
capacity = True
default_index = index - num_required_args # index into the defaults.
parsed_args.append(fn_defaults[default_index])
for key, value in kwargs.items():
kwargs[key] = _ParseValue(value, None, key, metadata)
return parsed_args, kwargs, remaining_args, capacity
def _ParseKeywordArgs(args, fn_args, fn_keywords):
"""Parses the supplied arguments for keyword arguments.
  Given a list of arguments, finds occurrences of --name value, and uses 'name'
  as the keyword and 'value' as the value. Constructs and returns a dictionary
  of these keyword arguments, and returns a list of the remaining arguments.
  If fn_keywords is None, only argument names that the function accepts (those
  listed in fn_args) are treated as keyword arguments.
This returns the values of the args as strings. They are later processed by
_ParseArgs, which converts them to the appropriate type.
Args:
args: A list of arguments
fn_args: A list of argument names that the target function accepts,
including positional and named arguments, but not the varargs or kwargs
names.
fn_keywords: The argument name for **kwargs, or None if **kwargs not used
Returns:
kwargs: A dictionary mapping keywords to values.
remaining_kwargs: A list of the unused kwargs from the original args.
remaining_args: A list of the unused arguments from the original args.
"""
kwargs = {}
remaining_kwargs = []
remaining_args = []
if not args:
return kwargs, remaining_kwargs, remaining_args
skip_argument = False
for index, argument in enumerate(args):
if skip_argument:
skip_argument = False
continue
arg_consumed = False
if argument.startswith('--'):
# This is a named argument; get its value from this arg or the next.
got_argument = False
keyword = argument[2:]
contains_equals = '=' in keyword
is_bool_syntax = (
not contains_equals and
(index + 1 == len(args) or args[index + 1].startswith('--')))
if contains_equals:
keyword, value = keyword.split('=', 1)
got_argument = True
elif is_bool_syntax:
# Since there's no next arg or the next arg is a Flag, we consider
# this flag to be a boolean.
got_argument = True
if keyword in fn_args:
value = 'True'
elif keyword.startswith('no'):
keyword = keyword[2:]
value = 'False'
else:
value = 'True'
else:
if index + 1 < len(args):
value = args[index + 1]
got_argument = True
keyword = keyword.replace('-', '_')
# In order for us to consume the argument as a keyword arg, we either:
# Need to be explicitly expecting the keyword, or we need to be
# accepting **kwargs.
if got_argument:
skip_argument = not contains_equals and not is_bool_syntax
arg_consumed = True
if keyword in fn_args or fn_keywords:
kwargs[keyword] = value
else:
remaining_kwargs.append(argument)
if skip_argument:
remaining_kwargs.append(args[index + 1])
if not arg_consumed:
# The argument was not consumed, so it is still a remaining argument.
remaining_args.append(argument)
return kwargs, remaining_kwargs, remaining_args
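# Worked examples for the keyword syntaxes handled above (a sketch; assumes a
# target function whose argspec lists 'alpha' and 'beta'):
#
#   _ParseKeywordArgs(['--alpha', '3'], ['alpha', 'beta'], None)
#       -> ({'alpha': '3'}, [], [])
#   _ParseKeywordArgs(['--alpha=3', 'x'], ['alpha', 'beta'], None)
#       -> ({'alpha': '3'}, [], ['x'])
#   _ParseKeywordArgs(['--beta'], ['alpha', 'beta'], None)
#       -> ({'beta': 'True'}, [], [])   # boolean syntax
#   _ParseKeywordArgs(['--nobeta'], ['alpha', 'beta'], None)
#       -> ({'beta': 'False'}, [], [])  # 'no' prefix negates a flag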
def _ParseValue(value, index, arg, metadata):
"""Parses value, a string, into the appropriate type.
The function used to parse value is determined by the remaining arguments.
Args:
value: The string value to be parsed, typically a command line argument.
index: The index of the value in the function's argspec.
arg: The name of the argument the value is being parsed for.
metadata: Metadata about the function, typically from Fire decorators.
Returns:
value, parsed into the appropriate type for calling a function.
"""
parse_fn = parser.DefaultParseValue
# We check to see if any parse function from the fn metadata applies here.
parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)
if parse_fns:
default = parse_fns['default']
positional = parse_fns['positional']
named = parse_fns['named']
if index is not None and 0 <= index < len(positional):
parse_fn = positional[index]
elif arg in named:
parse_fn = named[arg]
elif default is not None:
parse_fn = default
return parse_fn(value)
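# By default values are parsed with parser.DefaultParseValue, which interprets
# command-line strings as Python literals where possible (a sketch):
#
#   _ParseValue('5', None, None, {})      # -> 5 (an int)
#   _ParseValue('hello', None, None, {})  # -> 'hello' (falls back to str)
#
# Functions decorated via fire.decorators can override this per argument.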
86c845d512d008bf07b10c93c9a059cfaa7474a0 | 1,668 | py | Python | app.py | AmirValeev/auto-ml-classifier @ e803fe92d1ec71e87509845ea61ecc46b363bae6 | ["Apache-2.0"] | stars: null | issues: null | forks: null
import os, ast
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
import pickle
def main():
    # Get the dataset from the user's GitHub repository
    dataset_path = "https://raw.githubusercontent.com/" + os.environ["GITHUB_REPOSITORY"] + "/master/dataset.csv"
    data = pd.read_csv(dataset_path)
    print()
    print(data.describe())
    x = data.iloc[:, :-1]
    y = data.iloc[:, -1]
    # One-hot encode the last feature column; pass the rest through unchanged
    column_trans = make_column_transformer((OneHotEncoder(), [-1]), remainder='passthrough')
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
    # Define a pipeline: encoding followed by a support-vector classifier
    pipe = make_pipeline(column_trans, SVC())
    pipe.fit(x_train, y_train)  # training the model
    print("\nModel Training Finished")
    accuracy = pipe.score(x_test, y_test)
    print("\nAccuracy of the Model: " + str(accuracy * 100))
    if pipe:
        pickle.dump(pipe, open('model.pkl', 'wb'))  # store the artifact in the Docker container
    if not os.environ["INPUT_MYINPUT"] == 'zeroinputs':
        inputs = ast.literal_eval(os.environ["INPUT_MYINPUT"])
        print("\nThe Predicted Output is:")
        output = pipe.predict([inputs])
        print(output)
    else:
        output = ["None"]
        print("\nUser didn't provide inputs to predict")
    print("\n=======================Action Completed========================")
    print(f"::set-output name=myOutput::{output[0]}")
if __name__ == "__main__":
main()
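# Expected inputs (a sketch inferred from the environment variables read
# above; the example values are hypothetical):
#   GITHUB_REPOSITORY  "owner/repo" hosting dataset.csv at the repo root,
#                      with feature columns first and the label column last.
#   INPUT_MYINPUT      either the literal string 'zeroinputs', or a Python
#                      list literal such as "[5.1, 3.5, 1.4, 0.2]" whose
#                      entries line up with the feature columns.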
86c8b4810cb292d6be03cbb1ee7d68143bb6929f | 512 | py | Python | util/headers.py | giuseppe/quay @ a1b7e4b51974edfe86f66788621011eef2667e6a | ["Apache-2.0"]
stars: 2,027 (2019-11-12T18:05:48.000Z .. 2022-03-31T22:25:04.000Z) | issues: 496 (2019-11-12T18:13:37.000Z .. 2022-03-31T10:43:45.000Z) | forks: 249 (2019-11-12T18:02:27.000Z .. 2022-03-22T12:19:19.000Z)
import base64
def parse_basic_auth(header_value):
"""
Attempts to parse the given header value as a Base64-encoded Basic auth header.
"""
if not header_value:
return None
parts = header_value.split(" ")
if len(parts) != 2 or parts[0].lower() != "basic":
return None
    try:
        # b64decode returns bytes; decode to text before splitting on ":".
        basic_parts = base64.b64decode(parts[1]).decode("utf-8").split(":", 1)
        if len(basic_parts) != 2:
            return None
        return basic_parts
    except ValueError:
        return None
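# Example usage (illustrative):
#
#   import base64
#   header = "Basic " + base64.b64encode(b"user:secret").decode("ascii")
#   parse_basic_auth(header)        # -> ["user", "secret"]
#   parse_basic_auth("Bearer abc")  # -> None (not Basic auth)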
86ca3287dbcbbef744a382d06122c372e95e738d | 3,294 | py | Python | cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py | aarunsai81/netapp @ 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | ["Apache-2.0"]
stars: 11 (2015-08-25T13:11:18.000Z .. 2020-10-15T11:29:20.000Z) | issues: 5 (2018-01-25T11:31:56.000Z .. 2019-05-06T23:13:35.000Z) | forks: 11 (2015-02-20T18:48:24.000Z .. 2021-01-30T20:26:18.000Z)
# Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks
class TestDeleteVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.delete_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestDeleteVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(
ctx, **{'provider_id': fake.PROVIDER_ID})
self.volume_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: self.volume.id,
'instances/Volume::{}/action/removeMappedSdc'.format(
self.volume.provider_id): self.volume.provider_id,
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): self.volume.provider_id,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
},
}
def test_bad_login_and_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.volume)
def test_delete_volume(self):
"""Setting the unmap volume before delete flag for tests """
self.driver.configuration.set_override(
'sio_unmap_volume_before_deletion',
override=True)
self.driver.delete_volume(self.volume)
86ca3cb4e460e6fa964047e9d8e3d1c032b0dafb | 1,233 | py | Python | example-package/transportation_tutorials/__init__.py | chrisc20042001/python-for-transportation-modeling @ 677129daa390fcaa6e5cde45960e27d9bd6ca4bf | ["BSD-3-Clause"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
__version__ = '1.0.2'
import os
import appdirs
import osmnx as ox
import joblib
import requests
from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile
from .data import data, list_data, problematic
from .tools.view_code import show_file
from . import mapping
cache_dir = None
memory = None
def set_cache_dir(location=None, compress=True, verbose=0, **kwargs):
"""
Set up a cache directory for use with the tutorials.
Parameter
---------
cache_dir : Path-like or False, optional
A path for the cache files. Set to False to disable caching.
"""
global memory, cache_dir
if location is None:
location = appdirs.user_cache_dir('transportation_tutorials')
if location is False:
location = None
memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs)
make_cache = (
(ox, 'gdf_from_place'),
(ox, 'graph_from_bbox'),
(requests, 'get'),
(requests, 'post'),
)
for module, func_name in make_cache:
try:
func = getattr(module, f"_{func_name}_orig")
except AttributeError:
func = getattr(module, func_name)
setattr(module, f"_{func_name}_orig", func)
setattr(module, func_name, memory.cache(func))
set_cache_dir()
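# Example (a sketch; the cache path is a placeholder):
#
#   import transportation_tutorials as tt
#   tt.set_cache_dir('/tmp/tt-cache')  # cache wrapped calls under /tmp
#   tt.set_cache_dir(False)            # disable caching entirely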
86ca8c2e422d5ab12a80680e14af6535e5befd05 | 2,146 | py | Python | common/common.py | czajowaty/curry-bot @ 91bfbd884342a02c6defd057d27d5b1fcd78cb21 | ["MIT"]
stars: 3 (2019-10-09T23:17:55.000Z .. 2022-02-01T17:34:27.000Z) | issues: 19 (2019-10-09T20:42:05.000Z .. 2022-02-01T08:22:25.000Z) | forks: 6 (2020-08-09T20:17:13.000Z .. 2022-01-27T23:59:28.000Z)
from requests.models import PreparedRequest
def is_valid_url(url):
prepared_request = PreparedRequest()
try:
prepared_request.prepare_url(url, None)
return True
    except Exception:
        return False
class Timestamp: # a speedrun.com style timestamp e.g. "3h 53m 233s 380ms"
def __init__(self, s):
self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0
for arg in s.split():
if arg.endswith("ms"):
self.milliseconds += int(arg[:-2])
elif arg.endswith("s"):
self.seconds += int(arg[:-1])
elif arg.endswith("m"):
self.minutes += int(arg[:-1])
elif arg.endswith("h"):
self.hours += int(arg[:-1])
@staticmethod
def from_milliseconds(ms):
t = Timestamp("0ms")
temp = ms
t.hours = temp // 3600000
temp %= 3600000
t.minutes = temp // 60000
temp %= 60000
t.seconds = temp // 1000
t.milliseconds = temp % 1000
return t
def __str__(self):
result = []
if self.hours != 0:
result.append("{}h".format(self.hours))
if not (self.hours == 0 and self.minutes == 0):
result.append("{}m".format(self.minutes))
result.append("{}s".format(self.seconds))
if self.milliseconds > 0:
result.append("{}ms".format(self.milliseconds))
return ' '.join(result)
def __eq__(self, other):
return self.hours == other.hours and self.minutes == other.minutes and self.seconds == other.seconds and self.milliseconds == other.milliseconds
def __lt__(self, other):
if self.hours < other.hours:
return True
elif self.hours > other.hours:
return False
if self.minutes < other.minutes:
return True
elif self.minutes > other.minutes:
return False
if self.seconds < other.seconds:
return True
elif self.seconds > other.seconds:
return False
return self.milliseconds < other.milliseconds
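# Example usage (illustrative):
#
#   t = Timestamp("1h 2m 3s 40ms")         # parse a speedrun.com style time
#   u = Timestamp.from_milliseconds(61001)
#   str(u)                                 # -> '1m 1s 1ms'
#   u < t                                  # -> True (compares h, m, s, ms)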
86cbceec04afe24550cbee582258380f822dc77d | 5,265 | py | Python | hendrix/test/test_ux.py | anthonyalmarza/hendrix @ eebd2a2183cc18ec2267d96a53a70d41b1630ce6 | ["MIT"] | stars: null | issues: null | forks: null
import os
import sys
from . import HendrixTestCase, TEST_SETTINGS
from hendrix.contrib import SettingsError
from hendrix.options import options as hx_options
from hendrix import ux
from mock import patch
class TestMain(HendrixTestCase):
def setUp(self):
super(TestMain, self).setUp()
self.DEFAULTS = hx_options()
os.environ['DJANGO_SETTINGS_MODULE'] = ''
self.devnull = open(os.devnull, 'w')
self.args_list = ['hx', 'start']
self.patcher = patch('hendrix.ux.findSettingsModule')
self.patcher.start()
def tearDown(self):
super(TestMain, self).tearDown()
self.devnull.close()
self.patcher.stop()
def test_settings_from_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
self.assertEqual(options['settings'], '')
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], django_settings)
def test_settings_wsgi_absense(self):
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = ""
self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS)
def test_user_settings_overrides_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
user_settings = 'myproject.settings'
options['settings'] = user_settings
self.assertEqual(options['settings'], user_settings)
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], user_settings)
def test_wsgi_correct_wsgi_path_works(self):
wsgi_dot_path = 'hendrix.test.wsgi'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
options = ux.djangoVsWsgi(options)
self.assertEqual(options['wsgi'], wsgi_dot_path)
def test_wsgi_wrong_path_raises(self):
wsgi_dot_path = '_this.leads.nowhere.man'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
self.assertRaises(ImportError, ux.djangoVsWsgi, options)
def test_cwd_exposure(self):
cwd = os.getcwd()
_path = sys.path
sys.path = [p for p in _path if p != cwd]
self.assertTrue(cwd not in sys.path)
ux.exposeProject(self.DEFAULTS)
self.assertTrue(cwd in sys.path)
def test_pythonpath(self):
options = self.DEFAULTS
test_path = os.path.join(
os.path.dirname(os.getcwd()),
'hendrix/test/testproject'
)
options['pythonpath'] = test_path
ux.exposeProject(options)
self.assertTrue(test_path in sys.path)
sys.path = [p for p in sys.path if p != test_path]
def test_shitty_pythonpath(self):
options = self.DEFAULTS
test_path = '/if/u/have/this/path/you/suck'
options['pythonpath'] = test_path
self.assertRaises(IOError, ux.exposeProject, options)
def test_dev_friendly_options(self):
options = self.DEFAULTS
options['dev'] = True
self.assertFalse(options['reload'])
self.assertFalse(options['loud'])
options = ux.devFriendly(options)
self.assertTrue(options['reload'])
self.assertTrue(options['loud'])
def test_noise_control_daemonize(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_noise_control_traceback(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
options['traceback'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_main_with_daemonize(self):
sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS]
class Process(object):
def poll(self):
return 0
with patch('time.sleep'):
with patch('subprocess.Popen') as popen:
popen.return_value = Process()
ux.main()
self.assertTrue(popen.called)
self.assertTrue('--settings' in popen.call_args[0][0])
sys.argv = []
def test_options_structure(self):
"""
A test to ensure that HendrixDeploy.options also has the complete set
of options available
"""
deploy = self.wsgiDeploy()
expected_keys = self.DEFAULTS.keys()
actual_keys = deploy.options.keys()
self.assertListEqual(expected_keys, actual_keys)
86cc747c2e5f0caead634114a98e5f4a747d16ea | 15,163 | py | Python | local/local_sign.py | EVAyo/chaoxing_auto_sign @ 7ae91a5e9aa4d15f57a5419ff3f5a455e151930a | ["MIT"] | stars: null | issues: null | forks: null
# -*- coding: utf8 -*-
import os
import re
import time
import json
import random
import asyncio
from typing import Optional, List, Dict
from aiohttp import ClientSession
from aiohttp.cookiejar import SimpleCookie
from lxml import etree
from bs4 import BeautifulSoup
from config import *
from message import server_chan_send
class AutoSign(object):
def __init__(self, username, password, schoolid=None, enc=None):
"""初始化就进行登录"""
self.headers = {
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36',
}
self.session = ClientSession(headers=self.headers)
self.username = username
self.password = password
self.schoolid = '' if schoolid is None else schoolid
self.enc = '' if enc is None else enc
async def check_login_status(self, status, text):
if status == 403:
return 1002
data = json.loads(text)
if data['result']:
            return 1000  # login succeeded
        else:
            return 1001  # login credentials are incorrect
async def set_cookies(self):
"""设置cookies"""
cookie = await self.check_cookies()
if not cookie:
# 无效则重新登录,并保存cookies
status, text, cookie = await self.login()
login_status = await self.check_login_status(status, text)
if login_status == 1000:
cookies = self.dict_from_simple_cookie(cookie)
self.save_cookies(cookies)
else:
return 1001
else:
self.session.cookie_jar.update_cookies(cookie)
return 1000
def dict_from_simple_cookie(self, cookies) -> dict:
"""
        Extract cookies from the response object into a plain dict.
"""
result = {}
for key, value in cookies.items():
result[key] = value.value
return result
def save_cookies(self, cookies: dict):
"""保存cookies"""
with open(COOKIES_FILE_PATH, "r") as f:
data = json.load(f)
data[self.username] = cookies
with open(COOKIES_FILE_PATH, 'w') as f2:
json.dump(data, f2)
    async def check_cookies(self) -> Optional[dict]:
        """Check for cookies stored in the JSON file: validate them if
        present, otherwise return a falsy value so the caller logs in."""
if "cookies.json" not in os.listdir(COOKIES_PATH):
with open(COOKIES_FILE_PATH, 'w+') as f:
f.write("{}")
with open(COOKIES_FILE_PATH, 'r') as f:
            # If the JSON file holds no cookies for this account, return False right away.
try:
data = json.load(f)
cookies = data[self.username]
except Exception:
return False
        # Check whether the cookies are still valid.
async with self.session.request(method='GET',
url='http://mooc1-1.chaoxing.com/api/workTestPendingNew',
allow_redirects=False,
cookies=cookies) as resp:
            if resp.status != 200:
                print("cookies have expired")
                return None
            else:
                print("cookies are still valid!")
                return cookies
async def login(self):
"""
        Log in and return the response.
"""
params = {
'name': self.username,
'pwd': self.password,
'schoolid': self.schoolid,
'verify': 0
}
async with self.session.request(method='GET',
url='https://passport2.chaoxing.com/api/login',
params=params) as resp:
status = resp.status
text = await resp.text()
cookies = resp.cookies
return status, text, cookies
def check_activeid(self, activeid):
"""检测activeid是否存在,不存在则添加"""
activeid += self.username
if "activeid.json" not in os.listdir(ACTIVEID_PATH):
with open(ACTIVEID_FILE_PATH, 'w+') as f:
f.write("{}")
with open(ACTIVEID_FILE_PATH, 'r') as f:
            try:
                # Read the file.
                data = json.load(f)
                if data[activeid]:
                    return True
            except BaseException:
                # Any error here means this activeid is not present.
                return False
def save_activeid(self, activeid):
"""保存已成功签到的activeid"""
activeid += self.username
if "activeid.json" not in os.listdir(ACTIVEID_PATH):
with open(ACTIVEID_FILE_PATH, 'w+') as f:
f.write("{}")
with open(ACTIVEID_FILE_PATH, 'r') as f:
data = json.load(f)
with open(ACTIVEID_FILE_PATH, 'w') as f2:
data[activeid] = True
json.dump(data, f2)
async def get_all_classid(self) -> list:
"""获取课程主页中所有课程的classid和courseid"""
res = []
async with self.session.request(method='GET',
url='http://mooc1-2.chaoxing.com/visit/interaction') as resp:
text = await resp.text()
soup = BeautifulSoup(text, "lxml")
course_list = soup.find_all(
'li', class_="course")
for course in course_list:
res.append((course.attrs['courseid'], course.attrs['clazzid'],
course.find_next('span', class_="course-name").text))
        print('Course list: ', res)
return res
async def get_sign_type(self, classid, courseid, activeid):
"""获取签到类型"""
params = {
'activeId': activeid,
'classId': classid,
'courseId': courseid
}
async with self.session.request(method='GET',
url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign',
params=params) as resp:
text = await resp.text()
h = etree.HTML(text)
sign_type = h.xpath('//div[@class="location"]/span/text()')
return sign_type
async def get_activeid(self, classid, courseid, classname):
"""访问任务面板获取课程的活动id"""
res = []
re_rule = r'([\d]+),2'
params = {
'courseId': courseid,
'jclassId': classid
}
async with self.session.request(method='GET',
url="https://mobilelearn.chaoxing.com/widget/pcpick/stu/index",
verify_ssl=False,
params=params) as resp:
text = await resp.text()
h = etree.HTML(text)
activeid_list = h.xpath('//*[@id="startList"]/div/div/@onclick')
for activeid in activeid_list:
activeid = re.findall(re_rule, activeid)
if not activeid:
continue
sign_type = await self.get_sign_type(classid, courseid, activeid[0])
res.append((activeid[0], sign_type[0]))
n = len(res)
if n:
d = {'num': n, 'class': {}}
for i in range(n):
if not self.check_activeid(res[i][0]):
d['class'][i] = {
'classid': classid,
'courseid': courseid,
'activeid': res[i][0],
'classname': classname,
'sign_type': res[i][1]
}
return d
async def general_sign(self, classid, courseid, activeid):
"""普通签到"""
params = {
'activeId': activeid,
'classId': classid,
'fid': '39037',
'courseId': courseid
}
async with self.session.request(
method='GET',
url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign",
params=params,
verify_ssl=False
) as resp:
text = await resp.text()
            title = re.findall('<title>(.*)</title>', text)[0]
            if "签到成功" not in title:
                # The page title doesn't report success, so fall back to photo sign-in.
                return await self.tphoto_sign(activeid)
else:
s = {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': title
}
return s
async def hand_sign(self, classid, courseid, activeid):
"""手势签到"""
params = {
'courseId': courseid,
'classId': classid,
'activeId': activeid
}
async with self.session.request(
method='GET',
url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn",
params=params,
verify_ssl=False
) as resp:
text = await resp.text()
title = re.findall('<title>(.*)</title>', text)
s = {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': title
}
return s
async def qcode_sign(self, activeid):
"""二维码签到"""
params = {
'enc': self.enc,
'name': '',
'activeId': activeid,
'uid': '',
'clientip': '',
'useragent': '',
'latitude': '-1',
'longitude': '-1',
'fid': '',
'appType': '15'
}
async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax',
params=params,
allow_redirects=False) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
async def addr_sign(self, activeid):
"""位置签到"""
params = {
'name': '',
'activeId': activeid,
'address': '中国',
'uid': '',
'clientip': clientip,
'latitude': latitude,
'longitude': longitude,
'fid': '',
'appType': '15',
'ifTiJiao': '1'
}
async with self.session.request(
method="GET",
url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax",
params=params
) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
    async def tphoto_sign(self, activeid):
        """Photo sign-in."""
        objectId = await self.upload_img()
params = {
'name': '',
'activeId': activeid,
'address': '中国',
'uid': '',
'clientip': clientip,
'latitude': latitude,
'longitude': longitude,
'fid': '',
'appType': '15',
'ifTiJiao': '1',
'objectId': objectId
}
async with self.session.request(
method="GET",
url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax",
params=params
) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
async def get_token(self):
"""获取上传文件所需参数token"""
url = 'https://pan-yz.chaoxing.com/api/token/uservalid'
async with self.session.request(
method='GET',
url=url
) as resp:
text = await resp.text()
token_dict = json.loads(text)
return token_dict['_token']
    async def upload_img(self):
        """Upload an image and return its objectId."""
        # Pick a random image from the image folder.
        try:
            all_img = os.listdir(IMAGE_PATH)
        except Exception:
            os.mkdir(IMAGE_PATH)
            all_img = []
if len(all_img) == 0:
return "a5d588f7bce1994323c348982332e470"
else:
img = IMAGE_PATH + random.choice(all_img)
# uid = self.session.cookies.get_dict()['UID']
url = 'https://pan-yz.chaoxing.com/upload'
files = {'file': open(img, 'rb')}
uid = self.session.cookie_jar.filter_cookies('').get('UID').value
token = await self.get_token()
param = {
'puid': uid,
'_token': token
}
async with self.session.request(
method='POST',
url=url,
params=param,
data=files
) as resp:
text = await resp.text()
res_dict = json.loads(text)
return res_dict['objectId']
async def send_sign_request(self, classid, courseid, activeid, sign_type):
"""发送签到请求"""
if "手势" in sign_type:
return await self.hand_sign(classid, courseid, activeid)
elif "二维码" in sign_type:
return await self.qcode_sign(activeid)
elif "位置" in sign_type:
return await self.addr_sign(activeid)
elif "拍照" in sign_type:
return await self.tphoto_sign(activeid)
else:
return await self.general_sign(classid, courseid, activeid)
async def send_sign_result(self, results: List[Dict]):
"""
        Send the sign-in results.
"""
await server_chan_send(results, self.session)
async def start_sign_task(self):
"""开始所有签到任务"""
tasks = []
res = []
await self.set_cookies()
        # Get the classid and course_id of every course.
classid_courseId = await self.get_all_classid()
        # Get every course's activeid and sign-in type.
for i in classid_courseId:
coroutine = self.get_activeid(i[1], i[0], i[2])
tasks.append(coroutine)
results: List[Dict] = await asyncio.gather(*tasks)
for r in results:
if r is None:
continue
for d in r['class'].values():
resp = await self.send_sign_request(
d['classid'],
d['courseid'],
d['activeid'],
d['sign_type']
)
if resp:
                    # course name, sign-in time, sign-in status
sign_msg = {
'name': d['classname'],
'date': resp['date'],
'status': resp['status']
}
res.append(sign_msg)
if '失败' in resp['status']:
continue
                    # After a successful sign-in, record the activeid.
self.save_activeid(d['activeid'])
return res
async def close_session(self):
await self.session.close()
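# Example usage (a sketch; the credentials are placeholders):
#
#   async def run():
#       signer = AutoSign(username='13800000000', password='secret')
#       try:
#           results = await signer.start_sign_task()
#           await signer.send_sign_result(results)
#       finally:
#           await signer.close_session()
#
#   asyncio.run(run())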
86ccfd65a1bb34c39113feed67502cda22587b34 | 4,240 | py | Python | build/scripts-3.6/fit_background_model.py | stahlberggroup/umierrorcorrect @ 8ceabe30a87811dad467d04eb5a08d0213065946 | ["MIT"]
stars: null | issues: null | forks: 1 (2022-01-12T13:51:59.000Z .. 2022-01-12T13:51:59.000Z)
#!python
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import glob
import sys
def parseArgs():
'''Function for parsing arguments'''
parser = argparse.ArgumentParser(description="Pipeline for analyzing barcoded amplicon \
sequencing data with Unique molecular \
identifiers (UMI)")
parser.add_argument('-cons', '--cons_file', dest='cons_file', help='Path to cons file, for fitting parameters of the bgmodel')
parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',
help='Path to file with non-background positions')
parser.add_argument('-out', '--out_file',dest='out_file',help="name of output file, default = %(default)s]",default="bgmodel.params")
parser.add_argument('-f','--fsize',dest='fsize', help='Family size cutoff (consensus cutoff) for variant calling. [default = %(default)s]', default=3)
args = parser.parse_args(sys.argv[1:])
return(args)
def parse_cons_file(filename,fsize=3):
n1=[]
f1=[]
c1=[]
posx=[]
data=[]
with open(filename) as f:
for line in f:
if not line.startswith('Sample Name'):
line=line.rstrip('\n')
parts=line.split('\t')
pos=parts[1]+':'+parts[2]
name=parts[3]
#print(name)
if name not in "":
famsize=parts[-4]
if int(famsize)==fsize:
frac=float(parts[-2])
alt=parts[-1]
count=parts[-3]
if frac > 0 and alt not in 'N':
cov=int(parts[-5])
f1.append(float(frac))
n1.append(int(cov))
c1.append(int(count))
posx.append(pos)
data.append(line)
#print(name)
#print(famsize)
return(f1,n1,c1,posx,data)
def betaNLL(params, *args):
    """Negative log-likelihood of a beta distribution for the given data."""
    a, b = params
    data = np.array(args[0])
    pdf = beta.pdf(data, a, b, loc=0, scale=1)
    lg = np.log(pdf)
    # Ignore non-finite log-densities (pdf == 0) so a single zero doesn't
    # drive the whole likelihood to -inf.
    mask = np.isfinite(lg)
    nll = -lg[mask].sum()
    return nll
def get_beta_parameters(data):
    # Method-of-moments estimates of (a, b) seed the likelihood optimization.
    m = np.mean(data)
    v = np.var(data)
    a0 = m * (m * (1 - m) / v - 1)
    b0 = (1 - m) * (m * (1 - m) / v - 1)
    result = fmin(betaNLL, [a0, b0], args=(data,))
    return result
def run_fit_bgmodel(args):
spikepositions=[178952085,55599321,7577558,7577547,7577538,7577120]
if args.nonbgposfile:
nonbgpos=[]
with open(args.nonbgposfile) as f:
for line in f:
line=line.rstrip()
nonbgpos.append(line)
else:
nonbgpos=spikepositions
    if not args.cons_file:
        # No cons file given: fall back to one in the working directory.
        args.cons_file = glob.glob('*cons.tsv')[0]
args.fsize=int(args.fsize)
f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize)
f1 = np.array(f1)
n1 = np.array(n1)
a1 = np.array(a1)
pos = np.array(pos)
data = np.array(data)
result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True])
#a=prob_bb(n1,a1,result[0],result[1])
print(pos,nonbgpos,np.isin(pos,nonbgpos))
with open(args.out_file,'w') as g:
g.write('{}\n'.format(result[0]))
g.write('{}\n'.format(result[1]))
#a[a==inf]=1e-10
#a[np.isnan(a)]=1e-10
#Q = -10*np.log10(a)
#data=np.array(data)
#plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')
#if args.vc_method.lower()=='bbmodel':
# rout=data[Q >= float(args.qvalue_threshold)]
# Qsig=Q[Q >= float(args.qvalue_threshold)]
#else:
# rout=data[a1 >= float(args.count_cutoff)]
# Qsig=Q[a1 >= float(args.count_cutoff)]
#outfilename=args.output_path+'/'+args.sample_name+'2.vcf'
#write_vcf(outfilename,rout,Qsig,args.reference_file)
if __name__=='__main__':
args=parseArgs()
run_fit_bgmodel(args)
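# Example invocation (a sketch; file names are placeholders):
#
#   python fit_background_model.py -cons sample.cons.tsv \
#       -nonbgposfile nonbg_positions.txt -out bgmodel.params -f 3
#
# This fits a beta distribution to the background allele fractions (excluding
# the non-background positions) and writes the two fitted shape parameters,
# one per line, to bgmodel.params.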
86cdf766574c9c743ff631f5d4070feb9f763d2a | 7,654 | py | Python | caffe2/python/operator_test/partition_ops_test.py | ["Apache-2.0"]
stars: 585 (2015-08-10T02:48:52.000Z .. 2021-12-01T08:46:59.000Z) via KevinKecc/caffe2 @ a2b6c6e2f0686358a84277df65e9489fb7d9ddb2
issues: 27 (2018-04-14T06:44:22.000Z .. 2018-08-01T18:02:39.000Z) | forks: 183 (2015-08-10T02:49:04.000Z .. 2021-12-01T08:47:13.000Z) via mingzhe09088/caffe2 @ 8f41717c46d214aaf62b53e5b3b9b308b5b8db91
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array
class TestPartitionOps(TestCase):
def test_configs(self):
# (main dims, partitions, main type, [list of (extra dims, type)])
configs = [
((10, ), 3),
((4, ), 10),
((10, 10), 4),
((100, ), 2),
((5, ), 1),
((1, ), 1),
((2, 10), 2),
]
suffixes = [
[],
[((2, 2), np.float32)],
[((3, ), np.int64), ((2, ), np.float32)],
]
return [
(main_dims, parts, main_type, extra, pack)
for main_dims, parts in configs
for main_type in [np.int32, np.int64] for extra in suffixes
for pack in [False, True]
]
def testPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(1 + len(extra_ins))
]
op = core.CreateOperator(
'Partition', ins, outs, pack_first_input=(1 if pack else 0))
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i], d)
x.append(d)
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
print(x)
print(ref)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
# test inverse operation (GatherByKey)
if len(main_dims) == 1:
# currently only 1D key tensor supported
for i in range(len(extra_ins)):
expected_out = ins[i + 1]
gather_ins = [ins[0]] + [
outs[len(ins) * p + i + 1] for p in range(parts)]
actual_out = expected_out + '_actual'
op = core.CreateOperator(
'GatherByKey', gather_ins, actual_out)
workspace.RunOperatorOnce(op)
expected = workspace.FetchBlob(expected_out)
actual = workspace.FetchBlob(actual_out)
np.testing.assert_array_equal(expected, actual)
def testLengthsPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
# For LengthsSharding, only 1-D tensors are supported as the first input
if len(main_dims) > 1:
continue
ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(2 + len(extra_ins))
]
op = core.CreateOperator(
'LengthsPartition', ins, outs,
pack_first_input=(1 if pack else 0)
)
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i + 1], d)
x.append(d)
# Randomly generate length tensor as well
elements = np.random.randint(2, 10)
lengths = []
total_length = 0
for _ in range(elements - 1):
lengths.append(np.random.randint(main_dims[0] - total_length))
total_length += lengths[-1]
lengths.append(main_dims[0] - total_length)
workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
if __name__ == "__main__":
import unittest
unittest.main()
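# Aside (illustrative, not part of the original test): the sharding reference
# implementations above rely on numpy's modulo yielding non-negative results
# for a positive divisor, so negative keys still map to a valid shard index:
#
#   >>> import numpy as np
#   >>> np.array([-3, -1, 0, 2, 5]) % 3
#   array([0, 2, 0, 2, 2])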
| 38.852792
| 80
| 0.468774
| 831
| 7,654
| 4.200963
| 0.237064
| 0.029791
| 0.012031
| 0.022057
| 0.492409
| 0.47293
| 0.437124
| 0.437124
| 0.425666
| 0.425666
| 0
| 0.024627
| 0.42174
| 7,654
| 196
| 81
| 39.05102
| 0.764121
| 0.121505
| 0
| 0.486842
| 0
| 0
| 0.010717
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 1
| 0.046053
| false
| 0
| 0.052632
| 0
| 0.151316
| 0.019737
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ce2b47e96edc2e4a65e6684b182564c236c3d3
| 11,195
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Cisco_IOS_XR_fib_common_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR fib\-common package configuration.
This module contains definitions
for the following management objects\:
fib\: CEF configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class FibPbtsFallback(Enum):
"""
FibPbtsFallback (Enum Class)
Fib pbts fallback
.. data:: list = 1
Fallback to class number list
.. data:: any = 2
Fallback to any class
.. data:: drop = 3
Fallback to drop
"""
list = Enum.YLeaf(1, "list")
any = Enum.YLeaf(2, "any")
drop = Enum.YLeaf(3, "drop")
class FibPbtsForwardClass(Enum):
"""
FibPbtsForwardClass (Enum Class)
Fib pbts forward class
.. data:: any = 8
Any class
"""
any = Enum.YLeaf(8, "any")
class Fib(Entity):
"""
CEF configuration
.. attribute:: pbts_forward_class_fallbacks
PBTS class configuration
**type**\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>`
.. attribute:: platform
FIB platform parameters
**type**\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>`
.. attribute:: auto_hash_recover
Set option for automatically recovering consistent\-hashing state on interface up
**type**\: bool
.. attribute:: prefer_aib_routes
Set options for adjacency routes overriding RIB routes
**type**\: bool
.. attribute:: encap_sharing_disable
Set true to disable encapsulation sharing
**type**\: bool
.. attribute:: frr_follow_bgp_pic
Set option for fast\-reroute to follow BGP PIC update, not to wait for timeout
**type**\: bool
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib, self).__init__()
self._top_entity = None
self.yang_name = "fib"
self.yang_parent_name = "Cisco-IOS-XR-fib-common-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("pbts-forward-class-fallbacks", ("pbts_forward_class_fallbacks", Fib.PbtsForwardClassFallbacks)), ("platform", ("platform", Fib.Platform))])
self._leafs = OrderedDict([
('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])),
('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])),
('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])),
('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])),
])
self.auto_hash_recover = None
self.prefer_aib_routes = None
self.encap_sharing_disable = None
self.frr_follow_bgp_pic = None
self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks()
self.pbts_forward_class_fallbacks.parent = self
self._children_name_map["pbts_forward_class_fallbacks"] = "pbts-forward-class-fallbacks"
self.platform = Fib.Platform()
self.platform.parent = self
self._children_name_map["platform"] = "platform"
self._segment_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value)
class PbtsForwardClassFallbacks(Entity):
"""
PBTS class configuration
.. attribute:: pbts_forward_class_fallback
Set PBTS class for fallback
**type**\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>`
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.PbtsForwardClassFallbacks, self).__init__()
self.yang_name = "pbts-forward-class-fallbacks"
self.yang_parent_name = "fib"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("pbts-forward-class-fallback", ("pbts_forward_class_fallback", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))])
self._leafs = OrderedDict()
self.pbts_forward_class_fallback = YList(self)
self._segment_path = lambda: "pbts-forward-class-fallbacks"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value)
class PbtsForwardClassFallback(Entity):
"""
Set PBTS class for fallback
.. attribute:: forward_class_number (key)
PBTS forward class number
**type**\: union of the below types:
**type**\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>`
**type**\: int
**range:** 0..8
.. attribute:: fallback_type
Set PBTS fallback type
**type**\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>`
**mandatory**\: True
.. attribute:: fallback_class_number_array
Set PBTS fallback class number array
**type**\: list of int
**range:** 0..7
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__()
self.yang_name = "pbts-forward-class-fallback"
self.yang_parent_name = "pbts-forward-class-fallbacks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['forward_class_number']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])),
('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])),
('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])),
])
self.forward_class_number = None
self.fallback_type = None
self.fallback_class_number_array = []
self._segment_path = lambda: "pbts-forward-class-fallback" + "[forward-class-number='" + str(self.forward_class_number) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value)
class Platform(Entity):
"""
FIB platform parameters
.. attribute:: label_switched_multicast
Options for label\-switched\-multicast parameters
**type**\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>`
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.Platform, self).__init__()
self.yang_name = "platform"
self.yang_parent_name = "fib"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("label-switched-multicast", ("label_switched_multicast", Fib.Platform.LabelSwitchedMulticast))])
self._leafs = OrderedDict()
self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast()
self.label_switched_multicast.parent = self
self._children_name_map["label_switched_multicast"] = "label-switched-multicast"
self._segment_path = lambda: "platform"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.Platform, [], name, value)
class LabelSwitchedMulticast(Entity):
"""
Options for label\-switched\-multicast parameters
.. attribute:: frr_holdtime
Set time to keep FRR slots programmed post FRR
**type**\: int
**range:** 3..180
**units**\: second
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.Platform.LabelSwitchedMulticast, self).__init__()
self.yang_name = "label-switched-multicast"
self.yang_parent_name = "platform"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])),
])
self.frr_holdtime = None
self._segment_path = lambda: "label-switched-multicast"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/platform/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value)
def clone_ptr(self):
self._top_entity = Fib()
return self._top_entity
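# Usage sketch (illustrative only; not part of the generated bindings). It
# assumes a working ydk runtime; pushing the config would additionally need a
# configured service provider:
#
#   fib = Fib()
#   fib.frr_follow_bgp_pic = True
#   fallback = Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback()
#   fallback.forward_class_number = 1
#   fallback.fallback_type = FibPbtsFallback.drop
#   fib.pbts_forward_class_fallbacks.pbts_forward_class_fallback.append(fallback)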
| 34.875389
| 184
| 0.609915
| 1,190
| 11,195
| 5.430252
| 0.152941
| 0.050139
| 0.03714
| 0.032188
| 0.467812
| 0.427731
| 0.365831
| 0.325751
| 0.300371
| 0.294955
| 0
| 0.00869
| 0.280482
| 11,195
| 320
| 185
| 34.984375
| 0.793544
| 0.25958
| 0
| 0.40625
| 0
| 0.007813
| 0.201806
| 0.125996
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085938
| false
| 0
| 0.039063
| 0
| 0.234375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ce2bcecdfa6edd6bc5db700d444829470b263a
| 2,888
|
py
|
Python
|
action/combo.py
|
dl-stuff/dl9
|
1cbe98afc53a1de9d413797fb130946acc4b6ba4
|
[
"MIT"
] | null | null | null |
action/combo.py
|
dl-stuff/dl9
|
1cbe98afc53a1de9d413797fb130946acc4b6ba4
|
[
"MIT"
] | null | null | null |
action/combo.py
|
dl-stuff/dl9
|
1cbe98afc53a1de9d413797fb130946acc4b6ba4
|
[
"MIT"
] | null | null | null |
"""Series of actions that form a combo chain"""
from __future__ import annotations
from typing import Optional, Sequence, TYPE_CHECKING
from action import Action
from core.utility import Array
from core.constants import PlayerForm, SimActKind, MomentType
from core.database import FromDB
if TYPE_CHECKING:
from entity.player import Player
class Combos:
def __init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) -> None:
self.player = player
self.actions: Array[Action] = Array()
for idx, act_id in enumerate(act_ids):
self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))
self.ex_actions = None
if ex_act_ids:
self.ex_actions: Array[Action] = Array()
for idx, act_id in enumerate(ex_act_ids):
if not act_id:
self.ex_actions.append(None)
continue
self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))
def next(self):
if self.player.current in self.actions:
try:
return self.actions[self.player.current.index + 1]
except IndexError:
pass
return self.actions[1]
def __repr__(self) -> str:
if self.ex_actions:
return "->".join(map(repr, self.actions)) + "\tEX[" + "->".join(map(repr, self.ex_actions)) + "]"
return "->".join(map(repr, self.actions))
class UniqueCombos(Combos, FromDB, table="CharaUniqueCombo"):
def __init__(self, id: int, player: Player) -> None:
FromDB.__init__(self, id)
act_ids = (self._data["_ActionId"] + i for i in range(self._data["_MaxComboNum"]))
ex_act_ids = None if not self._data["_ExActionId"] else (self._data["_ExActionId"] + i for i in range(self._data["_MaxComboNum"]))
Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)
if self._data["_ShiftConditionType"] == 1:
self.player.events.listen(MomentType.HIT, self.enable)
def enable(self, *args, **kwargs):
pass
class DefaultCombos(Combos, FromDB, table="WeaponType"):
def __init__(self, id: int, player: Player) -> None:
FromDB.__init__(self, id)
act_ids = (self._data[f"_DefaultSkill{i+1:02}"] for i in range(5) if self._data[f"_DefaultSkill{i+1:02}"])
ex_act_ids = None if not self._data["_DefaultSkill05Ex"] else (0, 0, 0, 0, self._data["_DefaultSkill05Ex"])
Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)
class DragonCombos(Combos):
def __init__(self, id: int, combo_max: int, player: Player) -> None:
act_ids = (id + i for i in range(combo_max))
Combos.__init__(self, player, PlayerForm.DRG, act_ids)
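# Usage sketch (illustrative; `player` stands for a configured
# entity.player.Player instance and the action id / combo length are made up):
#
#   combos = DragonCombos(10_000_000, 3, player)
#   nxt = combos.next()  # first combo action, or the follow-up mid-chain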
| 42.470588
| 138
| 0.649584
| 390
| 2,888
| 4.541026
| 0.235897
| 0.057595
| 0.040655
| 0.024845
| 0.42349
| 0.39074
| 0.39074
| 0.367024
| 0.303783
| 0.257482
| 0
| 0.008965
| 0.227493
| 2,888
| 67
| 139
| 43.104478
| 0.78485
| 0.014197
| 0
| 0.150943
| 0
| 0
| 0.066174
| 0.014784
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0.037736
| 0.132075
| 0
| 0.415094
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ce9b178e942f833e8db993afdcf0aface18b4a
| 3,845
|
py
|
Python
|
sktime/forecasting/base/adapters/_statsmodels.py
|
tombh/sktime
|
53df0b9ed9d1fd800539165c414cc5611bcc56b3
|
[
"BSD-3-Clause"
] | 1
|
2020-06-02T22:24:44.000Z
|
2020-06-02T22:24:44.000Z
|
sktime/forecasting/base/adapters/_statsmodels.py
|
abhishek-parashar/sktime
|
1dfce6b41c2acdb576acfc04b09d11bf115c92d1
|
[
"BSD-3-Clause"
] | 1
|
2020-11-20T13:51:20.000Z
|
2020-11-20T13:51:20.000Z
|
sktime/forecasting/base/adapters/_statsmodels.py
|
abhishek-parashar/sktime
|
1dfce6b41c2acdb576acfc04b09d11bf115c92d1
|
[
"BSD-3-Clause"
] | 3
|
2020-10-18T04:54:30.000Z
|
2021-02-15T18:04:18.000Z
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["Markus Löning"]
__all__ = ["_StatsModelsAdapter"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
"""Base class for interfacing statsmodels forecasting algorithms"""
_fitted_param_names = ()
def __init__(self):
self._forecaster = None
self._fitted_forecaster = None
super(_StatsModelsAdapter, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
self : returns an instance of self.
"""
# statsmodels does not support the pd.Int64Index as required,
# so we coerce them here to pd.RangeIndex
if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:
y, X = _coerce_int_to_range_index(y, X)
self._set_y_X(y, X)
self._set_fh(fh)
self._fit_forecaster(y, X)
self._is_fitted = True
return self
def _fit_forecaster(self, y_train, X_train=None):
"""Internal fit"""
raise NotImplementedError("abstract method")
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""
Make forecasts.
Parameters
----------
fh : ForecastingHorizon
The forecaster's horizon with the steps ahead to predict.
Default is one-step ahead forecast,
i.e. np.array([1])
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
return_pred_int : bool, optional (default=False)
alpha : int or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Returns series of predicted values.
"""
if return_pred_int:
raise NotImplementedError()
# statsmodels requires zero-based indexing starting at the
# beginning of the training series when passing integers
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
y_pred = self._fitted_forecaster.predict(start, end)
# statsmodels forecasts all periods from start to end of forecasting
# horizon, but only return given time points in forecasting horizon
return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
def get_fitted_params(self):
"""Get fitted parameters
Returns
-------
fitted_params : dict
"""
self.check_is_fitted()
return {
name: self._fitted_forecaster.params.get(name)
for name in self._get_fitted_param_names()
}
def _get_fitted_param_names(self):
"""Get names of fitted parameters"""
return self._fitted_param_names
def _coerce_int_to_range_index(y, X=None):
new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
try:
np.testing.assert_array_equal(y.index, new_index)
except AssertionError:
raise ValueError(
"Coercion of pd.Int64Index to pd.RangeIndex "
"failed. Please provide `y_train` with a "
"pd.RangeIndex."
)
y.index = new_index
if X is not None:
X.index = new_index
return y, X
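if __name__ == "__main__":
    # Minimal sketch (not part of the original module): exercise the index
    # coercion helper. Assumes a pandas version that still exposes Int64Index.
    y = pd.Series([1.0, 2.0, 3.0], index=pd.Int64Index([0, 1, 2]))
    y, _ = _coerce_int_to_range_index(y)
    assert isinstance(y.index, pd.RangeIndex)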
| 32.310924
| 79
| 0.628349
| 468
| 3,845
| 4.950855
| 0.333333
| 0.006905
| 0.027622
| 0.032369
| 0.145015
| 0.145015
| 0.113077
| 0.093224
| 0.093224
| 0.093224
| 0
| 0.006491
| 0.278804
| 3,845
| 118
| 80
| 32.584746
| 0.829066
| 0.352666
| 0
| 0
| 0
| 0
| 0.065723
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 1
| 0.137255
| false
| 0
| 0.098039
| 0
| 0.372549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86cfbb57e1ec13e6ae0711449af6c95612ae3139
| 2,268
|
py
|
Python
|
jupytext/kernels.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 5,378
|
2018-09-01T22:03:43.000Z
|
2022-03-31T06:51:42.000Z
|
jupytext/kernels.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 812
|
2018-08-31T08:26:13.000Z
|
2022-03-30T18:12:11.000Z
|
jupytext/kernels.py
|
st--/jupytext
|
f8e8352859cc22e17b11154d0770fd946c4a430a
|
[
"MIT"
] | 380
|
2018-09-02T01:40:07.000Z
|
2022-03-25T13:57:23.000Z
|
"""Find kernel specifications for a given language"""
import os
import sys
from .languages import same_language
from .reraise import reraise
try:
# I prefer not to take a dependency on jupyter_client
from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec
except ImportError as err:
find_kernel_specs = reraise(err)
get_kernel_spec = reraise(err)
def set_kernelspec_from_language(notebook):
"""Set the kernel specification based on the 'main_language' metadata"""
language = notebook.metadata.get("jupytext", {}).get("main_language")
if "kernelspec" not in notebook.metadata and language:
try:
kernelspec = kernelspec_from_language(language)
except ValueError:
return
notebook.metadata["kernelspec"] = kernelspec
notebook.metadata.get("jupytext", {}).pop("main_language")
def kernelspec_from_language(language):
"""Return the python kernel that matches the current env, or the first kernel that matches the given language"""
if language == "python":
# Return the kernel that matches the current Python executable
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
cmd = kernel_specs.argv[0]
if (
kernel_specs.language == "python"
and os.path.isfile(cmd)
and os.path.samefile(cmd, sys.executable)
):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError(
"No kernel found that matches the current python executable {}\n".format(
sys.executable
)
+ "Install one with 'python -m ipykernel install --name kernel_name [--user]'"
)
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
if same_language(kernel_specs.language, language):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError("No kernel found for the language {}".format(language))
| 36
| 116
| 0.622575
| 254
| 2,268
| 5.393701
| 0.291339
| 0.088321
| 0.043796
| 0.043796
| 0.30438
| 0.262774
| 0.208759
| 0.208759
| 0.208759
| 0.208759
| 0
| 0.000624
| 0.29321
| 2,268
| 62
| 117
| 36.580645
| 0.854024
| 0.147707
| 0
| 0.291667
| 0
| 0
| 0.153445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d025f02ce51457ef476e760c051f7660045f69
| 5,333
|
py
|
Python
|
scipy/sparse/_matrix_io.py
|
dhruv9vats/scipy
|
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
|
[
"BSD-3-Clause"
] | 1
|
2021-08-16T09:32:42.000Z
|
2021-08-16T09:32:42.000Z
|
scipy/sparse/_matrix_io.py
|
dhruv9vats/scipy
|
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
|
[
"BSD-3-Clause"
] | 44
|
2019-06-27T15:56:14.000Z
|
2022-03-15T22:21:10.000Z
|
scipy/sparse/_matrix_io.py
|
dhruv9vats/scipy
|
48e1dd7e604df3ae57d104b407c5b7a2a6a3247d
|
[
"BSD-3-Clause"
] | 4
|
2020-06-13T10:32:25.000Z
|
2021-12-03T15:48:16.000Z
|
import numpy as np
import scipy.sparse
__all__ = ['save_npz', 'load_npz']
# Make loading safe vs. malicious input
PICKLE_KWARGS = dict(allow_pickle=False)
def save_npz(file, matrix, compressed=True):
""" Save a sparse matrix to a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already
there.
matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
The sparse matrix to save.
compressed : bool, optional
Allow compressing the file. Default: True
See Also
--------
scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
numpy.savez: Save several arrays into a ``.npz`` archive.
numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
arrays_dict = {}
if matrix.format in ('csc', 'csr', 'bsr'):
arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
elif matrix.format == 'dia':
arrays_dict.update(offsets=matrix.offsets)
elif matrix.format == 'coo':
arrays_dict.update(row=matrix.row, col=matrix.col)
else:
raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
arrays_dict.update(
format=matrix.format.encode('ascii'),
shape=matrix.shape,
data=matrix.data
)
if compressed:
np.savez_compressed(file, **arrays_dict)
else:
np.savez(file, **arrays_dict)
def load_npz(file):
""" Load a sparse matrix from a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be loaded.
Returns
-------
result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
A sparse matrix containing the loaded data.
Raises
------
OSError
If the input file does not exist or cannot be read.
See Also
--------
scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
numpy.load: Load several arrays from a ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
with np.load(file, **PICKLE_KWARGS) as loaded:
try:
matrix_format = loaded['format']
except KeyError as e:
raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e
matrix_format = matrix_format.item()
if not isinstance(matrix_format, str):
# Play safe with Python 2 vs 3 backward compatibility;
# files saved with SciPy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode('ascii')
try:
cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
except AttributeError as e:
raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e
if matrix_format in ('csc', 'csr', 'bsr'):
return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
elif matrix_format == 'dia':
return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
elif matrix_format == 'coo':
return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
else:
raise NotImplementedError('Load is not implemented for '
'sparse matrix of format {}.'.format(matrix_format))
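if __name__ == '__main__':
    # Round-trip sketch for the 'dia' branch (illustrative; the docstring
    # examples above only exercise the 'csc' path).
    m = scipy.sparse.random(4, 4, density=0.5, format='dia')
    save_npz('/tmp/dia_matrix.npz', m)
    m2 = load_npz('/tmp/dia_matrix.npz')
    np.testing.assert_array_equal(m.toarray(), m2.toarray())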
| 35.553333
| 114
| 0.615976
| 698
| 5,333
| 4.614613
| 0.204871
| 0.126669
| 0.039118
| 0.007451
| 0.524992
| 0.488047
| 0.465073
| 0.44955
| 0.44955
| 0.44955
| 0
| 0.017241
| 0.249578
| 5,333
| 149
| 115
| 35.791946
| 0.787606
| 0.549409
| 0
| 0.111111
| 0
| 0
| 0.145654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.044444
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d07b07d670dc9caa0bd92708721764a364d527
| 1,423
|
py
|
Python
|
src/simulator/services/resources/atlas.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 46
|
2020-12-25T04:09:15.000Z
|
2022-03-25T12:32:42.000Z
|
src/simulator/services/resources/atlas.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 36
|
2020-12-21T16:10:02.000Z
|
2022-01-03T01:42:01.000Z
|
src/simulator/services/resources/atlas.py
|
judicaelclair/PathBenchURO
|
101e67674efdfa8e27e1cf7787dac9fdf99552fe
|
[
"BSD-3-Clause"
] | 11
|
2021-01-06T23:34:12.000Z
|
2022-03-21T17:21:47.000Z
|
from typing import Dict, List
from simulator.services.resources.directory import Directory
from simulator.services.services import Services
class Atlas(Directory):
def __init__(self, services: Services, name: str, parent: str, create: bool = False) -> None:
super().__init__(services, name, parent, create)
if create:
metadata: Dict[str, any] = {
"next_index": 0,
}
self._save_metadata(metadata)
def append(self, obj: any) -> None:
self.save(str(self._get_next_index()), obj)
self._increment_index()
def load_all(self, max_els: float = float("inf")) -> List[any]:
ret: List[any] = []
idx: int = 0
while idx < max_els:
obj: any = self.load(str(idx))
if obj:
ret.append(obj)
idx += 1
else:
break
return ret
def _get_next_index(self) -> int:
metadata: Dict[str, any] = self._get_metadata()
return metadata["next_index"]
def _increment_index(self) -> None:
metadata: Dict[str, any] = self._get_metadata()
metadata["next_index"] += 1
self._save_metadata(metadata)
def _save_metadata(self, metadata: Dict[str, any]) -> None:
super().save("metadata", metadata)
def _get_metadata(self) -> Dict[str, any]:
return super().load("metadata")
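# Usage sketch (illustrative; `services` stands for a configured
# simulator.services.services.Services instance):
#
#   atlas = Atlas(services, "training_data", "resources", create=True)
#   atlas.append({"grid": [[0, 1], [1, 0]]})
#   samples = atlas.load_all(max_els=10)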
| 29.040816
| 97
| 0.579761
| 169
| 1,423
| 4.668639
| 0.278107
| 0.04436
| 0.063371
| 0.091255
| 0.152091
| 0.08365
| 0.08365
| 0
| 0
| 0
| 0
| 0.004
| 0.297259
| 1,423
| 48
| 98
| 29.645833
| 0.785
| 0
| 0
| 0.111111
| 0
| 0
| 0.034434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.194444
| false
| 0
| 0.083333
| 0.027778
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d18fa6bf233db205e6db3a19952144dd79aa36
| 1,427
|
py
|
Python
|
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py
|
ulixius9/OpenMetadata
|
f121698d968717f0932f685ef2a512c2a4d92438
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TestCase builder
"""
from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest
from metadata.generated.schema.tests.table import tableRowCountToEqual
from metadata.generated.schema.tests.tableTest import TableTestType
from metadata.great_expectations.builders.table.base_table_test_builders import (
BaseTableTestBuilder,
)
class TableRowCountToEqualBuilder(BaseTableTestBuilder):
"""Builder for `expect_table_row_count_to_equal` GE expectation"""
def _build_test(self) -> CreateTableTestRequest:
"""Specific test builder for the test"""
return self.build_test_request(
config=tableRowCountToEqual.TableRowCountToEqual(
value=self.result["expectation_config"]["kwargs"]["value"],
),
test_type=TableTestType.tableRowCountToEqual,
)
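# Illustrative shape of the Great Expectations result this builder consumes
# (hypothetical values; only the keys accessed above are shown):
#
#   self.result = {
#       "expectation_config": {
#           "expectation_type": "expect_table_row_count_to_equal",
#           "kwargs": {"value": 100},
#       },
#   }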
| 41.970588
| 86
| 0.756833
| 171
| 1,427
| 6.22807
| 0.590643
| 0.056338
| 0.059155
| 0.076056
| 0.060094
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006734
| 0.167484
| 1,427
| 33
| 87
| 43.242424
| 0.889731
| 0.465312
| 0
| 0
| 0
| 0
| 0.039402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d22671738e4b0cf43566c5aeec7cd2a5f04193
| 6,899
|
py
|
Python
|
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
|
gustavovaliati/obj-det-experiments
|
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
|
gustavovaliati/obj-det-experiments
|
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py
|
gustavovaliati/obj-det-experiments
|
e81774a18b34c22d971ad15d7ac6eb8663ac6f22
|
[
"Apache-2.0"
] | null | null | null |
'''
This code is based on https://github.com/jrieke/shape-detection/
'''
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import tensorflow as tf
import datetime
class JriekeBboxDataset:
def generate(self):
print('Generating...')
self.WIDTH = 8
self.HEIGHT = 8
num_imgs = 50000
min_object_size = 1
max_object_size = 4
num_objects = 1
self.bboxes = np.zeros((num_imgs, num_objects, 4))
self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_object_size, max_object_size, size=2)
x = np.random.randint(0, self.WIDTH - w)
y = np.random.randint(0, self.HEIGHT - h)
self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle to 1
self.bboxes[i_img, i_object] = [x, y, w, h]
print("Shapes: imgs ", self.imgs.shape, " bboxes ", self.bboxes.shape)
#why this?
# X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs)
X = self.imgs
y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH
# Split training and test.
i = int(0.8 * num_imgs)
train_X = X[:i] #80% for training
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.bboxes[i:]
return train_X, train_y, test_X, test_y
def IOU(self,bbox1, bbox2):
'''Calculate overlap between two bounding boxes [x, y, w, h] as the area of intersection over the area of unity'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0.
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
def convertDefaultAnnotToCoord(self, annot):
'''
annot -> [x, y, w, h]
'''
w = annot[2] * self.WIDTH
h = annot[3] * self.HEIGHT
x = annot[0] * self.WIDTH
y = annot[1] * self.HEIGHT
return [x,y,w,h]
def convertYoloAnnotToCoord(self, yolo_annot):
'''
yolo_annot -> [x, y, w, h]
'''
w = yolo_annot[2] * self.WIDTH
h = yolo_annot[3] * self.HEIGHT
x = (yolo_annot[0] * self.WIDTH) - (w/2)
y = (yolo_annot[1] * self.HEIGHT) - (h/2)
return [x,y,w,h]
def show_generated(self, i=0):
fig = plt.figure()
fig.subplots_adjust(top=0.85)
fig.suptitle('Generated image sample + GT')
plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for bbox in self.bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.gca().legend(['GT'])
plt.show()
def plot_rectangle(self, img, bbox):
fig = plt.figure()
fig.suptitle('Plotting rectangle.')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 1, 1)
plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.show()
def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
fig = plt.figure(figsize=(12, 3))
fig.suptitle('check if the generated imgs match to the test_X slice image')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 2, 1)
plt.gca().set_title('Returned by the dataset class: used for training')
plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.subplot(1, 2, 2)
plt.gca().set_title('Global image holder: used for plotting.')
plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.show()
print('compare:',test_X_sample,test_imgs_sample)
def show_predicted(self, pred_bboxes):
# Show a few images and predicted bounding boxes from the test dataset.
fig = plt.figure(figsize=(12, 3))
fig.subplots_adjust(top=0.85)
fig.suptitle('Prediction demonstration. Random samples.')
legend_plotted = False
for i_subplot in range(1, 11):
plt.subplot(1, 10, i_subplot)
i = np.random.randint(len(pred_bboxes))
plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]):
# print('before conversion: pred', pred_bbox, 'gt', exp_bbox)
pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox)
# exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox)
print('after conversion: pred', pred_bbox, 'gt', exp_bbox)
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none'))
#gt
plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none'))
plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r')
if not legend_plotted:
legend_plotted = True
plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
plt.show()
# plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()), dpi=300)
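if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file): build the toy
    # dataset and display one generated sample with its ground-truth box.
    dataset = JriekeBboxDataset()
    train_X, train_y, test_X, test_y = dataset.generate()
    dataset.show_generated(0)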
| 40.582353
| 142
| 0.59052
| 1,009
| 6,899
| 3.910803
| 0.193261
| 0.022808
| 0.022808
| 0.046123
| 0.46148
| 0.434617
| 0.413077
| 0.371515
| 0.341105
| 0.332489
| 0
| 0.032845
| 0.258588
| 6,899
| 169
| 143
| 40.822485
| 0.738612
| 0.101899
| 0
| 0.301724
| 0
| 0
| 0.098433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077586
| false
| 0
| 0.043103
| 0
| 0.172414
| 0.043103
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d45952adaab5e1d25729182d1ca80f64803a29
| 8,103
|
py
|
Python
|
census_data_downloader/core/tables.py
|
ian-r-rose/census-data-downloader
|
f8ac9d773e6d3f52be87bf916a2e32249391f966
|
[
"MIT"
] | null | null | null |
census_data_downloader/core/tables.py
|
ian-r-rose/census-data-downloader
|
f8ac9d773e6d3f52be87bf916a2e32249391f966
|
[
"MIT"
] | null | null | null |
census_data_downloader/core/tables.py
|
ian-r-rose/census-data-downloader
|
f8ac9d773e6d3f52be87bf916a2e32249391f966
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*
"""
A base class that governs how to download and process tables from the Census API.
"""
import os
import logging
import pathlib
from . import geotypes
from . import decorators
logger = logging.getLogger(__name__)
class BaseTableConfig(object):
"""
Configures how to download and process tables from the Census API.
"""
THIS_DIR = pathlib.Path(__file__).parent
PARENT_DIR = THIS_DIR.parent
# All available years
YEAR_LIST = [
2017,
2016,
2015,
2014,
2013,
2012,
2011,
2010,
2009
]
# All available geographies
GEOTYPE_LIST = (
"nationwide",
"regions",
"divisions",
"states",
"congressional_districts",
"state_legislative_upper_districts",
"state_legislative_lower_districts",
"counties",
"places",
"urban_areas",
"msas",
"csas",
"pumas",
"nectas",
"cnectas",
"aiannh_homelands",
"tracts",
"zctas",
"unified_school_districts",
"elementary_school_districts",
"secondary_school_districts"
)
def __init__(
self,
api_key=None,
source="acs5",
years=None,
data_dir=None,
force=False
):
"""
Configuration.
"""
# Set the inputs
self.CENSUS_API_KEY = os.getenv("CENSUS_API_KEY", api_key)
if not self.CENSUS_API_KEY:
raise NotImplementedError("Census API key required. Pass it as the first argument.")
self.source = source
self.force = force
#
# Allow custom years for data download, defaulting to most recent year
#
# If they want all the years, give it to them.
if years == "all":
self.years_to_download = self.YEAR_LIST
# If the user provides a year give them that.
elif isinstance(years, int):
self.years_to_download = [years]
# Or if they provide years as a list, give those then.
elif isinstance(years, list):
self.years_to_download = list(map(int, years))
# If they provided nothing, default to the latest year of data
elif years is None:
self.years_to_download = [max(self.YEAR_LIST), ]
# Validate the years
for year in self.years_to_download:
if year not in self.YEAR_LIST:
error_msg = ("Data only available for the years"
f"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.")
raise NotImplementedError(error_msg)
# Set the data directories
if data_dir:
self.data_dir = pathlib.Path(str(data_dir))
else:
self.data_dir = self.PARENT_DIR.joinpath("data")
self.raw_data_dir = self.data_dir.joinpath("raw")
self.processed_data_dir = self.data_dir.joinpath("processed")
# Make sure they exist
if not self.data_dir.exists():
self.data_dir.mkdir()
if not self.raw_data_dir.exists():
self.raw_data_dir.mkdir()
if not self.processed_data_dir.exists():
self.processed_data_dir.mkdir()
@property
def censusreporter_url(self):
"""
Returns the URL of the Census Reporter page explaining the ACS table.
"""
return f"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/"
#
# Geotype downloaders
#
@decorators.downloader
def download_nationwide(self):
"""
Download nationwide data.
"""
return geotypes.NationwideDownloader
@decorators.downloader
def download_regions(self):
"""
Download data for all regions.
"""
return geotypes.RegionsDownloader
@decorators.downloader
def download_divisions(self):
"""
Download data for all divisions.
"""
return geotypes.DivisionsDownloader
@decorators.downloader
def download_states(self):
"""
Download data for all states.
"""
return geotypes.StatesDownloader
@decorators.downloader
def download_congressional_districts(self):
"""
Download data for all Congressional districts.
"""
return geotypes.CongressionalDistrictsDownloader
@decorators.downloader
def download_state_legislative_upper_districts(self):
"""
Download data for all Census upper legislative districts in the provided state.
"""
return geotypes.StateLegislativeUpperDistrictsDownloader
@decorators.downloader
def download_state_legislative_lower_districts(self):
"""
Download data for all Census lower legislative districts in the provided state.
"""
return geotypes.StateLegislativeLowerDistrictsDownloader
@decorators.downloader
def download_counties(self):
"""
Download data for all counties.
"""
return geotypes.CountiesDownloader
@decorators.downloader
def download_places(self):
"""
Download data for all Census designated places.
"""
return geotypes.PlacesDownloader
@decorators.downloader
def download_urban_areas(self):
"""
Download data for all urban areas.
"""
return geotypes.UrbanAreasDownloader
@decorators.downloader
def download_msas(self):
"""
Download data for Metropolitan Statistical Areas.
"""
return geotypes.MsasDownloader
@decorators.downloader
def download_csas(self):
"""
Download data for Combined Statistical Areas.
"""
return geotypes.CsasDownloader
@decorators.downloader
def download_pumas(self):
"""
Download data for Public Use Microdata Areas.
"""
return geotypes.PumasDownloader
@decorators.downloader
def download_nectas(self):
"""
Download data for New England cities and towns.
"""
return geotypes.NectasDownloader
@decorators.downloader
def download_cnectas(self):
"""
Download data for combined New England cities and towns.
"""
return geotypes.CnectasDownloader
@decorators.downloader
def download_aiannh_homelands(self):
"""
Download data for American Indian home lands.
"""
return geotypes.AiannhHomelandsDownloader
@decorators.downloader
def download_tracts(self):
"""
Download data for all Census tracts in the provided state.
"""
return geotypes.TractsDownloader
@decorators.downloader
def download_zctas(self):
"""
Download data for Zip Code Tabulation Areas.
"""
return geotypes.ZctasDownloader
@decorators.downloader
def download_unified_school_districts(self):
"""
Download data for unified school districts.
"""
return geotypes.UnifiedSchoolDistrictsDownloader
@decorators.downloader
def download_elementary_school_districts(self):
"""
Download data for elementary school districts.
"""
return geotypes.ElementarySchoolDistrictsDownloader
@decorators.downloader
def download_secondary_school_districts(self):
"""
Download data for secondary school districts.
"""
return geotypes.SecondarySchoolDistrictsDownloader
def download_everything(self):
"""
Download 'em all.
"""
for geo in self.GEOTYPE_LIST:
print(geo)
# Get the downloader function
dl = getattr(self, f"download_{geo}", None)
# Validate it
if not dl or not callable(dl):
raise NotImplementedError(f"Invalid geography type: {geo}")
# Run it
try:
dl()
except NotImplementedError:
pass
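# Usage sketch (illustrative; `MedianAgeConfig` is a hypothetical subclass and
# a CENSUS_API_KEY environment variable is assumed to be set):
#
#   class MedianAgeConfig(BaseTableConfig):
#       RAW_TABLE_NAME = "B01002"
#   MedianAgeConfig().download_nationwide()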
| 28.038062
| 96
| 0.60817
| 812
| 8,103
| 5.919951
| 0.270936
| 0.050343
| 0.100478
| 0.135428
| 0.189515
| 0.151654
| 0.06657
| 0.021635
| 0
| 0
| 0
| 0.007183
| 0.312724
| 8,103
| 288
| 97
| 28.135417
| 0.855989
| 0.214488
| 0
| 0.133758
| 0
| 0
| 0.093761
| 0.035875
| 0
| 0
| 0
| 0
| 0
| 1
| 0.152866
| false
| 0.012739
| 0.031847
| 0
| 0.356688
| 0.006369
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d61d512c3c9d47b1f63fe91873604a549e077d
| 5,422
|
py
|
Python
|
sgf2ebook.py
|
loujine/sgf2ebook
|
13c87056646cc6c06485b129221ab2028e67ef95
|
[
"MIT"
] | null | null | null |
sgf2ebook.py
|
loujine/sgf2ebook
|
13c87056646cc6c06485b129221ab2028e67ef95
|
[
"MIT"
] | null | null | null |
sgf2ebook.py
|
loujine/sgf2ebook
|
13c87056646cc6c06485b129221ab2028e67ef95
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
from pathlib import Path
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
from uuid import uuid4
from zipfile import ZipFile
import jinja2
import sente # type: ignore
__version__ = (1, 0, 0)
SGF_RENDER_EXECUTABLE = './sgf-render'
TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve()
def load_sgf(sgfpath: Path):
game = sente.sgf.load(str(sgfpath))
comments = {}
seq = game.get_default_sequence()
for idx, move in enumerate(seq, 1):
game.play(move)
if game.comment:
comments[idx] = game.comment
return {
# read only main sequence, not variations
'nb_moves': len(seq),
'metadata': game.get_properties(),
'comments': comments,
}
def main(sgfpath: Path, output_path: Path) -> None:
print()
print(f'Load content of {sgfpath}')
try:
sgf_content = load_sgf(sgfpath)
except (sente.exceptions.InvalidSGFException,
sente.exceptions.IllegalMoveException):
print(f'Could not read {sgfpath}, skipping')
return
nb_moves = sgf_content['nb_moves']
metadata = sgf_content['metadata']
comments = sgf_content['comments']
uuid = uuid4()
with TemporaryDirectory() as tmpdir:
print('Prepare structure of the ebook')
shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True)
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read())
print('Prepare SVG diagrams')
svgdirpath = Path(tmpdir, 'EPUB', 'Images')
for move in range(1, nb_moves + 1):
svgpath = f'diagram_{move:03}.svg'
# generate SVG files with sgf-render
try:
subprocess.check_call([
SGF_RENDER_EXECUTABLE,
str(sgfpath),
'--move-numbers',
'--first-move-number', str(move),
'-n', str(move),
'--style', 'minimalist',
'-o', svgdirpath.joinpath(svgpath),
])
except subprocess.CalledProcessError:
print(f'Move {move} could not be converted to SVG')
continue
# replace move number in SVG
# not possible directly in sgf-render invocation at the moment
svg_content = svgdirpath.joinpath(svgpath).open().read()
svgdirpath.joinpath(svgpath).open('w').write(
svg_content.replace('>1<', f'>{move}<', 1))
# create HTML page with SVG element
html_content = template.render(
title=sgfpath.stem,
svgpath=svgpath,
info=metadata,
first_flag=(move == 1),
last_flag=(move == nb_moves),
comment=comments.get(move, ''),
)
with Path(tmpdir, 'EPUB', 'Text', f'page_{move:03}.html').open('w') as fd:
fd.write(html_content)
# Declare all HTML/SVG files in master file
print('Prepare content.opf file')
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read())
opf_content = template.render(
title=sgfpath.stem,
creator='sgf2ebook',
UUID=uuid,
svgpath=sorted(svgdirpath.glob('*.svg')),
enumerate=enumerate,
)
with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd:
fd.write(opf_content)
# Generate table of contents
print('Prepare table of contents')
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read())
toc_content = template.render(
title=sgfpath.stem,
UUID=uuid,
nb_moves=nb_moves,
range=range,
)
with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd:
fd.write(toc_content)
# zip all content in EPUB file
output_path.mkdir(exist_ok=True, parents=True)
output_name = f"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub".replace(' ', '_')
with ZipFile(output_path.joinpath(output_name), 'w') as zf:
os.chdir(tmpdir)
# "The first file in the OCF ZIP Container MUST be the mimetype file"
zf.write('mimetype')
for root, dirs, files in os.walk('.'):
for file in sorted(files):
if file != 'mimetype':
zf.write(Path(root, file))
os.chdir(Path(__file__).parent)
print(f'{output_path.joinpath(output_name)} generated')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='')
parser.add_argument('--input-path', '-i', help='Input files or directory')
parser.add_argument('--output-path', '-o', help='Output directory')
args = parser.parse_args()
path = Path(args.input_path)
outpath = Path(args.output_path)
if not path.exists():
print(f'Input path {path} not found')
sys.exit(1)
if path.is_file():
main(path, outpath)
if path.is_dir():
for filepath in sorted(path.rglob('*.sgf')):
main(filepath, outpath.joinpath(filepath.parent.relative_to(path)))
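# Command-line usage sketch (assumes the sgf-render binary referenced by
# SGF_RENDER_EXECUTABLE is available in the working directory):
#
#   ./sgf2ebook.py --input-path games/ --output-path ebooks/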
| 34.75641
| 142
| 0.574511
| 614
| 5,422
| 4.949511
| 0.301303
| 0.016124
| 0.018427
| 0.032577
| 0.115169
| 0.096742
| 0
| 0
| 0
| 0
| 0
| 0.006584
| 0.299705
| 5,422
| 155
| 143
| 34.980645
| 0.793785
| 0.07322
| 0
| 0.08
| 0
| 0.008
| 0.151775
| 0.019745
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016
| false
| 0
| 0.088
| 0
| 0.12
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d6728bc96a31ea175e93ab91aadcc559c13053
| 1,788
|
py
|
Python
|
vmis_sql_python/evaluation/metrics/popularity.py
|
bolcom/serenade-experiments-sigmod
|
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
|
[
"Apache-2.0"
] | null | null | null |
vmis_sql_python/evaluation/metrics/popularity.py
|
bolcom/serenade-experiments-sigmod
|
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
|
[
"Apache-2.0"
] | null | null | null |
vmis_sql_python/evaluation/metrics/popularity.py
|
bolcom/serenade-experiments-sigmod
|
0a4c7f19d800d1c2784ea5536abb1a628cb12f7a
|
[
"Apache-2.0"
] | null | null | null |
class Popularity:
'''
Popularity( length=20 )
Used to iteratively calculate the average overall popularity of an algorithm's recommendations.
Parameters
-----------
length : int
Popularity@length
training_df : dataframe
determines how many distinct item_ids there are in the training data
'''
def __init__(self, length=20, training_df=None):
self.length = length
self.sum = 0
self.tests = 0
self.train_actions = len(training_df.index)
#group the data by the itemIds
grp = training_df.groupby('ItemId')
# count the occurrence of every itemid in the training dataset
self.pop_scores = grp.size()
#sort it according to the score
self.pop_scores.sort_values(ascending=False, inplace=True)
#normalize
self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0]
def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None):
'''
Update the metric with a result set and the correct next item.
Result must be sorted correctly.
Parameters
--------
result: pandas.Series
Series of scores with the item id as the index
'''
# only keep the k first predictions
recs = result[:self.length]
#take the unique values out of those top scorers
items = recs.index.unique()
self.sum += ( self.pop_scores[ items ].sum() / len( items ) )
self.tests += 1
def result(self):
'''
Return a tuple of a description string and the current averaged value
'''
return ("Popularity@" + str( self.length ) + ": "), ( self.sum / self.tests )
| 33.735849
| 100
| 0.597315
| 221
| 1,788
| 4.742081
| 0.497738
| 0.040076
| 0.074427
| 0.032443
| 0.037214
| 0.037214
| 0
| 0
| 0
| 0
| 0
| 0.008907
| 0.309284
| 1,788
| 53
| 101
| 33.735849
| 0.839676
| 0.415548
| 0
| 0
| 0
| 0
| 0.021041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d75a7478a79891b6baf0f18c7802c22b104725
| 918
|
py
|
Python
|
dandeliondiary/household/urls.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | null | null | null |
dandeliondiary/household/urls.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | 6
|
2020-04-29T23:54:15.000Z
|
2022-03-11T23:25:24.000Z
|
dandeliondiary/household/urls.py
|
amberdiehl/dandeliondiary_project
|
e9bace5bd7980def6ca763840ab5b38f1e05cd3d
|
[
"FSFAP"
] | null | null | null |
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^settings$', views.household_dashboard, name='household_dashboard'),
url(r'^myinfo$', views.my_info, name='my_info'),
url(r'^profile$', views.household_profile, name='maintain_household'),
url(r'^members$', views.household_members, name='maintain_members'),
url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'),
url(r'^ajax/models-by-make/(?P<make_id>\d+)/$', views.ajax_models_by_make),
url(r'^ajax/makes-by-type/(?P<type_id>\d+)/$', views.ajax_makes_by_type),
url(r'^ajax/add-make/(?P<type_key>\d+)/(?P<make>[\w ]{1,50})/$', views.ajax_add_make),
url(r'^ajax/add-model/(?P<make_key>\d+)/(?P<model>[\w -]{1,128})/$', views.ajax_add_model),
url(r'^ajax/delete-invite/$', views.ajax_delete_invite),
url(r'^ajax/change-member-status/$', views.ajax_change_member_status),
]
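Django passes the named regex groups in the patterns above to the view as keyword arguments; a hypothetical matching view body, for illustration only:
from django.http import JsonResponse

def ajax_models_by_make(request, make_id):
    # make_id arrives as a string captured by (?P<make_id>\d+)
    return JsonResponse({'make_id': make_id, 'models': []})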
| 54
| 95
| 0.683007
| 142
| 918
| 4.211268
| 0.295775
| 0.073579
| 0.080268
| 0.053512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008424
| 0.094771
| 918
| 16
| 96
| 57.375
| 0.711191
| 0
| 0
| 0
| 0
| 0.133333
| 0.397603
| 0.237473
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d8ff6a04670083ea5d1c4de998cdc6916ada2c
| 4,207
|
py
|
Python
|
q2_qemistree/tests/test_fingerprint.py
|
tgroth97/q2-qemistree
|
289c447a6c3a29478bb84212281ef0d7ffc1387a
|
[
"BSD-2-Clause"
] | null | null | null |
q2_qemistree/tests/test_fingerprint.py
|
tgroth97/q2-qemistree
|
289c447a6c3a29478bb84212281ef0d7ffc1387a
|
[
"BSD-2-Clause"
] | null | null | null |
q2_qemistree/tests/test_fingerprint.py
|
tgroth97/q2-qemistree
|
289c447a6c3a29478bb84212281ef0d7ffc1387a
|
[
"BSD-2-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import qiime2
import os
from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs
from q2_qemistree import (compute_fragmentation_trees,
rerank_molecular_formulas,
predict_fingerprints)
from q2_qemistree._fingerprint import artifactory
class FingerprintTests(TestCase):
def setUp(self):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin')
self.goodsirpath = os.path.join(THIS_DIR, 'data/'
'sirius-linux64-headless-4.0.1/bin')
# MassSpectrometryFeatures
self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/sirius.mgf.qza'))
# SiriusFolder
self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/sirFolder.qza'))
# ZodiacFolder
self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/zodFolder.qza'))
def test_artifactory(self):
# everything is working fine
obs = os.environ.get('_JAVA_OPTIONS', '')
res = artifactory(self.goodsirpath, ['--help'],
constructor=OutputDirs, java_flags='-Xms2G')
self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS'))
self.assertTrue(isinstance(res, OutputDirs))
# exceptions are raised
with self.assertRaises(OSError):
res = artifactory(self.badsirpath, ['--help'],
constructor=OutputDirs)
def test_fragmentation_trees(self):
ions = self.ions.view(MGFDirFmt)
result = compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15, profile='orbitrap')
contents = os.listdir(result.get_path())
self.assertTrue(('version.txt' in contents))
def test_fragmentation_trees_negative_ionization(self):
ions = self.ions.view(MGFDirFmt)
result = compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15, profile='orbitrap',
ionization_mode='negative')
contents = os.listdir(result.get_path())
self.assertTrue(('version.txt' in contents))
def test_fragmentation_trees_exception(self):
ions = self.ions.view(MGFDirFmt)
with self.assertRaises(ValueError):
compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15,
profile='orbitrap',
ionization_mode='n3gativ3')
def test_reranking(self):
ions = self.ions.view(MGFDirFmt)
sirout = self.sirout.view(SiriusDirFmt)
result = rerank_molecular_formulas(sirius_path=self.goodsirpath,
fragmentation_trees=sirout,
features=ions)
contents = os.listdir(result.get_path())
self.assertTrue(('zodiac_summary.csv' in contents))
def test_fingerid(self):
zodout = self.zodout.view(ZodiacDirFmt)
result = predict_fingerprints(sirius_path=self.goodsirpath,
molecular_formulas=zodout, ppm_max=15)
contents = os.listdir(result.get_path())
self.assertTrue(('summary_csi_fingerid.csv' in contents))
if __name__ == '__main__':
main()
| 45.728261
| 78
| 0.548134
| 389
| 4,207
| 5.74036
| 0.326478
| 0.032244
| 0.022391
| 0.031348
| 0.403045
| 0.403045
| 0.34438
| 0.329601
| 0.290193
| 0.237797
| 0
| 0.011344
| 0.329451
| 4,207
| 91
| 79
| 46.230769
| 0.78022
| 0.103161
| 0
| 0.220588
| 0
| 0
| 0.069415
| 0.01516
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.102941
| false
| 0
| 0.088235
| 0
| 0.205882
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d90c692c5aa920f75d361edbf2de1c22109ec8
| 3,518
|
py
|
Python
|
tempo/worker.py
|
rackerlabs/Tempo
|
60c2adaf5b592ae171987b999e0b9cc46b80c54e
|
[
"Apache-2.0"
] | 4
|
2015-04-26T01:46:51.000Z
|
2020-11-10T13:07:59.000Z
|
tempo/worker.py
|
rackerlabs/Tempo
|
60c2adaf5b592ae171987b999e0b9cc46b80c54e
|
[
"Apache-2.0"
] | null | null | null |
tempo/worker.py
|
rackerlabs/Tempo
|
60c2adaf5b592ae171987b999e0b9cc46b80c54e
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import kombu
from tempo import actions
from tempo import config
from tempo import db
from tempo import notifier
from tempo import queue as tempo_queue
from tempo.openstack.common import cfg
from tempo.openstack.common import exception as common_exception
CFG = config.CFG
logger = logging.getLogger('tempo.worker')
worker_opts = [
cfg.BoolOpt('daemonized',
default=False,
help='Run worker as a daemon'),
cfg.StrOpt('publisher_id',
default='host',
help='Where the notification came from')
]
worker_group = cfg.OptGroup(name='worker', title='Worker options')
CFG.register_group(worker_group)
CFG.register_opts(worker_opts, group=worker_group)
def _perform_task(task):
def _notify(event_type, exception=None):
payload = {'task_uuid': task_uuid}
if exception is not None:
payload['exception'] = exception
publisher_id = CFG.worker.publisher_id
priority = notifier.DEBUG
notifier.notify(publisher_id, event_type, priority, payload)
action = task.action
task_uuid = task.uuid
try:
func = getattr(actions, action)
except AttributeError:
logger.error("unrecognized action '%(action)s' for task task"
" '%(task_uuid)s'" % locals())
return
logger.debug("task '%(task_uuid)s' started: '%(action)s'" % locals())
_notify('Started Task')
try:
func(task)
except Exception as e:
logger.error("task '%(task_uuid)s' errored: %(e)s" % locals())
_notify('Errored Task', exception=e)
else:
logger.debug("task '%(task_uuid)s' finished: returned successfully" %
locals())
_notify('Finished Task')
def _process_message(body, message):
message.ack()
task_uuid = body['task_uuid']
try:
task = db.task_get(task_uuid)
except common_exception.NotFound:
logger.error("Task '%(task_uuid)s' not found" % locals())
return
_perform_task(task)
def _consume_messages(exchange, queue, key):
kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True)
kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key)
connection = tempo_queue.get_connection()
consumer = kombu.Consumer(connection.channel(), kombu_queue)
consumer.register_callback(_process_message)
consumer.consume()
while True:
connection.drain_events()
def consume_messages(exchange, queue, key):
if CFG.worker.daemonized:
# TODO(mdietz): there's a cleaner way to do this, but this works well
# as a way of backgrounding the server for now
import daemon
with daemon.DaemonContext():
_consume_messages(exchange, queue, key)
else:
_consume_messages(exchange, queue, key)
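A hedged sketch of the producing side, assuming the same exchange and routing-key naming as consume_messages above, a body carrying the 'task_uuid' key that _process_message expects, and the tempo_queue module imported at the top of this file:
import kombu

def publish_task(task_uuid, exchange, key):
    # mirror the consumer's exchange declaration
    kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True)
    connection = tempo_queue.get_connection()
    producer = kombu.Producer(connection.channel(), exchange=kombu_xchg,
                              routing_key=key, serializer='json')
    producer.publish({'task_uuid': task_uuid})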
| 29.563025
| 77
| 0.673394
| 446
| 3,518
| 5.188341
| 0.38565
| 0.041487
| 0.032411
| 0.02809
| 0.123596
| 0.070873
| 0
| 0
| 0
| 0
| 0
| 0.004059
| 0.229676
| 3,518
| 118
| 78
| 29.813559
| 0.849816
| 0.212905
| 0
| 0.121622
| 0
| 0
| 0.146545
| 0
| 0
| 0
| 0
| 0.008475
| 0
| 1
| 0.067568
| false
| 0
| 0.135135
| 0
| 0.22973
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86d979010cd46ef001009b94be4cbd36b5242fa0
| 24,187
|
py
|
Python
|
bin/basenji_motifs.py
|
AndyPJiang/basenji
|
64e43570c8bece156b4ab926608014f489b7965e
|
[
"Apache-2.0"
] | 1
|
2020-05-22T20:53:37.000Z
|
2020-05-22T20:53:37.000Z
|
bin/basenji_motifs.py
|
AndyPJiang/basenji
|
64e43570c8bece156b4ab926608014f489b7965e
|
[
"Apache-2.0"
] | null | null | null |
bin/basenji_motifs.py
|
AndyPJiang/basenji
|
64e43570c8bece156b4ab926608014f489b7965e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import copy, os, pdb, random, shutil, subprocess, time
import h5py
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
from sklearn import preprocessing
import tensorflow as tf
import basenji
'''
basenji_motifs.py
Collect statistics and make plots to explore the first convolution layer
of the given model using the given sequences.
'''
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
parser = OptionParser(usage)
parser.add_option(
'-a',
dest='act_t',
default=0.5,
type='float',
help=
'Activation threshold (as proportion of max) to consider for PWM [Default: %default]'
)
parser.add_option(
'-d',
dest='model_hdf5_file',
default=None,
help='Pre-computed model output as HDF5.')
parser.add_option('-o', dest='out_dir', default='.')
parser.add_option(
'-m',
dest='meme_db',
default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
help='MEME database used to annotate motifs')
parser.add_option(
'-p',
dest='plot_heats',
default=False,
action='store_true',
help=
'Plot heat maps describing filter activations in the test sequences [Default: %default]'
)
parser.add_option(
'-s',
dest='sample',
default=None,
type='int',
help='Sample sequences from the test set [Default:%default]')
parser.add_option(
'-t',
dest='trim_filters',
default=False,
action='store_true',
help='Trim uninformative positions off the filter ends [Default: %default]'
)
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error(
'Must provide Basenji parameters and model files and test data in HDF5'
' format.'
)
else:
params_file = args[0]
model_file = args[1]
data_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#################################################################
# load data
data_open = h5py.File(data_file, 'r')
test_seqs1 = data_open['test_in']
test_targets = data_open['test_out']
try:
target_names = list(data_open['target_labels'])
except KeyError:
target_names = ['t%d' % ti for ti in range(test_targets.shape[1])]
if options.sample is not None:
# choose sampled indexes
sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))
# filter
test_seqs1 = test_seqs1[sample_i]
test_targets = test_targets[sample_i]
# convert to letters
test_seqs = basenji.dna_io.hot1_dna(test_seqs1)
#################################################################
# model parameters and placeholders
job = basenji.dna_io.read_job_params(params_file)
job['seq_length'] = test_seqs1.shape[1]
job['seq_depth'] = test_seqs1.shape[2]
job['num_targets'] = test_targets.shape[2]
job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))
t0 = time.time()
dr = basenji.seqnn.SeqNN()
dr.build(job)
print('Model building time %ds' % (time.time() - t0))
# adjust for fourier
job['fourier'] = 'train_out_imag' in data_open
if job['fourier']:
test_targets_imag = data_open['test_out_imag']
if getattr(options, 'valid', False):  # note: 'valid' is never defined by this script's parser
test_targets_imag = data_open['valid_out_imag']
#################################################################
# predict
# initialize batcher
if job['fourier']:
batcher_test = basenji.batcher.BatcherF(
test_seqs1,
test_targets,
test_targets_imag,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
else:
batcher_test = basenji.batcher.Batcher(
test_seqs1,
test_targets,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# get weights
filter_weights = sess.run(dr.filter_weights[0])
filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
print(filter_weights.shape)
# test
t0 = time.time()
layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
filter_outs = layer_filter_outs[0]
print(filter_outs.shape)
# store useful variables
num_filters = filter_weights.shape[0]
filter_size = filter_weights.shape[2]
#################################################################
# individual filter plots
#################################################################
# also save information contents
filters_ic = []
meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)
for f in range(num_filters):
print('Filter %d' % f)
# plot filter parameters as a heatmap
plot_filter_heat(filter_weights[f, :, :],
'%s/filter%d_heat.pdf' % (options.out_dir, f))
# write possum motif file
filter_possum(filter_weights[f, :, :], 'filter%d' % f,
'%s/filter%d_possum.txt' % (options.out_dir,
f), options.trim_filters)
# plot weblogo of high scoring outputs
plot_filter_logo(
filter_outs[:, :, f],
filter_size,
test_seqs,
'%s/filter%d_logo' % (options.out_dir, f),
maxpct_t=options.act_t)
# make a PWM for the filter
filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
(options.out_dir, f))
if nsites < 10:
# no information
filters_ic.append(0)
else:
# compute and save information content
filters_ic.append(info_content(filter_pwm))
# add to the meme motif file
meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)
meme_out.close()
#################################################################
# annotate filters
#################################################################
# run tomtom
subprocess.call(
'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
(options.out_dir, options.out_dir, options.meme_db),
shell=True)
# read in annotations
filter_names = name_filters(
num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)
#################################################################
# print a table of information
#################################################################
table_out = open('%s/table.txt' % options.out_dir, 'w')
# print header for later panda reading
header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std')
print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)
for f in range(num_filters):
# collapse to a consensus motif
consensus = filter_motif(filter_weights[f, :, :])
# grab annotation
annotation = '.'
name_pieces = filter_names[f].split('_')
if len(name_pieces) > 1:
annotation = name_pieces[1]
# plot density of filter output scores
fmean, fstd = plot_score_density(
np.ravel(filter_outs[:, :, f]),
'%s/filter%d_dens.pdf' % (options.out_dir, f))
row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd)
print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)
table_out.close()
#################################################################
# global filter plots
#################################################################
if options.plot_heats:
# plot filter-sequence heatmap
plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)
# plot filter-segment heatmap
plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)
plot_filter_seg_heat(
filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False)
# plot filter-target correlation heatmap
plot_target_corr(filter_outs, test_targets, filter_names, target_names,
'%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')
plot_target_corr(filter_outs, test_targets, filter_names, target_names,
'%s/filter_target_cors_max.pdf' % options.out_dir, 'max')
def get_motif_proteins(meme_db_file):
""" Hash motif_id's to protein names using the MEME DB file """
motif_protein = {}
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein
def info_content(pwm, transpose=False, bg_gc=0.415):
""" Compute PWM information content.
In the original analysis, I used a bg_gc=0.5. For any
future analysis, I ought to switch to the true hg19
value of 0.415.
"""
pseudoc = 1e-9
if transpose:
pwm = np.transpose(pwm)
bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]
ic = 0
for i in range(pwm.shape[0]):
for j in range(4):
# ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
ic += -bg_pwm[j] * np.log2(
bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])
return ic
def make_filter_pwm(filter_fasta):
""" Make a PWM for this filter from its top hits """
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
pwm_counts = []
nsites = 4 # pseudocounts
for line in open(filter_fasta):
if line[0] != '>':
seq = line.rstrip()
nsites += 1
if len(pwm_counts) == 0:
# initialize with the length
for i in range(len(seq)):
pwm_counts.append(np.array([1.0] * 4))
# count
for i in range(len(seq)):
try:
pwm_counts[i][nts[seq[i]]] += 1
except KeyError:
pwm_counts[i] += np.array([0.25] * 4)
# normalize
pwm_freqs = []
for i in range(len(pwm_counts)):
pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])
return np.array(pwm_freqs), nsites - 4
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
""" Print a filter to the growing MEME file
Attrs:
meme_out : open file
f (int) : filter index #
filter_pwm (array) : filter PWM array
nsites (int) : number of filter sites
"""
if not trim_filters:
ic_start = 0
ic_end = filter_pwm.shape[0] - 1
else:
ic_t = 0.2
# trim PWM of uninformative prefix
ic_start = 0
while ic_start < filter_pwm.shape[0] and info_content(
filter_pwm[ic_start:ic_start + 1]) < ic_t:
ic_start += 1
# trim PWM of uninformative suffix
ic_end = filter_pwm.shape[0] - 1
while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
ic_end -= 1
if ic_start < ic_end:
print('MOTIF filter%d' % f, file=meme_out)
print(
'letter-probability matrix: alength= 4 w= %d nsites= %d' %
(ic_end - ic_start + 1, nsites),
file=meme_out)
for i in range(ic_start, ic_end + 1):
print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
print('', file=meme_out)
def meme_intro(meme_file, seqs):
""" Open MEME motif format file and print intro
Attrs:
meme_file (str) : filename
seqs [str] : list of strings for obtaining background freqs
Returns:
mem_out : open MEME file
"""
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
# count
nt_counts = [1] * 4
for i in range(len(seqs)):
for nt in seqs[i]:
try:
nt_counts[nts[nt]] += 1
except KeyError:
pass
# normalize
nt_sum = float(sum(nt_counts))
nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]
# open file for writing
meme_out = open(meme_file, 'w')
# print intro material
print('MEME version 4', file=meme_out)
print('', file=meme_out)
print('ALPHABET= ACGT', file=meme_out)
print('', file=meme_out)
print('Background letter frequencies:', file=meme_out)
print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
print('', file=meme_out)
return meme_out
def name_filters(num_filters, tomtom_file, meme_db_file):
""" Name the filters using Tomtom matches.
Attrs:
num_filters (int) : total number of filters
tomtom_file (str) : filename of Tomtom output table.
meme_db_file (str) : filename of MEME db
Returns:
filter_names [str] :
"""
# name by number
filter_names = ['f%d' % fi for fi in range(num_filters)]
# name by protein
if tomtom_file is not None and meme_db_file is not None:
motif_protein = get_motif_proteins(meme_db_file)
# hash motifs and q-value's by filter
filter_motifs = {}
tt_in = open(tomtom_file)
tt_in.readline()
for line in tt_in:
a = line.split()
fi = int(a[0][6:])
motif_id = a[1]
qval = float(a[5])
filter_motifs.setdefault(fi, []).append((qval, motif_id))
tt_in.close()
# assign filter's best match
for fi in filter_motifs:
top_motif = sorted(filter_motifs[fi])[0][1]
filter_names[fi] += '_%s' % motif_protein[top_motif]
return np.array(filter_names)
################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
# filter_outs:
# filter_names:
# target_names:
# out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'):
num_seqs = filter_outs.shape[0]
num_targets = len(target_names)
if seq_op == 'mean':
filter_outs_seq = filter_outs.mean(axis=2)
else:
filter_outs_seq = filter_outs.max(axis=2)
# std is sequence by filter.
filter_seqs_std = filter_outs_seq.std(axis=0)
filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0]
filter_names_live = filter_names[filter_seqs_std > 0]
filter_target_cors = np.zeros((len(filter_names_live), num_targets))
for fi in range(len(filter_names_live)):
for ti in range(num_targets):
cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])
filter_target_cors[fi, ti] = cor
cor_df = pd.DataFrame(
filter_target_cors, index=filter_names_live, columns=target_names)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10))
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
# compute filter output means per sequence
filter_seqs = filter_outs.mean(axis=2)
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in sequence segments.
#
# Mean doesn't work well for the smaller segments for some reason, but taking
# the max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
# filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
b = filter_outs.shape[0]
f = filter_outs.shape[1]
l = filter_outs.shape[2]
s = 5
# find the smallest segment count s >= 5 that divides the length evenly
while l % s != 0:
s += 1
print('%d segments of length %d' % (s, l // s))
# split into multiple segments
filter_outs_seg = np.reshape(filter_outs, (b, f, s, l // s))
# max across the segments (as noted above, mean washes out on the smaller segments)
filter_outs_mean = filter_outs_seg.max(axis=3)
# break each segment into a new instance
filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f))
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
if whiten:
dist = 'euclidean'
else:
dist = 'cosine'
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
metric=dist,
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_motif(param_matrix):
nts = 'ACGT'
motif_list = []
for v in range(param_matrix.shape[1]):
max_n = 0
for n in range(1, 4):
if param_matrix[n, v] > param_matrix[max_n, v]:
max_n = n
if param_matrix[max_n, v] > 0:
motif_list.append(nts[max_n])
else:
motif_list.append('N')
return ''.join(motif_list)
################################################################################
# filter_possum
#
# Write a Possum-style motif
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200):
# possible trim
trim_start = 0
trim_end = param_matrix.shape[1] - 1
trim_t = 0.3
if trim_filters:
# trim PWM of uninformative prefix
while trim_start < param_matrix.shape[1] and np.max(
param_matrix[:, trim_start]) - np.min(
param_matrix[:, trim_start]) < trim_t:
trim_start += 1
# trim PWM of uninformative suffix
while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(
param_matrix[:, trim_end]) < trim_t:
trim_end -= 1
if trim_start < trim_end:
possum_out = open(possum_file, 'w')
print('BEGIN GROUP', file=possum_out)
print('BEGIN FLOAT', file=possum_out)
print('ID %s' % motif_id, file=possum_out)
print('AP DNA', file=possum_out)
print('LE %d' % (trim_end + 1 - trim_start), file=possum_out)
for ci in range(trim_start, trim_end + 1):
print(
'MA %s' % ' '.join(['%.2f' % (mult * n)
for n in param_matrix[:, ci]]),
file=possum_out)
print('END', file=possum_out)
print('END', file=possum_out)
possum_out.close()
################################################################################
# plot_filter_heat
#
# Plot a heatmap of the filter's parameters.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_heat(param_matrix, out_pdf):
param_range = abs(param_matrix).max()
sns.set(font_scale=2)
plt.figure(figsize=(param_matrix.shape[1], 4))
sns.heatmap(
param_matrix,
cmap='PRGn',
linewidths=0.2,
vmin=-param_range,
vmax=param_range)
ax = plt.gca()
ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10)
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None):
if maxpct_t:
all_outs = np.ravel(filter_outs)
all_outs_mean = all_outs.mean()
all_outs_norm = all_outs - all_outs_mean
raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean
left_pad = (filter_size - 1) // 2
right_pad = filter_size - left_pad
# print fasta file of positive outputs
filter_fasta_out = open('%s.fa' % out_prefix, 'w')
filter_count = 0
for i in range(filter_outs.shape[0]):
for j in range(filter_outs.shape[1]):
if filter_outs[i, j] > raw_t:
# construct kmer
kmer = ''
# determine boundaries, considering padding
fstart = j - left_pad
fend = fstart + filter_size
# if it starts in left_pad
if fstart < 0:
kmer += 'N' * (-fstart)
fstart = 0
# add primary sequence
kmer += seqs[i][fstart:fend]
# if it ends in right_pad
if fend > len(seqs[i]):
kmer += 'N' * (fend - len(seqs[i]))
# output
print('>%d_%d' % (i, j), file=filter_fasta_out)
print(kmer, file=filter_fasta_out)
filter_count += 1
filter_fasta_out.close()
# make weblogo
if filter_count > 0:
weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
out_prefix)
subprocess.call(weblogo_cmd, shell=True)
################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_score_density(f_scores, out_pdf):
sns.set(font_scale=1.3)
plt.figure()
sns.distplot(f_scores, kde=False)
plt.xlabel('ReLU output')
plt.savefig(out_pdf)
plt.close()
return f_scores.mean(), f_scores.std()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
# pdb.runcall(main)
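A usage sketch based on the OptionParser definition in main(); all file names here are hypothetical, and BASENJIDIR must be set because the -m default reads os.environ['BASENJIDIR']:
# export BASENJIDIR=/path/to/basenji
# python basenji_motifs.py -o motifs_out -s 1000 -t params.txt model.tf data.h5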
| 29.282082
| 99
| 0.585687
| 3,259
| 24,187
| 4.136545
| 0.168457
| 0.02893
| 0.016393
| 0.006528
| 0.270306
| 0.221719
| 0.18871
| 0.168237
| 0.151102
| 0.136414
| 0
| 0.014731
| 0.20573
| 24,187
| 825
| 100
| 29.317576
| 0.687002
| 0.187332
| 0
| 0.25
| 0
| 0.002193
| 0.105671
| 0.007983
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032895
| false
| 0.002193
| 0.028509
| 0
| 0.076754
| 0.065789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86db53b7a1cf34f8c926e78563b430e45842c3b8
| 1,337
|
py
|
Python
|
apps/shop/urls.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | 1
|
2021-09-29T18:48:00.000Z
|
2021-09-29T18:48:00.000Z
|
apps/shop/urls.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | null | null | null |
apps/shop/urls.py
|
Joetib/jshop
|
810ce5dcf2cf2d23b45536dd0c8806efd3b7fc91
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = "shop"
urlpatterns = [
path('', views.HomePage.as_view(), name="home-page"),
path('shop/', views.ProductListView.as_view(), name="product-list"),
path('shop/<int:category_pk>/', views.ProductListView.as_view(), name="product-list"),
path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name="product-detail"),
path('cart/', views.cart_view, name="cart"),
path('cart/add/<int:product_pk>/', views.add_product_to_order, name="add-product-to-cart"),
path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name="add-product-to-cart-json"),
path('checkout/', views.CheckOut.as_view(), name="checkout"),
path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name="checkout"),
path('payment/', views.PaymentChoice.as_view(), name="payment-choice"),
path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name="momo-payment"),
path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name="confirm-momo-payment"),
path('orders/', views.OrderList.as_view(), name="order-list"),
path('orders/<int:pk>/', views.OrderDetail.as_view(), name="order-detail"),
path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(), name="order-item-detail"),
]
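Because this URLconf sets app_name = "shop", the named routes above are reversed through that namespace; a short sketch with hypothetical primary keys:
from django.urls import reverse

detail_url = reverse('shop:product-detail', kwargs={'pk': 42})
item_url = reverse('shop:order-item-detail', kwargs={'order_id': 7, 'pk': 3})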
| 58.130435
| 109
| 0.691847
| 185
| 1,337
| 4.859459
| 0.243243
| 0.115684
| 0.133482
| 0.05673
| 0.313682
| 0.246941
| 0.246941
| 0.10901
| 0.10901
| 0
| 0
| 0
| 0.091249
| 1,337
| 22
| 110
| 60.772727
| 0.739918
| 0
| 0
| 0
| 0
| 0
| 0.350037
| 0.18175
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86db5b39f7333cdce223e5a0be6e734eb216f5d2
| 11,028
|
py
|
Python
|
surpyval/parametric/expo_weibull.py
|
dfm/SurPyval
|
014fba8f1d4a0f43218a3713ce80a78191ad8be9
|
[
"MIT"
] | null | null | null |
surpyval/parametric/expo_weibull.py
|
dfm/SurPyval
|
014fba8f1d4a0f43218a3713ce80a78191ad8be9
|
[
"MIT"
] | null | null | null |
surpyval/parametric/expo_weibull.py
|
dfm/SurPyval
|
014fba8f1d4a0f43218a3713ce80a78191ad8be9
|
[
"MIT"
] | null | null | null |
import autograd.numpy as np
from scipy.stats import uniform
from autograd import jacobian
from numpy import euler_gamma
from scipy.special import gamma as gamma_func
from scipy.special import ndtri as z
from scipy import integrate
from scipy.optimize import minimize
from surpyval import parametric as para
from surpyval import nonparametric as nonp
from surpyval.parametric.parametric_fitter import ParametricFitter
from .fitters.mpp import mpp
class ExpoWeibull_(ParametricFitter):
def __init__(self, name):
self.name = name
self.k = 3
self.bounds = ((0, None), (0, None), (0, None),)
self.support = (0, np.inf)
self.plot_x_scale = 'log'
self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002,
0.003, 0.005, 0.01, 0.02, 0.03, 0.05,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 0.95, 0.99, 0.999, 0.9999]
self.param_names = ['alpha', 'beta', 'mu']
self.param_map = {
'alpha' : 0,
'beta' : 1,
'mu' : 2
}
def _parameter_initialiser(self, x, c=None, n=None, offset=False):
log_x = np.log(x)
log_x[np.isnan(log_x)] = 0
gumb = para.Gumbel.fit(log_x, c, n, how='MLE')
if not gumb.res.success:
gumb = para.Gumbel.fit(log_x, c, n, how='MPP')
mu, sigma = gumb.params
alpha, beta = np.exp(mu), 1. / sigma
if (np.isinf(alpha) | np.isnan(alpha)):
alpha = np.median(x)
if (np.isinf(beta) | np.isnan(beta)):
beta = 1.
if offset:
gamma = np.min(x) - (np.max(x) - np.min(x))/10.
return gamma, alpha, beta, 1.
else:
return alpha, beta, 1.
def sf(self, x, alpha, beta, mu):
r"""
Survival (or reliability) function for the ExpoWeibull Distribution:
.. math::
R(x) = 1 - \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
sf : scalar or numpy array
The value(s) of the reliability function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.sf(x, 3, 4, 1.2)
array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02,
5.34717283e-04])
"""
return 1 - np.power(1 - np.exp(-(x / alpha)**beta), mu)
def ff(self, x, alpha, beta, mu):
r"""
Failure (CDF or unreliability) function for the ExpoWeibull Distribution:
.. math::
F(x) = \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
ff : scalar or numpy array
The value(s) of the failure function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.ff(x, 3, 4, 1.2)
array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528])
"""
return np.power(1 - np.exp(-(x / alpha)**beta), mu)
def cs(self, x, X, alpha, beta, mu):
r"""
Conditional survival (or reliability) function for the ExpoWeibull Distribution:
.. math::
R(x, X) = \frac{R(x + X)}{R(X)}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
X : numpy array or scalar
The value(s) on which each survival probability is conditioned
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
cs : scalar or numpy array
The value(s) of the conditional survival function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.cs(x, 1, 3, 4, 1.2)
array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04,
1.35732908e-07])
"""
return self.sf(x + X, alpha, beta, mu) / self.sf(X, alpha, beta, mu)
def df(self, x, alpha, beta, mu):
r"""
Density function for the ExpoWeibull Distribution:
.. math::
f(x) = \mu \left ( \frac{\beta}{\alpha} \right ) \left ( \frac{x}{\alpha} \right )^{\beta - 1} \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu - 1} e^{- \left ( \frac{x}{\alpha} \right )^\beta}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
df : scalar or numpy array
The value(s) of the density function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.df(x, 3, 4, 1.2)
array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058])
"""
return (beta * mu * x**(beta - 1)) / (alpha**beta) \
* (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \
* np.exp(-(x/alpha)**beta)
def hf(self, x, alpha, beta, mu):
r"""
Instantaneous hazard rate for the ExpoWeibull Distribution:
.. math::
h(x) = \frac{f(x)}{R(x)}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
hf : scalar or numpy array
The value(s) of the instantaneous hazard rate at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.hf(x, 3, 4, 1.2)
array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436])
"""
return self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta, mu)
def Hf(self, x, alpha, beta, mu):
r"""
Cumulative hazard rate for the ExpoWeibull Distribution:
.. math::
H(x) = -\ln \left ( R(x) \right )
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
Hf : scalar or numpy array
The value(s) of the cumulative hazard rate at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.Hf(x, 3, 4, 1.2)
array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00,
7.53377239e+00])
"""
return -np.log(self.sf(x, alpha, beta, mu))
def qf(self, p, alpha, beta, mu):
r"""
Quantile function for the ExpoWeibull Distribution:
.. math::
q(p) = \alpha \left [ -\ln \left ( 1 - p^{1/\mu} \right ) \right ]^{1/\beta}
Parameters
----------
p : numpy array or scalar
The percentiles at which the quantile will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
Q : scalar or numpy array
The quantiles for the Weibull distribution at each value p
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> p = np.array([.1, .2, .3, .4, .5])
>>> ExpoWeibull.qf(p, 3, 4, 1.2)
array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988])
"""
return alpha * (-np.log(1 - p**(1./mu)))**(1/beta)
def mean(self, alpha, beta, mu):
func = lambda x : x * self.df(x, alpha, beta, mu)
top = 2 * self.qf(0.999, alpha, beta, mu)
return integrate.quadrature(func, 0, top)[0]
def random(self, size, alpha, beta, mu):
U = uniform.rvs(size=size)
return self.qf(U, alpha, beta, mu)
def mpp_x_transform(self, x, gamma=0):
return np.log(x - gamma)
def mpp_y_transform(self, y, *params):
mu = params[-1]
mask = ((y == 0) | (y == 1))
out = np.zeros_like(y)
out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu))))
out[mask] = np.nan
return out
def mpp_inv_y_transform(self, y, *params):
i = len(params)
mu = params[i-1]
return (1 - np.exp(-np.exp(y)))**mu
def unpack_rr(self, params, rr):
#UPDATE ME
if rr == 'y':
beta = params[0]
alpha = np.exp(params[1]/-beta)
elif rr == 'x':
beta = 1./params[0]
alpha = np.exp(params[1] / (beta * params[0]))
return alpha, beta, 1.
ExpoWeibull = ExpoWeibull_('ExpoWeibull')
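A short sketch exercising the instance created above, using only methods defined in this file (the parameter values are hypothetical):
import numpy as np

x = np.array([1, 2, 3, 4, 5])
print(ExpoWeibull.sf(x, 3, 4, 1.2))    # survival probabilities
print(ExpoWeibull.qf(0.5, 3, 4, 1.2))  # median
samples = ExpoWeibull.random(100, 3, 4, 1.2)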
| 32.151603
| 228
| 0.537087
| 1,428
| 11,028
| 4.126751
| 0.151261
| 0.059393
| 0.080774
| 0.137791
| 0.648566
| 0.628712
| 0.60784
| 0.597319
| 0.565077
| 0.541999
| 0
| 0.072981
| 0.336507
| 11,028
| 343
| 229
| 32.151604
| 0.732404
| 0.525118
| 0
| 0.020408
| 0
| 0
| 0.011711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153061
| false
| 0
| 0.122449
| 0.010204
| 0.438776
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86db8d66e4f0f969e4dab6cb93ed65e00e44883f
| 3,292
|
py
|
Python
|
tests/test_base_table.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | 110
|
2021-03-09T04:10:40.000Z
|
2022-03-13T10:28:20.000Z
|
tests/test_base_table.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 54
|
2021-06-20T18:53:44.000Z
|
2022-03-29T22:13:07.000Z
|
tests/test_base_table.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 11
|
2021-06-18T03:03:14.000Z
|
2022-02-25T11:48:26.000Z
|
import pytest
from datar import stats
from datar.base import *
from datar import f
from datar.datasets import warpbreaks, state_division, state_region, airquality
from .conftest import assert_iterable_equal
def test_table():
# https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table
z = stats.rpois(100, 5)
x = table(z)
assert sum(x.values.flatten()) == 100
#-----------------
with data_context(warpbreaks) as _:
tab = table(f.wool, f.tension)
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
tab = table(warpbreaks.loc[:, ['wool', 'tension']])
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
#-----------------
tab = table(state_division, state_region)
assert tab.loc['New England', 'Northeast'] == 6
#-----------------
with data_context(airquality) as _:
qt = stats.quantile(f.Temp)
ct = cut(f.Temp, qt)
tab = table(ct, f.Month)
assert tab.iloc[0,0] == 24
#-----------------
a = letters[:3]
tab = table(a, sample(a))
assert sum(tab.values.flatten()) == 3
#-----------------
tab = table(a, sample(a), dnn=['x', 'y'])
assert tab.index.name == 'x'
assert tab.columns.name == 'y'
#-----------------
a = c(NA, Inf, (1.0/(i+1) for i in range(3)))
a = a * 10
# tab = table(a)
# assert_iterable_equal(tab.values.flatten(), [10] * 4)
tab = table(a, exclude=None)
assert_iterable_equal(tab.values.flatten(), [10] * 5)
#------------------
b = as_factor(rep(c("A","B","C"), 10))
tab = table(b)
assert tab.shape == (1, 3)
assert_iterable_equal(tab.values.flatten(), [10] * 3)
tab = table(b, exclude="B")
assert tab.shape == (1, 2)
assert_iterable_equal(tab.values.flatten(), [10] * 2)
assert 'B' not in tab.columns
#-------------------
d = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, exclude="B", dnn=['x'])
assert_iterable_equal(tab.columns.to_list(), ["A", "C", "D", "E"])
assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0])
d2 = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, d2, exclude="B")
assert tab.shape == (4, 4)
tab = table("abc", "cba", dnn='x')
assert tab.shape == (3,3)
assert sum(tab.values.flatten()) == 3
with data_context(airquality) as _:
tab = table(f.Ozone, f.Solar_R, exclude=None)
assert '<NA>' in tab.columns
assert '<NA>' in tab.index
def test_table_error():
from datar.datasets import iris, warpbreaks
with pytest.raises(ValueError):
table(iris)
with pytest.raises(ValueError):
table(warpbreaks, iris)
with pytest.raises(ValueError):
table(warpbreaks.wool, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, []])
with pytest.raises(ValueError):
table(iris.iloc[:, [1,2]], iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris.iloc[:, []])
| 31.056604
| 79
| 0.564702
| 456
| 3,292
| 4.002193
| 0.225877
| 0.06137
| 0.093699
| 0.096438
| 0.531507
| 0.468493
| 0.404932
| 0.246027
| 0.200548
| 0.200548
| 0
| 0.025365
| 0.209599
| 3,292
| 105
| 80
| 31.352381
| 0.676018
| 0.085055
| 0
| 0.22973
| 0
| 0
| 0.029333
| 0
| 0
| 0
| 0
| 0
| 0.351351
| 1
| 0.027027
| false
| 0
| 0.094595
| 0
| 0.121622
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86dbc8be4491e9aac31a1a68443d62ca3e952415
| 1,922
|
py
|
Python
|
cqlengine/tests/statements/test_update_statement.py
|
dokai/cqlengine
|
a080aff3a73351d37126b14eef606061b445aa37
|
[
"BSD-3-Clause"
] | null | null | null |
cqlengine/tests/statements/test_update_statement.py
|
dokai/cqlengine
|
a080aff3a73351d37126b14eef606061b445aa37
|
[
"BSD-3-Clause"
] | null | null | null |
cqlengine/tests/statements/test_update_statement.py
|
dokai/cqlengine
|
a080aff3a73351d37126b14eef606061b445aa37
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import TestCase
from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause
from cqlengine.operators import *
class UpdateStatementTests(TestCase):
def test_table_rendering(self):
""" tests that fields are properly added to the select statement """
us = UpdateStatement('table')
self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us))
self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))
def test_rendering(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertEqual(unicode(us), 'UPDATE table SET "a" = :0, "c" = :1 WHERE "a" = :2', unicode(us))
def test_context(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})
def test_context_update(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
us.update_context_id(3)
self.assertEqual(unicode(us), 'UPDATE table SET "a" = :4, "c" = :5 WHERE "a" = :3')
self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})
def test_additional_rendering(self):
us = UpdateStatement('table', ttl=60)
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertIn('USING TTL 60', unicode(us))
| 44.697674
| 104
| 0.648283
| 233
| 1,922
| 5.201717
| 0.253219
| 0.04538
| 0.086634
| 0.121287
| 0.622937
| 0.511551
| 0.511551
| 0.511551
| 0.459571
| 0.388614
| 0
| 0.010911
| 0.189386
| 1,922
| 42
| 105
| 45.761905
| 0.767009
| 0.031217
| 0
| 0.454545
| 0
| 0.060606
| 0.109552
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.151515
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86dd8cfba25399e11b5e6b0c69e97eec2cc7d779
| 1,590
|
py
|
Python
|
course-code/imooc-tf-mnist-flask/mnist/module.py
|
le3t/ko-repo
|
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
|
[
"Apache-2.0"
] | 30
|
2018-12-06T02:17:45.000Z
|
2021-04-07T09:03:36.000Z
|
course-code/imooc-tf-mnist-flask/mnist/module.py
|
Artister/tutorials-java
|
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
|
[
"Apache-2.0"
] | 3
|
2019-08-26T13:41:57.000Z
|
2019-08-26T13:44:21.000Z
|
course-code/imooc-tf-mnist-flask/mnist/module.py
|
Artister/tutorials-java
|
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
|
[
"Apache-2.0"
] | 20
|
2018-12-27T08:31:02.000Z
|
2020-12-03T08:35:28.000Z
|
import tensorflow as tf
# y = softmax(x*a + b), a simple linear model
def regression(x):
a = tf.Variable(tf.zeros([784, 10]), name="a")
b = tf.Variable(tf.zeros([10]), name="b")
y = tf.nn.softmax(tf.matmul(x, a) + b)
return y, [a, b]
# define the convolutional model
def convolutional(x, keep_prob):
def conv2d(x, w):
return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
x_image = tf.reshape(x, [-1, 28, 28, 1])
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# fully connected layer
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2))
return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, w_fc2, b_fc2]
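A minimal TF1-style training sketch for the regression model above; the import path and the mnist batch source are assumptions, not part of this file:
import tensorflow as tf
from mnist import module  # hypothetical import path for the file above

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
y, variables = module.regression(x)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # for _ in range(1000):
    #     batch_xs, batch_ys = mnist.train.next_batch(100)  # hypothetical data source
    #     sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})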
| 30.576923
| 72
| 0.620755
| 278
| 1,590
| 3.327338
| 0.23741
| 0.034595
| 0.032432
| 0.036757
| 0.043243
| 0.043243
| 0
| 0
| 0
| 0
| 0
| 0.092742
| 0.220126
| 1,590
| 51
| 73
| 31.176471
| 0.653226
| 0.019497
| 0
| 0.055556
| 0
| 0
| 0.006431
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.027778
| 0.055556
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e1dc1697df65dd8302b1c8457579ff83a8e10d
| 1,074
|
py
|
Python
|
faceai/gender.py
|
dlzdy/faceai
|
4b1e41d4c394c00da51533562b76306d86493f72
|
[
"MIT"
] | 1
|
2021-05-18T07:31:14.000Z
|
2021-05-18T07:31:14.000Z
|
faceai/gender.py
|
dlzdy/faceai
|
4b1e41d4c394c00da51533562b76306d86493f72
|
[
"MIT"
] | null | null | null |
faceai/gender.py
|
dlzdy/faceai
|
4b1e41d4c394c00da51533562b76306d86493f72
|
[
"MIT"
] | null | null | null |
#coding=utf-8
# gender recognition
import cv2
from keras.models import load_model
import numpy as np
import chineseText
img = cv2.imread("img/gather.png")
face_classifier = cv2.CascadeClassifier(
    r"d:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140))
gender_classifier = load_model(
"classifier/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: '女', 1: '男'}
color = (255, 255, 255)
for (x, y, w, h) in faces:
face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
face = cv2.resize(face, (48, 48))
face = np.expand_dims(face, 0)
face = face / 255.0
gender_label_arg = np.argmax(gender_classifier.predict(face))
gender = gender_labels[gender_label_arg]
cv2.rectangle(img, (x, y), (x + h, y + w), color, 2)
img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 30.685714
| 103
| 0.691806
| 164
| 1,074
| 4.420732
| 0.493902
| 0.024828
| 0.038621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068282
| 0.154562
| 1,074
| 34
| 104
| 31.588235
| 0.730176
| 0.014898
| 0
| 0
| 0
| 0.037037
| 0.157197
| 0.137311
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e1dfa0c33f00a823a44b2f6b5cc3f12ae76c76
| 5,872
|
py
|
Python
|
csm_web/scheduler/tests/utils.py
|
mudit2103/csm_web
|
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
|
[
"MIT"
] | null | null | null |
csm_web/scheduler/tests/utils.py
|
mudit2103/csm_web
|
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
|
[
"MIT"
] | null | null | null |
csm_web/scheduler/tests/utils.py
|
mudit2103/csm_web
|
3b7fd9ca7269ad4cb57bf264cf62a620e02d3780
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from os import path
from rest_framework import status
from rest_framework.test import APIClient
import random
from scheduler.models import Profile
from scheduler.factories import (
CourseFactory,
SpacetimeFactory,
UserFactory,
ProfileFactory,
SectionFactory,
AttendanceFactory,
OverrideFactory,
create_attendances_for,
)
random.seed(0)
COURSE_NAMES = ("CS88", "CS61A", "CS61B", "CS70", "CS61C", "EE16A")
ROLE_MAP = Profile.ROLE_MAP
BASE_PATH = "/scheduler"
# ----- REQUEST UTILITIES -----
def fail_msg(ep, resp):
return "Endpoint: {}\nResponse Content: {}".format(ep, resp.content)
class APITestCase(TestCase):
def get_client_for(self, user):
"""Returns an APIClient object that is logged in as the provided user."""
client = APIClient()
client.force_authenticate(user)
return client
def request(self, method, endpoint, exp_code=None, data=None):
"""
Performs a request to the specified endpoint and returns the response object.
Also checks if the status code of the response is exp_code, if provided.
The method parameter should be a get/post/etc from an APIClient object.
"""
resp = method(path.join(BASE_PATH, endpoint.strip("/")), follow=True, data=data)
if exp_code is not None:
self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp))
return resp
def req_fails_perms(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it fails
due to the user lacking proper permissions.
The method parameter should be a get/post/etc from an APIClient object.
Returns the response object afterwards.
"""
return self.request(
method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data
)
def req_fails_method(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it fails
due to the endpoint not supporting the provided method.
Returns the response object.
"""
return self.request(
method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data
)
def req_succeeds(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it succeeds.
The method parameter should be a get/post/etc from an APIClient object.
Returns the response object.
"""
return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data)
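# A minimal usage sketch (hypothetical test class and user, not part of this
# module): subclasses pick a client for some user and assert on responses.
#
#   class ProfileListTest(APITestCase):
#       def test_list(self):
#           client = self.get_client_for(some_user)
#           self.req_succeeds(client.get, "/profiles")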
# ----- MODEL GENERATION -----
def random_objs(clazz, n=1):
"""
Generates N instances of the provided class, retrieved from the database.
"""
src = clazz.objects.all()
for _ in range(n):
yield random.choice(src)
def make_test_courses():
"""Creates course objects and persists them to database."""
return [CourseFactory.create(name=name) for name in COURSE_NAMES]
def make_test_users(n):
"""Creates N test users and persists them to database."""
return UserFactory.create_batch(n)
def give_role(user, role, course):
"""
Creates a profile for USER in a given ROLE for the provided COURSE, and
saves the profile to database.
"""
return ProfileFactory.create(
user=user, course=course, leader=None, section=None, role=role
)
def create_empty_section_for(mentor):
"""
Creates a section for MENTOR without populated students.
"""
return SectionFactory.create(course=mentor.course, mentor=mentor)
def enroll_user_as_student(user, section):
"""
Creates a student profile for USER, and assigns them to the given SECTION.
Also creates blank attendances as necessary.
Returns the created profile.
"""
student = give_role(user, Profile.STUDENT, section.course)
student.section = section
student.leader = section.leader
create_attendances_for(student)
return student
def gen_test_data(cls, NUM_USERS=300):
"""
Adds NUM_USERS users to the database and initializes profiles for them as follows:
- 2 coords per course
- 4 SMs per coord, each with a section of 3-6 students
- 3 JMs per SM, each with a section of 3-6 students
"""
users = iter(make_test_users(NUM_USERS))
courses = make_test_courses()
# for sanity tests, everyone only has one role for now
num_courses = len(courses)
coords, seniors, juniors, students = [], [], [], []
COORD_COUNT = 2
SM_COUNT = 4
JM_COUNT = 3
def assign(role, leader, c, lst):
# returns the profile created
profile = give_role(next(users), role, c)
profile.leader = leader
lst.append(profile)
return profile
try:
for c in courses:
# coords
for i in range(COORD_COUNT):
coord = assign(Profile.COORDINATOR, None, c, coords)
# SMs
for j in range(SM_COUNT):
sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors)
section = create_empty_section_for(sm)
for k in range(random.randint(3, 6)):
students.append(enroll_user_as_student(next(users), section))
# JMs
for k in range(JM_COUNT):
jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors)
# per the docstring, each JM gets their own section
section = create_empty_section_for(jm)
for _ in range(random.randint(3, 6)):
students.append(
enroll_user_as_student(next(users), section)
)
except StopIteration:
pass
cls.users = users
cls.courses = courses
cls.coords = coords
cls.seniors = seniors
cls.juniors = juniors
cls.students = students
| 32.804469
| 88
| 0.647309
| 748
| 5,872
| 4.971925
| 0.270053
| 0.010756
| 0.018284
| 0.022587
| 0.258672
| 0.258672
| 0.242001
| 0.242001
| 0.226943
| 0.214036
| 0
| 0.009287
| 0.266519
| 5,872
| 178
| 89
| 32.988764
| 0.854191
| 0.290531
| 0
| 0.020408
| 0
| 0
| 0.018608
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 1
| 0.142857
| false
| 0.010204
| 0.071429
| 0.010204
| 0.346939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e1fd3bf7ee00e117356675760b13ae01e5890a
| 3,282
|
py
|
Python
|
coldtype/beziers.py
|
tallpauley/coldtype
|
c1811e1d3713ff9c3c804511d6cd607b1d802065
|
[
"Apache-2.0"
] | null | null | null |
coldtype/beziers.py
|
tallpauley/coldtype
|
c1811e1d3713ff9c3c804511d6cd607b1d802065
|
[
"Apache-2.0"
] | null | null | null |
coldtype/beziers.py
|
tallpauley/coldtype
|
c1811e1d3713ff9c3c804511d6cd607b1d802065
|
[
"Apache-2.0"
] | null | null | null |
import math
from fontTools.pens.recordingPen import RecordingPen, replayRecording
from fontTools.misc.bezierTools import calcCubicArcLength, splitCubicAtT
from coldtype.geometry import Rect, Point
def raise_quadratic(start, a, b):
c0 = start
c1 = (c0[0] + (2/3)*(a[0] - c0[0]), c0[1] + (2/3)*(a[1] - c0[1]))
c2 = (b[0] + (2/3)*(a[0] - b[0]), b[1] + (2/3)*(a[1] - b[1]))
c3 = (b[0], b[1])
return [c1, c2, c3]
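# Degree elevation: a quadratic with control points (start, a, b) traces the
# same curve as a cubic with c1 = c0 + (2/3)(a - c0) and c2 = b + (2/3)(a - b),
# which is exactly what the expressions above compute.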
__length_cache = {}
__split_cache = {}
def splitCubicAtT_cached(a, b, c, d, t):
global __split_cache
abcdt = (a, b, c, d, t)
sc = __split_cache.get(abcdt)
if sc:
return sc
else:
s = splitCubicAtT(a, b, c, d, t)
__split_cache[abcdt] = s
return s
def calcCubicArcLength_cached(a, b, c, d):
#return calcCubicArcLength(a, b, c, d)
global __length_cache
abcd = (a, b, c, d)
lc = __length_cache.get(abcd)
if lc:
return lc
else:
l = calcCubicArcLength(a, b, c, d)
__length_cache[abcd] = l
return l
class CurveCutter():
def __init__(self, g, inc=0.0015):
if isinstance(g, RecordingPen):
self.pen = g
else:
self.pen = RecordingPen()
g.draw(self.pen)
self.inc = inc
self.length = self.calcCurveLength()
def calcCurveLength(self):
length = 0
for i, (t, pts) in enumerate(self.pen.value):
if t == "curveTo":
p1, p2, p3 = pts
p0 = self.pen.value[i-1][-1][-1]
length += calcCubicArcLength_cached(p0, p1, p2, p3)
elif t == "lineTo":
pass # todo
return length
def subsegment(self, start=None, end=None):
inc = self.inc
length = self.length
ended = False
_length = 0
out = []
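# Walk the recorded segments, accumulating arc length. Once the cumulative
# length would pass `end`, step the split parameter t by `inc` until the
# left half of the split crosses the target, and keep that partial curve.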
for i, (t, pts) in enumerate(self.pen.value):
if t == "curveTo":
p1, p2, p3 = pts
p0 = self.pen.value[i-1][-1][-1]
length_arc = calcCubicArcLength_cached(p0, p1, p2, p3)
if _length + length_arc < end:
_length += length_arc
else:
t = inc
tries = 0
while not ended:
a, b = splitCubicAtT_cached(p0, p1, p2, p3, t)
length_a = calcCubicArcLength_cached(*a)
if _length + length_a > end:
ended = True
out.append(("curveTo", a[1:]))
else:
t += inc
tries += 1
if t == "lineTo":
pass # TODO
if not ended:
out.append((t, pts))
if out[-1][0] != "endPath":
out.append(("endPath",[]))
return out
def subsegmentPoint(self, start=0, end=1):
inc = self.inc
subsegment = self.subsegment(start=start, end=end)
try:
t, (a, b, c) = subsegment[-2]
tangent = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) + math.pi*.5)
return c, tangent
except ValueError:
return None, None
| 31.557692
| 85
| 0.482937
| 404
| 3,282
| 3.806931
| 0.237624
| 0.013004
| 0.015605
| 0.018205
| 0.208713
| 0.137841
| 0.096229
| 0.096229
| 0.096229
| 0.096229
| 0
| 0.04004
| 0.391225
| 3,282
| 104
| 86
| 31.557692
| 0.72973
| 0.014321
| 0
| 0.182796
| 0
| 0
| 0.014538
| 0
| 0
| 0
| 0
| 0.009615
| 0
| 1
| 0.075269
| false
| 0.021505
| 0.043011
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e21cfc54ba4f492a89adb3a5ddc21c8d452d78
| 3,930
|
py
|
Python
|
p1_navigation/train.py
|
nick0lay/deep-reinforcement-learning
|
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
|
[
"MIT"
] | null | null | null |
p1_navigation/train.py
|
nick0lay/deep-reinforcement-learning
|
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
|
[
"MIT"
] | null | null | null |
p1_navigation/train.py
|
nick0lay/deep-reinforcement-learning
|
5af4daca9850b4e12aec5d8b0dad87f1e22a1f98
|
[
"MIT"
] | null | null | null |
"""
Project for Udacity Nanodegree in Deep Reinforcement Learning
This script trains an agent to navigate (and collect bananas!) in a large, square world.
A reward of +1 is provided for collecting a yellow banana, and a reward of -1 is provided for collecting a blue banana. Thus, the goal of your agent is to collect as many yellow bananas as possible while avoiding blue bananas.
The state space has 37 dimensions and contains the agent's velocity, along with ray-based perception of objects around the agent's forward direction. Given this information, the agent has to learn how to best select actions. Four discrete actions are available, corresponding to:
0 - move forward.
1 - move backward.
2 - turn left.
3 - turn right.
The task is episodic, and in order to solve the environment, your agent must get an average score of +13 over 100 consecutive episodes.
"""
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
from dqn_agent import Agent
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
"""
Unity environment configuration
Mac: "path/to/Banana.app"
Windows (x86): "path/to/Banana_Windows_x86/Banana.exe"
Windows (x86_64): "path/to/Banana_Windows_x86_64/Banana.exe"
Linux (x86): "path/to/Banana_Linux/Banana.x86"
Linux (x86_64): "path/to/Banana_Linux/Banana.x86_64"
Linux (x86, headless): "path/to/Banana_Linux_NoVis/Banana.x86"
Linux (x86_64, headless): "path/to/Banana_Linux_NoVis/Banana.x86_64"
"""
# start Unity environment
env = UnityEnvironment(file_name="Banana.app")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state_size = len(env_info.vector_observations[0])
# initialize agent
agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device)
def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
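# With eps_decay=0.99 and eps_end=0.05, epsilon reaches its floor after
# roughly ln(0.05)/ln(0.99) ~ 300 episodes; action selection is nearly
# greedy from then on.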
for i_episode in range(1, n_episodes+1):
# reset environment
env_info = env.reset(train_mode=True)[brain_name]
# get initial state
state = env_info.vector_observations[0]
# set initial score
score = 0
while True:
action = agent.act(state, eps)
env_info = env.step(action)[brain_name]
next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
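# Note: training stops at a 100-episode average of +14, slightly above the
# +13 threshold the task requires (see the module docstring).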
if np.mean(scores_window)>=14:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
train()
| 42.717391
| 279
| 0.689567
| 559
| 3,930
| 4.711986
| 0.363148
| 0.02126
| 0.031891
| 0.025816
| 0.22855
| 0.172741
| 0.100987
| 0.100987
| 0.071374
| 0.045558
| 0
| 0.029155
| 0.214504
| 3,930
| 92
| 280
| 42.717391
| 0.824101
| 0.362341
| 0
| 0.047619
| 0
| 0
| 0.08055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.119048
| 0
| 0.166667
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e5087a507beef54f4930afdd98c56727fc0500
| 2,869
|
py
|
Python
|
models/model_factory.py
|
jac99/Egonn
|
075e00368a1676df741a35f42f6f38497da9d58f
|
[
"MIT"
] | 9
|
2021-10-31T07:11:58.000Z
|
2022-03-29T14:06:49.000Z
|
models/model_factory.py
|
jac99/Egonn
|
075e00368a1676df741a35f42f6f38497da9d58f
|
[
"MIT"
] | null | null | null |
models/model_factory.py
|
jac99/Egonn
|
075e00368a1676df741a35f42f6f38497da9d58f
|
[
"MIT"
] | 3
|
2021-11-12T17:42:41.000Z
|
2022-03-11T00:41:47.000Z
|
# Warsaw University of Technology
from layers.eca_block import ECABasicBlock
from models.minkgl import MinkHead, MinkTrunk, MinkGL
from models.minkloc import MinkLoc
from third_party.minkloc3d.minkloc import MinkLoc3D
from misc.utils import ModelParams
def model_factory(model_params: ModelParams):
in_channels = 1
if model_params.model == 'MinkLoc':
model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size,
output_dim=model_params.output_dim, planes=model_params.planes,
layers=model_params.layers, num_top_down=model_params.num_top_down,
conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block,
pooling_method=model_params.pooling)
elif model_params.model == 'MinkLoc3D':
model = MinkLoc3D()
elif 'egonn' in model_params.model:
model = create_egonn_model(model_params)
else:
raise NotImplementedError('Model not implemented: {}'.format(model_params.model))
return model
def create_egonn_model(model_params: ModelParams):
model_name = model_params.model
global_normalize = False
local_normalize = True
if model_name == 'egonn':
# THIS IS OUR BEST MODEL
block = ECABasicBlock
planes = [32, 64, 64, 128, 128, 128, 128]
layers = [1, 1, 1, 1, 1, 1, 1]
global_in_levels = [5, 6, 7]
global_map_channels = 128
global_descriptor_size = 256
local_in_levels = [3, 4]
local_map_channels = 64
local_descriptor_size = 128
else:
raise NotImplementedError(f'Unknown model: {model_name}')
# planes lists the number of channels for levels 1 and above
global_in_channels = [planes[i-1] for i in global_in_levels]
head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels)
if len(local_in_levels) > 0:
local_in_channels = [planes[i-1] for i in local_in_levels]
head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels)
else:
head_local = None
min_out_level = len(planes)
if len(global_in_levels) > 0:
min_out_level = min(min_out_level, min(global_in_levels))
if len(local_in_levels) > 0:
min_out_level = min(min_out_level, min(local_in_levels))
trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block,
min_out_level=min_out_level)
net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size,
local_normalize=local_normalize, global_head=head_global,
global_descriptor_size=global_descriptor_size, global_pool_method='GeM',
global_normalize=global_normalize, quantizer=model_params.quantizer)
return net
| 36.782051
| 100
| 0.694319
| 379
| 2,869
| 4.926121
| 0.245383
| 0.100161
| 0.041243
| 0.037493
| 0.113551
| 0.084628
| 0.065345
| 0.065345
| 0.039636
| 0.039636
| 0
| 0.025792
| 0.229697
| 2,869
| 78
| 101
| 36.782051
| 0.819005
| 0.037295
| 0
| 0.090909
| 0
| 0
| 0.029358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.090909
| 0
| 0.163636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e596ecc94466fc1c8a56bb395c9ae7c14904e6
| 19,380
|
py
|
Python
|
mdns/Phidget22Python/Phidget22/Phidget.py
|
rabarar/phidget_docker
|
ceca56c86d27f291a4300a1257c02096862335ec
|
[
"MIT"
] | null | null | null |
mdns/Phidget22Python/Phidget22/Phidget.py
|
rabarar/phidget_docker
|
ceca56c86d27f291a4300a1257c02096862335ec
|
[
"MIT"
] | null | null | null |
mdns/Phidget22Python/Phidget22/Phidget.py
|
rabarar/phidget_docker
|
ceca56c86d27f291a4300a1257c02096862335ec
|
[
"MIT"
] | null | null | null |
import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.Async import *
from Phidget22.ChannelClass import ChannelClass
from Phidget22.ChannelSubclass import ChannelSubclass
from Phidget22.DeviceClass import DeviceClass
from Phidget22.DeviceID import DeviceID
from Phidget22.ErrorEventCode import ErrorEventCode
from Phidget22.PhidgetException import PhidgetException
class Phidget:
def __init__(self):
self.handle = ctypes.c_void_p()
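# The Phidget22 C library expects stdcall callbacks on Windows and cdecl
# elsewhere, hence WINFUNCTYPE vs CFUNCTYPE for every callback factory below.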
if sys.platform == 'win32':
self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
else:
self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
self._Attach = None
self._onAttach = None
if sys.platform == 'win32':
self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
else:
self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
self._Detach = None
self._onDetach = None
if sys.platform == 'win32':
self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
else:
self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
self._Error = None
self._onError = None
if sys.platform == 'win32':
self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
else:
self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
self._PropertyChange = None
self._onPropertyChange = None
def __eq__(self, other):
return hasattr(other, 'handle') and self.handle.value == other.handle.value
def __hash__(self):
return self.handle.value
def __str__(self):
_value = (ctypes.c_char * 65536)()
_valueLen = ctypes.c_int32(65536)
if self.getIsChannel():
__func = PhidgetSupport.getDll().channelInfo
else:
__func = PhidgetSupport.getDll().deviceInfo
result = __func(self.handle, ctypes.byref(_value), _valueLen)
return _value.value.decode('utf-8')
def __del__(self):
__func = PhidgetSupport.getDll().Phidget_delete
__func.restype = ctypes.c_int32
res = __func(ctypes.byref(self.handle))
self.handle = None
if res > 0:
raise PhidgetException(res)
def _localAttachEvent(self, handle, userPtr):
if self._Attach == None:
return
self._Attach(self)
def setOnAttachHandler(self, handler):
if handler == None:
self._Attach = None
self._onAttach = None
else:
self._Attach = handler
self._onAttach = self._AttachFactory(self._localAttachEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnAttachHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onAttach, None)
except RuntimeError:
self._Attach = None
self._onAttach = None
def _localDetachEvent(self, handle, userPtr):
if self._Detach == None:
return
self._Detach(self)
def setOnDetachHandler(self, handler):
if handler == None:
self._Detach = None
self._onDetach = None
else:
self._Detach = handler
self._onDetach = self._DetachFactory(self._localDetachEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnDetachHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onDetach, None)
except RuntimeError:
self._Detach = None
self._onDetach = None
def _localErrorEvent(self, handle, userPtr, Code, Description):
if self._Error == None:
return
Description = Description.decode('utf-8')
self._Error(self, Code, Description)
def setOnErrorHandler(self, handler):
if handler == None:
self._Error = None
self._onError = None
else:
self._Error = handler
self._onError = self._ErrorFactory(self._localErrorEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnErrorHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onError, None)
except RuntimeError:
self._Error = None
self._onError = None
def _localPropertyChangeEvent(self, handle, userPtr, propertyName):
if self._PropertyChange == None:
return
propertyName = propertyName.decode('utf-8')
self._PropertyChange(self, propertyName)
def setOnPropertyChangeHandler(self, handler):
if handler == None:
self._PropertyChange = None
self._onPropertyChange = None
else:
self._PropertyChange = handler
self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onPropertyChange, None)
except RuntimeError:
self._PropertyChange = None
self._onPropertyChange = None
@staticmethod
def finalize(flags):
_flags = ctypes.c_int32(flags)
__func = PhidgetSupport.getDll().Phidget_finalize
__func.restype = ctypes.c_int32
result = __func(_flags)
if result > 0:
raise PhidgetException(result)
@staticmethod
def getLibraryVersion():
_LibraryVersion = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getLibraryVersion
__func.restype = ctypes.c_int32
result = __func(ctypes.byref(_LibraryVersion))
if result > 0:
raise PhidgetException(result)
return _LibraryVersion.value.decode('utf-8')
@staticmethod
def getLibraryVersionNumber():
_LibraryVersionNumber = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber
__func.restype = ctypes.c_int32
result = __func(ctypes.byref(_LibraryVersionNumber))
if result > 0:
raise PhidgetException(result)
return _LibraryVersionNumber.value.decode('utf-8')
@staticmethod
def resetLibrary():
__func = PhidgetSupport.getDll().Phidget_resetLibrary
__func.restype = ctypes.c_int32
result = __func()
if result > 0:
raise PhidgetException(result)
def getAttached(self):
_Attached = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getAttached
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Attached))
if result > 0:
raise PhidgetException(result)
return _Attached.value
def getChannel(self):
_Channel = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Channel))
if result > 0:
raise PhidgetException(result)
return _Channel.value
def setChannel(self, Channel):
_Channel = ctypes.c_int(Channel)
__func = PhidgetSupport.getDll().Phidget_setChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, _Channel)
if result > 0:
raise PhidgetException(result)
def getChannelClass(self):
_ChannelClass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannelClass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelClass))
if result > 0:
raise PhidgetException(result)
return _ChannelClass.value
def getChannelClassName(self):
_ChannelClassName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getChannelClassName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelClassName))
if result > 0:
raise PhidgetException(result)
return _ChannelClassName.value.decode('utf-8')
def getChannelName(self):
_ChannelName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getChannelName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelName))
if result > 0:
raise PhidgetException(result)
return _ChannelName.value.decode('utf-8')
def getChannelSubclass(self):
_ChannelSubclass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannelSubclass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelSubclass))
if result > 0:
raise PhidgetException(result)
return _ChannelSubclass.value
def close(self):
__func = PhidgetSupport.getDll().Phidget_close
__func.restype = ctypes.c_int32
result = __func(self.handle)
if result > 0:
raise PhidgetException(result)
def getDeviceChannelCount(self, cls):
_cls = ctypes.c_int(cls)
_count = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount
__func.restype = ctypes.c_int32
result = __func(self.handle, _cls, ctypes.byref(_count))
if result > 0:
raise PhidgetException(result)
return _count.value
def getDeviceClass(self):
_DeviceClass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceClass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceClass))
if result > 0:
raise PhidgetException(result)
return _DeviceClass.value
def getDeviceClassName(self):
_DeviceClassName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceClassName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceClassName))
if result > 0:
raise PhidgetException(result)
return _DeviceClassName.value.decode('utf-8')
def getDeviceID(self):
_DeviceID = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceID
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceID))
if result > 0:
raise PhidgetException(result)
return _DeviceID.value
def getDeviceLabel(self):
_DeviceLabel = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceLabel))
if result > 0:
raise PhidgetException(result)
return _DeviceLabel.value.decode('utf-8')
def setDeviceLabel(self, DeviceLabel):
_DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_setDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceLabel))
if result > 0:
raise PhidgetException(result)
def getDeviceName(self):
_DeviceName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceName))
if result > 0:
raise PhidgetException(result)
return _DeviceName.value.decode('utf-8')
def getDeviceSerialNumber(self):
_DeviceSerialNumber = ctypes.c_int32()
__func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceSerialNumber))
if result > 0:
raise PhidgetException(result)
return _DeviceSerialNumber.value
def setDeviceSerialNumber(self, DeviceSerialNumber):
_DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber)
__func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber
__func.restype = ctypes.c_int32
result = __func(self.handle, _DeviceSerialNumber)
if result > 0:
raise PhidgetException(result)
def getDeviceSKU(self):
_DeviceSKU = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceSKU
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceSKU))
if result > 0:
raise PhidgetException(result)
return _DeviceSKU.value.decode('utf-8')
def getDeviceVersion(self):
_DeviceVersion = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceVersion
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceVersion))
if result > 0:
raise PhidgetException(result)
return _DeviceVersion.value
def getHub(self):
_Hub = ctypes.c_void_p()
__func = PhidgetSupport.getDll().Phidget_getHub
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Hub))
if result > 0:
raise PhidgetException(result)
__Hub = Phidget()
__Hub.handle = _Hub
return __Hub
def getHubPort(self):
_HubPort = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPort
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPort))
if result > 0:
raise PhidgetException(result)
return _HubPort.value
def setHubPort(self, HubPort):
_HubPort = ctypes.c_int(HubPort)
__func = PhidgetSupport.getDll().Phidget_setHubPort
__func.restype = ctypes.c_int32
result = __func(self.handle, _HubPort)
if result > 0:
raise PhidgetException(result)
def getHubPortCount(self):
_HubPortCount = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPortCount
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortCount))
if result > 0:
raise PhidgetException(result)
return _HubPortCount.value
def getHubPortSpeed(self):
_HubPortSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortSpeed))
if result > 0:
raise PhidgetException(result)
return _HubPortSpeed.value
def setHubPortSpeed(self, HubPortSpeed):
_HubPortSpeed = ctypes.c_uint32(HubPortSpeed)
__func = PhidgetSupport.getDll().Phidget_setHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, _HubPortSpeed)
if result > 0:
raise PhidgetException(result)
def getMaxHubPortSpeed(self):
_MaxHubPortSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed))
if result > 0:
raise PhidgetException(result)
return _MaxHubPortSpeed.value
def getHubPortSupportsSetSpeed(self):
_HubPortSupportsSetSpeed = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed))
if result > 0:
raise PhidgetException(result)
return _HubPortSupportsSetSpeed.value
def getIsChannel(self):
_IsChannel = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsChannel))
if result > 0:
raise PhidgetException(result)
return _IsChannel.value
def getIsHubPortDevice(self):
_IsHubPortDevice = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsHubPortDevice))
if result > 0:
raise PhidgetException(result)
return _IsHubPortDevice.value
def setIsHubPortDevice(self, IsHubPortDevice):
_IsHubPortDevice = ctypes.c_int(IsHubPortDevice)
__func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsHubPortDevice)
if result > 0:
raise PhidgetException(result)
def getIsLocal(self):
_IsLocal = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsLocal
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsLocal))
if result > 0:
raise PhidgetException(result)
return _IsLocal.value
def setIsLocal(self, IsLocal):
_IsLocal = ctypes.c_int(IsLocal)
__func = PhidgetSupport.getDll().Phidget_setIsLocal
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsLocal)
if result > 0:
raise PhidgetException(result)
def getIsRemote(self):
_IsRemote = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsRemote
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsRemote))
if result > 0:
raise PhidgetException(result)
return _IsRemote.value
def setIsRemote(self, IsRemote):
_IsRemote = ctypes.c_int(IsRemote)
__func = PhidgetSupport.getDll().Phidget_setIsRemote
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsRemote)
if result > 0:
raise PhidgetException(result)
def open(self):
__func = PhidgetSupport.getDll().Phidget_open
__func.restype = ctypes.c_int32
result = __func(self.handle)
if result > 0:
raise PhidgetException(result)
def openWaitForAttachment(self, timeout):
_timeout = ctypes.c_uint32(timeout)
__func = PhidgetSupport.getDll().Phidget_openWaitForAttachment
__func.restype = ctypes.c_int32
result = __func(self.handle, _timeout)
if result > 0:
raise PhidgetException(result)
def getParent(self):
_Parent = ctypes.c_void_p()
__func = PhidgetSupport.getDll().Phidget_getParent
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Parent))
if result > 0:
raise PhidgetException(result)
__Parent = Phidget()
__Parent.handle = _Parent
return __Parent
def getServerHostname(self):
_ServerHostname = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerHostname
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerHostname))
if result > 0:
raise PhidgetException(result)
return _ServerHostname.value.decode('utf-8')
def getServerName(self):
_ServerName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerName))
if result > 0:
raise PhidgetException(result)
return _ServerName.value.decode('utf-8')
def setServerName(self, ServerName):
_ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_setServerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerName))
if result > 0:
raise PhidgetException(result)
def getServerPeerName(self):
_ServerPeerName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerPeerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerPeerName))
if result > 0:
raise PhidgetException(result)
return _ServerPeerName.value.decode('utf-8')
def getServerUniqueName(self):
_ServerUniqueName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerUniqueName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerUniqueName))
if result > 0:
raise PhidgetException(result)
return _ServerUniqueName.value.decode('utf-8')
def getMaxVINTDeviceSpeed(self):
_MaxVINTDeviceSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed))
if result > 0:
raise PhidgetException(result)
return _MaxVINTDeviceSpeed.value
def getVINTDeviceSupportsSetSpeed(self):
_VINTDeviceSupportsSetSpeed = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed))
if result > 0:
raise PhidgetException(result)
return _VINTDeviceSupportsSetSpeed.value
def writeDeviceLabel(self, deviceLabel):
_deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_writeDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_deviceLabel))
if result > 0:
raise PhidgetException(result)
ANY_SERIAL_NUMBER = -1
ANY_HUB_PORT = -1
ANY_CHANNEL = -1
ANY_LABEL = None
INFINITE_TIMEOUT = 0
DEFAULT_TIMEOUT = 1000
| 26.083445
| 113
| 0.757482
| 2,223
| 19,380
| 6.250562
| 0.081871
| 0.061965
| 0.05009
| 0.120475
| 0.584167
| 0.547679
| 0.492191
| 0.297517
| 0.254192
| 0.220007
| 0
| 0.01436
| 0.144788
| 19,380
| 742
| 114
| 26.118598
| 0.824001
| 0
| 0
| 0.413462
| 0
| 0
| 0.006037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.119231
| false
| 0
| 0.019231
| 0.003846
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e5ef7ddc4f844bf23ef6fa4d846ed9f0547af6
| 1,826
|
py
|
Python
|
openprocurement/auctions/geb/tests/blanks/create.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/blanks/create.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/geb/tests/blanks/create.py
|
oleksiyVeretiuk/openprocurement.auctions.geb
|
2965b52bf8826b9a8f8870c9a4d2052f945f5799
|
[
"Apache-2.0"
] | null | null | null |
def create_auction(self):
expected_http_status = '201 Created'
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.status, expected_http_status)
def create_auction_check_minNumberOfQualifiedBids(self):
expected_minNumberOfQualifiedBids = 2
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.json['data']['minNumberOfQualifiedBids'],
expected_minNumberOfQualifiedBids)
def create_auction_check_auctionParameters(self):
expected_auctionParameters = {'type': 'texas'}
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.json['data']['auctionParameters'],
expected_auctionParameters)
def create_auction_invalid_auctionPeriod(self):
expected_http_status = '422 Unprocessable Entity'
auction = self.auction
auction.pop('auctionPeriod')
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data, status=422)
self.assertEqual(response.status, expected_http_status)
entrypoint = '/auctions'
auction['auctionPeriod'] = {'startDate': None}
response = self.app.post_json(entrypoint, request_data, status=422)
self.assertEqual(response.status, expected_http_status)
def create_auction_dump(self):
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
filename = 'docs/source/tutorial/create_auction.http'
self.dump(response.request, response, filename)
| 33.814815
| 71
| 0.728916
| 194
| 1,826
| 6.634021
| 0.190722
| 0.094017
| 0.06993
| 0.088578
| 0.57265
| 0.57265
| 0.57265
| 0.57265
| 0.57265
| 0.57265
| 0
| 0.008491
| 0.161555
| 1,826
| 53
| 72
| 34.45283
| 0.832136
| 0
| 0
| 0.526316
| 0
| 0
| 0.13253
| 0.035049
| 0
| 0
| 0
| 0
| 0.131579
| 1
| 0.131579
| false
| 0
| 0
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e649d303431093f68ab23ef3215809292e639b
| 4,872
|
py
|
Python
|
tests/integration/test_celery.py
|
crossscreenmedia/scout_apm_python
|
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
|
[
"MIT"
] | null | null | null |
tests/integration/test_celery.py
|
crossscreenmedia/scout_apm_python
|
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
|
[
"MIT"
] | null | null | null |
tests/integration/test_celery.py
|
crossscreenmedia/scout_apm_python
|
5cd31bf21f5acd0be0df4f40ec0bd29ec050ec01
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
import celery
import pytest
from celery.signals import setup_logging
import scout_apm.celery
from scout_apm.api import Config
# http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test
skip_unless_celery_4_plus = pytest.mark.skipif(
celery.VERSION < (4, 0), reason="pytest fixtures added in Celery 4.0"
)
@setup_logging.connect
def do_nothing(**kwargs):
# Just by connecting to this signal, we prevent Celery from setting up
# logging - and stop it from interfering with global state
# http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging
pass
@contextmanager
def app_with_scout(app=None, config=None):
"""
Context manager that configures a Celery app with Scout installed.
"""
if app is None:
app = celery.Celery("tasks", broker="memory://")
# Enable Scout by default in tests.
if config is None:
config = {"monitor": True}
# Disable running the agent.
config["core_agent_launch"] = False
@app.task
def hello():
return "Hello World!"
# Setup according to https://docs.scoutapm.com/#celery
Config.set(**config)
scout_apm.celery.install()
try:
yield app
finally:
scout_apm.celery.uninstall()
# Reset Scout configuration.
Config.reset_all()
def test_hello_eager(tracked_requests):
with app_with_scout() as app:
result = app.tasks["tests.integration.test_celery.hello"].apply()
assert result.result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert "task_id" in tracked_request.tags
assert tracked_request.tags["is_eager"] is True
assert tracked_request.tags["exchange"] == "unknown"
assert tracked_request.tags["routing_key"] == "unknown"
assert tracked_request.tags["queue"] == "unknown"
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
@skip_unless_celery_4_plus
def test_hello_worker(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
result = app.tasks["tests.integration.test_celery.hello"].delay().get()
assert result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert "task_id" in tracked_request.tags
assert tracked_request.tags["is_eager"] is False
assert tracked_request.tags["exchange"] == ""
assert tracked_request.tags["routing_key"] == "celery"
assert tracked_request.tags["queue"] == "unknown"
assert (
0.0 <= tracked_request.tags["queue_time"] < 60.0
) # Assume test took <60 seconds
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
@skip_unless_celery_4_plus
def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
result = (
app.tasks["tests.integration.test_celery.hello"]
.apply_async(headers={"scout_task_start": "an evil string"})
.get()
)
assert result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
assert "queue_time" not in span.tags
@skip_unless_celery_4_plus
def test_hello_worker_chain(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
hello = app.tasks["tests.integration.test_celery.hello"]
result = (hello.si() | hello.si()).apply_async().get()
assert result == "Hello World!"
assert len(tracked_requests) == 2
assert [t.complete_spans[0].operation for t in tracked_requests] == [
"Job/tests.integration.test_celery.hello",
"Job/tests.integration.test_celery.hello",
]
assert "parent_task_id" not in tracked_requests[0].tags
first_task_id = tracked_requests[0].tags["task_id"]
assert tracked_requests[1].tags["parent_task_id"] == first_task_id
def test_no_monitor(tracked_requests):
# With an empty config, "monitor" defaults to False.
with app_with_scout(config={}) as app:
result = app.tasks["tests.integration.test_celery.hello"].apply()
assert result.result == "Hello World!"
assert tracked_requests == []
| 34.553191
| 82
| 0.70936
| 650
| 4,872
| 5.089231
| 0.252308
| 0.09734
| 0.059855
| 0.078597
| 0.571645
| 0.525998
| 0.485792
| 0.436518
| 0.436518
| 0.409915
| 0
| 0.009009
| 0.179803
| 4,872
| 140
| 83
| 34.8
| 0.818819
| 0.117406
| 0
| 0.373737
| 0
| 0
| 0.166706
| 0.086631
| 0
| 0
| 0
| 0
| 0.343434
| 1
| 0.080808
| false
| 0.010101
| 0.070707
| 0.010101
| 0.161616
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e79f3939b52fb2b048dd2d47804d7ba195c64a
| 12,893
|
py
|
Python
|
quapy/model_selection.py
|
OneToolsCollection/HLT-ISTI-QuaPy
|
6a5c528154c2d6d38d9f3258e667727bf692fc8b
|
[
"BSD-3-Clause"
] | null | null | null |
quapy/model_selection.py
|
OneToolsCollection/HLT-ISTI-QuaPy
|
6a5c528154c2d6d38d9f3258e667727bf692fc8b
|
[
"BSD-3-Clause"
] | null | null | null |
quapy/model_selection.py
|
OneToolsCollection/HLT-ISTI-QuaPy
|
6a5c528154c2d6d38d9f3258e667727bf692fc8b
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import signal
from copy import deepcopy
from typing import Union, Callable
import numpy as np
import quapy as qp
from quapy.data.base import LabelledCollection
from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction
from quapy.method.aggregative import BaseQuantifier
import inspect
from util import _check_sample_size
class GridSearchQ(BaseQuantifier):
"""Grid Search optimization targeting a quantification-oriented metric.
Optimizes the hyperparameters of a quantification method, based on an evaluation method and on an evaluation
protocol for quantification.
:param model: the quantifier to optimize
:type model: BaseQuantifier
:param param_grid: a dictionary with keys the parameter names and values the list of values to explore
:param sample_size: the size of the samples to extract from the validation set (ignored if protocol='gen')
:param protocol: either 'app' for the artificial prevalence protocol, 'npp' for the natural prevalence
protocol, or 'gen' for using a custom sampling generator function
:param n_prevpoints: if specified, indicates the number of equally distant points to extract from the interval
[0,1] in order to define the prevalences of the samples; e.g., if n_prevpoints=5, then the prevalences for
each class will be explored in [0.00, 0.25, 0.50, 0.75, 1.00]. If not specified, then eval_budget is requested.
Ignored if protocol!='app'.
:param n_repetitions: the number of repetitions for each combination of prevalences. This parameter is ignored
for the protocol='app' if eval_budget is set and is lower than the number of combinations that would be
generated using the value assigned to n_prevpoints (for the current number of classes and n_repetitions).
Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a maximum number of samples in
those cases).
:param eval_budget: if specified, sets a ceil on the number of evaluations to perform for each hyper-parameter
combination. For example, if protocol='app', there are 3 classes, n_repetitions=1 and eval_budget=20, then
n_prevpoints will be set to 5, since this will generate 15 different prevalences, i.e., [0, 0, 1],
[0, 0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and since setting it to 6 would generate more than
20. When protocol='gen', indicates the maximum number of samples to generate, but less samples will be
generated if the generator yields less samples.
:param error: an error function (callable) or a string indicating the name of an error function (valid ones
are those in qp.error.QUANTIFICATION_ERROR)
:param refit: whether or not to refit the model on the whole labelled collection (training+validation) with
the best chosen hyperparameter combination. Ignored if protocol='gen'
:param val_split: either a LabelledCollection on which to test the performance of the different settings, or
a float in [0,1] indicating the proportion of labelled data to extract from the training set, or a callable
returning a generator function each time it is invoked (only for protocol='gen').
:param n_jobs: number of parallel jobs
:param random_seed: set the seed of the random generator to replicate experiments. Ignored if protocol='gen'.
:param timeout: establishes a timer (in seconds) for each of the hyperparameters configurations being tested.
Whenever a run takes longer than this timer, that configuration will be ignored. If all configurations end up
being ignored, a TimeoutError exception is raised. If -1 (default) then no time bound is set.
:param verbose: set to True to get information through the stdout
"""
def __init__(self,
model: BaseQuantifier,
param_grid: dict,
sample_size: Union[int, None] = None,
protocol='app',
n_prevpoints: int = None,
n_repetitions: int = 1,
eval_budget: int = None,
error: Union[Callable, str] = qp.error.mae,
refit=True,
val_split=0.4,
n_jobs=1,
random_seed=42,
timeout=-1,
verbose=False):
self.model = model
self.param_grid = param_grid
self.sample_size = sample_size
self.protocol = protocol.lower()
self.n_prevpoints = n_prevpoints
self.n_repetitions = n_repetitions
self.eval_budget = eval_budget
self.refit = refit
self.val_split = val_split
self.n_jobs = n_jobs
self.random_seed = random_seed
self.timeout = timeout
self.verbose = verbose
self.__check_error(error)
assert self.protocol in {'app', 'npp', 'gen'}, \
'unknown protocol: valid ones are "app" or "npp" for the "artificial" or the "natural" prevalence ' \
'protocols. Use protocol="gen" when passing a generator function thorough val_split that yields a ' \
'sample (instances) and their prevalence (ndarray) at each iteration.'
assert self.eval_budget is None or isinstance(self.eval_budget, int)
if self.protocol in ['npp', 'gen']:
if self.protocol=='npp' and (self.eval_budget is None or self.eval_budget <= 0):
raise ValueError(f'when protocol="npp" the parameter eval_budget should be '
f'indicated (and should be >0).')
if self.n_repetitions != 1:
print('[warning] n_repetitions has been set and will be ignored for the selected protocol')
def _sout(self, msg):
if self.verbose:
print(f'[{self.__class__.__name__}]: {msg}')
def __check_training_validation(self, training, validation):
if isinstance(validation, LabelledCollection):
return training, validation
elif isinstance(validation, float):
assert 0. < validation < 1., 'validation proportion should be in (0,1)'
training, validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed)
return training, validation
elif self.protocol=='gen' and inspect.isgenerator(validation()):
return training, validation
else:
raise ValueError(f'"validation" must either be a LabelledCollection or a float in (0,1) indicating the'
f'proportion of training documents to extract (type found: {type(validation)}). '
f'Optionally, "validation" can be a callable function returning a generator that yields '
f'the sample instances along with their true prevalence at each iteration by '
f'setting protocol="gen".')
def __check_error(self, error):
if error in qp.error.QUANTIFICATION_ERROR:
self.error = error
elif isinstance(error, str):
self.error = qp.error.from_name(error)
elif hasattr(error, '__call__'):
self.error = error
else:
raise ValueError(f'unexpected error type; must either be a callable function or a str representing\n'
f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}')
def __generate_predictions(self, model, val_split):
commons = {
'n_repetitions': self.n_repetitions,
'n_jobs': self.n_jobs,
'random_seed': self.random_seed,
'verbose': False
}
if self.protocol == 'app':
return artificial_prevalence_prediction(
model, val_split, self.sample_size,
n_prevpoints=self.n_prevpoints,
eval_budget=self.eval_budget,
**commons
)
elif self.protocol == 'npp':
return natural_prevalence_prediction(
model, val_split, self.sample_size,
**commons)
elif self.protocol == 'gen':
return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget)
else:
raise ValueError('unknown protocol')
def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float, Callable] = None):
""" Learning routine. Fits methods with all combinations of hyperparameters and selects the one minimizing
the error metric.
:param training: the training set on which to optimize the hyperparameters
:param val_split: either a LabelledCollection on which to test the performance of the different settings, or
a float in [0,1] indicating the proportion of labelled data to extract from the training set
:return: self
"""
if val_split is None:
val_split = self.val_split
training, val_split = self.__check_training_validation(training, val_split)
if self.protocol != 'gen':
self.sample_size = _check_sample_size(self.sample_size)
params_keys = list(self.param_grid.keys())
params_values = list(self.param_grid.values())
model = self.model
if self.timeout > 0:
def handler(signum, frame):
self._sout('timeout reached')
raise TimeoutError()
signal.signal(signal.SIGALRM, handler)
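# The timeout relies on SIGALRM, so it is Unix-only and re-armed once per
# hyperparameter configuration; a config that overruns is skipped, not retried.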
self.param_scores_ = {}
self.best_score_ = None
some_timeouts = False
for values in itertools.product(*params_values):
params = dict({k: values[i] for i, k in enumerate(params_keys)})
if self.timeout > 0:
signal.alarm(self.timeout)
try:
# overrides default parameters with the parameters being explored at this iteration
model.set_params(**params)
model.fit(training)
true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split)
score = self.error(true_prevalences, estim_prevalences)
self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}')
if self.best_score_ is None or score < self.best_score_:
self.best_score_ = score
self.best_params_ = params
self.best_model_ = deepcopy(model)
self.param_scores_[str(params)] = score
if self.timeout > 0:
signal.alarm(0)
except TimeoutError:
print(f'timeout reached for config {params}')
some_timeouts = True
if self.best_score_ is None and some_timeouts:
raise TimeoutError('all jobs took more than the timeout time to end')
self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})')
if self.refit:
self._sout(f'refitting on the whole development set')
self.best_model_.fit(training + val_split)
return self
def quantify(self, instances):
"""Estimate class prevalence values using the best model found after calling the :meth:`fit` method.
:param instances: sample containing the instances
:return: a ndarray of shape `(n_classes)` with class prevalence estimates as according to the best model found
by the model selection process.
"""
assert hasattr(self, 'best_model_'), 'quantify called before fit'
return self.best_model().quantify(instances)
@property
def classes_(self):
"""
Classes on which the quantifier has been trained on.
:return: a ndarray of shape `(n_classes)` with the class identifiers
"""
return self.best_model().classes_
def set_params(self, **parameters):
"""Sets the hyper-parameters to explore.
:param parameters: a dictionary with keys the parameter names and values the list of values to explore
"""
self.param_grid = parameters
def get_params(self, deep=True):
"""Returns the dictionary of hyper-parameters to explore (`param_grid`)
:param deep: Unused
:return: the dictionary `param_grid`
"""
return self.param_grid
def best_model(self):
"""
Returns the best model found after calling the :meth:`fit` method, i.e., the one trained on the combination
of hyper-parameters that minimized the error function.
:return: a trained quantifier
"""
if hasattr(self, 'best_model_'):
return self.best_model_
raise ValueError('best_model called before fit')
| 48.65283
| 119
| 0.649732
| 1,629
| 12,893
| 5.014119
| 0.199509
| 0.01763
| 0.011998
| 0.007835
| 0.150343
| 0.113002
| 0.090475
| 0.090475
| 0.067826
| 0.067826
| 0
| 0.007948
| 0.277825
| 12,893
| 264
| 120
| 48.837121
| 0.869294
| 0.35973
| 0
| 0.08125
| 0
| 0.00625
| 0.185796
| 0.014711
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.075
| false
| 0.00625
| 0.06875
| 0
| 0.21875
| 0.01875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e7e28fd96ba38477835a4f1f9a0169efabb855
| 2,841
|
py
|
Python
|
python/day09/smoke_basin.py
|
aesdeef/advent-of-code-2021
|
4561bcf12ac03d360f5b28c48ef80134f97613b9
|
[
"MIT"
] | 2
|
2021-12-03T06:18:27.000Z
|
2021-12-06T11:28:33.000Z
|
python/day09/smoke_basin.py
|
aesdeef/advent-of-code-2021
|
4561bcf12ac03d360f5b28c48ef80134f97613b9
|
[
"MIT"
] | null | null | null |
python/day09/smoke_basin.py
|
aesdeef/advent-of-code-2021
|
4561bcf12ac03d360f5b28c48ef80134f97613b9
|
[
"MIT"
] | null | null | null |
INPUT_FILE = "../../input/09.txt"
Point = tuple[int, int]
Heightmap = dict[Point, int]
Basin = set[Point]
def parse_input() -> Heightmap:
"""
Parses the input and returns a Heightmap
"""
with open(INPUT_FILE) as f:
heights = [[int(x) for x in line.strip()] for line in f]
heightmap: Heightmap = dict()
for (y, row) in enumerate(heights):
for (x, height) in enumerate(row):
heightmap[(x, y)] = height
return heightmap
def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]:
"""
Returns a set of surrounding points within the heightmap
"""
x, y = point
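# Intersecting with heightmap.keys() drops neighbours that fall outside
# the grid, so border points simply have fewer surrounding points.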
return {
(x - 1, y),
(x, y - 1),
(x, y + 1),
(x + 1, y),
} & heightmap.keys()
def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]:
"""
Returns the heights of points surrounding the given point
"""
surrounding_points = get_surrounding_points(heightmap, point)
return {heightmap[point] for point in surrounding_points}
def get_low_points(heightmap: Heightmap) -> set[Point]:
"""
Finds the low points on the heightmap
"""
low_points: set[Point] = set()
for point in heightmap:
surrounding_heights = get_surrounding_heights(heightmap, point)
if all(heightmap[point] < height for height in surrounding_heights):
low_points.add(point)
return low_points
def solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int:
"""
Calculates the sum of the risk levels of all low points
"""
return sum(1 + heightmap[point] for point in low_points)
def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]:
"""
Finds all basins on the heightmap
"""
basins: list[Basin] = []
for low_point in low_points:
basin: Basin = set()
points_to_consider = {low_point}
while points_to_consider:
point = points_to_consider.pop()
if heightmap[point] == 9:
continue
surrounding_points = get_surrounding_points(heightmap, point)
points_to_consider.update(surrounding_points - basin)
basin.add(point)
basins.append(basin)
return basins
def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int:
"""
Calculates the product of the sizes of the three largest basins
"""
basins = get_basins(heightmap, low_points)
basin_sizes = sorted((len(basin) for basin in basins), reverse=True)
return basin_sizes[0] * basin_sizes[1] * basin_sizes[2]
if __name__ == "__main__":
heightmap = parse_input()
low_points = get_low_points(heightmap)
part1 = solve_part1(heightmap, low_points)
part2 = solve_part2(heightmap, low_points)
print(part1)
print(part2)
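# Hedged demo (not in the original file): the helpers above also work on a
# tiny in-memory heightmap, bypassing parse_input and the input file.
# demo: Heightmap = {(x, y): h
#                    for y, row in enumerate([[2, 1, 2], [9, 9, 9]])
#                    for x, h in enumerate(row)}
# assert get_low_points(demo) == {(1, 0)}
# assert solve_part1(demo, {(1, 0)}) == 2  # risk level = 1 + height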
| 27.852941
| 77
| 0.642027
| 362
| 2,841
| 4.861878
| 0.212707
| 0.081818
| 0.071591
| 0.047727
| 0.213068
| 0.135795
| 0.115909
| 0.057955
| 0.057955
| 0
| 0
| 0.008916
| 0.249912
| 2,841
| 101
| 78
| 28.128713
| 0.816987
| 0.12214
| 0
| 0.035088
| 0
| 0
| 0.010888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122807
| false
| 0
| 0
| 0
| 0.245614
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e856cc4992e6f53fef41d1cfe0de4271ac6642
| 1,667
|
py
|
Python
|
playground.py
|
NHGmaniac/voctoconfig
|
55a803a5f9bc81b48eaa72ced1fddd402aa7a2e9
|
[
"MIT"
] | null | null | null |
playground.py
|
NHGmaniac/voctoconfig
|
55a803a5f9bc81b48eaa72ced1fddd402aa7a2e9
|
[
"MIT"
] | null | null | null |
playground.py
|
NHGmaniac/voctoconfig
|
55a803a5f9bc81b48eaa72ced1fddd402aa7a2e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import signal
import logging
import sys
from gi.repository import GObject
GObject.threads_init()
import time
from lib.args import Args
from lib.loghandler import LogHandler
import lib.connection as Connection
def testCallback(args):
log = logging.getLogger("Test")
log.info(str(args))
class Voctoconfig(object):
def __init__(self):
self.log = logging.getLogger("Voctoconfig")
self.log.debug("Creating GObject Mainloop")
self.mainloop = GObject.MainLoop()
def run(self):
self.log.info("Running MainLoop")
try:
self.mainloop.run()
except KeyboardInterrupt:
self.log.info("Terminated via KeyboardInterrupt")
def quit(self):
self.log.info("Quitting MainLoop")
self.mainloop.quit()
def main():
docolor = (Args.color == 'always') or (Args.color == 'auto' and
sys.stderr.isatty())
loghandler = LogHandler(docolor, Args.timestamp)
logging.root.addHandler(loghandler)
if Args.verbose >= 2:
level = logging.DEBUG
elif Args.verbose == 1:
level = logging.INFO
else:
level = logging.WARNING
logging.root.setLevel(level)
logging.debug('setting SIGINT handler')
signal.signal(signal.SIGINT, signal.SIG_DFL)
Connection.establish(Args.host)
Connection.enterNonblockingMode()
Connection.on("message", testCallback)
    mainloop = GObject.MainLoop()
    # mainloop.run() blocks until the loop quits, so the polling loop below
    # is only reached after the MainLoop has exited.
    mainloop.run()
    while True:
        logging.debug("mimimi...")
        Connection.send("message", "test2")
        time.sleep(10)
if __name__ == '__main__':
main()
| 24.514706
| 67
| 0.644271
| 186
| 1,667
| 5.698925
| 0.44086
| 0.033019
| 0.031132
| 0.028302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004739
| 0.240552
| 1,667
| 68
| 68
| 24.514706
| 0.832543
| 0.012597
| 0
| 0
| 0
| 0
| 0.105103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098039
| false
| 0
| 0.156863
| 0
| 0.27451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e873cdab8920252696e3d917e54b578dd9f428
| 3,220
|
py
|
Python
|
tianshou/utils/logger/tensorboard.py
|
Aceticia/tianshou
|
6377dc5006ba1111adac42472447b9de4a021c2d
|
[
"MIT"
] | 4,714
|
2018-04-16T22:52:05.000Z
|
2022-03-31T14:14:51.000Z
|
tianshou/utils/logger/tensorboard.py
|
Aceticia/tianshou
|
6377dc5006ba1111adac42472447b9de4a021c2d
|
[
"MIT"
] | 529
|
2020-03-26T00:58:03.000Z
|
2022-03-31T01:59:14.000Z
|
tianshou/utils/logger/tensorboard.py
|
Aceticia/tianshou
|
6377dc5006ba1111adac42472447b9de4a021c2d
|
[
"MIT"
] | 798
|
2018-05-26T23:34:07.000Z
|
2022-03-30T11:26:19.000Z
|
import warnings
from typing import Any, Callable, Optional, Tuple
from tensorboard.backend.event_processing import event_accumulator
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger
class TensorboardLogger(BaseLogger):
"""A logger that relies on tensorboard SummaryWriter by default to visualize \
and log statistics.
:param SummaryWriter writer: the writer to log data.
    :param int train_interval: the log interval in log_train_data(). Defaults to 1000.
    :param int test_interval: the log interval in log_test_data(). Defaults to 1.
    :param int update_interval: the log interval in log_update_data(). Defaults to 1000.
    :param int save_interval: the save interval in save_data(). Defaults to 1 (save at
        the end of each epoch).
"""
def __init__(
self,
writer: SummaryWriter,
train_interval: int = 1000,
test_interval: int = 1,
update_interval: int = 1000,
save_interval: int = 1,
) -> None:
super().__init__(train_interval, test_interval, update_interval)
self.save_interval = save_interval
self.last_save_step = -1
self.writer = writer
def write(self, step_type: str, step: int, data: LOG_DATA_TYPE) -> None:
for k, v in data.items():
self.writer.add_scalar(k, v, global_step=step)
def save_data(
self,
epoch: int,
env_step: int,
gradient_step: int,
save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None,
) -> None:
if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval:
self.last_save_step = epoch
save_checkpoint_fn(epoch, env_step, gradient_step)
self.write("save/epoch", epoch, {"save/epoch": epoch})
self.write("save/env_step", env_step, {"save/env_step": env_step})
self.write(
"save/gradient_step", gradient_step,
{"save/gradient_step": gradient_step}
)
def restore_data(self) -> Tuple[int, int, int]:
ea = event_accumulator.EventAccumulator(self.writer.log_dir)
ea.Reload()
try: # epoch / gradient_step
epoch = ea.scalars.Items("save/epoch")[-1].step
self.last_save_step = self.last_log_test_step = epoch
gradient_step = ea.scalars.Items("save/gradient_step")[-1].step
self.last_log_update_step = gradient_step
except KeyError:
epoch, gradient_step = 0, 0
try: # offline trainer doesn't have env_step
env_step = ea.scalars.Items("save/env_step")[-1].step
self.last_log_train_step = env_step
except KeyError:
env_step = 0
return epoch, env_step, gradient_step
class BasicLogger(TensorboardLogger):
"""BasicLogger has changed its name to TensorboardLogger in #427.
This class is for compatibility.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
        warnings.warn(
            "Deprecated soon: BasicLogger has been renamed to TensorboardLogger "
            "in #427."
        )
super().__init__(*args, **kwargs)
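# Hedged usage sketch (assumes torch and tianshou are installed; not part of
# the original module):
# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter(log_dir='log/exp1')
# logger = TensorboardLogger(writer, train_interval=1000, save_interval=1)
# logger.write('train/env_step', 5, {'train/reward': 1.0})
# epoch, env_step, gradient_step = logger.restore_data()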
| 37.011494
| 87
| 0.647205
| 412
| 3,220
| 4.822816
| 0.252427
| 0.042275
| 0.040262
| 0.032209
| 0.226975
| 0.114243
| 0
| 0
| 0
| 0
| 0
| 0.013825
| 0.258696
| 3,220
| 86
| 88
| 37.44186
| 0.818601
| 0.20559
| 0
| 0.135593
| 0
| 0
| 0.077015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.084746
| 0
| 0.220339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86e8affd139b8a4dffaf5cdc66c6797adccdf84b
| 7,326
|
py
|
Python
|
PythonAPI/pythonwrappers/jetfuel/gui/menu.py
|
InsightGit/JetfuelGameEngine
|
3ea0bf2fb5e09aadf304b7b5a16882d72336c408
|
[
"Apache-2.0"
] | 4
|
2018-02-05T03:40:10.000Z
|
2021-06-18T16:22:13.000Z
|
PythonAPI/pythonwrappers/jetfuel/gui/menu.py
|
InsightGit/JetfuelGameEngine
|
3ea0bf2fb5e09aadf304b7b5a16882d72336c408
|
[
"Apache-2.0"
] | null | null | null |
PythonAPI/pythonwrappers/jetfuel/gui/menu.py
|
InsightGit/JetfuelGameEngine
|
3ea0bf2fb5e09aadf304b7b5a16882d72336c408
|
[
"Apache-2.0"
] | null | null | null |
# Jetfuel Game Engine- A SDL-based 2D game-engine
# Copyright (C) 2018 InfernoStudios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_uint
from ctypes import c_int
from ctypes import c_void_p
from ctypes import c_bool
from ctypes import c_wchar_p
from jetfuel.draw.rectangleinterface import rectangle_interface
from jetfuel.draw.image import image
class menu(rectangle_interface):
def __init__(self, jetfuelsoloader, maxheight=None, columngap=None,
buttongap=None):
self._jetfuel = jetfuelsoloader.jetfuelso;
if(maxheight is not None and columngap is not None and
buttongap is not None):
self._jetfuel.Menu_new_from_heights_and_gaps.argtypes = [c_uint,
c_uint,
c_uint];
self._jetfuel.Menu_new_from_heights_and_gaps.restype = c_void_p;
self.drawableref = self._jetfuel.Menu_new_from_heights_and_gaps(
maxheight,
columngap,
buttongap);
else:
self._jetfuel.Menu_new.restype = c_void_p;
self.drawableref = self._jetfuel.Menu_new();
print("Constructed empty drawableref!");
def get_max_height(self):
self._jetfuel.Menu_get_max_height.argtypes = [c_void_p];
self._jetfuel.Menu_get_max_height.restype = c_uint;
return self._jetfuel.Menu_get_max_height(self.drawableref);
def set_max_height(self, maxheight):
self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint];
self._jetfuel.Menu_set_max_height(self.drawableref, maxheight);
def get_column_gap(self):
self._jetfuel.Menu_get_column_gap.argtypes = [c_void_p];
self._jetfuel.Menu_get_column_gap.restype = c_uint;
return self._jetfuel.Menu_get_column_gap(self.drawableref);
    def set_column_gap(self, columngap):
        self._jetfuel.Menu_set_column_gap.argtypes = [c_void_p, c_uint];
        # The original called Menu_set_column_height here, which looks like a
        # copy-paste slip; call the gap setter configured above instead.
        self._jetfuel.Menu_set_column_gap(self.drawableref, columngap);
    def get_button_gap(self):
        self._jetfuel.Menu_get_button_gap.argtypes = [c_void_p];
        self._jetfuel.Menu_get_button_gap.restype = c_uint;
        return self._jetfuel.Menu_get_button_gap(self.drawableref);
    def set_button_gap(self, buttongap):
        # Assumes the shared library exports Menu_set_button_gap; the original
        # bound and called Menu_set_max_height here, presumably a copy-paste slip.
        self._jetfuel.Menu_set_button_gap.argtypes = [c_void_p, c_uint];
        self._jetfuel.Menu_set_button_gap(self.drawableref, buttongap);
def get_container_box_image(self, jetfuelsoloader):
self._jetfuel.Menu_get_container_box_image.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_image.restype = c_void_p;
containerboximage = image(jetfuelsoloader);
self._jetfuel.Image_delete.argtypes = [c_void_p];
self._jetfuel.Image_delete(containerboximage.imageref);
containerboximage.imageref = self._jetfuel.Menu_get_container_box_image(
self.drawableref);
return containerboximage;
    def set_container_box_image(self, image, borderwidth, borderheight):
        self._jetfuel.Menu_set_container_box_image.argtypes = [c_void_p,
                                                               c_void_p, c_uint,
                                                               c_uint];
        # Pass the menu's own drawableref first, matching the four argtypes
        # declared above (the original omitted it).
        self._jetfuel.Menu_set_container_box_image(self.drawableref,
                                                   image.imageref, borderwidth,
                                                   borderheight);
def get_container_box_border_width(self):
self._jetfuel.Menu_get_container_box_border_width.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_border_width.restype = c_uint;
return self._jetfuel.Menu_get_container_box_border_width(
self.drawableref);
def get_container_box_border_height(self):
self._jetfuel.Menu_get_container_box_border_height.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_border_height.restype = c_uint;
return self._jetfuel.Menu_get_container_box_border_height(
self.drawableref);
def add_button(self, buttoncharsreplacement, uisactiontowatchfor,
messagetosenduponclick, messagebus):
self._jetfuel.Menu_add_button.argtypes = [c_void_p, c_void_p,
c_wchar_p, c_wchar_p,
c_void_p];
self._jetfuel.Menu_add_button.restype = c_bool;
return self._jetfuel.Menu_add_button(self.drawableref,
buttoncharsreplacement.buttoncharsref,
uisactiontowatchfor,
messagetosenduponclick,
messagebus.messagebusref);
    def get_position_x(self):
        # These accessors previously called self.Menu_get_... (an
        # AttributeError at runtime); route them through the loaded library.
        self._jetfuel.Menu_get_position_x.argtypes = [c_void_p];
        self._jetfuel.Menu_get_position_x.restype = c_int;
        return self._jetfuel.Menu_get_position_x(self.drawableref);
    def get_position_y(self):
        self._jetfuel.Menu_get_position_y.argtypes = [c_void_p];
        self._jetfuel.Menu_get_position_y.restype = c_int;
        return self._jetfuel.Menu_get_position_y(self.drawableref);
    def set_position(self, x, y):
        self._jetfuel.Menu_set_position.argtypes = [c_void_p, c_int, c_int];
        self._jetfuel.Menu_set_position(self.drawableref, x, y);
    def get_rect_to_draw_width(self):
        self._jetfuel.Menu_get_rect_to_draw_width.argtypes = [c_void_p];
        self._jetfuel.Menu_get_rect_to_draw_width.restype = c_int;
        return self._jetfuel.Menu_get_rect_to_draw_width(self.drawableref);
    def get_rect_to_draw_height(self):
        self._jetfuel.Menu_get_rect_to_draw_height.argtypes = [c_void_p];
        self._jetfuel.Menu_get_rect_to_draw_height.restype = c_int;
        return self._jetfuel.Menu_get_rect_to_draw_height(self.drawableref);
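# Hedged usage sketch; the jetfuelsoloader, buttonchars and messagebus
# objects come from elsewhere in the jetfuel API and are assumed here:
# m = menu(jetfuelsoloader, maxheight=600, columngap=10, buttongap=5)
# m.set_position(100, 50)
# m.add_button(buttonchars, "mouse_click", "start_game", messagebus)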
| 46.367089
| 81
| 0.590227
| 813
| 7,326
| 4.897909
| 0.163592
| 0.129834
| 0.165746
| 0.117529
| 0.559518
| 0.503516
| 0.414867
| 0.373682
| 0.279508
| 0.214716
| 0
| 0.001876
| 0.345072
| 7,326
| 157
| 82
| 46.66242
| 0.828053
| 0.083129
| 0
| 0.089109
| 0
| 0
| 0.004479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168317
| false
| 0
| 0.069307
| 0
| 0.356436
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ea235dbd8e630be7e48c8aa27ae5d388c7bc1d
| 30,649
|
py
|
Python
|
latent_programmer/decomposition_transformer_attention/train.py
|
ParikhKadam/google-research
|
00a282388e389e09ce29109eb050491c96cfab85
|
[
"Apache-2.0"
] | 2
|
2022-01-21T18:15:34.000Z
|
2022-01-25T15:21:34.000Z
|
latent_programmer/decomposition_transformer_attention/train.py
|
ParikhKadam/google-research
|
00a282388e389e09ce29109eb050491c96cfab85
|
[
"Apache-2.0"
] | 110
|
2021-10-01T18:22:38.000Z
|
2021-12-27T22:08:31.000Z
|
latent_programmer/decomposition_transformer_attention/train.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 1
|
2022-02-10T10:43:10.000Z
|
2022-02-10T10:43:10.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Train seq-to-seq model on random supervised training tasks."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import json
import os
import random
import sys
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer import decode
from latent_programmer import models as base_models
from latent_programmer.decomposition_transformer_attention import decomposition_models as models
from latent_programmer.decomposition_transformer_attention import input_pipeline
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 0, 'Fixed random seed for training.')
flags.DEFINE_float('lr', 1e-3, 'Learning rate.')
flags.DEFINE_float('weight_decay', 1e-1,
'Decay factor for AdamW-style weight decay.')
flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.')
flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.')
flags.DEFINE_integer('num_heads', 4, 'Number of Transformer heads.')
flags.DEFINE_integer('num_layers', 3, 'Number of layers.')
flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for prediction?')
flags.DEFINE_string('dataset_filepattern', None,
'Filepattern for TFRecord dataset.')
flags.DEFINE_integer('per_device_batch_size', 16,
'Number of program tasks in a batch.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_program_length', 100,
'Maximum number of tokens in program.')
flags.DEFINE_integer('max_characters', 120,
'Maximum number of characters in input/output strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.')
flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.')
flags.DEFINE_integer('log_freq', 1000, 'Number of steps between training logs.')
flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.')
flags.DEFINE_integer('predict_freq', 50000,
'Number of steps between prediction (beam search).')
flags.DEFINE_integer('checkpoint_freq', 50000,
'Number of steps between checkpoint saves.')
flags.DEFINE_integer('finetune_start_step', -1,
'Step the initial checkpoint should start at for '
'finetuning, or -1 if not finetuning.')
flags.DEFINE_bool('restore_checkpoints', True,
'Whether to restore from existing model checkpoints.')
flags.DEFINE_string('attention_mask_type', 'bos_full_attention',
'The kind of attention mask to use. Options are: baseline, '
'bos_to_bos, bos_full_attention')
flags.DEFINE_bool('use_relative_attention', True,
                  'Whether to use relative positional embeddings.')
flags.DEFINE_bool('bos_special_attention', False,
'Whether to use special relative attention computation for '
'BOS tokens.')
_internal = False
if not _internal:
  flags.DEFINE_string('xm_parameters', None,
                      'String specifying hyperparameter search.')
def create_learning_rate_scheduler(
base_learning_rate=0.5,
factors='constant * linear_warmup * rsqrt_normalized_decay',
warmup_steps=16000,
decay_factor=0.5,
steps_per_decay=50000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
base_learning_rate: float, the starting constant for the lr schedule.
factors: a string with factors separated by '*' that defines the schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
A function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
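# Illustrative check (not in the original): with the default factors the
# schedule warms up linearly, then decays as sqrt(warmup_steps / step), e.g.
#   lr_fn = create_learning_rate_scheduler(base_learning_rate=1e-3)
#   lr_fn(8000)   # halfway through the 16000-step warmup -> ~5e-4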
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
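# Illustrative note (not in the original): for logits [batch, length, V] and
# integer targets [batch, length], passing weights of 1.0 for real tokens and
# 0.0 for padding makes the normalizer weights.sum() (the real-token count)
# rather than prod(targets.shape), so padding does not dilute the mean loss.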
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
acc = acc * weights
normalizing_factor = weights.sum()
return acc.sum(), normalizing_factor
def compute_metrics(logits, targets, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
acc, _ = compute_weighted_accuracy(logits, targets, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
inputs,
outputs,
programs,
learning_rate_fn,
config,
dropout_rng):
"""Train on batch of program tasks."""
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
def loss_fn(params):
"""Loss function used for training."""
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
programs,
rngs={'dropout': dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
# Get metrics.
metrics = compute_metrics(logits, programs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(params, inputs, outputs, programs, eos_token, config):
"""Collect metrics for evaluation during training."""
weights = jnp.where(
jnp.logical_and(programs > 0,
jnp.logical_and(programs != config.base_config.bos_token,
programs != eos_token)),
1, 0).astype(jnp.float32)
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params}, inputs, outputs, programs)
return compute_metrics(logits, programs, weights)
def initialize_cache(inputs, outputs, programs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (programs.shape[0], max_decode_len)
dtype = config.base_config.dtype
initial_variables = models.DecomposeAttentionTransformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, dtype),
jnp.ones(outputs.shape, dtype),
jnp.ones(target_shape, dtype))
return initial_variables['cache']
def predict_step(params,
inputs,
outputs,
cache,
beam_size,
eos_token,
max_decode_len,
config,
slow_decode=True):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
flat_encoded = decode.flat_batch_beam_expand(
models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
method=models.DecomposeAttentionTransformer.encode),
beam_size)
encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
flat_encoded_padding_mask = decode.flat_batch_beam_expand(
encoded_padding_mask, beam_size)
if slow_decode:
def tokens_ids_to_logits(flat_ids):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
{'params': params},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
method=models.DecomposeAttentionTransformer.decode)
return flat_logits
else:
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.DecomposeAttentionTransformer(
config=config).apply(
{'params': params, 'cache': flat_cache},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
mutable=['cache'],
method=models.DecomposeAttentionTransformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
bos_token=config.base_config.bos_token,
eos_token=eos_token,
max_decode_len=max_decode_len,
slow_decode=slow_decode)
# Beam search returns [n_batch, n_beam, n_length] with beam dimension
# sorted in increasing order of log-probability.
return beam_seqs
# Util functions for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
tile_dims = [1] * len(x.shape)
tile_dims[0] = batch_pad
return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)
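# Illustrative (not in the original): an x of shape (3, ...) with
# desired_batch_size=8 becomes shape (8, ...) by repeating x[-1] five times,
# so the final odd-sized predict batch shards evenly across devices.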
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return x.reshape((n_device * n_batch,) + tuple(remaining_dims))
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
"""Evaluate predicted program beams."""
best_p, best_score = None, -1
# predicted shape [beam_size, length]
for beam in predicted[::-1]:
try:
p = parse_beam_fn(beam)
p_outs = [p(inp) for inp in inputs]
score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
if score > best_score:
best_p, best_score = p, score
except: # pylint: disable=bare-except
pass
if best_score >= len(inputs): # Found solution.
break
return best_p, best_score
def shorten(key):
splits = key.split('_')
return ''.join(s[0] for s in splits)
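# Illustrative (not in the original): shorten('attention_mask_type') -> 'amt'.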
def main(_):
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
# BOS special attention only makes sense if we are using relative attention
# and it's not the baseline.
if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or
FLAGS.attention_mask_type == 'baseline'):
raise ValueError(
"bos_special_attention doesn't work when use_relative_attention={} and "
'attention_mask_type={}'.format(FLAGS.use_relative_attention,
FLAGS.attention_mask_type))
if not gfile.isdir(FLAGS.save_dir):
gfile.makedirs(FLAGS.save_dir)
hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)
  # Get hyperparameters
if FLAGS.xm_parameters:
for key, value in json.loads(FLAGS.xm_parameters).items():
if key not in hparam_str_dict:
hparam_str_dict[key] = value
hparam_str = ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k]))
for k in sorted(hparam_str_dict.keys())])
# Number of local devices for this host.
n_devices = jax.local_device_count()
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
batch_size = FLAGS.per_device_batch_size * n_devices
io_shape = (FLAGS.per_device_batch_size,
FLAGS.num_strings_per_task,
FLAGS.max_characters)
program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)
# Setup DSL
# ---------------------------------------------------------------------------
# Build token tables.
id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)}
char_id_table = {char: id for id, char in id_char_table.items()}
id_token_table, token_id_table = dsl_tokens.build_token_tables()
io_vocab_size = len(char_id_table) + 1 # For padding.
program_vocab_size = len(token_id_table) + 1
bos_token = token_id_table[dsl.BOS]
eos_token = token_id_table[dsl.EOS]
# Parse io and program token sequences (for eval).
def decode_io(inputs, outputs):
"""Decode io examples tokens."""
def decode_str(s):
"""Decode string tokens."""
return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])
inps, outs = [], []
for inp, out in zip(inputs, outputs):
inps.append(decode_str(inp))
outs.append(decode_str(out))
return inps, outs
def decode_program(program):
"""Decode program tokens."""
program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)
program = program[program != bos_token]
try:
return dsl.decode_program(program.tolist(), id_token_table)
except: # pylint: disable=bare-except
return None # Program does not compile.
# Load Dataset
# ---------------------------------------------------------------------------
logging.info('Initializing dataset.')
if not FLAGS.dataset_filepattern:
raise ValueError('Must specify filepattern to dataset.')
# Training dataset.
logging.info('Loading dataset from %s', FLAGS.dataset_filepattern)
padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:])
logging.info('padded_shapes: %s', padded_shapes)
dataset = input_pipeline.create_dataset_from_tf_record(
FLAGS.dataset_filepattern, token_id_table, char_id_table)
dataset = dataset.padded_batch(
batch_size,
padded_shapes=padded_shapes,
drop_remainder=True)
# Split evaluation and training.
eval_ds = dataset.take(FLAGS.num_eval_steps)
# Decrease batch of predict dataset to handle beam search.
predict_ds = eval_ds.unbatch().padded_batch(
int(np.ceil(batch_size / 10)),
padded_shapes=padded_shapes)
train_ds = dataset.skip(FLAGS.num_eval_steps).repeat()
train_iter = train_ds.as_numpy_iterator()
# Build Model and Optimizer
# ---------------------------------------------------------------------------
use_dropout = False
base_config = base_models.TransformerConfig(
vocab_size=io_vocab_size,
output_vocab_size=program_vocab_size,
shift=True,
emb_dim=FLAGS.embedding_dim,
num_heads=FLAGS.num_heads,
num_layers=FLAGS.num_layers,
qkv_dim=FLAGS.embedding_dim,
mlp_dim=FLAGS.hidden_dim,
max_len=max(FLAGS.max_characters, FLAGS.max_program_length),
use_relative_attention=FLAGS.use_relative_attention,
deterministic=not use_dropout,
decode=False,
bos_token=bos_token)
train_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config,
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
eval_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(deterministic=not use_dropout),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
predict_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(
shift=False, deterministic=not use_dropout,
decode=not FLAGS.slow_decode),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
rng = jax.random.PRNGKey(FLAGS.seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = jax.random.split(rng)
m = models.DecomposeAttentionTransformer(eval_config)
initial_variables = jax.jit(m.init)(
{'params': init_rng, 'dropout': init_rng},
jnp.ones(io_shape, jnp.float32),
jnp.ones(io_shape, jnp.float32),
jnp.ones(program_shape, jnp.float32))
optimizer_def = optim.Adam(
FLAGS.lr,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.weight_decay)
optimizer = optimizer_def.create(initial_variables['params'])
del initial_variables # Don't keep a copy of the initial model.
start_step = 0
if FLAGS.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
logging.info('Found model checkpointed at step %d.', start_step)
if FLAGS.finetune_start_step > 0:
logging.info('Checking that start_step (%s) == finetune_start_step (%s)',
start_step, FLAGS.finetune_start_step)
assert start_step == FLAGS.finetune_start_step
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
# TODO(jxihong): Implement fast decoding.
assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.'
if FLAGS.finetune_start_step <= 0:
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr)
else:
# Constant LR for finetuning.
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr,
factors='constant')
p_train_step = jax.pmap(
functools.partial(
train_step,
learning_rate_fn=learning_rate_fn,
config=train_config),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(eval_step,
eos_token=eos_token,
config=eval_config),
axis_name='batch')
p_init_cache = jax.pmap(
functools.partial(
initialize_cache,
max_decode_len=FLAGS.max_program_length,
config=predict_config),
axis_name='batch')
p_pred_step = jax.pmap(
functools.partial(
predict_step,
eos_token=eos_token,
max_decode_len=FLAGS.max_program_length,
config=predict_config,
slow_decode=FLAGS.slow_decode),
axis_name='batch',
static_broadcasted_argnums=(4,))
# Main Train Loop
# ---------------------------------------------------------------------------
dropout_rng = jax.random.split(rng, jax.local_device_count())
del rng
metrics_all = []
tick = time.time()
for step in range(start_step, FLAGS.num_train_steps):
inputs, outputs, programs = common_utils.shard(next(train_iter))
optimizer, metrics, dropout_rng = p_train_step(
optimizer, inputs, outputs, programs, dropout_rng=dropout_rng)
metrics_all.append(metrics)
is_last_step = step == FLAGS.num_train_steps - 1
# Save a Checkpoint
if (step % FLAGS.checkpoint_freq == 0 and step > 0) or is_last_step:
if jax.host_id() == 0:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str),
jax_utils.unreplicate(optimizer),
step)
# Periodic metric handling.
# Training Metrics
if (step and step % FLAGS.log_freq == 0) or is_last_step:
logging.info('Gathering training metrics.')
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(
lambda x: x / denominator, # pylint: disable=cell-var-from-loop
metrics_sums)
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
if jax.host_id() == 0:
logging.info('Train in step: %d, loss: %.4f', step, summary['loss'])
tock = time.time()
steps_per_sec = FLAGS.log_freq / (tock - tick)
tick = tock
summary_writer.scalar('train/steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar('train/' + key, val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Evaluation Metrics
if (step and step % FLAGS.eval_freq == 0) or is_last_step:
logging.info('Gathering evaluation metrics.')
t_evaluation_start = time.time()
eval_metrics = []
for batches in eval_ds.as_numpy_iterator():
inputs, outputs, programs = common_utils.shard(batches)
metrics = p_eval_step(optimizer.target, inputs, outputs, programs)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
if jax.host_id() == 0:
logging.info('Evaluation time: %.4f s step %d, loss: %.4f.',
time.time()-t_evaluation_start, step, eval_summary['loss'])
for key, val in eval_summary.items():
summary_writer.scalar('eval/' + key, val, step)
summary_writer.flush()
# Beam search metrics.
if (step and step % FLAGS.predict_freq == 0) or is_last_step:
logging.info('Gathering beam search metrics.')
for beam_size in [1, 5, 10, 20, 50]:
t_inference_start = time.time()
pred_acc = 0
pred_denominator = 0
ios, targets, predictions, top_of_beams = [], [], [], []
for batches in predict_ds.as_numpy_iterator():
pred_batch = batches
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = pred_batch[0].shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(
np.ceil(cur_pred_batch_size / n_devices) * n_devices)
# pylint: disable=cell-var-from-loop
pred_batch = jax.tree_map(
lambda x: pad_examples(x, padded_size), pred_batch)
inputs, outputs, programs = common_utils.shard(pred_batch)
cache = (p_init_cache(inputs, outputs, programs)
if not FLAGS.slow_decode else None)
predicted = p_pred_step(optimizer.target, inputs, outputs, cache,
beam_size)
predicted = tohost(predicted)
inputs, outputs, programs = map(tohost, (inputs, outputs, programs))
pred_denominator += programs.shape[0]
for i, beams in enumerate(predicted):
inps, outs = decode_io(inputs[i], outputs[i])
p, p_score = eval_predicted(
beams, inps, outs, parse_beam_fn=decode_program)
if p_score >= len(inps):
pred_acc += 1
ios.append(' ; '.join(map(str, zip(inps, outs))))
targets.append(decode_program(programs[i]).to_string())
try:
predictions.append(p.to_string())
except: # pylint: disable=bare-except
predictions.append('Did not compile')
logging.info('ios: %s', ios[-1])
logging.info('target: %s', targets[-1])
beams_log = []
for beam in beams:
try:
beams_log.append(decode_program(beam).to_string())
except: # pylint: disable=bare-except
beams_log.append('Did not compile')
logging.info('predicted beam: %s', '\n'.join(beams_log))
top_of_beam = []
for index, beam in enumerate(beams[:-5:-1]):
try:
decoded_program = decode_program(beam).to_string()
except: # pylint: disable=bare-except
decoded_program = 'Did not compile'
top_of_beam.append('index: {}, decoded: {}, tokens: {}'.format(
index, decoded_program, beam))
top_of_beams.append('\n\n'.join(top_of_beam))
all_pred_acc, all_pred_denominator = per_host_sum_pmap(
jax.tree_map(np.array, (pred_acc, pred_denominator)))
# Record beam search results as text summaries.
message = []
for n in np.random.choice(np.arange(len(predictions)), 8):
text = (f'ios: {ios[n]}\n\ntarget: {targets[n]}\n\n'
f'predicted: {predictions[n]}\n\n'
f'top of beam:\n\n{top_of_beams[n]}\n\n')
message.append(text)
# Write to tensorboard.
if jax.host_id() == 0:
slow_or_fast = 'slow' if FLAGS.slow_decode else 'fast'
logging.info(
'Prediction time, %s (beam %d): %.4f s, step %d, score %.4f',
slow_or_fast, beam_size, time.time() - t_inference_start, step,
all_pred_acc / all_pred_denominator)
summary_writer.scalar(
'predict-{}/score-{}'.format(slow_or_fast, beam_size),
all_pred_acc / all_pred_denominator, step)
summary_writer.text('samples-{}'.format(beam_size),
'\n------\n'.join(message), step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
| 38.263421
| 96
| 0.661131
| 3,966
| 30,649
| 4.893343
| 0.159607
| 0.014737
| 0.01484
| 0.00541
| 0.278559
| 0.221054
| 0.170815
| 0.129077
| 0.093317
| 0.087597
| 0
| 0.009025
| 0.222683
| 30,649
| 800
| 97
| 38.31125
| 0.805574
| 0.181148
| 0
| 0.153025
| 0
| 0.001779
| 0.111071
| 0.00829
| 0
| 0
| 0
| 0.00125
| 0.003559
| 1
| 0.040925
| false
| 0.001779
| 0.046263
| 0.003559
| 0.128114
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ebf47f1f35ac5baec5295be6bb6feebf67dc9a
| 5,412
|
py
|
Python
|
plot/profile_interpolation/plot_profile.py
|
ziyixi/SeisScripts
|
a484bc1747eae52b2441f0bfd47ac7e093150f1d
|
[
"MIT"
] | null | null | null |
plot/profile_interpolation/plot_profile.py
|
ziyixi/SeisScripts
|
a484bc1747eae52b2441f0bfd47ac7e093150f1d
|
[
"MIT"
] | null | null | null |
plot/profile_interpolation/plot_profile.py
|
ziyixi/SeisScripts
|
a484bc1747eae52b2441f0bfd47ac7e093150f1d
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import click
import numba
def prepare_data(data_pd, parameter):
lon_set = set(data_pd["lon"])
lat_set = set(data_pd["lat"])
dep_set = set(data_pd["dep"])
lon_list = sorted(lon_set)
lat_list = sorted(lat_set)
dep_list = sorted(dep_set)
lon_mesh, lat_mesh, dep_mesh = np.meshgrid(
lon_list, lat_list, dep_list, indexing="ij")
dx, dy, dz = np.shape(lon_mesh)
value_mesh = np.zeros_like(lon_mesh)
x_mesh = np.zeros_like(lon_mesh)
y_mesh = np.zeros_like(lon_mesh)
z_mesh = np.zeros_like(lon_mesh)
r_mesh = np.zeros_like(lon_mesh)
for i in range(dx):
for j in range(dy):
for k in range(dz):
x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr(
lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k])
for index, row in data_pd.iterrows():
i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0))
j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0))
k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0))
value_mesh[i, j, k] = row[parameter]
return x_mesh, y_mesh, z_mesh, value_mesh
def get_value(data_pd, lat, lon, dep, parameter):
return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0]
@numba.njit()
def lld2xyzr(lat, lon, dep):
R_EARTH_KM = 6371.0
r = (R_EARTH_KM-dep)/R_EARTH_KM
theta = 90-lat
phi = lon
z = r*cosd(theta)
h = r*sind(theta)
x = h*cosd(phi)
y = h*sind(phi)
return (x, y, z, r)
@numba.njit()
def cosd(x):
return np.cos(np.deg2rad(x))
@numba.njit()
def sind(x):
return np.sin(np.deg2rad(x))
# def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh):
# value_func = RegularGridInterpolator(
# (x_mesh, y_mesh, z_mesh), value_mesh, method="nearest")
# return value_func
@numba.njit()
def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):
x, y, z, _ = lld2xyzr(lat, lon, dep)
distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2
mindistance2 = np.min(distance2)
coors = np.where(distance2 == mindistance2)
value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]
return value
def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):
lons = np.linspace(lon_list[0], lon_list[1], hnpts)
lats = np.linspace(lat_list[0], lat_list[1], hnpts)
deps = np.linspace(dep_list[0], dep_list[1], vnpts)
return lons, lats, deps
@click.command()
@click.option('--lon1', required=True, type=float, help="lon1")
@click.option('--lon2', required=True, type=float, help="lon2")
@click.option('--lat1', required=True, type=float, help="lat1")
@click.option('--lat2', required=True, type=float, help="lat2")
@click.option('--dep1', required=True, type=float, help="dep1")
@click.option('--dep2', required=True, type=float, help="dep2")
@click.option('--data', required=True, type=str, help="the pickle file")
@click.option('--parameter', required=True, type=str, help="physical parameter to plot")
@click.option('--hnpts', required=True, type=int, help="horizontal npts")
@click.option('--vnpts', required=True, type=int, help="vertical npts")
def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):
lon_list = [lon1, lon2]
lat_list = [lat1, lat2]
dep_list = [dep1, dep2]
data_pd_raw = pd.read_pickle(data)
    # data_pd_raw is too big; keep only the profile's bounding box
minlon = min(lon1, lon2)
maxlon = max(lon1, lon2)
minlat = min(lat1, lat2)
maxlat = max(lat1, lat2)
mindep = min(dep1, dep2)
maxdep = max(dep1, dep2)
data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & (
data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)]
x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter)
lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(
lon_list, lat_list, dep_list, hnpts, vnpts)
values = np.zeros((hnpts, vnpts))
for ih in range(hnpts):
for iv in range(vnpts):
values[ih, iv] = interp_value(
lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)
# print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])
# plotting part
plt.figure()
mesh_plot_lat, mesh_plot_dep = np.meshgrid(
lats_plot, deps_plot, indexing="ij")
# get vmin and vmax
vmin_round = round(np.min(values), 2)
if(vmin_round < np.min(values)):
vmin = vmin_round
else:
vmin = vmin_round-0.01
vmax_round = round(np.max(values), 2)
if(vmax_round > np.max(values)):
vmax = vmax_round
else:
vmax = vmax_round+0.01
print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round)
plt.contourf(mesh_plot_lat, mesh_plot_dep,
values, 101, cmap=plt.cm.seismic_r)
v = np.arange(vmin, vmax, 0.01)
plt.colorbar(ticks=v, label="perturbation")
plt.gca().invert_yaxis()
plt.xlabel(
f"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)")
plt.ylabel("depth(km)")
plt.show()
if __name__ == "__main__":
main()
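# Hedged invocation sketch (not in the original; the flag values, the pickle
# file model.pkl and the parameter name dvs are illustrative):
# python plot_profile.py --lon1 100 --lon2 110 --lat1 30 --lat2 40 \
#     --dep1 0 --dep2 700 --data model.pkl --parameter dvs \
#     --hnpts 100 --vnpts 70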
| 34.037736
| 153
| 0.636179
| 879
| 5,412
| 3.715586
| 0.17975
| 0.038579
| 0.04899
| 0.017146
| 0.271586
| 0.181568
| 0.098592
| 0.098592
| 0.054501
| 0.03613
| 0
| 0.022911
| 0.201589
| 5,412
| 158
| 154
| 34.253165
| 0.731775
| 0.055617
| 0
| 0.05
| 0
| 0.008333
| 0.055664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.041667
| 0.025
| 0.166667
| 0.008333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ebfc32e5da592e6e4c3fa48a02c7a3cbe0a2ce
| 367
|
py
|
Python
|
tests/test_heroku.py
|
edpaget/flask-appconfig
|
5264719ac9229339070b219a4358a3203ffd05b0
|
[
"MIT"
] | 61
|
2015-01-28T21:19:11.000Z
|
2020-12-28T10:12:28.000Z
|
tests/test_heroku.py
|
edpaget/flask-appconfig
|
5264719ac9229339070b219a4358a3203ffd05b0
|
[
"MIT"
] | 3
|
2016-01-25T00:09:55.000Z
|
2017-09-25T11:36:19.000Z
|
tests/test_heroku.py
|
edpaget/flask-appconfig
|
5264719ac9229339070b219a4358a3203ffd05b0
|
[
"MIT"
] | 14
|
2015-07-22T12:58:06.000Z
|
2021-03-24T02:06:30.000Z
|
from flask import Flask
from flask_appconfig import HerokuConfig
def create_sample_app():
app = Flask('testapp')
HerokuConfig(app)
return app
def test_herokupostgres(monkeypatch):
monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri')
app = create_sample_app()
assert app.config['SQLALCHEMY_DATABASE_URI'] == 'heroku-db-uri'
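# Hedged note (not part of the test file): pytest supplies the monkeypatch
# fixture automatically, so this runs as e.g.
#   pytest tests/test_heroku.py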
| 22.9375
| 71
| 0.746594
| 46
| 367
| 5.717391
| 0.543478
| 0.068441
| 0.114068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152589
| 367
| 15
| 72
| 24.466667
| 0.845659
| 0
| 0
| 0
| 0
| 0
| 0.228883
| 0.138965
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ec0f9bcbbfb50a7fe60cb1505775e1803a9dd4
| 396
|
py
|
Python
|
flask/util/logger.py
|
Dev-Jahn/cms
|
84ea115bdb865daff83d069502f6f0dd105fc4f0
|
[
"RSA-MD"
] | null | null | null |
flask/util/logger.py
|
Dev-Jahn/cms
|
84ea115bdb865daff83d069502f6f0dd105fc4f0
|
[
"RSA-MD"
] | 9
|
2021-01-05T07:48:28.000Z
|
2021-05-14T06:38:27.000Z
|
flask/util/logger.py
|
Dev-Jahn/cms
|
84ea115bdb865daff83d069502f6f0dd105fc4f0
|
[
"RSA-MD"
] | 4
|
2021-01-05T06:46:09.000Z
|
2021-05-06T01:44:28.000Z
|
import logging
"""
Formatter
"""
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S')
"""
Set Flask logger
"""
logger = logging.getLogger('FLASK_LOG')
logger.setLevel(logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setFormatter(formatter)
logger.addHandler(stream_log)
# if disabled
# logger.disabled = True
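# Hedged usage sketch (not in the original module; the import path mirrors
# this file's location and is an assumption):
# from util.logger import logger
# logger.debug('debug message')  # emitted, since the level is DEBUG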
| 20.842105
| 114
| 0.699495
| 50
| 396
| 5.46
| 0.54
| 0.098901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118687
| 396
| 18
| 115
| 22
| 0.782235
| 0.085859
| 0
| 0
| 0
| 0
| 0.251613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ecab271dab8a62fddc0d43582d82c9d0efb150
| 1,592
|
py
|
Python
|
utils/backups/backup_psql.py
|
Krovatkin/NewsBlur
|
2a5b52984c9d29c864eb80e9c60c658b1f25f7c5
|
[
"MIT"
] | null | null | null |
utils/backups/backup_psql.py
|
Krovatkin/NewsBlur
|
2a5b52984c9d29c864eb80e9c60c658b1f25f7c5
|
[
"MIT"
] | null | null | null |
utils/backups/backup_psql.py
|
Krovatkin/NewsBlur
|
2a5b52984c9d29c864eb80e9c60c658b1f25f7c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import os
import sys
import socket
CURRENT_DIR = os.path.dirname(__file__)
NEWSBLUR_DIR = ''.join([CURRENT_DIR, '/../../'])
sys.path.insert(0, NEWSBLUR_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings'
import threading
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
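# The imports below are deferred on purpose: django.conf.settings is only
# usable because DJANGO_SETTINGS_MODULE was set near the top of this script.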
import time
import boto3
from django.conf import settings
BACKUP_DIR = '/srv/newsblur/backup/'
s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET)
hostname = socket.gethostname().replace('-','_')
s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.sql'
path = os.listdir(BACKUP_DIR)[0]
full_path = os.path.join(BACKUP_DIR, path)
print('Uploading %s to %s on S3 bucket %s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path))
os.remove(full_path)
| 34.608696
| 108
| 0.682161
| 220
| 1,592
| 4.609091
| 0.409091
| 0.039448
| 0.039448
| 0.051282
| 0.117357
| 0.04142
| 0
| 0
| 0
| 0
| 0
| 0.016204
| 0.18593
| 1,592
| 45
| 109
| 35.377778
| 0.766204
| 0.047739
| 0
| 0
| 0
| 0.028571
| 0.135403
| 0.090489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.2
| 0
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ecdb5499de55821a90a7d456c0a5f3e2bbff3c
| 22,780
|
py
|
Python
|
onap_tests/scenario/solution.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | null | null | null |
onap_tests/scenario/solution.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | null | null | null |
onap_tests/scenario/solution.py
|
Orange-OpenSource/xtesting-onap-tests
|
ce4237f49089a91c81f5fad552f78fec384fd504
|
[
"Apache-2.0"
] | 2
|
2018-06-08T15:49:51.000Z
|
2021-06-22T10:06:30.000Z
|
#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# pylint: disable=missing-docstring
# pylint: disable=duplicate-code
import logging
import time
import onap_tests.components.aai as aai
import onap_tests.components.so as so
import onap_tests.components.sdnc as sdnc
import onap_tests.components.nbi as nbi
import onap_tests.utils.stack_checker as sc
import onap_tests.utils.utils as onap_utils
PROXY = onap_utils.get_config("general.proxy")
class Solution(object):
"""
VNF: Class to automate the instantiation of a VNF
It is assumed that the Design phase has been already done
The yaml template is available and stored in the template directory
TODO: automate the design phase
"""
__logger = logging.getLogger(__name__)
def __init__(self, **kwargs):
"""Initialize Solution object."""
super(Solution, self).__init__()
self.vnf_config = {}
self.components = {}
if "case" not in kwargs:
            # by convention, if the VNF is not specified we use mrf
kwargs["case"] = "mrf"
self.vnf_config["vnf"] = kwargs["case"]
if "nbi" in kwargs:
self.vnf_config["nbi"] = kwargs["nbi"]
        # can be useful to destroy resources; the sdnc module name must be given
        if "sdnc_vnf_name" in kwargs:
            self.vnf_config["sdnc_vnf_name"] = kwargs["sdnc_vnf_name"]
            # Random part = last 6 chars of the vnf name
self.vnf_config["random_string"] = kwargs["sdnc_vnf_name"][-6:]
else:
self.vnf_config["random_string"] = (
onap_utils.random_string_generator())
self.vnf_config["sdnc_vnf_name"] = (
onap_utils.get_config("onap.service.name") + "_" +
kwargs["case"] + "_" + self.vnf_config["random_string"])
vnf_list = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.node_templates"))
vf_module_list = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.groups"))
# Class attributes for instance, vnf and module VF
self.service_infos = {}
self.vnf_infos = {'list': vnf_list}
self.module_infos = {'list': vf_module_list}
# retrieve infos from the configuration files
self.set_service_instance_var()
self.set_vnf_var()
self.set_module_var()
self.set_onap_components()
def set_service_instance_var(self):
"""
set service instance variables from the config file
"""
self.vnf_config["vnf_name"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.name")
self.vnf_config["invariant_uuid"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.invariantUUID")
self.vnf_config["uuid"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.UUID")
def set_vnf_var(self):
"""
set vnf variables from the config file
"""
for i, elt in enumerate(self.vnf_infos['list']):
vnf_config = {}
self.__logger.info("get VNF %s info", elt)
vnf_config["vnf_customization_name"] = elt
vnf_config["vnf_model_name"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] + ".metadata.name")
vnf_config["vnf_invariant_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] +
".metadata.invariantUUID")
vnf_config["vnf_version_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] + ".metadata.UUID")
vnf_config["vnf_customization_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.node_templates." +
vnf_config["vnf_customization_name"] +
".metadata.customizationUUID"))
vnf_config["vnf_type"] = list(onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups"))[i]
vnf_config["vnf_generic_name"] = (
self.vnf_config["vnf_name"] + "-service-instance-" +
self.vnf_config["random_string"])
vnf_config["vnf_generic_type"] = (
self.vnf_config["vnf_name"] + "/" +
vnf_config["vnf_customization_name"])
self.vnf_config[elt] = vnf_config
def set_module_var(self):
"""
set module variables from the config file
"""
for elt in self.vnf_infos['list']:
vf_config = {}
            # we cannot be sure that the modules are in the same order
            # as the VNFs
vf_index = onap_utils.get_vf_module_index(
self.module_infos['list'],
elt)
vnf_type = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.groups"))[vf_index]
self.__logger.info("Complete Module info for VNF %s", elt)
vf_config["sdnc_vnf_type"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type +
".metadata.vfModuleModelName")
vnf_param = (self.vnf_config["vnf"] + "." +
str(elt) + ".vnf_parameters")
vf_config["vnf_parameters"] = onap_utils.get_config(vnf_param)
vf_config["module_invariant_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelInvariantUUID")
vf_config["module_name_version_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelUUID"))
vf_config["module_customization_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelCustomizationUUID"))
vf_config["module_version_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelUUID")
self.vnf_config[elt].update(vf_config)
def set_onap_components(self):
"""
Set ONAP component objects
"""
self.components["aai"] = aai.Aai(PROXY, self.__logger)
self.components["so"] = so.So(PROXY, self.__logger)
self.components["sdnc"] = sdnc.Sdnc(PROXY, self.__logger)
self.components["nbi"] = nbi.Nbi(PROXY, self.__logger)
def instantiate(self):
"""
Instantiate a VNF with ONAP
* Create the service instance (SO)
* Create the VNF instance (SO)
* preload the VNF in the SDNC
* Create the VF module instance (SO)
"""
instance_info = {"instance_id": ""}
vnf_info = {"vnf_id": ""}
module_info = {}
module_ref = {"instanceId": ""}
        module_ok = False
        vnf_ok = False
        check_vnf = False
self.__logger.info("Start the instantiation of the VNF")
instance_info = self.create_service_instance()
service_ok = self.components["aai"].check_service_instance(
self.vnf_config["vnf_name"],
instance_info["instance_id"])
if service_ok:
# create VNF instance(s)
for elt in self.vnf_infos['list']:
vnf_info = self.create_vnf_instance(elt)
self.__logger.info("Check vnf %s ....", elt)
vnf_ok = True
self.__logger.info("Check vnf %s ....", elt)
if not self.components["aai"].check_vnf_instance(
vnf_info["vnf_id"]):
vnf_ok = False
break
else:
# preload VNF(s) in SDNC
self.preload(elt)
time.sleep(10)
if vnf_ok:
# create VF module(s)
for elt in self.vnf_infos['list']:
module_info = self.create_module_instance(elt)
module_ok = True
module_ref = module_info['module_instance']
if not self.components["aai"].check_module_instance(
vnf_info["vnf_id"],
module_ref["requestReferences"]["instanceId"]):
module_ok = False
break
else:
# check VNF using OpenStack directly
check_vnf = self.check_vnf(
self.module_infos[elt]["module_instance_name"])
if check_vnf:
self.__logger.info("Stack successfully checked")
return {"status": module_ok,
"instance_id": instance_info,
"vnf_info": vnf_info,
"module_info": module_info,
"check_heat": check_vnf}
def clean(self):
"""
Clean VNF from ONAP
Args:
instance_id: The ID of the VNF service instance
vnf_id: The ID of the VNF instance
module_id: The ID of the VF module instance
"""
instance_id = self.service_infos['instance_id']
for elt in self.vnf_infos['list']:
vnf_id = self.vnf_infos[elt]["vnf_id"]
module_id = (self.module_infos[elt]["module_instance"]
["requestReferences"]["instanceId"])
self.clean_module(elt)
if not self.components["aai"].check_module_cleaned(vnf_id,
module_id):
return False
else:
self.clean_vnf(elt)
if not self.components["aai"].check_vnf_cleaned(vnf_id):
return False
else:
self.clean_instance(instance_id)
if self.components["aai"].check_service_instance_cleaned(
self.vnf_config["vnf_name"], instance_id):
self.__logger.debug("Instance still in AAI DB")
else:
return False
time.sleep(10)
self.clean_preload(elt)
return True
def create_service_instance(self):
"""
Create service instance
2 options to create the instance
* with SO
* with NBI
"""
instance_id = None
model_info = self.components["so"].get_service_model_info(
self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])
if self.vnf_config["nbi"]:
self.__logger.info("1) Create Service instance from NBI")
self.__logger.info("***********************************")
request_info = self.components["nbi"].get_request_info()
service_payload = (
self.components["nbi"].get_nbi_service_order_payload())
nbi_info = self.components["nbi"].create_service_order_nbi(
service_payload)
time.sleep(5)
instance_id = (
self.components["nbi"].get_service_instance_id_from_order(
nbi_info["id"]))
else:
self.__logger.info("1) Create Service instance in SO")
self.__logger.info("********************************")
request_info = self.components["so"].get_request_info(
self.vnf_config["vnf"] + "-service-instance-" +
self.vnf_config['random_string'])
service_payload = self.components["so"].get_service_payload(
self.vnf_config["vnf"],
request_info,
model_info)
instance_id = self.components["so"].create_instance(
service_payload)
service_instance_info = {"instance_id": instance_id,
"request_info": request_info,
"service_payload": service_payload}
self.__logger.info("Service instance created: %s",
service_instance_info)
self.service_infos = service_instance_info
return service_instance_info
def create_vnf_instance(self, elt):
"""
Create VNF instance
Args:
* elt: the VNF
"""
vnf_id = None
self.__logger.info("2) Create VNF instance in SO")
self.__logger.info("****************************")
model_info = self.components["so"].get_vnf_model_info(
self.vnf_config[elt]['vnf_invariant_id'],
self.vnf_config[elt]['vnf_version_id'],
self.vnf_config[elt]['vnf_model_name'],
self.vnf_config[elt]['vnf_customization_id'],
self.vnf_config[elt]['vnf_customization_name'])
vnf_related_instance = self.components["so"].get_vnf_related_instance(
self.service_infos["instance_id"],
self.vnf_config['invariant_uuid'],
self.vnf_config['uuid'])
vnf_instance_name = (self.vnf_config["vnf"] + "-vnf-instance-" +
str(elt).replace(" ", "_") + ("_") +
self.vnf_config['random_string'])
request_info = self.components["so"].get_request_info(
vnf_instance_name)
vnf_payload = self.components["so"].get_vnf_payload(
self.vnf_config["vnf"],
request_info,
model_info,
vnf_related_instance)
# self.__logger.debug("VNF payload: %s", vnf_payload)
vnf_id = self.components["so"].create_vnf(
self.service_infos["instance_id"],
vnf_payload)
vnf_info = {"vnf_id": vnf_id,
"vnf_instance_name": vnf_instance_name,
"vnf_payload": vnf_payload,
"vnf_related_instance": vnf_related_instance}
self.__logger.info(">>>> SO vnf instance created %s", vnf_info)
self.vnf_infos[elt] = vnf_info
return vnf_info
def preload(self, elt):
"""
Preload VNF in SDNC
Args:
* elt: the VNF
"""
vnf_preload_infos = {}
self.__logger.info("3) Preload VNF %s in SDNC", elt)
self.__logger.info("*******************************")
vnf_name = (self.vnf_config["vnf"] +
"-vfmodule-instance-" +
str(elt).replace(" ", "_") + "_" +
self.vnf_config['random_string'])
vnf_topology_identifier = {
"generic-vnf-name": vnf_name,
"generic-vnf-type": (
self.vnf_config[elt]['vnf_generic_type']),
"service-type": self.service_infos["instance_id"],
"vnf-name": vnf_name,
"vnf-type": self.vnf_config[elt]['sdnc_vnf_type']}
sdnc_payload = self.components["sdnc"].get_preload_payload(
self.vnf_config[elt]['vnf_parameters'],
vnf_topology_identifier)
self.__logger.info("SDNC preload payload %s", sdnc_payload)
sdnc_preload = self.components["sdnc"].preload(sdnc_payload)
self.__logger.debug("SDNC preload answer: %s", sdnc_preload)
vnf_preload_infos[elt] = ({"sdnc_payload": sdnc_payload,
"sdnc_preload": sdnc_preload})
return vnf_preload_infos[elt]
def create_module_instance(self, elt):
"""
Create module instance
Args:
        * elt: the VNF whose VF module is instantiated
        """
module_info = {}
self.__logger.info("4) Create MODULE %s instance in SO", elt)
self.__logger.info("***************************************")
module_model_info = self.components["so"].get_module_model_info(
self.vnf_config[elt]['module_invariant_id'],
self.vnf_config[elt]['module_name_version_id'],
self.vnf_config[elt]['sdnc_vnf_type'],
self.vnf_config[elt]['module_customization_id'],
self.vnf_config[elt]['module_version_id'])
module_related_instance = (
self.components["so"].get_module_related_instance(
self.vnf_infos[elt]["vnf_id"],
self.vnf_config[elt]['vnf_invariant_id'],
self.vnf_config[elt]['vnf_version_id'],
self.vnf_config[elt]['vnf_model_name'],
self.vnf_config[elt]['vnf_customization_id'],
self.vnf_config[elt]['vnf_customization_name']))
module_instance_name = (self.vnf_config["vnf"] +
"-vfmodule-instance-" +
str(elt).replace(" ", "_") + "_" +
self.vnf_config['random_string'])
request_info = self.components["so"].get_request_info(
module_instance_name)
module_payload = self.components["so"].get_module_payload(
self.vnf_config["vnf"],
request_info,
module_model_info,
self.vnf_infos[elt]["vnf_related_instance"],
module_related_instance)
self.__logger.debug("Module payload %s", module_payload)
module_instance = self.components["so"].create_module(
self.service_infos["instance_id"],
self.vnf_infos[elt]["vnf_id"],
module_payload)
self.__logger.info(">>>> Module instance created: %s", module_instance)
module_info = (
{'module_instance': module_instance,
'module_instance_name': module_instance_name,
'module_payload': module_payload,
'module_model_info': module_model_info,
'module_related_instance': module_related_instance})
self.__logger.info("SO module vf(s) created: %s", module_info)
self.module_infos[elt] = module_info
return module_info
def check_vnf(self, stack_name):
"""
        Check that the VNF stack has been properly started
"""
check_vnf = False
try:
my_stack_checker = sc.StackChecker()
if my_stack_checker.check_stack_is_complete(stack_name):
check_vnf = True
except Exception: # pylint: disable=broad-except
self.__logger.error("Impossible to find the stack %s in OpenStack",
stack_name)
return check_vnf
def clean_instance(self, instance_id):
"""
Clean VNF instance
Args:
* instance_id: The service instance of the VNF
"""
self.__logger.info(" Clean Service Instance ")
service_payload = self.components["so"].get_service_payload(
self.vnf_config["vnf"],
self.components["so"].get_request_info(
self.vnf_config['sdnc_vnf_name']),
self.components["so"].get_service_model_info(
self.vnf_config['invariant_uuid'],
self.vnf_config['uuid']))
self.components["so"].delete_instance(instance_id, service_payload)
def clean_vnf(self, elt):
"""
Clean VNF
        Args:
        * elt: the VNF to clean
        """
self.__logger.info(" Clean vnf Instance %s ", elt)
self.components["so"].delete_vnf(
self.service_infos["instance_id"],
self.vnf_infos[elt]["vnf_id"],
self.vnf_infos[elt]["vnf_payload"])
def clean_module(self, elt):
"""
Clean VNF Module
        Args:
        * elt: the VNF whose VF module is cleaned
        """
self.__logger.info(" Clean Module VF Instance %s ", elt)
instance_id = self.service_infos["instance_id"]
vnf_id = self.vnf_infos[elt]["vnf_id"]
module_id = (self.module_infos[elt]["module_instance"]
["requestReferences"]["instanceId"])
module_payload = self.module_infos[elt]["module_payload"]
self.components["so"].delete_module(
module_payload,
instance_id,
vnf_id,
module_id)
def clean_preload(self, elt):
"""
Clean VNF SDNC preload
"""
self.__logger.info(" Clean Preload of %s ", elt)
        # the status of the preload deletion is returned to the caller (False on failure)
clean_preload = self.components["sdnc"].delete_preload(
self.module_infos[elt]["module_instance_name"],
self.vnf_config[elt]["sdnc_vnf_type"])
return clean_preload
def clean_all_preload(self):
"""
Clean VNF SDNC preload with the preload id
"""
self.__logger.info(" Clean Preload ")
for elt in self.vnf_infos['list']:
clean_preload = self.components["sdnc"].delete_preload(
self.module_infos[elt]["module_instance_name"],
self.vnf_config[elt]['sdnc_vnf_type'])
return clean_preload
def get_info(self):
"""
Get VNFs Info
"""
self.__logger.info("Class to manage VNFs")
self.__logger.info("VNF config: %s", self.vnf_config)
| 42.342007
| 80
| 0.554478
| 2,474
| 22,780
| 4.785772
| 0.090946
| 0.069932
| 0.083446
| 0.041892
| 0.516892
| 0.425929
| 0.343159
| 0.28277
| 0.262922
| 0.240625
| 0
| 0.001187
| 0.334197
| 22,780
| 537
| 81
| 42.420857
| 0.779455
| 0.106234
| 0
| 0.251989
| 0
| 0
| 0.186011
| 0.054534
| 0
| 0
| 0
| 0.001862
| 0
| 1
| 0.047745
| false
| 0
| 0.02122
| 0
| 0.106101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ecf5f6a01c26c5389153d1137d146050eff0e3
| 3,262
|
py
|
Python
|
tutorials/Controls4Docs/ControlEventsGraph.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
tutorials/Controls4Docs/ControlEventsGraph.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
tutorials/Controls4Docs/ControlEventsGraph.py
|
dominic-dev/pyformsd
|
23e31ceff2943bc0f7286d25dd14450a14b986af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
from __init__ import *
import random, time
from PyQt4 import QtCore
class SimpleExample(BaseWidget):
def __init__(self):
super(SimpleExample,self).__init__('Simple example')
#Definition of the forms fields
self._control0 = ControlEventsGraph('Check me')
self._control1 = ControlEventsGraph('Check me')
self._control2 = ControlEventsGraph('Check me')
self._control3 = ControlEventsGraph('Check me')
self._txt = ControlText('Time')
self._btn = ControlButton('Click')
self._btn1 = ControlButton('Click 1')
self._save = ControlButton('Save button')
self._load = ControlButton('Load button')
self.formset = [
('_btn','_btn1'),
('_control0','_control1'),
('_control2','_control3'),
'_txt',
('_save','_load')]
self._btn.value = self.__btn
self._btn1.value = self.__btn1
self._save.value = self.save_window
self._load.value = self.load_window
self._start = time.time()
self.INTERVAL = 500
self.N_TRACKS = 8
def __btn(self):
for i in range(40):
s = random.randint( 0, 10000 )
o = random.randint( 0, 1000 )
self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) )
#self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color="#00FFDD")
self._control0.value = 5000
def __addEvent0(self):
b = self._control0.value
e = b+self.INTERVAL
self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control0.value = e
self._txt.value = str(time.time() - self._start)
def __addEvent1(self):
b = self._control1.value
e = b+self.INTERVAL
self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control1.value = e
def __addEvent2(self):
b = self._control2.value
e = b+self.INTERVAL
self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control2.value = e
def __addEvent3(self):
b = self._control3.value
e = b+self.INTERVAL
self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control3.value = e
def __btn1(self):
self._start = time.time()
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent0)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent1)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent2)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent3)
timer.start(self.INTERVAL)
##################################################################################################################
##################################################################################################################
##################################################################################################################
#Execute the application
if __name__ == "__main__": pyforms.start_app( SimpleExample )
| 27.183333
| 116
| 0.62477
| 388
| 3,262
| 4.920103
| 0.260309
| 0.056574
| 0.066003
| 0.059717
| 0.319539
| 0.319539
| 0.271346
| 0.271346
| 0.271346
| 0.2022
| 0
| 0.02726
| 0.14531
| 3,262
| 120
| 117
| 27.183333
| 0.657461
| 0.061925
| 0
| 0.181818
| 0
| 0
| 0.085883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.038961
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86eee025668f1ba4e581d9197ce7264211e57bc7
| 3,704
|
py
|
Python
|
tempest/tests/lib/services/compute/test_security_group_default_rules_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 254
|
2015-01-05T19:22:52.000Z
|
2022-03-29T08:14:54.000Z
|
tempest/tests/lib/services/compute/test_security_group_default_rules_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 13
|
2015-03-02T15:53:04.000Z
|
2022-02-16T02:28:14.000Z
|
tempest/tests/lib/services/compute/test_security_group_default_rules_client.py
|
mail2nsrajesh/tempest
|
1a3b3dc50b418d3a15839830d7d1ff88c8c76cff
|
[
"Apache-2.0"
] | 367
|
2015-01-07T15:05:39.000Z
|
2022-03-04T09:50:35.000Z
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.compute import security_group_default_rules_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest):
FAKE_RULE = {
"from_port": 80,
"id": 1,
"ip_protocol": "TCP",
"ip_range": {
"cidr": "10.10.10.0/24"
},
"to_port": 80
}
def setUp(self):
super(TestSecurityGroupDefaultRulesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = (security_group_default_rules_client.
SecurityGroupDefaultRulesClient(fake_auth, 'compute',
'regionOne'))
def _test_list_security_group_default_rules(self, bytes_body=False):
self.check_service_client_function(
self.client.list_security_group_default_rules,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rules": [self.FAKE_RULE]},
to_utf=bytes_body)
def test_list_security_group_default_rules_with_str_body(self):
self._test_list_security_group_default_rules()
def test_list_security_group_default_rules_with_bytes_body(self):
self._test_list_security_group_default_rules(bytes_body=True)
def _test_show_security_group_default_rule(self, bytes_body=False):
self.check_service_client_function(
self.client.show_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body,
security_group_default_rule_id=1)
def test_show_security_group_default_rule_with_str_body(self):
self._test_show_security_group_default_rule()
def test_show_security_group_default_rule_with_bytes_body(self):
self._test_show_security_group_default_rule(bytes_body=True)
def _test_create_security_default_group_rule(self, bytes_body=False):
request_body = {
"to_port": 80,
"from_port": 80,
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
}
self.check_service_client_function(
self.client.create_security_default_group_rule,
'tempest.lib.common.rest_client.RestClient.post',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body, **request_body)
def test_create_security_default_group_rule_with_str_body(self):
self._test_create_security_default_group_rule()
def test_create_security_default_group_rule_with_bytes_body(self):
self._test_create_security_default_group_rule(bytes_body=True)
def test_delete_security_group_default_rule(self):
self.check_service_client_function(
self.client.delete_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=204, security_group_default_rule_id=1)
| 41.617978
| 78
| 0.704644
| 469
| 3,704
| 5.149254
| 0.268657
| 0.113043
| 0.173913
| 0.119255
| 0.619462
| 0.537888
| 0.487785
| 0.375983
| 0.276605
| 0.164803
| 0
| 0.013803
| 0.217603
| 3,704
| 88
| 79
| 42.090909
| 0.819531
| 0.162797
| 0
| 0.229508
| 0
| 0
| 0.12504
| 0.086168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.180328
| false
| 0
| 0.04918
| 0
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86ef847c1cba2674adc29aa5bed41c18d23f595a
| 24,723
|
py
|
Python
|
memos/memos/models/Memo.py
|
iotexpert/docmgr
|
735c7bcbaeb73bc44efecffb175f268f2438ac3a
|
[
"MIT"
] | null | null | null |
memos/memos/models/Memo.py
|
iotexpert/docmgr
|
735c7bcbaeb73bc44efecffb175f268f2438ac3a
|
[
"MIT"
] | null | null | null |
memos/memos/models/Memo.py
|
iotexpert/docmgr
|
735c7bcbaeb73bc44efecffb175f268f2438ac3a
|
[
"MIT"
] | null | null | null |
"""
The model file for a Memo
"""
import re
import os
import shutil
import json
from datetime import datetime
from flask import current_app
from memos import db
from memos.models.User import User
from memos.models.MemoState import MemoState
from memos.models.MemoFile import MemoFile
from memos.models.MemoSignature import MemoSignature
from memos.models.MemoReference import MemoReference
from memos.models.MemoHistory import MemoHistory
from memos.models.MemoActivity import MemoActivity
from memos.revletter import b10_to_rev, rev_to_b10
class Memo(db.Model):
"""This class is the single interface to a "memo" and all of the "memos"
"""
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer) # Memo Number
version = db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA
confidential = db.Column(db.Boolean, default=False) # if true only author, signer, distribution can read
distribution = db.Column(db.String(128), default='') # user names on the distribution
keywords = db.Column(db.String(128), default='') # any keyword
title = db.Column(db.String(128), nullable=False, default='') # The title of the memo
num_files = db.Column(db.Integer, default=0) # The number of files attached to the memo
action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) # The last time anything happened
create_date = db.Column(db.DateTime) # when the memo was created
submit_date = db.Column(db.DateTime) # when the memo was most recently submitted (from created)
active_date = db.Column(db.DateTime) # when the memo was moved to active state (from submitted)
obsolete_date = db.Column(db.DateTime) # when the memo was moved to obsolete state (from active)
user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The key of the user who owns the memo
_signers = db.Column(db.String(128),default='') # the hidden list of signer usernames
_references = db.Column(db.String(128),default='') # The hidden list of references
memo_state = db.Column(db.Enum(MemoState)) # Draft, Signoff, Active, Obsolete
def __init__(self, **kwargs):
super().__init__(**kwargs)
# do custom initialization here
def __repr__(self):
return f"{self.user.username}-{self.number}{self.version}"
def __str__(self):
return f"{self.user.username}-{self.number}{self.version}"
########################################
# Permission Functions
########################################
@staticmethod
def can_create(owner=None, delegate=None):
"""Will return true if the delegate can create a memo for the owner"""
if owner is None:
return False
if delegate is None:
delegate = owner
return owner.is_delegate(delegate=delegate)
def can_revise(self, delegate=None):
"""Is the delgate allowed to update "this" memo?"""
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
        if self.memo_state == MemoState.Active or self.memo_state == MemoState.Obsolete:
            return True
        return False
def can_sign(self, signer=None, delegate=None):
"""Can this memo be signed by delegate for the signers"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
# The list of signers and if they have signed are kept in the MemoSignature table
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and not status['status']
def can_unsign(self, signer=None, delegate=None):
"""Can this memo be unsigned by delegate for the signer """
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and status['status']
def can_obsolete(self, delegate=None):
""" Can this memo be obsoleted by the delegate? Only active memos can be obsoleted """
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
if self.memo_state == MemoState.Active:
return True
return False
def can_cancel(self, delegate=None):
""" can this memo be cancled by the delegate. Only drafts memos can be canceled"""
if delegate is None:
return False
if self.memo_state != MemoState.Draft:
return False
if not self.user.is_delegate(delegate=delegate):
return False
return True
def can_reject(self, signer=None, delegate=None):
""" can this memo be rejected by the delegate. Only memos in signoff can be rejected"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate):
return False
status = MemoSignature.is_signer(memo_id=self.id,signer=signer)
        # if you are a signer you can reject, even if you have already signed
return status['is_signer']
def has_access(self, user=None):
"""This function will return True of the "username" has access to self"""
        # if it is not confidential then anyone can access
        if not self.confidential:
return True
# at this point we know it is confidential so ... they must provide a username
if user is None:
return False
        # you always have access to your own memos
if self.user.username == user.username:
return True
if user.admin:
return True
if user.readAll:
return True
# if the username is in the distribution list then provide access TODO: ARH do something better
        if user.username in re.split(r'\s|\,|\t|\;|\:', self.distribution):
return True
return False
########################################
# ??? Functions
########################################
def get_fullpath(self):
""" This function gives the os path to a file """
path = os.path.join(current_app.root_path,"static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_relpath(self):
""" Return the relative path of this memo """
path = os.path.join("/static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_files(self):
""" Return a list of the files attached to this memo"""
memo_list = MemoFile.query.filter_by(memo_id=self.id).all()
return memo_list
def saveJson(self):
""" Create the JSON file which is a copy of all of the meta data """
js = {}
js['title']=self.title
js['number']=self.number
js['version']=self.version
js['confidential']=self.confidential
js['distribution']=self.distribution
js['keywords']=self.keywords
js['userid']=self.user_id
js['memo_state']=f"{self.memo_state}"
js['signers']=self.signers['signers']
js['references']= self.references['ref_string']
js['files']=[]
for file in self.get_files():
js['files'].append(file.filename)
path = os.path.join(self.get_fullpath())
#current_app.logger.info(f"Making Directory {path}")
os.makedirs(path,exist_ok=True)
#current_app.logger.info(f"Making Succeeded {path}")
path = os.path.join(path,f"meta-{self.user_id}-{self.number}-{self.version}.json")
f = open(path,"w")
json.dump(js,f)
f.close()
@property
def signers(self):
# get the signers from the signing table and turn it back to a string and a list
siglist = MemoSignature.get_signers(self)
for sig in siglist:
sig.signer = User.find(username=sig.signer_id)
sig.delegate = User.find(username=sig.delegate_id)
return {'signers':self._signers,'siglist':siglist}
@signers.setter
def signers(self,signer_names):
self._signers = signer_names
MemoSignature.delete_signers(self)
users = User.valid_usernames(signer_names)
for signer in users['valid_users']:
MemoSignature.add_signer(memo=self,signer=signer)
######################################################################
# References
######################################################################
@staticmethod
def parse_reference(reference):
parts = re.split(r'-',reference)
if len(parts) == 2:
parts.append(None)
return parts
@staticmethod
def valid_references(references):
current_app.logger.info(f'references ={references}')
valid_memos = []
valid_refs = []
invalid = []
for memo_ref in re.split(r'\s|\,|\t|\;|\:',references):
if memo_ref == '':
continue
parts = Memo.parse_reference(memo_ref)
if len(parts) > 3 or len(parts) < 2:
invalid.append(memo_ref)
current_app.logger.info(f"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}")
continue
username = parts[0]
memo_number = parts[1]
memo_version = parts[2]
memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version)
current_app.logger.info(f"Memo = {memo}")
            if memo is not None and (memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete):
valid_memos.append(memo)
valid_refs.append(memo_ref)
else:
invalid.append(memo_ref)
rval = {'valid_refs':valid_refs, 'valid_memos' : valid_memos,'invalid':invalid}
return rval
@property
def references(self):
        # this function will return a list of reference objects + a string of the references
refs = MemoReference.get_refs(self)
rval = []
for ref in refs:
userid=ref[0]
memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2])
            if ref[2] is None:
refstring=f"{userid}-{ref[1]}"
else:
refstring=f"{userid}-{ref[1]}-{ref[2]}"
rval.append((refstring,memo))
return {'reflist':rval,'ref_string':self._references}
@references.setter
def references(self,references):
self._references = references
refs = Memo.valid_references(references)
for i in range(len(refs['valid_refs'])):
parsed_ref = Memo.parse_reference(refs['valid_refs'][i])
user = User.find(username=parsed_ref[0])
MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2])
@property
def backrefs(self):
return MemoReference.get_back_refs(self)
######################################################################
#
######################################################################
def get_next_version(self):
memo = Memo.query.join(User).filter(Memo.number == self.number)\
.order_by(Memo.version.desc()).first()
current_app.logger.info(f"get_next_version {memo.id} {memo.number} {memo.version}")
if memo:
return b10_to_rev(rev_to_b10(memo.version)+1)
return b10_to_rev(1) # also known as 'A'
def save(self):
db.session.add(self)
db.session.commit()
self.saveJson()
################################################################################
# functions used to process the state
    # these functions would classically be called private
################################################################################
def obsolete_previous(self,acting=None):
prev_list = Memo.query.join(User).filter(Memo.number == self.number,Memo.version != self.version).all()
for memo in prev_list:
if memo.memo_state == MemoState.Active:
memo.memo_state = MemoState.Obsolete
MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting)
memo.save()
# This function is called when:
# 1- a valid draft is created
# 2- a signature happens
# 3- an unsign happens
def process_state(self,acting=None):
if self.memo_state == MemoState.Draft:
if MemoSignature.status(self.id) == False:
self.memo_state = MemoState.Signoff
self.submit_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting)
self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} has gone into signoff")
else:
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
if self.memo_state == MemoState.Signoff:
if MemoSignature.status(self.id):
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
else:
current_app.logger.info(f"Signatures Still Required")
self.action_date = datetime.utcnow()
self.save()
# TODO: ARH
def notify_distribution(self,message):
current_app.logger.info(F"Notify Distribution {self.distribution} {message}")
# TODO: ARH
def notify_signers(self,message):
current_app.logger.info(F"Notify signers {message}")
################################################################################
# State machine functions called by the viewcontroller
################################################################################
# Owner Function
@staticmethod
def create_revise(owner=None,delegate=None,memo_number=None):
""" This function will return None or a new Memo if the owner/delgate and revise this memo """
        assert owner is not None and delegate is not None
        if owner is None or delegate is None:
            return None
        if not owner.is_delegate(delegate):
return None
memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first()
# create a new memo (i.e. not a new version of an existing memo)
        if memo_number is None or memo is None:
memo_number = Memo.get_next_number(owner)
new_memo = Memo(number = memo_number,\
version = 'A',\
confidential = False,\
distribution = '',\
keywords = '',\
title = '',\
num_files = 0,\
user_id = owner.username,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
signers = '' )
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
current_app.logger.info(f"Creating new memo {new_memo}")
return new_memo
if memo.memo_state == MemoState.Draft:
current_app.logger.info(f"Found a draft memo {memo}")
return memo
# revise an existing memo
new_memo = Memo(number = memo_number,\
version = memo.get_next_version(),\
confidential = memo.confidential,\
distribution = memo.distribution,\
keywords = memo.keywords,\
title = memo.title,\
num_files = 0,\
user_id = memo.user_id,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
)
new_memo.save()
new_memo.references = memo.references['ref_string'] # cannot be done until there is an id assigned by the save
new_memo.signers = memo._signers # cannot be done until there is an id assigned by the save
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
return new_memo
# signer function
def sign(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_sign(signer,delegate):
current_app.logger.info("NOT!!@ allowed to sign")
return False
current_app.logger.info("allowed to sign")
MemoSignature.sign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Sign)
self.process_state(acting=delegate)
return True
# signer function
def unsign(self,signer=None,delegate=None):
if not self.can_unsign(signer,delegate):
return False
MemoSignature.unsign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign)
self.process_state(acting=delegate)
return True
# Owner Function
def obsolete(self,delegate=None):
current_app.logger.info(f"Obsolete: {self} Delegate={delegate}")
if not self.can_obsolete(delegate=delegate):
return False
self.memo_state = MemoState.Obsolete
self.action_date = datetime.utcnow()
self.obsolete_date = datetime.utcnow()
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete)
self.save()
return True
# Owner Function
def cancel(self,delegate=None):
current_app.logger.info(f"Cancel: {self} Delegate={delegate}")
memostring = f"{self}"
if not self.can_cancel(delegate=delegate):
return False
MemoFile.delete(self)
# delete all of the files in that directory & the directory
shutil.rmtree(self.get_fullpath())
MemoReference.delete(self)
MemoSignature.delete_signers(self)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel)
db.session.delete(self)
db.session.commit()
current_app.logger.info(f"Canceling")
return True
# signer function
def reject(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_reject(signer,delegate):
return False
self.memo_state = MemoState.Draft
self.action_date = datetime.utcnow()
self.submit_date = None
self.active_date = None
self.obsolete_date = None
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Reject,user=delegate)
MemoSignature.unsign_all(self)
self.save()
self.notify_signers(f"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by {delegate.username}")
return True
################################################################################
# End of State machine functions
################################################################################
@staticmethod
def find(memo_id=None,username=None,memo_number=None,memo_version=None):
        if memo_id is not None:
return Memo.query.filter_by(id=memo_id).first()
current_app.logger.debug(f"FIND: Looking for {username}/{memo_number}/{memo_version}")
memoQry = Memo.query.filter_by(user_id=username,number=memo_number)
        if memo_version is not None:
            memoQry = memoQry.filter_by(version=memo_version)
memo = memoQry.first()
current_app.logger.debug(f"Found Memo id={memo}")
return memo
@staticmethod
def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None):
if memo_version:
memo_list = Memo.query.join(User).filter(User.username==username,\
Memo.number==memo_number,\
Memo.version==memo_version)\
.paginate(page = page,per_page=pagesize)
elif memo_number:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
elif username:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
else:
memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def search(title=None,keywords=None,page=1,pagesize=None):
current_app.logger.info(f"Search title={title}")
        if title is not None:
memo_list = Memo.query.filter(Memo.title.like(f"%{title}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
        if keywords is not None:
memo_list = Memo.query.filter(Memo.keywords.like(f"%{keywords}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def get_next_number(user=None):
        assert user is not None
memo_list = Memo.query.join(User).filter(User.username==user.username)\
.order_by(Memo.number.desc()).first()
        if memo_list is None:
return 1
return memo_list.number+1
@staticmethod
def get_inbox(user=None,page=1,pagesize=None):
        assert user is not None, "User must not be none"
        if user is None:
return None
msigs = MemoSignature.get_signatures(user,signed=False)
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
current_app.logger.info(f"Inbox for {user.username} = Items={len(memolist.items)} {memolist}")
return memolist
@staticmethod
def get_drafts(user=None,page=1,pagesize=None):
        assert user is not None, "User must not be none"
        if user is None:
return None
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memolist
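# ---------------------------------------------------------------------------
# Hypothetical lifecycle sketch (not part of the original module): `owner`
# and `signer` are assumed to be existing User instances. It only chains the
# methods defined above, for illustration.
#
#   memo = Memo.create_revise(owner=owner, delegate=owner)
#   memo.title = "My first memo"
#   memo.signers = signer.username              # triggers the signers setter
#   memo.process_state(acting=owner)            # Draft -> Signoff
#   memo.sign(signer=signer, delegate=signer)   # Signoff -> Active
# ---------------------------------------------------------------------------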
| 39.367834
| 190
| 0.582049
| 2,871
| 24,723
| 4.896203
| 0.110066
| 0.019919
| 0.032013
| 0.028456
| 0.452728
| 0.393825
| 0.35605
| 0.31358
| 0.291598
| 0.255389
| 0
| 0.003271
| 0.282692
| 24,723
| 628
| 191
| 39.367834
| 0.789388
| 0.118028
| 0
| 0.336516
| 0
| 0.002387
| 0.081437
| 0.02107
| 0
| 0
| 0
| 0.001592
| 0.009547
| 1
| 0.095465
| false
| 0
| 0.0358
| 0.00716
| 0.338902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f07721893f6c50f28bc8f37736be7b92dba3a5
| 8,850
|
py
|
Python
|
juliaset/juliaset.py
|
PageotD/juliaset
|
7c1f98020eeff291fcf040cfcdf25a89e72f46a9
|
[
"BSD-3-Clause"
] | null | null | null |
juliaset/juliaset.py
|
PageotD/juliaset
|
7c1f98020eeff291fcf040cfcdf25a89e72f46a9
|
[
"BSD-3-Clause"
] | null | null | null |
juliaset/juliaset.py
|
PageotD/juliaset
|
7c1f98020eeff291fcf040cfcdf25a89e72f46a9
|
[
"BSD-3-Clause"
] | 1
|
2021-08-09T06:45:43.000Z
|
2021-08-09T06:45:43.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
class JuliaSet:
def __init__(self):
"""
Constructor of the JuliaSet class
:param size: size in pixels (for both width and height)
:param dpi: dots per inch (default 300)
"""
# Initialize image related parameters
self.size = 256
self.dpi = 300
self.norm = True
self.mirror = False
# Initialize process related parameters
self.escrad = 3
self.niter = 250
def param(self, **kwargs):
"""
Get parameters from input dictionary and set attributes.
:param kwargs: a dictionary in the form
`{'arg1':value, ..., 'argN': value}`
"""
# Check if kwargs in not empty
if kwargs is not None:
# Image related parameters
if 'size' in kwargs:
self.size = kwargs.pop('size', 256)
if 'dpi' in kwargs:
self.dpi = kwargs.pop('dpi', 300)
if 'norm' in kwargs:
self.norm = kwargs.pop('norm', True)
if 'mirror' in kwargs:
self.mirror = kwargs.pop('mirror', False)
# Process related parameters
if 'escrad' in kwargs:
self.escrad = kwargs.pop('escrad', 3)
if 'niter' in kwargs:
self.niter = kwargs.pop('niter', 250)
# If kwargs is not empty there is some invalid keywords
if kwargs:
print("{} are invalid keyword arguments!".format(kwargs.keys()))
    def run(self, show=False, fname='juliaset-output'):
"""
Run the Julia set generator
        If the `mirror` attribute is True the Julia set is mirrored
        horizontally and vertically; each mirror is concatenated
        with the original to produce a new image. If the `norm`
        attribute is True the Julia set is normalized by its
        absolute maximum value.
        :param show: if show is `False` the output image will be
            written as a PNG file named `fname`
        :param fname: name of the output PNG file to write on disk
"""
# Get a complex value among a list of best Julia sets
cpxNum = self.getComplexValue()
# Get the target area
# For more randomness, the target area is a random
# subset of a wide one defined with x[-1.5, 1.5] and
# y[-1.5, 1.5]
xrng, yrng = self.getTargetArea()
# Process
julia = self.processJulia(cpxNum, xrng, yrng)
# Normalization
if(self.norm):
julia /= np.amax(np.abs(julia))
# Mirroring
if(self.mirror):
# Horizontal mirroring and concatenate
juliamirror = np.flip(julia, axis=1)
julia = np.concatenate((julia, juliamirror), axis=1)
# Vertical mirroring and concatenate
juliamirror = np.flip(julia, axis=0)
julia = np.concatenate((julia, juliamirror), axis=0)
# Plot the output with a random colormap using matplotlib
self.plotJuliaSet(julia, show=show, fname=fname)
def getComplexValue(self):
"""
Random choice in a list of best complex values for Julia
sets (real, imag).
:return cpxNum: a semi-random complex value
"""
# Define the list of best complex values
cpxList = [
(-0.10, 0.650), (0.00, 0.80), (0.370, 0.100),
(0.355, 0.355), (-0.54, 0.54), (0.340, -0.05),
(0.37, 0.10), (0.355, 0.355)
]
# Randomly choose one
cpxTmp = random.choice(cpxList)
# Manipulate the base value slightly to make it a little more unique
cpxNum = self.twearkComplex(cpxTmp)
return cpxNum
def twearkComplex(self, cpxTmp):
"""
Manipulate the base value slightly to make it a little more unique.
:param cpxTmp: complex value to modify
        :return: a slightly manipulated version of the input
"""
# Get the signs for the imaginary parts
isign = random.randrange(-1, 1, 2)
        # Get a value variation for real and imaginary parts
        # The possible variation range is fixed at +/- 2% to stay
        # in the neighborhood of the initial value
rsigma = random.uniform(0.98, 1.02)
isigma = random.uniform(0.98, 1.02)
# Apply modification and return the new complex value
realPart = cpxTmp[0] * rsigma
imagPart = cpxTmp[1] * isigma * isign
return complex(realPart, imagPart)
def getTargetArea(self):
"""
For more randomness, the target area is a random
subset of a wide one defined with x[-1.5, 1.5] and
y[-1.5, 1.5]
:return xrng, yrng: tuples containing (xmin, xmax)
and (ymin, ymax)
"""
# Randomly choose the center of the target area
# Possible values are in [-1.0, 1.0] to stay in an
# area where there are always pieces of fractals
xctr = random.uniform(-1.0,1.0)
yctr = random.uniform(-1.0,1.0)
# Extend around the center
xrng = (xctr-0.5, xctr+0.5)
yrng = (yctr-0.5, yctr+0.5)
return xrng, yrng
def processJulia(self, cpxNum, xrng, yrng):
"""
Calculate the Julia set for the given input parameters.
:param cpxNum: complex value acting as a seed for the Julia set
:param xrng: range of values (min, max) for the x-axis
:param yrng: range of values (min, max) for the y-axis
:param escrad: escape radius
:param niter: maximum number of iterations
"""
# Initialize numpy array of dimensions (size, size) with zeros
julia = np.ones((self.size, self.size), dtype=np.float32)
# Calculate the width (equal to height) of the image since the
# image is defined as a square
width = xrng[1] - xrng[0] # xmax - xmin = ymax - ymin
# Randomly choose the sign of the shade
#ssign = random.randrange(-1, 1, 2)
ssign = -1.
# Loop over x range
for ix in range(self.size):
# Get the pixel position in the complex plane
# For the real part
realPart = float(ix) / self.size * width + xrng[0]
# Loop over y range
for iy in range(self.size):
# Get the pixel position in the complex plane
# For the imaginary part
imagPart = float(iy) / self.size * width + yrng[0]
# Build the complex
cpxTmp = complex(realPart, imagPart)
# Initialize iteration counter
it = 0
# Loop over iterations
while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter):
# Quadratic polynomial
cpxTmp = cpxTmp**2 + cpxNum
# Increment iteration counter
it += 1
                # Calculate the shade (a nice trick found somewhere on the net)
shade = 1. - np.sqrt(it/self.niter)
                # Fill the output array
julia[ix][iy] = ssign * shade
return julia
    def plotJuliaSet(self, julia, fname='juliaset-output', show=False):
"""
Plot the output Julia set and show it in matplotlib window or
write it on disk as a png file.
:param julia: the Julia set
        :param show: if show is `False` the output image will be
            written as a PNG file named `fname`
:param fname: Name of the output PNG file to write on disk
"""
# List of beautiful colormap for Julia sets
cmapList = [
cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno,
cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma
]
# Randomly chose one colormap
cmapName = random.choice(cmapList)
# Plot the image with a gaussian interpolation
fig = plt.gcf()
fig.set_size_inches(3., 3.)
plt.imshow(julia, interpolation='gaussian', cmap=cmapName)
# Disable axis
plt.axis('off')
if(show):
plt.show()
else:
# Write on disk
fig.savefig(fname+".png", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight')
def julia(**kwargs):
"""
temp
"""
# Initialize Julia Set instance
juliaInstance = JuliaSet()
# If kwargs not empty update the attributes
if kwargs is not None:
juliaInstance.param(**kwargs)
return juliaInstance
if __name__ == "__main__":
# execute only if run as a script
genJuliaSet = JuliaSet()
genJuliaSet.param()
genJuliaSet.run()
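# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module), using the
# julia() factory defined above; the keywords match those handled by
# JuliaSet.param().
#
#   fractal = julia(size=512, niter=300, mirror=True)
#   fractal.run(show=False, fname='juliaset-demo')
# ---------------------------------------------------------------------------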
| 32.417582
| 89
| 0.566328
| 1,124
| 8,850
| 4.44395
| 0.268683
| 0.008008
| 0.014414
| 0.003203
| 0.204605
| 0.181381
| 0.151752
| 0.141742
| 0.122122
| 0.122122
| 0
| 0.027298
| 0.345989
| 8,850
| 272
| 90
| 32.536765
| 0.835695
| 0.418757
| 0
| 0.019802
| 0
| 0
| 0.03162
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089109
| false
| 0
| 0.039604
| 0
| 0.188119
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f0877f437e0d2341e2d9c4fb9323bda9c076fe
| 1,212
|
py
|
Python
|
eye_detection.py
|
ShivanS93/VAtest_withOKN
|
8da76f4c3ff526c9e16268194accfdc6221b0a66
|
[
"MIT"
] | null | null | null |
eye_detection.py
|
ShivanS93/VAtest_withOKN
|
8da76f4c3ff526c9e16268194accfdc6221b0a66
|
[
"MIT"
] | null | null | null |
eye_detection.py
|
ShivanS93/VAtest_withOKN
|
8da76f4c3ff526c9e16268194accfdc6221b0a66
|
[
"MIT"
] | null | null | null |
#!python3
# eye_detection.py - detect eyes using webcam
# tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/
import cv2
import math
import numpy as np
def main():
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
eyeCascade = cv2.CascadeClassifier("haarcascade_eye.xml")
# grab the reference to the webcam
vs = cv2.VideoCapture(0)
print(vs)
while True:
ret, frame = vs.read()
if frame is None:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = faceCascade.detectMultiScale(frame)
for (x, y, w, h) in faces:
roi_gray = gray[y : y + h, x : x + w]
roi_color = frame[y : y + h, x : x + w]
eyes = eyeCascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)
cv2.imshow("Video", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q") or key == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 24.734694
| 86
| 0.575908
| 158
| 1,212
| 4.310127
| 0.525316
| 0.035242
| 0.091043
| 0.011747
| 0.017621
| 0.017621
| 0
| 0
| 0
| 0
| 0
| 0.029481
| 0.30033
| 1,212
| 48
| 87
| 25.25
| 0.773585
| 0.143564
| 0
| 0.071429
| 0
| 0
| 0.061955
| 0.03001
| 0
| 0
| 0.003872
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.107143
| 0
| 0.142857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f19d8269d91051babd1a81669ee8409fe871bc
| 1,328
|
py
|
Python
|
demo/cnn_predict.py
|
huynhtnhut97/keras-video-classifier
|
3ea6a8d671f3bd3cc8eddef64ad75abc2a2d593a
|
[
"MIT"
] | 108
|
2018-03-01T10:03:22.000Z
|
2022-03-27T03:00:30.000Z
|
demo/cnn_predict.py
|
drsagitn/lstm-video-classifier
|
3d1bce6773e493bdff5d623883d47ca68d45e890
|
[
"MIT"
] | 18
|
2020-01-28T22:13:07.000Z
|
2022-03-11T23:54:10.000Z
|
demo/cnn_predict.py
|
drsagitn/lstm-video-classifier
|
3d1bce6773e493bdff5d623883d47ca68d45e890
|
[
"MIT"
] | 56
|
2018-03-01T10:03:22.000Z
|
2022-02-23T08:19:10.000Z
|
import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main()
| 30.883721
| 110
| 0.758283
| 191
| 1,328
| 4.848168
| 0.350785
| 0.12959
| 0.084233
| 0.055076
| 0.110151
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007086
| 0.149849
| 1,328
| 43
| 111
| 30.883721
| 0.813109
| 0
| 0
| 0
| 0
| 0
| 0.045899
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0.035714
| 0.321429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f3e8f2399f57967d9da67546eaf7a9b7b31fb7
| 2,139
|
py
|
Python
|
backend/src/baserow/api/user/registries.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | 1
|
2022-01-24T15:12:02.000Z
|
2022-01-24T15:12:02.000Z
|
backend/src/baserow/api/user/registries.py
|
rasata/baserow
|
c6e1d7842c53f801e1c96b49f1377da2a06afaa9
|
[
"MIT"
] | null | null | null |
backend/src/baserow/api/user/registries.py
|
rasata/baserow
|
c6e1d7842c53f801e1c96b49f1377da2a06afaa9
|
[
"MIT"
] | null | null | null |
from baserow.core.registry import Instance, Registry
class UserDataType(Instance):
"""
The user data type can be used to inject an additional payload to the API
    JWT response. This is the response when a user authenticates or refreshes a
token. The returned dict of the `get_user_data` method is added to the payload
under the key containing the type name.
Example:
class TestUserDataType(UserDataType):
type = "test"
def get_user_data(user, request):
return {"test": "value"}
user_data_registry.register(TestUserDataType())
    Will result in the following response when the user authenticates:
{
"token": "eyJ....",
"user: {
"id": 1,
...
},
"test": {
"test": "value"
}
}
"""
def get_user_data(self, user, request) -> dict:
"""
Should return a dict containing the additional information that must be added
to the response payload after the user authenticates.
:param user: The related user that just authenticated.
:type user: User
:param request: The request when the user authenticated.
:type request: Request
:return: a dict containing the user data that must be added to the response.
"""
raise NotImplementedError(
"The get_user_data must be implemented and should return a dict."
)
class UserDataRegistry(Registry):
name = "api_user_data"
def get_all_user_data(self, user, request) -> dict:
"""
Collects the additional user data of all the registered user data type
instances.
:param user: The user that just authenticated.
:type user: User
:param request: The request when the user authenticated.
:type request: Request
:return: a dict containing all additional user data payload for all the
registered instances.
"""
return {
key: value.get_user_data(user, request)
for key, value in self.registry.items()
}
user_data_registry = UserDataRegistry()
| 28.905405
| 85
| 0.633006
| 259
| 2,139
| 5.15444
| 0.305019
| 0.083895
| 0.041199
| 0.047191
| 0.318352
| 0.265169
| 0.224719
| 0.182772
| 0.182772
| 0.182772
| 0
| 0.000663
| 0.29453
| 2,139
| 73
| 86
| 29.30137
| 0.884029
| 0.614306
| 0
| 0
| 0
| 0
| 0.131261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f4958fe557f64425c53fe4dff977306ba95b20
| 17,197
|
py
|
Python
|
Week 2/code.py
|
aklsh/EE2703
|
546b70c9adac4a4de294d83affbb74e480c2f65d
|
[
"MIT"
] | null | null | null |
Week 2/code.py
|
aklsh/EE2703
|
546b70c9adac4a4de294d83affbb74e480c2f65d
|
[
"MIT"
] | null | null | null |
Week 2/code.py
|
aklsh/EE2703
|
546b70c9adac4a4de294d83affbb74e480c2f65d
|
[
"MIT"
] | 3
|
2020-07-15T08:02:05.000Z
|
2021-03-07T06:50:07.000Z
|
'''
-------------------------------------
Assignment 2 - EE2703 (Jan-May 2020)
Done by Akilesh Kannan (EE18B122)
Created on 18/01/20
Last Modified on 04/02/20
-------------------------------------
'''
# importing necessary libraries
import sys
import cmath
import numpy as np
import pandas as pd
# To improve readability
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"
ICS = "I"
VCVS = "E"
VCCS = "G"
CCVS = "H"
CCCS = "F"
PI = np.pi
# Classes for each circuit component
class resistor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class inductor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class capacitor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class voltageSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class currentSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class vcvs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class vccs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class ccvs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
class cccs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
# Convert a number in engineer's format to math
def enggToMath(enggNumber):
try:
return float(enggNumber)
except ValueError:
lenEnggNumber = len(enggNumber)
# Kilo
if enggNumber[lenEnggNumber-1] == 'k':
base = float(enggNumber[0:lenEnggNumber-1])
return base*1e3
# Milli
elif enggNumber[lenEnggNumber-1] == 'm':
base = float(enggNumber[0:lenEnggNumber-1])
return base*1e-3
# Micro
elif enggNumber[lenEnggNumber-1] == 'u':
base = float(enggNumber[0:lenEnggNumber-1])
return base*1e-6
# Nano
elif enggNumber[lenEnggNumber-1] == 'n':
base = float(enggNumber[0:lenEnggNumber-1])
return base*1e-9
# Mega
elif enggNumber[lenEnggNumber-1] == 'M':
base = float(enggNumber[0:lenEnggNumber-1])
return base*1e6
else:
sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000).")
if __name__ == "__main__":
# checking number of command line arguments
if len(sys.argv)!=2 :
sys.exit("Invalid number of arguments!")
else:
try:
circuitFile = sys.argv[1]
circuitFreq = 1e-100
circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
circuitNodes = []
# checking if given netlist file is of correct type
if (not circuitFile.endswith(".netlist")):
print("Wrong file type!")
else:
netlistFileLines = []
with open (circuitFile, "r") as f:
for line in f.readlines():
netlistFileLines.append(line.split('#')[0].split('\n')[0])
# Getting frequency, if any
if(line[:3] == '.ac'):
circuitFreq = float(line.split()[2])
# Setting Angular Frequency w
w = 2*PI*circuitFreq
try:
# Finding the location of the identifiers
identifier1 = netlistFileLines.index(CIRCUIT_START)
identifier2 = netlistFileLines.index(CIRCUIT_END)
circuitBody = netlistFileLines[identifier1+1:identifier2]
for line in circuitBody:
# Extracting the data from the line
lineTokens = line.split()
# Appending new nodes to a list
try:
if lineTokens[1] not in circuitNodes:
circuitNodes.append(lineTokens[1])
if lineTokens[2] not in circuitNodes:
circuitNodes.append(lineTokens[2])
except IndexError:
continue
# Resistor
if lineTokens[0][0] == RESISTOR:
circuitComponents[RESISTOR].append(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Capacitor
elif lineTokens[0][0] == CAPACITOR:
circuitComponents[CAPACITOR].append(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Inductor
elif lineTokens[0][0] == INDUCTOR:
circuitComponents[INDUCTOR].append(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Voltage Source
elif lineTokens[0][0] == IVS:
if len(lineTokens) == 5: # DC Source
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# Current Source
elif lineTokens[0][0] == ICS:
if len(lineTokens) == 5: # DC Source
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# VCVS
elif lineTokens[0][0] == VCVS:
circuitComponents[VCVS].append(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# VCCS
elif lineTokens[0][0] == VCCS:
circuitComponents[VCCS].append(vccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# CCVS
elif lineTokens[0][0] == CCVS:
circuitComponents[CCVS].append(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# CCCS
elif lineTokens[0][0] == CCCS:
circuitComponents[CCCS].append(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# Erroneous Component Name
else:
sys.exit("Wrong Component Given. ABORT!")
try:
circuitNodes.remove('GND')
circuitNodes = ['GND'] + circuitNodes
except ValueError:
sys.exit("No ground node specified in the circuit!!")
# Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
nodeNumbers = {circuitNodes[i]:i for i in range(len(circuitNodes))}
numNodes = len(circuitNodes)
numVS = len(circuitComponents[IVS])+len(circuitComponents[VCVS])+len(circuitComponents[CCVS])
# Creating Matrices M and b
matrixM = np.zeros((numNodes+numVS, numNodes+numVS), np.complex128)
matrixB = np.zeros((numNodes+numVS,), np.complex128)
# GND Equation
matrixM[0][0] = 1.0
# Resistor Equations
for r in circuitComponents[RESISTOR]:
if r.node1 != 'GND':
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
if r.node2 != 'GND':
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value
# Capacitor Equations
for c in circuitComponents[CAPACITOR]:
if c.node1 != 'GND':
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
if c.node2 != 'GND':
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)
# Inductor Equations
for l in circuitComponents[INDUCTOR]:
if l.node1 != 'GND':
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
if l.node2 != 'GND':
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))
# Voltage Source Equations
for i in range(len(circuitComponents[IVS])):
# Equation accounting for current through the source
if circuitComponents[IVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
if circuitComponents[IVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
# Auxiliary Equations
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)
# Current Source Equations
for i in circuitComponents[ICS]:
if i.node1 != 'GND':
matrixB[nodeNumbers[i.node1]] -= i.value
if i.node2 != 'GND':
matrixB[nodeNumbers[i.node2]] += i.value
# VCVS Equations
for i in range(len(circuitComponents[VCVS])):
# Equation accounting for current through the source
if circuitComponents[VCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+len(circuitComponents[IVS])+i] = 1.0
if circuitComponents[VCVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+len(circuitComponents[IVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value
# CCVS Equations
for i in range(len(circuitComponents[CCVS])):
# Equation accounting for current through the source
if circuitComponents[CCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = 1.0
if circuitComponents[CCVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[CCVS][i].node2]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value
# VCCS Equations
for vccs in circuitComponents[VCCS]:
if vccs.node1 != 'GND':
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
if vccs.node2 != 'GND':
matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node3]]+=vccs.value
# CCCS Equations
for cccs in circuitComponents[CCCS]:
def getIndexIVS(vName):
for i in range(len(circuitComponents[IVS])):
if circuitComponents[IVS][i].name == vName:
return i
if cccs.node1 != 'GND':
matrixM[nodeNumbers[cccs.node1]][numNodes+getIndexIVS(cccs.vSource)]-=cccs.value
if cccs.node2 != 'GND':
matrixM[nodeNumbers[cccs.node2]][numNodes+getIndexIVS(cccs.vSource)]+=cccs.value
try:
x = np.linalg.solve(matrixM, matrixB)
circuitCurrents = []
# Formatting Output Data
for v in circuitComponents[IVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[VCVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[CCVS]:
circuitCurrents.append("current in "+v.name)
# Printing output in table format
print(pd.DataFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
print("The values given above are AMPLITUDE values and NOT RMS values.")
except np.linalg.LinAlgError:
sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
except ValueError:
sys.exit("Netlist does not abide to given format!")
except FileNotFoundError:
sys.exit("Given file does not exist!")
| 53.241486
| 209
| 0.51823
| 1,700
| 17,197
| 5.214118
| 0.151176
| 0.060921
| 0.047157
| 0.041968
| 0.581115
| 0.562049
| 0.483529
| 0.446074
| 0.407604
| 0.378384
| 0
| 0.032548
| 0.365761
| 17,197
| 322
| 210
| 53.406832
| 0.780141
| 0.075536
| 0
| 0.338521
| 0
| 0.003891
| 0.04532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042802
| false
| 0
| 0.015564
| 0
| 0.120623
| 0.011673
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f530cec67d3e933cfc6fd5269d65218a8b2c49
| 880
|
py
|
Python
|
Lib/Co.py
|
M507/Guessing-passwords-using-machine-learning
|
da90cfa30ce2e7a5e08ee528f594fa047ecea75c
|
[
"Apache-2.0"
] | 6
|
2020-05-18T14:20:23.000Z
|
2021-04-23T16:31:34.000Z
|
Lib/Co.py
|
M507/Guessing-passwords-using-machine-learning
|
da90cfa30ce2e7a5e08ee528f594fa047ecea75c
|
[
"Apache-2.0"
] | null | null | null |
Lib/Co.py
|
M507/Guessing-passwords-using-machine-learning
|
da90cfa30ce2e7a5e08ee528f594fa047ecea75c
|
[
"Apache-2.0"
] | 1
|
2020-05-18T21:19:52.000Z
|
2020-05-18T21:19:52.000Z
|
import subprocess
import os.path
"""
Stylish input()
"""
def s_input(string):
return input(string+">").strip("\n")
"""
Execute command locally
"""
def execute_command(command):
if len(command) > 0:
print(command)
proc = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, cwd="/tmp")
return proc
"""
Get all subdirectories of a directory.
"""
def getSubs(dirname):
dirs = [d for d in os.listdir(dirname) if os.path.isdir(os.path.join(dirname, d))]
# subdirectories = [dirname + "/" + subDirName for subDirName in subdirectories]
subdirectories = []
for dir in dirs:
subdirectories.append(dirname + '/' + dir)
return subdirectories
"""
Rocket science
"""
def answer(string):
a = input(string)
if a == "Y" or a == 'y' or a == 'Yes' or a == 'yes':
return True
else:
return False
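A brief usage sketch of the helpers above (the command and directory are hypothetical; note that execute_command returns None for an empty command):

cmd = s_input("command to run")          # prompts with a trailing '>'
if answer("Execute it? (y/n) ") and len(cmd) > 0:
    proc = execute_command(cmd)          # runs in /tmp, stdout captured
    print(proc.stdout.read().decode())
for sub in getSubs("/tmp"):              # list immediate subdirectories
    print(sub)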
| 20.952381
| 87
| 0.619318
| 110
| 880
| 4.936364
| 0.481818
| 0.033149
| 0.014733
| 0.018416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001477
| 0.230682
| 880
| 41
| 88
| 21.463415
| 0.800591
| 0.088636
| 0
| 0
| 0
| 0
| 0.025074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.095238
| 0.047619
| 0.52381
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f661785147d1c962908ad8a5f0840e9e70661d
| 446
|
py
|
Python
|
project3_code/part_0/main.py
|
rachelbrown347/CS294-26_code
|
72a20a9ab75345091d2a743b13857d7a88adf9be
|
[
"MIT"
] | 1
|
2022-03-12T00:55:52.000Z
|
2022-03-12T00:55:52.000Z
|
project3_code/part_0/main.py
|
rachelbrown347/CS294-26_code
|
72a20a9ab75345091d2a743b13857d7a88adf9be
|
[
"MIT"
] | null | null | null |
project3_code/part_0/main.py
|
rachelbrown347/CS294-26_code
|
72a20a9ab75345091d2a743b13857d7a88adf9be
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from skimage.exposure import rescale_intensity
from unsharp import *
# Load file and normalize to 0-1
fname = 'iguana.jpg'
im = plt.imread(fname)
if im.mean() >= 1:
im = im/255.
sigma = 5
amplitude = 1.5
imsharp = unsharp_mask(im, sigma, amplitude)
imsharp = rescale_intensity(imsharp, in_range=(0, 1), out_range=(0,1))
new_fname = fname[:-4]+'_sharp.jpg'
plt.imsave(new_fname, imsharp)
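The `unsharp` module itself is not part of this snippet; a minimal stand-in consistent with the call above would be the classic Gaussian unsharp mask, sharp = image + amplitude * (image - blurred) (an assumption about the missing module, not its actual contents):

from scipy.ndimage import gaussian_filter

def unsharp_mask(im, sigma, amplitude):
    # Blur the spatial axes only; leave the colour channel axis untouched.
    blurred = gaussian_filter(im, sigma=(sigma, sigma, 0))
    return im + amplitude * (im - blurred)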
| 23.473684
| 70
| 0.726457
| 73
| 446
| 4.328767
| 0.547945
| 0.018987
| 0.044304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036842
| 0.147982
| 446
| 19
| 71
| 23.473684
| 0.794737
| 0.067265
| 0
| 0
| 0
| 0
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f7e1041ab1f4accc4c1f71bcc457ad4e75b7b3
| 6,672
|
py
|
Python
|
tools/lucid/engine.py
|
Petr-By/qtpyvis
|
0b9a151ee6b9a56b486c2bece9c1f03414629efc
|
[
"MIT"
] | 3
|
2017-10-04T14:51:26.000Z
|
2017-10-22T09:35:50.000Z
|
tools/lucid/engine.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | 13
|
2017-09-05T12:56:11.000Z
|
2017-11-22T10:38:27.000Z
|
tools/lucid/engine.py
|
CogSciUOS/DeepLearningToolbox
|
bf07578b9486d8c48e25df357bc4b9963b513b46
|
[
"MIT"
] | 2
|
2017-09-24T21:39:42.000Z
|
2017-10-04T15:29:54.000Z
|
import logging
logger = logging.getLogger(__name__)
print(f"!!!!!!!!!! getEffectiveLevel: {logger.getEffectiveLevel()} !!!!!!!!!!!!!")
from dltb.base.observer import Observable, change
from network import Network, loader
from network.lucid import Network as LucidNetwork
# lucid.modelzoo.vision_models:
# A module providing the pretrained networks by name, e.g.
# models.AlexNet
import lucid.modelzoo.vision_models as models
import lucid.modelzoo.nets_factory as nets
from lucid.modelzoo.vision_base import Model as LucidModel
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
class Engine(Observable, method='engine_changed',
changes={'engine_changed', 'model_changed', 'unit_changed'}):
"""The Engine is a wrapper around the lucid module.
Attributes
----------
_network: LucidNetwork
The currently selected lucid network. None if no model
is selected.
_model: LucidModel
The currently selected lucid model. None if no model is
selected.
"""
def __init__(self):
super().__init__()
self._network = None
self._model = None
self._layer = None
self._unit = None
self.image = None
self.running = False
@property
def model(self) -> LucidModel:
"""The currently selected lucid model. None if no model is
selected.
"""
return self._model
@property
def model_name(self) -> str:
"""The name of the currently selected lucid model. None if
no model is selected.
"""
return None if self._network is None else self._network.name
@change
def load_model(self, name: str) -> LucidModel:
"""Load the Lucid model with the given name.
Returns
-------
model: LucidModel
A reference to the LucidModel.
"""
logger.info(f"load_model({name})")
try:
#self._network = LucidNetwork(name=name)
self._network = loader.load_lucid(name)
self._model = self._network.model
except KeyError as e:
self._network = None
self._model = None
logger.info(f"NAME={name}/{self.model_name} : {self._model}")
self._layer = None
self._unit = None
self.change(model_changed=True, unit_changed=True)
return self._model
@change
def set_layer(self, name: str, unit: int=0) -> None:
"""Set the currently selected layer.
Arguments
---------
name: str
The name of the layer.
unit: int
The index of the unit in the layer.
"""
if name == self.layer:
return
if self._model is None:
return
try:
self._layer = next(x for x in self._model.layers
if x['name'] == name)
self._unit = unit
except StopIteration: # name not in layer list
self._layer = None
self._unit = None
self.change(unit_changed=True)
@property
def layer(self) -> str:
"""The name of the currently selected layer.
"""
return None if self._layer is None else self._layer['name']
@layer.setter
def layer(self, name: str) -> None:
"""Set the currently selected layer.
"""
self.set_layer(name)
@property
def layer_type(self) -> str:
"""The type of the currently selected layer.
"""
return None if self._layer is None else self._layer['type']
@property
def layer_units(self) -> int:
"""The number of units in the currently selected layer.
"""
return None if self._layer is None else self._layer['size']
@change
def _set_unit(self, unit: int) -> None:
if unit == self.unit:
return
if unit is None:
self._unit = None
self.change(unit_changed=True)
elif self._layer is None:
raise ValueError('Setting unit failed as no layer is selected')
elif not 0 <= unit < self._layer['size']:
raise ValueError(f"Invalid unit {unit} for current layer"
f" of size {self._layer['size']}")
else:
self._unit = unit
self.change(unit_changed=True)
@property
def unit(self) -> int:
"""The index of the currently selected unit or None if no
unit is selected.
"""
return None if self._unit is None else self._unit
@unit.setter
def unit(self, unit: int) -> None:
"""The index of the currently selected unit or None if no
unit is selected.
"""
self._set_unit(unit)
@property
def layer_id(self) -> str:
"""The id of the currently selected layer or None if no
unit is selected.
"""
if self._layer is None:
return None
if self._layer['type'] == 'conv':
return self._layer['name'] + '_pre_relu'
return self._layer['name']
@property
def unit_id(self) -> str:
"""The id of the currently selected unit or None if no
unit is selected.
"""
return (None if self._layer is None
else self.layer_id + ':' + str(self._unit))
def _doRun(self, running: bool=True) -> None:
self.running = running
self.notify_observers(EngineChange(engine_changed=True))
def start(self):
self.image = None
self._doRun(True)
obj = objectives.channel(self.layer_id, self.unit)
self.image = render.render_vis(self.model, obj)
#self.image = render.render_vis(self.model, self.unit_id)
self._doRun(False)
def stop(self):
self._doRun(False)
def start_multi(self):
self.image = None
self._doRun(True)
logger.info("!!! running all:")
for unit in range(self.layer_units):
self.unit = unit
self.notify_observers(EngineChange(unit_changed=True))
logger.info(f"!!! running unit {unit}")
obj = objectives.channel(self.layer_id, unit)
self.image = render.render_vis(self.model, obj)
if not self.running:
break
self._doRun(True)
self._doRun(False)
# FIXME[old]: this is to make old code happy. New code should use
# Engine.Change and Engine.Observer directly.
EngineChange = Engine.Change
EngineObserver = Engine.Observer
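A usage sketch of the Engine above (the model and layer names are illustrative; real names come from lucid's model zoo and the loaded model's layer list):

engine = Engine()
engine.load_model("AlexNet")         # any name the loader recognises
engine.set_layer("conv2d0", unit=0)  # hypothetical layer name
engine.start()                       # runs render_vis for the selected unit
result = engine.image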
| 30.190045
| 82
| 0.590528
| 821
| 6,672
| 4.669915
| 0.16687
| 0.053991
| 0.067814
| 0.040167
| 0.370631
| 0.340636
| 0.284559
| 0.23422
| 0.214919
| 0.159624
| 0
| 0.000435
| 0.310402
| 6,672
| 220
| 83
| 30.327273
| 0.832862
| 0.22482
| 0
| 0.351563
| 0
| 0
| 0.078839
| 0.016056
| 0
| 0
| 0
| 0.004545
| 0
| 1
| 0.140625
| false
| 0
| 0.085938
| 0
| 0.34375
| 0.007813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f8485704c303133a8ffd7f513a5c4076214c94
| 87,649
|
py
|
Python
|
synapse/storage/events.py
|
natamelo/synapse
|
3d870ecfc5353e455917166cb5c2bb8ba48a6ebd
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/events.py
|
natamelo/synapse
|
3d870ecfc5353e455917166cb5c2bb8ba48a6ebd
|
[
"Apache-2.0"
] | null | null | null |
synapse/storage/events.py
|
natamelo/synapse
|
3d870ecfc5353e455917166cb5c2bb8ba48a6ebd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
from functools import wraps
from six import iteritems, text_type
from six.moves import range
from canonicaljson import json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics import BucketCollector
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
["type", "origin_type", "origin_entity"],
)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
def encode_json(json_object):
"""
Encode a Python object as JSON and return it in a Unicode string.
"""
out = frozendict_json_encoder.encode(json_object)
if isinstance(out, bytes):
out = out.decode("utf8")
return out
class _EventPeristenceQueue(object):
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
_EventPersistQueueItem = namedtuple(
"_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
)
def __init__(self):
self._event_persist_queues = {}
self._currently_persisting_rooms = set()
def add_to_queue(self, room_id, events_and_contexts, backfilled):
"""Add events to the queue, with the given persist_event options.
NB: due to the normal usage pattern of this method, it does *not*
follow the synapse logcontext rules, and leaves the logcontext in
place whether or not the returned deferred is ready.
Args:
room_id (str):
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
Returns:
defer.Deferred: a deferred which will resolve once the events are
persisted. Runs its callbacks *without* a logcontext.
"""
queue = self._event_persist_queues.setdefault(room_id, deque())
if queue:
# if the last item in the queue has the same `backfilled` setting,
# we can just add these new events to that item.
end_item = queue[-1]
if end_item.backfilled == backfilled:
end_item.events_and_contexts.extend(events_and_contexts)
return end_item.deferred.observe()
deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
queue.append(
self._EventPersistQueueItem(
events_and_contexts=events_and_contexts,
backfilled=backfilled,
deferred=deferred,
)
)
return deferred.observe()
def handle_queue(self, room_id, per_item_callback):
"""Attempts to handle the queue for a room if not already being handled.
The given callback will be invoked for each item in the queue,
of type _EventPersistQueueItem. The per_item_callback will continuously
be called with new items, unless the queue becomes empty. The return
value of the function will be given to the deferreds waiting on the item,
exceptions will be passed to the deferreds as well.
This function should therefore be called whenever anything is added
to the queue.
If another callback is currently handling the queue then it will not be
invoked.
"""
if room_id in self._currently_persisting_rooms:
return
self._currently_persisting_rooms.add(room_id)
@defer.inlineCallbacks
def handle_queue_loop():
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
ret = yield per_item_callback(item)
except Exception:
with PreserveLoggingContext():
item.deferred.errback()
else:
with PreserveLoggingContext():
item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
# set handle_queue_loop off in the background
run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
try:
while True:
yield queue.popleft()
except IndexError:
# Queue has been drained.
pass
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
def _retry_on_integrity_error(func):
"""Wraps a database function so that it gets retried on IntegrityError,
with `delete_existing=True` passed in.
Args:
func: function that returns a Deferred and accepts a `delete_existing` arg
"""
@wraps(func)
@defer.inlineCallbacks
def f(self, *args, **kwargs):
try:
res = yield func(self, *args, **kwargs)
except self.database_engine.module.IntegrityError:
logger.exception("IntegrityError, retrying.")
res = yield func(self, *args, delete_existing=True, **kwargs)
defer.returnValue(res)
return f
# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
class EventsStore(
StateGroupWorkerStore,
EventFederationStore,
EventsWorkerStore,
BackgroundUpdateStore,
):
def __init__(self, db_conn, hs):
super(EventsStore, self).__init__(db_conn, hs)
self._event_persist_queue = _EventPeristenceQueue()
self._state_resolution_handler = hs.get_state_resolution_handler()
# Collect metrics on the number of forward extremities that exist.
# Counter of number of extremities to count
self._current_forward_extremities_amount = c_counter()
BucketCollector(
"synapse_forward_extremities",
lambda: self._current_forward_extremities_amount,
buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"],
)
# Read the extrems every 60 minutes
def read_forward_extremities():
# run as a background process to make sure that the database transactions
# have a logcontext to report to
return run_as_background_process(
"read_forward_extremities", self._read_forward_extremities
)
hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000)
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
txn.execute(
"""
select count(*) c from event_forward_extremities
group by room_id
"""
)
return txn.fetchall()
res = yield self.runInteraction("read_forward_extremities", fetch)
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False):
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
backfilled (bool): Whether the results are retrieved from federation
via backfill or not. Used to determine if they're "new" events
which might update the current state etc.
Returns:
Deferred[int]: the stream ordering of the latest persisted event
"""
partitioned = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
deferreds = []
for room_id, evs_ctxs in iteritems(partitioned):
d = self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled
)
deferreds.append(d)
for room_id in partitioned:
self._maybe_start_persisting(room_id)
yield make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue(max_persisted_id)
@defer.inlineCallbacks
@log_function
def persist_event(self, event, context, backfilled=False):
"""
Args:
event (EventBase):
context (EventContext):
backfilled (bool):
Returns:
Deferred: resolves to (int, int): the stream ordering of ``event``,
and the stream ordering of the latest persisted event
"""
deferred = self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled
)
self._maybe_start_persisting(event.room_id)
yield make_deferred_yieldable(deferred)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
def _maybe_start_persisting(self, room_id):
@defer.inlineCallbacks
def persisting_queue(item):
with Measure(self._clock, "persist_events"):
yield self._persist_events(
item.events_and_contexts, backfilled=item.backfilled
)
self._event_persist_queue.handle_queue(room_id, persisting_queue)
@_retry_on_integrity_error
@defer.inlineCallbacks
def _persist_events(
self, events_and_contexts, backfilled=False, delete_existing=False
):
"""Persist events to db
Args:
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
delete_existing (bool):
Returns:
Deferred: resolves when the events have been persisted
"""
if not events_and_contexts:
return
if backfilled:
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
len(events_and_contexts)
)
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)
with stream_ordering_manager as stream_orderings:
for (event, context), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
chunks = [
events_and_contexts[x : x + 100]
for x in range(0, len(events_and_contexts), 100)
]
for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
# NB: Assumes that we are only persisting events for one room
# at a time.
# map room_id->list[event_ids] giving the new forward
# extremities in each room
new_forward_extremeties = {}
# map room_id->(type,state_key)->event_id tracking the full
# state in each room after adding these events.
# This is simply used to prefill the get_current_state_ids
# cache
current_state_for_room = {}
# map room_id->(to_delete, to_insert) where to_delete is a list
# of type/state keys to remove from current state, and to_insert
# is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room = {}
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
events_by_room = {}
for event, context in chunk:
events_by_room.setdefault(event.room_id, []).append(
(event, context)
)
for room_id, ev_ctx_rm in iteritems(events_by_room):
latest_event_ids = yield self.get_latest_event_ids_in_room(
room_id
)
new_latest_event_ids = yield self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
latest_event_ids = set(latest_event_ids)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
len(latest_event_ids) == 1
and len(new_latest_event_ids) == 1
)
if len_1:
all_single_prev_not_state = all(
len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
# Don't bother calculating state if they're just
# a long chain of single ancestor non-state events.
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.info("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
res = yield self._get_new_state_after_events(
room_id,
ev_ctx_rm,
latest_event_ids,
new_latest_event_ids,
)
current_state, delta_ids = res
# If either are not None then there has been a change,
# and we need to work out the delta (or use that
# given)
if delta_ids is not None:
# If there is a delta we know that we've
# only added or replaced state, never
# removed keys entirely.
state_delta_for_room[room_id] = ([], delta_ids)
elif current_state is not None:
with Measure(
self._clock, "persist_events.calculate_state_delta"
):
delta = yield self._calculate_state_delta(
room_id, current_state
)
state_delta_for_room[room_id] = delta
# If we have the current_state then lets prefill
# the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
yield self.runInteraction(
"persist_events",
self._persist_events_txn,
events_and_contexts=chunk,
backfilled=backfilled,
delete_existing=delete_existing,
state_delta_for_room=state_delta_for_room,
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc(len(chunk))
if not backfilled:
# backfilled events have negative stream orderings, so we don't
# want to set the event_persisted_position to that.
synapse.metrics.event_persisted_position.set(
chunk[-1][0].internal_metadata.stream_ordering
)
for event, context in chunk:
if context.app_service:
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
origin_entity = get_domain_from_id(event.sender)
event_counter.labels(event.type, origin_type, origin_entity).inc()
for room_id, new_state in iteritems(current_state_for_room):
self.get_current_state_ids.prefill((room_id,), new_state)
for room_id, latest_event_ids in iteritems(new_forward_extremeties):
self.get_latest_event_ids_in_room.prefill(
(room_id,), list(latest_event_ids)
)
@defer.inlineCallbacks
def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
"""Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
# we're only interested in new events which aren't outliers and which aren't
# being rejected.
new_events = [
event
for event, ctx in event_contexts
if not event.internal_metadata.is_outlier()
and not ctx.rejected
and not event.internal_metadata.is_soft_failed()
]
latest_event_ids = set(latest_event_ids)
# start with the existing forward extremities
result = set(latest_event_ids)
# add all the new events to the list
result.update(event.event_id for event in new_events)
# Now remove all events which are prev_events of any of the new events
result.difference_update(
e_id for event in new_events for e_id in event.prev_event_ids()
)
# Remove any events which are prev_events of any existing events.
existing_prevs = yield self._get_events_which_are_prevs(result)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev
# events. If they do we need to remove them and their prev events,
# otherwise we end up with dangling extremities.
existing_prevs = yield self._get_prevs_before_rejected(
e_id for event in new_events for e_id in event.prev_event_ids()
)
result.difference_update(existing_prevs)
# We only update metrics for events that change forward extremities
# (e.g. we ignore backfill/outliers/etc)
if result != latest_event_ids:
forward_extremities_counter.observe(len(result))
stale = latest_event_ids & result
stale_forward_extremities_counter.observe(len(stale))
defer.returnValue(result)
@defer.inlineCallbacks
def _get_events_which_are_prevs(self, event_ids):
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
Args:
event_ids (Iterable[str]): event ids to filter
Returns:
Deferred[List[str]]: filtered event ids
"""
results = []
def _get_events_which_are_prevs_txn(txn, batch):
sql = """
SELECT prev_event_id, internal_metadata
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
prev_event_id IN (%s)
AND NOT events.outlier
AND rejections.event_id IS NULL
""" % (
",".join("?" for _ in batch),
)
txn.execute(sql, batch)
results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
)
defer.returnValue(results)
@defer.inlineCallbacks
def _get_prevs_before_rejected(self, event_ids):
"""Get soft-failed ancestors to remove from the extremities.
Given a set of events, find all those that have been soft-failed or
rejected. Returns those soft failed/rejected events and their prev
events (whether soft-failed/rejected or not), and recurses up the
prev-event graph until it finds no more soft-failed/rejected events.
This is used to find extremities that are ancestors of new events, but
are separated by soft failed events.
Args:
event_ids (Iterable[str]): Events to find prev events for. Note
that these must have already been persisted.
Returns:
Deferred[set[str]]
"""
# The set of event_ids to return. This includes all soft-failed events
# and their prev events.
existing_prevs = set()
def _get_prevs_before_rejected_txn(txn, batch):
to_recursively_check = batch
while to_recursively_check:
sql = """
SELECT
event_id, prev_event_id, internal_metadata,
rejections.event_id IS NOT NULL
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
event_id IN (%s)
AND NOT events.outlier
""" % (
",".join("?" for _ in to_recursively_check),
)
txn.execute(sql, to_recursively_check)
to_recursively_check = []
for event_id, prev_event_id, metadata, rejected in txn:
if prev_event_id in existing_prevs:
continue
soft_failed = json.loads(metadata).get("soft_failed")
if soft_failed or rejected:
to_recursively_check.append(prev_event_id)
existing_prevs.add(prev_event_id)
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
)
defer.returnValue(existing_prevs)
@defer.inlineCallbacks
def _get_new_state_after_events(
self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
):
"""Calculate the current state dict after adding some new events to
a room
Args:
room_id (str):
room to which the events are being added. Used for logging etc
events_context (list[(EventBase, EventContext)]):
events and contexts which are being added to the room
old_latest_event_ids (iterable[str]):
the old forward extremities for the room.
new_latest_event_ids (iterable[str]):
the new forward extremities for the room.
Returns:
Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
Returns a tuple of two state maps, the first being the full new current
state and the second being the delta to the existing current state.
If both are None then there has been no change.
If there has been a change then we only return the delta if it's
already been calculated. Conversely if we do know the delta then
the new current state is only returned if we've already calculated
it.
"""
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
# Map from (prev state group, new state group) -> delta state dict
state_group_deltas = {}
for ev, ctx in events_context:
if ctx.state_group is None:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
)
continue
if ctx.state_group in state_groups_map:
continue
# We're only interested in pulling out state that has already
# been cached in the context. We'll pull stuff out of the DB later
# if necessary.
current_state_ids = ctx.get_cached_current_state_ids()
if current_state_ids is not None:
state_groups_map[ctx.state_group] = current_state_ids
if ctx.prev_group:
state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
# pull the state group from its context.
# Otherwise we need to pull the state group from the database.
# Set of events we need to fetch groups for. (We know none of the old
# extremities are going to be in events_context).
missing_event_ids = set(old_latest_event_ids)
event_id_to_state_group = {}
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
# If we couldn't find it, then we'll need to pull
# the state from the database
missing_event_ids.add(event_id)
if missing_event_ids:
# Now pull out the state groups for any missing events from DB
event_to_groups = yield self._get_state_group_for_events(missing_event_ids)
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
old_state_groups = set(
event_id_to_state_group[evid] for evid in old_latest_event_ids
)
# State groups of new_latest_event_ids
new_state_groups = set(
event_id_to_state_group[evid] for evid in new_latest_event_ids
)
# If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
defer.returnValue((None, None))
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
# we have a delta for that transition. If we do then we can just
# return that.
new_state_group = next(iter(new_state_groups))
old_state_group = next(iter(old_state_groups))
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
if delta_ids is not None:
# We have a delta from the existing to new current state,
# so lets just return that. If we happen to already have
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
defer.returnValue((new_state, delta_ids))
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
missing_state = new_state_groups - set(state_groups_map)
if missing_state:
group_to_state = yield self._get_state_for_groups(missing_state)
state_groups_map.update(group_to_state)
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
defer.returnValue((state_groups_map[new_state_groups.pop()], None))
# Ok, we need to defer to the state handler to resolve our state sets.
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
events_map = {ev.event_id: ev for ev, _ in events_context}
# We need to get the room version, which is in the create event.
# Normally that'd be in the database, but it's also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = yield self.get_room_version(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(self),
)
defer.returnValue((res.state, None))
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
"""Calculate the new state deltas for a room.
Assumes that we are only persisting events for one room at a time.
Returns:
tuple[list, dict] (to_delete, to_insert): where to_delete are the
type/state_keys to remove from current_state_events and `to_insert`
are the updates to current_state_events.
"""
existing_state = yield self.get_current_state_ids(room_id)
to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
key: ev_id
for key, ev_id in iteritems(current_state)
if ev_id != existing_state.get(key)
}
defer.returnValue((to_delete, to_insert))
@log_function
def _persist_events_txn(
self,
txn,
events_and_contexts,
backfilled,
delete_existing=False,
state_delta_for_room={},
new_forward_extremeties={},
):
"""Insert some number of room events into the necessary database tables.
Rejected events are only inserted into the events table, the events_json table,
and the rejections table. Things reading from those table will need to check
whether the event was rejected.
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]):
events to persist
backfilled (bool): True if the events were backfilled
delete_existing (bool): True to purge existing table rows for the
events from the database. This is useful when retrying due to
IntegrityError.
state_delta_for_room (dict[str, (list, dict)]):
The current-state delta for each room. For each room, a tuple
(to_delete, to_insert), being a list of type/state keys to be
removed from the current state, and a state set to be added to
the current state.
new_forward_extremeties (dict[str, list[str]]):
The new forward extremities for each room. For each room, a
list of the event ids which are the forward extremities.
"""
all_events_and_contexts = events_and_contexts
min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremeties,
max_stream_order=max_stream_order,
)
# Ensure that we don't have the same event twice.
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
events_and_contexts
)
self._update_room_depths_txn(
txn, events_and_contexts=events_and_contexts, backfilled=backfilled
)
# _update_outliers_txn filters out any events which have already been
# persisted, and returns the filtered list.
events_and_contexts = self._update_outliers_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only events that we haven't
# seen before.
if delete_existing:
# For paranoia reasons, we go and delete all the existing entries
# for these events so we can reinsert them.
# This gets around any problems with some tables already having
# entries.
self._delete_existing_rows_txn(txn, events_and_contexts=events_and_contexts)
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
# We want to store event_auth mappings for rejected events, as they're
# used in state res v2.
# This is only necessary if the rejected event appears in an accepted
# event's auth chain, but it's easier for now just to store them (and
# it doesn't take much storage compared to storing the entire event
# anyway).
self._simple_insert_many_txn(
txn,
table="event_auth",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"auth_id": auth_id,
}
for event, _ in events_and_contexts
for auth_id in event.auth_event_ids()
if event.is_state()
],
)
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
events_and_contexts = self._store_rejected_events_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only ones that weren't
# rejected.
self._update_metadata_tables_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
backfilled=backfilled,
)
def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
#
# The stream_id for the update is chosen to be the minimum of the stream_ids
# for the batch of the events that we are persisting; that means we do not
# end up in a situation where workers see events before the
# current_state_delta updates.
#
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
None,
room_id,
etype,
state_key,
)
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
),
)
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
ev_id,
room_id,
etype,
state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
),
)
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id,
stream_id,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
# and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
for member in members_changed:
txn.call_after(
self.get_rooms_for_user_with_stream_ordering.invalidate, (member,)
)
self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
def _update_forward_extremities_txn(
self, txn, new_forward_extremities, max_stream_order
):
for room_id, new_extrem in iteritems(new_forward_extremities):
self._simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
self._simple_insert_many_txn(
txn,
table="event_forward_extremities",
values=[
{"event_id": ev_id, "room_id": room_id}
for room_id, new_extrem in iteritems(new_forward_extremities)
for ev_id in new_extrem
],
)
# We now insert into stream_ordering_to_exterm a mapping from room_id,
# new stream_ordering to new forward extremities in the room.
# This allows us to later efficiently look up the forward extremities
# for a room before a given stream_ordering.
self._simple_insert_many_txn(
txn,
table="stream_ordering_to_exterm",
values=[
{
"room_id": room_id,
"event_id": event_id,
"stream_ordering": max_stream_order,
}
for room_id, new_extrem in iteritems(new_forward_extremities)
for event_id in new_extrem
],
)
@classmethod
def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts):
"""Ensure that we don't have the same event twice.
Pick the earliest non-outlier if there is one, else the earliest one.
Args:
events_and_contexts (list[(EventBase, EventContext)]):
Returns:
list[(EventBase, EventContext)]: filtered list
"""
new_events_and_contexts = OrderedDict()
for event, context in events_and_contexts:
prev_event_context = new_events_and_contexts.get(event.event_id)
if prev_event_context:
if not event.internal_metadata.is_outlier():
if prev_event_context[0].internal_metadata.is_outlier():
# To ensure correct ordering we pop, as OrderedDict is
# ordered by first insertion.
new_events_and_contexts.pop(event.event_id, None)
new_events_and_contexts[event.event_id] = (event, context)
else:
new_events_and_contexts[event.event_id] = (event, context)
return list(new_events_and_contexts.values())
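# Editor's note: a minimal, self-contained sketch of the dedup policy above
# ("earliest non-outlier wins, else earliest entry"). The _FakeMeta/_FakeEvent
# stubs are hypothetical stand-ins for EventBase; this is illustrative only and
# not part of this module.
def _example_filter_duplicates():
    from collections import OrderedDict

    class _FakeMeta(object):
        def __init__(self, outlier):
            self._outlier = outlier

        def is_outlier(self):
            return self._outlier

    class _FakeEvent(object):
        def __init__(self, event_id, outlier=False):
            self.event_id = event_id
            self.internal_metadata = _FakeMeta(outlier)

    # an outlier copy of "$a" arrives first, then a non-outlier copy
    pairs = [(_FakeEvent("$a", outlier=True), "ctx1"), (_FakeEvent("$a"), "ctx2")]
    deduped = OrderedDict()
    for ev, ctx in pairs:
        prev = deduped.get(ev.event_id)
        if prev is None:
            deduped[ev.event_id] = (ev, ctx)
        elif not ev.internal_metadata.is_outlier() and prev[0].internal_metadata.is_outlier():
            # pop before re-inserting so insertion order tracks the kept copy
            deduped.pop(ev.event_id, None)
            deduped[ev.event_id] = (ev, ctx)
    return list(deduped.values())  # keeps the non-outlier copy, paired with "ctx2"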
def _update_room_depths_txn(self, txn, events_and_contexts, backfilled):
"""Update min_depth for each room
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
backfilled (bool): True if the events were backfilled
"""
depth_updates = {}
for event, context in events_and_contexts:
# Remove any existing cache entries for the event_ids
txn.call_after(self._invalidate_get_event_cache, event.event_id)
if not backfilled:
txn.call_after(
self._events_stream_cache.entity_has_changed,
event.room_id,
event.internal_metadata.stream_ordering,
)
if not event.internal_metadata.is_outlier() and not context.rejected:
depth_updates[event.room_id] = max(
event.depth, depth_updates.get(event.room_id, event.depth)
)
for room_id, depth in iteritems(depth_updates):
self._update_min_depth_for_room_txn(txn, room_id, depth)
def _update_outliers_txn(self, txn, events_and_contexts):
"""Update any outliers with new event info.
This turns outliers into ex-outliers (unless the new event was
rejected).
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without events which
are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
% (",".join(["?"] * len(events_and_contexts)),),
[event.event_id for event, _ in events_and_contexts],
)
have_persisted = {event_id: outlier for event_id, outlier in txn}
to_remove = set()
for event, context in events_and_contexts:
if event.event_id not in have_persisted:
continue
to_remove.add(event)
if context.rejected:
# If the event is rejected then we don't care if the event
# was an outlier or not.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
# an outlier in the database. We now have some state at that
# event, so we need to update the state_groups table with that
# state and insert a row into event_to_state_groups.
try:
self._store_event_state_mappings_txn(txn, ((event, context),))
except Exception:
logger.exception("")
raise
metadata_json = encode_json(event.internal_metadata.get_dict())
sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?"
txn.execute(sql, (metadata_json, event.event_id))
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
state_group_id = context.state_group
self._simple_insert_txn(
txn,
table="ex_outlier_stream",
values={
"event_stream_ordering": stream_order,
"event_id": event.event_id,
"state_group": state_group_id,
},
)
sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
txn.execute(sql, (False, event.event_id))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
self._update_backward_extremeties(txn, [event])
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
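# Editor's note: a self-contained sketch (using sqlite3 and hypothetical event
# ids) of the "IN (%s)" placeholder-building idiom used above to look up which
# events have already been persisted; illustrative only.
def _example_in_clause_lookup():
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (event_id TEXT, outlier BOOLEAN)")
    conn.executemany(
        "INSERT INTO events VALUES (?, ?)", [("$a", True), ("$b", False)]
    )
    event_ids = ["$a", "$b", "$c"]
    # build one "?" placeholder per id, then bind the ids as parameters
    sql = "SELECT event_id, outlier FROM events WHERE event_id in (%s)" % (
        ",".join(["?"] * len(event_ids)),
    )
    have_persisted = {eid: outlier for eid, outlier in conn.execute(sql, event_ids)}
    return have_persisted  # {"$a": 1, "$b": 0}; "$c" was never persisted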
@classmethod
def _delete_existing_rows_txn(cls, txn, events_and_contexts):
if not events_and_contexts:
# nothing to do here
return
logger.info("Deleting existing")
for table in (
"events",
"event_auth",
"event_json",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"event_to_state_groups",
"guest_access",
"history_visibility",
"local_invites",
"room_names",
"state_events",
"rejections",
"redactions",
"room_memberships",
"topics",
):
txn.executemany(
"DELETE FROM %s WHERE event_id = ?" % (table,),
[(ev.event_id,) for ev, _ in events_and_contexts],
)
for table in ("event_push_actions",):
txn.executemany(
"DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
[(ev.room_id, ev.event_id) for ev, _ in events_and_contexts],
)
def _store_event_txn(self, txn, events_and_contexts):
"""Insert new events into the event and event_json tables
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
"""
if not events_and_contexts:
# nothing to do here
return
def event_dict(event):
d = event.get_dict()
d.pop("redacted", None)
d.pop("redacted_because", None)
return d
self._simple_insert_many_txn(
txn,
table="event_json",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],
)
self._simple_insert_many_txn(
txn,
table="events",
values=[
{
"stream_ordering": event.internal_metadata.stream_ordering,
"topological_ordering": event.depth,
"depth": event.depth,
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
"contains_url": (
"url" in event.content
and isinstance(event.content["url"], text_type)
),
}
for event, _ in events_and_contexts
],
)
def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were
rejected
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without the rejected
events.
"""
# Remove the rejected events from the list now that we've added them
# to the events table and the event_json table.
to_remove = set()
for event, context in events_and_contexts:
if context.rejected:
# Insert the event_id into the rejections table
self._store_rejections_txn(txn, event.event_id, context.rejected)
to_remove.add(event)
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _update_metadata_tables_txn(
self, txn, events_and_contexts, all_events_and_contexts, backfilled
):
"""Update all the miscellaneous tables for new events
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_contexts.
backfilled (bool): True if the events were backfilled
"""
# Insert all the push actions into the event_push_actions table.
self._set_push_actions_for_event_and_users_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
)
if not events_and_contexts:
# nothing to do here
return
for event, context in events_and_contexts:
if event.type == EventTypes.Redaction and event.redacts is not None:
# Remove the entries in the event_push_actions table for the
# redacted event.
self._remove_push_actions_for_event_id_txn(
txn, event.room_id, event.redacts
)
# Remove from relations table.
self._handle_redaction(txn, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
txn, events=[event for event, _ in events_and_contexts]
)
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
# Insert into the room_names and event_search tables.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
# Insert into the topics table and event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
self._store_room_message_txn(txn, event)
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
elif event.type == EventTypes.RoomHistoryVisibility:
# Insert into the event_search table.
self._store_history_visibility_txn(txn, event)
elif event.type == EventTypes.GuestAccess:
# Insert into the event_search table.
self._store_guest_access_txn(txn, event)
self._handle_event_relations(txn, event)
# Insert into the room_memberships table.
self._store_room_members_txn(
txn,
[
event
for event, _ in events_and_contexts
if event.type == EventTypes.Member
],
backfilled=backfilled,
)
# Insert event_reference_hashes table.
self._store_event_reference_hashes_txn(
txn, [event for event, _ in events_and_contexts]
)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
]
state_values = []
for event, context in state_events_and_contexts:
vals = {
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"state_key": event.state_key,
}
# TODO: How does this work with backfilling?
if hasattr(event, "replaces_state"):
vals["prev_state"] = event.replaces_state
state_values.append(vals)
self._simple_insert_many_txn(txn, table="state_events", values=state_values)
# Prefill the event cache
self._add_to_cache(txn, events_and_contexts)
def _add_to_cache(self, txn, events_and_contexts):
to_prefill = []
rows = []
N = 200
for i in range(0, len(events_and_contexts), N):
ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
if not ev_map:
break
sql = (
"SELECT "
" e.event_id as event_id, "
" r.redacts as redacts,"
" rej.event_id as rejects "
" FROM events as e"
" LEFT JOIN rejections as rej USING (event_id)"
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
" WHERE e.event_id IN (%s)"
) % (",".join(["?"] * len(ev_map)),)
txn.execute(sql, list(ev_map))
rows = self.cursor_to_dict(txn)
for row in rows:
event = ev_map[row["event_id"]]
if not row["rejects"] and not row["redacts"]:
to_prefill.append(
_EventCacheEntry(event=event, redacted_event=None)
)
def prefill():
for cache_entry in to_prefill:
self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry)
txn.call_after(prefill)
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
txn.call_after(self._invalidate_get_event_cache, event.redacts)
txn.execute(
"INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
(event.event_id, event.redacts),
)
@defer.inlineCallbacks
def count_daily_messages(self):
"""
Returns an estimate of the number of messages sent in the last day.
If it has been significantly less or more than one day since the last
call to this function, it will return None.
"""
def _count_messages(txn):
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_sent_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
# hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND sender LIKE ?
AND stream_ordering > ?
"""
txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_active_rooms(self):
def _count(txn):
sql = """
SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_active_rooms", _count)
defer.returnValue(ret)
def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
return -self._backfill_id_gen.get_current_token()
def get_current_events_token(self):
"""The current maximum token that events have reached"""
return self._stream_id_gen.get_current_token()
def get_all_new_forward_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
def get_all_new_forward_event_rows(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (last_id, current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < event_stream_ordering"
" AND event_stream_ordering <= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_id, upper_bound))
new_event_updates.extend(txn)
return new_event_updates
return self.runInteraction(
"get_all_new_forward_event_rows", get_all_new_forward_event_rows
)
def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
def get_all_new_backfill_event_rows(txn):
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (-last_id, -current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_id, -upper_bound))
new_event_updates.extend(txn.fetchall())
return new_event_updates
return self.runInteraction(
"get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
)
@cached(num_args=5, max_entries=10)
def get_all_new_events(
self,
last_backfill_id,
last_forward_id,
current_backfill_id,
current_forward_id,
limit,
):
"""Get all the new events that have arrived at the server either as
new events or as backfilled events"""
have_backfill_events = last_backfill_id != current_backfill_id
have_forward_events = last_forward_id != current_forward_id
if not have_backfill_events and not have_forward_events:
return defer.succeed(AllNewEventsResult([], [], [], []))
def get_all_new_events_txn(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
if have_forward_events:
txn.execute(sql, (last_forward_id, current_forward_id, limit))
new_forward_events = txn.fetchall()
if len(new_forward_events) == limit:
upper_bound = new_forward_events[-1][0]
else:
upper_bound = current_forward_id
sql = (
"SELECT event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_forward_id, upper_bound))
forward_ex_outliers = txn.fetchall()
else:
new_forward_events = []
forward_ex_outliers = []
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering DESC"
" LIMIT ?"
)
if have_backfill_events:
txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
new_backfill_events = txn.fetchall()
if len(new_backfill_events) == limit:
upper_bound = new_backfill_events[-1][0]
else:
upper_bound = current_backfill_id
sql = (
"SELECT -event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_backfill_id, -upper_bound))
backward_ex_outliers = txn.fetchall()
else:
new_backfill_events = []
backward_ex_outliers = []
return AllNewEventsResult(
new_forward_events,
new_backfill_events,
forward_ex_outliers,
backward_ex_outliers,
)
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
def purge_history(self, room_id, token, delete_local_events):
"""Deletes room history before a certain point
Args:
room_id (str):
token (str): A topological token to delete events before
delete_local_events (bool):
if True, we will delete local events as well as remote ones
(instead of just marking them as outliers and deleting their
state groups).
"""
return self.runInteraction(
"purge_history",
self._purge_history_txn,
room_id,
token,
delete_local_events,
)
def _purge_history_txn(self, txn, room_id, token_str, delete_local_events):
token = RoomStreamToken.parse(token_str)
# Tables that should be pruned:
# event_auth
# event_backward_extremities
# event_edges
# event_forward_extremities
# event_json
# event_push_actions
# event_reference_hashes
# event_search
# event_to_state_groups
# events
# rejections
# room_depth
# state_groups
# state_groups_state
# we will build a temporary table listing the events so that we don't
# have to keep shovelling the list back and forth across the
# connection. Annoyingly the python sqlite driver commits the
# transaction on CREATE, so let's do this first.
#
# furthermore, we might already have the table from a previous (failed)
# purge attempt, so let's drop the table first.
txn.execute("DROP TABLE IF EXISTS events_to_purge")
txn.execute(
"CREATE TEMPORARY TABLE events_to_purge ("
" event_id TEXT NOT NULL,"
" should_delete BOOLEAN NOT NULL"
")"
)
# First ensure that we're not about to delete all the forward extremities
txn.execute(
"SELECT e.event_id, e.depth FROM events as e "
"INNER JOIN event_forward_extremities as f "
"ON e.event_id = f.event_id "
"AND e.room_id = f.room_id "
"WHERE f.room_id = ?",
(room_id,),
)
rows = txn.fetchall()
max_depth = max(row[1] for row in rows)
if max_depth < token.topological:
# We need to ensure we don't delete all the events from the database
# otherwise we wouldn't be able to send any events (due to not
# having any backward extremities).
raise SynapseError(
400, "topological_ordering is greater than forward extremeties"
)
logger.info("[purge] looking for events to delete")
should_delete_expr = "state_key IS NULL"
should_delete_params = ()
if not delete_local_events:
should_delete_expr += " AND event_id NOT LIKE ?"
# We include the parameter twice since we use the expression twice
should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname)
should_delete_params += (room_id, token.topological)
# Note that we insert events that are outliers and aren't going to be
# deleted, as nothing will happen to them.
txn.execute(
"INSERT INTO events_to_purge"
" SELECT event_id, %s"
" FROM events AS e LEFT JOIN state_events USING (event_id)"
" WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
% (should_delete_expr, should_delete_expr),
should_delete_params,
)
# We create the indices *after* insertion as that's a lot faster.
# create an index on should_delete because later we'll be looking for
# the should_delete / shouldn't_delete subsets
txn.execute(
"CREATE INDEX events_to_purge_should_delete"
" ON events_to_purge(should_delete)"
)
# We do joins against events_to_purge for e.g. calculating state
# groups to purge, etc., so let's make an index.
txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)")
txn.execute("SELECT event_id, should_delete FROM events_to_purge")
event_rows = txn.fetchall()
logger.info(
"[purge] found %i events before cutoff, of which %i can be deleted",
len(event_rows),
sum(1 for e in event_rows if e[1]),
)
logger.info("[purge] Finding new backward extremities")
# We calculate the new entries for the backward extremities by finding
# events to be purged that are pointed to by events we're not going to
# purge.
txn.execute(
"SELECT DISTINCT e.event_id FROM events_to_purge AS e"
" INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id"
" LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id"
" WHERE ep2.event_id IS NULL"
)
new_backwards_extrems = txn.fetchall()
logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems)
txn.execute(
"DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)
)
# Update backward extremities
txn.executemany(
"INSERT INTO event_backward_extremities (room_id, event_id)"
" VALUES (?, ?)",
[(room_id, event_id) for event_id, in new_backwards_extrems],
)
logger.info("[purge] finding redundant state groups")
# Get all state groups that are referenced by events that are to be
# deleted. We then go and check if they are referenced by other events
# or state groups, and if not we delete them.
txn.execute(
"""
SELECT DISTINCT state_group FROM events_to_purge
INNER JOIN event_to_state_groups USING (event_id)
"""
)
referenced_state_groups = set(sg for sg, in txn)
logger.info(
"[purge] found %i referenced state groups", len(referenced_state_groups)
)
logger.info("[purge] finding state groups that can be deleted")
_ = self._find_unreferenced_groups_during_purge(txn, referenced_state_groups)
state_groups_to_delete, remaining_state_groups = _
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
# groups into non-delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self._simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self._simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self._simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in iteritems(curr_state)
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
logger.info("[purge] removing events from event_to_state_groups")
txn.execute(
"DELETE FROM event_to_state_groups "
"WHERE event_id IN (SELECT event_id from events_to_purge)"
)
for event_id, _ in event_rows:
txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
# Delete all remote non-state events
for table in (
"events",
"event_json",
"event_auth",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"rejections",
):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,)
)
# event_push_actions lacks an index on event_id, and has one on
# (room_id, event_id) instead.
for table in ("event_push_actions",):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE room_id = ? AND event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,),
(room_id,),
)
# Mark all state events and our own events as outliers
logger.info("[purge] marking remaining events as outliers")
txn.execute(
"UPDATE events SET outlier = ?"
" WHERE event_id IN ("
" SELECT event_id FROM events_to_purge "
" WHERE NOT should_delete"
")",
(True,),
)
# synapse tries to take out an exclusive lock on room_depth whenever it
# persists events (because upsert), and once we run this update, we
# will block that for the rest of our transaction.
#
# So, let's stick it at the end so that we don't block event
# persistence.
#
# We do this by calculating the minimum depth of the backward
# extremities. However, the events in event_backward_extremities
# are ones we don't have yet, so we need to look at the events that
# point to them via the event_edges table.
txn.execute(
"""
SELECT COALESCE(MIN(depth), 0)
FROM event_backward_extremities AS eb
INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id
INNER JOIN events AS e ON e.event_id = eg.event_id
WHERE eb.room_id = ?
""",
(room_id,),
)
min_depth, = txn.fetchone()
logger.info("[purge] updating room_depth to %d", min_depth)
txn.execute(
"UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
(min_depth, room_id),
)
# finally, drop the temp table. this will commit the txn in sqlite,
# so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
logger.info("[purge] done")
def _find_unreferenced_groups_during_purge(self, txn, state_groups):
"""Used when purging history to figure out which state groups can be
deleted and which need to be de-delta'ed (due to one of its prev groups
being scheduled for deletion).
Args:
txn
state_groups (set[int]): Set of state groups referenced by events
that are going to be deleted.
Returns:
tuple[set[int], set[int]]: The set of state groups that can be
deleted and the set of state groups that need to be de-delta'ed
"""
# Graph of state group -> previous group
graph = {}
# Set of state groups that we have found to be referenced by events
referenced_groups = set()
# Set of state groups we've already seen
state_groups_seen = set(state_groups)
# Set of state groups to handle next.
next_to_search = set(state_groups)
while next_to_search:
# We bound the number of groups we look up at once, to stop the
# SQL query from getting too big
if len(next_to_search) < 100:
current_search = next_to_search
next_to_search = set()
else:
current_search = set(itertools.islice(next_to_search, 100))
next_to_search -= current_search
# Check if state groups are referenced
sql = """
SELECT DISTINCT state_group FROM event_to_state_groups
LEFT JOIN events_to_purge AS ep USING (event_id)
WHERE state_group IN (%s) AND ep.event_id IS NULL
""" % (
",".join("?" for _ in current_search),
)
txn.execute(sql, list(current_search))
referenced = set(sg for sg, in txn)
referenced_groups |= referenced
# We don't continue iterating up the state group graphs for state
# groups that are referenced.
current_search -= referenced
rows = self._simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=current_search,
keyvalues={},
retcols=("prev_state_group", "state_group"),
)
prevs = set(row["state_group"] for row in rows)
# We don't bother re-handling groups we've already seen
prevs -= state_groups_seen
next_to_search |= prevs
state_groups_seen |= prevs
for row in rows:
# Note: Each state group can have at most one prev group
graph[row["state_group"]] = row["prev_state_group"]
to_delete = state_groups_seen - referenced_groups
to_dedelta = set()
for sg in referenced_groups:
prev_sg = graph.get(sg)
if prev_sg and prev_sg in to_delete:
to_dedelta.add(sg)
return to_delete, to_dedelta
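# Editor's note: a hypothetical walk-through of the classification above on a
# toy delta chain 1 <- 2 <- 3 (state_group -> prev_state_group). If only group
# 3 is still referenced by surviving events, groups {1, 2} can be deleted and
# group 3 must be de-delta'ed first; illustrative only.
def _example_group_classification():
    graph = {2: 1, 3: 2}  # each state group points at its single prev group
    state_groups_seen = {1, 2, 3}
    referenced_groups = {3}  # still referenced by events we are keeping
    to_delete = state_groups_seen - referenced_groups
    to_dedelta = set()
    for sg in referenced_groups:
        prev_sg = graph.get(sg)
        if prev_sg and prev_sg in to_delete:
            to_dedelta.add(sg)
    return to_delete, to_dedelta  # ({1, 2}, {3})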
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
"""
to_1, so_1 = yield self._get_event_ordering(event_id1)
to_2, so_2 = yield self._get_event_ordering(event_id2)
defer.returnValue((to_1, so_1) > (to_2, so_2))
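# Editor's note: the comparison above relies on Python's lexicographic tuple
# ordering, so topological_ordering is compared first and stream_ordering only
# breaks ties, e.g. (3, 10) > (3, 9) is True, while (2, 99) > (3, 1) is False.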
@cachedInlineCallbacks(max_entries=5000)
def _get_event_ordering(self, event_id):
res = yield self._simple_select_one(
table="events",
retcols=["topological_ordering", "stream_ordering"],
keyvalues={"event_id": event_id},
allow_none=True,
)
if not res:
raise SynapseError(404, "Could not find event %s" % (event_id,))
defer.returnValue(
(int(res["topological_ordering"]), int(res["stream_ordering"]))
)
def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
def get_all_updated_current_state_deltas_txn(txn):
sql = """
SELECT stream_id, room_id, type, state_key, event_id
FROM current_state_delta_stream
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC LIMIT ?
"""
txn.execute(sql, (from_token, to_token, limit))
return txn.fetchall()
return self.runInteraction(
"get_all_updated_current_state_deltas",
get_all_updated_current_state_deltas_txn,
)
AllNewEventsResult = namedtuple(
"AllNewEventsResult",
[
"new_forward_events",
"new_backfill_events",
"forward_ex_outliers",
"backward_ex_outliers",
],
)
| 39.339767
| 89
| 0.578455
| 10,183
| 87,649
| 4.718943
| 0.087892
| 0.022725
| 0.035731
| 0.007908
| 0.385616
| 0.307639
| 0.250328
| 0.200882
| 0.168501
| 0.147608
| 0
| 0.003698
| 0.355235
| 87,649
| 2,227
| 90
| 39.357432
| 0.846602
| 0.24686
| 0
| 0.312454
| 0
| 0
| 0.157629
| 0.024721
| 0
| 0
| 0
| 0.000449
| 0.000737
| 1
| 0.043478
| false
| 0.000737
| 0.021371
| 0.000737
| 0.091378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86f92c20143e35ec634b684ad280aeb864a0957c
| 3,074
|
py
|
Python
|
dev/buildtool/metrics.py
|
premm1983/Spinnaker
|
535f78b8f5402eea942c260cb9ca26682772a3e6
|
[
"Apache-2.0"
] | null | null | null |
dev/buildtool/metrics.py
|
premm1983/Spinnaker
|
535f78b8f5402eea942c260cb9ca26682772a3e6
|
[
"Apache-2.0"
] | null | null | null |
dev/buildtool/metrics.py
|
premm1983/Spinnaker
|
535f78b8f5402eea942c260cb9ca26682772a3e6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics support manager."""
import logging
from buildtool import in_memory_metrics
from buildtool import prometheus_metrics
from buildtool import stackdriver_metrics
from buildtool.util import add_parser_argument
class MetricsManager(object):
"""Acts as factory for specialized BaseMetricsRegistry singleton."""
__metrics_registry = None
@staticmethod
def singleton():
"""Returns the BaseMetricsRegistry once startup_metrics is called."""
if MetricsManager.__metrics_registry is None:
raise Exception('startup_metrics was not called.')
return MetricsManager.__metrics_registry
@staticmethod
def init_argument_parser(parser, defaults):
"""Init argparser with metrics-related options."""
in_memory_metrics.init_argument_parser(parser, defaults)
prometheus_metrics.init_argument_parser(parser, defaults)
stackdriver_metrics.init_argument_parser(parser, defaults)
add_parser_argument(
parser, 'metric_name_scope', defaults, 'buildtool',
help='scope prefix for metrics generated by this tool')
add_parser_argument(
parser, 'monitoring_enabled', defaults, False, type=bool,
help='Enable monitoring to stackdriver.')
add_parser_argument(
parser, 'monitoring_flush_frequency', defaults, 5,
help='Frequency at which to push metrics in seconds.')
add_parser_argument(
parser, 'monitoring_system', defaults, 'file',
choices=['file', 'prometheus', 'stackdriver'],
help='Where to store metrics.')
@staticmethod
def startup_metrics(options):
"""Startup metrics module with concrete system."""
monitoring_systems = {
'file': in_memory_metrics.InMemoryMetricsRegistry,
'prometheus': prometheus_metrics.PrometheusMetricsRegistry,
'stackdriver': stackdriver_metrics.StackdriverMetricsRegistry
}
klas = monitoring_systems[options.monitoring_system]
logging.info('Initializing monitoring with system="%s"', klas.__name__)
MetricsManager.__metrics_registry = klas(options)
if options.monitoring_enabled and options.monitoring_flush_frequency > 0:
MetricsManager.__metrics_registry.start_pusher_thread()
return MetricsManager.__metrics_registry
@staticmethod
def shutdown_metrics():
"""Write final metrics out to metrics server."""
registry = MetricsManager.singleton()
registry.stop_pusher_thread()
registry.flush_updated_metrics()
registry.flush_final_metrics()
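# Editor's note: a hypothetical usage sketch, not part of this module. It
# assumes add_parser_argument accepts an empty defaults dict and registers
# ordinary argparse flags, so parse_args([]) yields the documented defaults
# (e.g. monitoring_system='file'); illustrative only.
def example_main():
    import argparse

    parser = argparse.ArgumentParser()
    MetricsManager.init_argument_parser(parser, defaults={})
    options = parser.parse_args([])
    registry = MetricsManager.startup_metrics(options)
    try:
        pass  # do work here, recording metrics through `registry`
    finally:
        MetricsManager.shutdown_metrics()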
| 39.410256
| 77
| 0.754717
| 357
| 3,074
| 6.285714
| 0.420168
| 0.049911
| 0.037879
| 0.042781
| 0.15508
| 0.096702
| 0
| 0
| 0
| 0
| 0
| 0.003896
| 0.164932
| 3,074
| 77
| 78
| 39.922078
| 0.870277
| 0.278139
| 0
| 0.204082
| 0
| 0
| 0.165901
| 0.011949
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.102041
| 0
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86fafa9c771d30389c672ab69f2d0d2991d82592
| 4,967
|
py
|
Python
|
fat/fat_bert_nq/ppr/apr_lib.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 1
|
2020-05-27T15:40:17.000Z
|
2020-05-27T15:40:17.000Z
|
fat/fat_bert_nq/ppr/apr_lib.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | 7
|
2021-08-25T16:15:53.000Z
|
2022-02-10T03:26:55.000Z
|
fat/fat_bert_nq/ppr/apr_lib.py
|
kiss2u/google-research
|
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class which acts as a wrapper around the PPR algorithm.
This class has the following functionality:
1. Load the KB graph,
2. Given list of seed entities, get topk entities from PPR.
3. Get unique facts between all extracted entities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal NQ evaluation.')
class ApproximatePageRank(object):
"""APR main lib which is used to wrap functions around ppr algo."""
def __init__(self):
self.data = CsrData()
self.data.load_csr_data(
full_wiki=FLAGS.full_wiki, files_dir=FLAGS.apr_files_dir)
def get_topk_extracted_ent(self, seeds, alpha, topk):
"""Extract topk entities given seeds.
Args:
seeds: An Ex1 vector with weight on every seed entity
alpha: probability for PPR
topk: max top entities to extract
Returns:
extracted_ents: list of selected entities
extracted_scores: list of scores of selected entities
"""
ppr_scores = csr_personalized_pagerank(seeds, self.data.adj_mat_t_csr,
alpha)
sorted_idx = np.argsort(ppr_scores)[::-1]
extracted_ents = sorted_idx[:topk]
extracted_scores = ppr_scores[sorted_idx[:topk]]
# Check for really low values
# Get the index of the first value < 1e-6 and truncate extracted ents there
zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
if zero_idx.shape[0] > 0:
extracted_ents = extracted_ents[:zero_idx[0]]
return extracted_ents, extracted_scores
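# Editor's note: a self-contained numpy sketch (hypothetical scores, no KB
# needed) of the top-k selection and the < 1e-6 cutoff above; illustrative only.
def _example_topk_cutoff():
    ppr_scores = np.array([0.5, 0.0, 0.3, 1e-9, 0.2])
    sorted_idx = np.argsort(ppr_scores)[::-1]  # entity indices, best first
    extracted_ents = sorted_idx[:4]  # e.g. topk = 4 -> [0, 2, 4, 3]
    # truncate at the first near-zero score so junk entities are dropped
    zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
    if zero_idx.shape[0] > 0:
        extracted_ents = extracted_ents[:zero_idx[0]]
    return extracted_ents  # array([0, 2, 4])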
def get_facts(self, entities, topk, alpha, seed_weighting=True):
"""Get subgraph describing a neighbourhood around given entities.
Args:
entities: A list of Wikidata entities
topk: Max entities to extract from PPR
alpha: Node probability for PPR
seed_weighting: Boolean for weighting seeds by their frequency in the passage
Returns:
unique_facts: A list of unique facts around the seeds.
"""
if FLAGS.verbose_logging:
tf.logging.info('Getting subgraph')
entity_ids = [
int(self.data.ent2id[x]) for x in entities if x in self.data.ent2id
]
if FLAGS.verbose_logging:
tf.logging.info(
str([self.data.entity_names['e'][str(x)]['name'] for x in entity_ids
]))
freq_dict = {x: entity_ids.count(x) for x in entity_ids}
seed = np.zeros((self.data.adj_mat.shape[0], 1))
if not seed_weighting:
seed[entity_ids] = 1. / len(set(entity_ids))
else:
for x, y in freq_dict.items():
seed[x] = y
seed = seed / seed.sum()
extracted_ents, extracted_scores = self.get_topk_extracted_ent(
seed, alpha, topk)
if FLAGS.verbose_logging:
tf.logging.info('Extracted ents: ')
tf.logging.info(
str([
self.data.entity_names['e'][str(x)]['name']
for x in extracted_ents
]))
facts = csr_topk_fact_extractor(self.data.adj_mat_t_csr, self.data.rel_dict,
freq_dict, self.data.entity_names,
extracted_ents, extracted_scores)
if FLAGS.verbose_logging:
tf.logging.info('Extracted facts: ')
tf.logging.info(str(facts))
# Extract 1 unique fact per pair of entities (fact with highest score)
# Sort by scores
unique_facts = {}
for (sub, obj, rel, score) in facts:
    fwd_dir = (sub, obj)
    rev_dir = (obj, sub)
    if fwd_dir in unique_facts:
        # Keep only the highest-scored fact for this directed pair
        if score > unique_facts[fwd_dir][1]:
            unique_facts[fwd_dir] = (rel, score)
    elif rev_dir in unique_facts:
        if score > unique_facts[rev_dir][1]:
            unique_facts[fwd_dir] = (rel, score)
            del unique_facts[rev_dir]  # Remove existing entity pair
        # else: a higher-scored fact already covers this pair, so skip it
    else:
        unique_facts[fwd_dir] = (rel, score)
unique_facts = list(unique_facts.items())
return unique_facts
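# Editor's note: a self-contained sketch (hypothetical facts, no KB needed) of
# the per-pair dedup above: a higher-scored fact in the reverse direction
# replaces the weaker one already stored for the same entity pair.
def _example_unique_facts():
    facts = [("e1", "e2", "rel_a", 0.4), ("e2", "e1", "rel_b", 0.9)]
    unique_facts = {}
    for (sub, obj, rel, score) in facts:
        fwd_dir, rev_dir = (sub, obj), (obj, sub)
        if fwd_dir in unique_facts:
            if score > unique_facts[fwd_dir][1]:
                unique_facts[fwd_dir] = (rel, score)
        elif rev_dir in unique_facts:
            if score > unique_facts[rev_dir][1]:
                unique_facts[fwd_dir] = (rel, score)
                del unique_facts[rev_dir]
        else:
            unique_facts[fwd_dir] = (rel, score)
    return list(unique_facts.items())  # [(('e2', 'e1'), ('rel_b', 0.9))]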
| 35.478571
| 80
| 0.684518
| 725
| 4,967
| 4.514483
| 0.32
| 0.050412
| 0.023831
| 0.025665
| 0.158265
| 0.150932
| 0.134128
| 0.117324
| 0.05194
| 0.05194
| 0
| 0.007849
| 0.230521
| 4,967
| 139
| 81
| 35.733813
| 0.848509
| 0.342259
| 0
| 0.16
| 0
| 0
| 0.065802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.106667
| 0
| 0.186667
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86fc7c6a00ab6863dd9ce69648b4b5568994e8af
| 6,941
|
py
|
Python
|
src/optimal_gardening.py
|
evanlynch/optimal-gardening
|
447ca8575efac1ad5cdd975091f3cbb67721e167
|
[
"MIT"
] | null | null | null |
src/optimal_gardening.py
|
evanlynch/optimal-gardening
|
447ca8575efac1ad5cdd975091f3cbb67721e167
|
[
"MIT"
] | null | null | null |
src/optimal_gardening.py
|
evanlynch/optimal-gardening
|
447ca8575efac1ad5cdd975091f3cbb67721e167
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
sb.set_style("dark")
#### Initial Setup ####
#plant info
plant_info = pd.read_csv('../data/plant_data.csv')
plant_info.index.name = 'plant_index'
plants = plant_info.name.to_numpy()
plant_index = plant_info.index.to_numpy()
num_plants = len(plants)
plant_sun_req = plant_info.sun.to_numpy()
perennials = plant_info[plant_info.perennial==1].index.to_list()
problem_plants = plant_info[plant_info.problem_plant==1].index.to_list()
#calculate weighted average preference for each plant
family = ['evan','gina','liesse','lizzie','jack']
plant_info['avg_pref'] = np.average(plant_info[family],axis=1,weights=[.5,.5,0,0,0])
plant_info.drop(family,axis=1,inplace=True)
preferences = plant_info.avg_pref.to_numpy()
#bed info
bed_info = pd.read_csv('../data/bed_data.csv')
bed_info.index.name = 'bed_index'
beds = bed_info.bed.to_numpy()
bed_index = bed_info.index.to_numpy()
bed_sun_req = bed_info.sun.to_numpy()
num_beds = len(beds)
#time dimension
num_years = 3
years = np.array(range(1,num_years+1))
year_index = np.array(range(num_years))
#for keeping track of which axis is which
plant_axis = 0
bed_axis = 1
year_axis = 2
##### Constraints #####
#initialize sun constraint. 1 where plant can feasibly be planted in bed. 0 where sun requirements do not match.
sun_constraint = np.ones(shape=(num_plants,num_beds,num_years))
for p in plant_index:
for b in bed_index:
p_sun = plant_sun_req[p]
b_sun = bed_sun_req[b]
if p_sun != b_sun:
sun_constraint[p,b,:] = 0
def enforce_sun_constraint(plan,sun_constraint):
"""
Force plan to be 0 where sun requirements for plant and bed do not match.
"""
return plan*sun_constraint
def enforce_perennial_constraint(plan,plant,bed,year,perennials):
"""Forward fill plan for perennial plants. If 1 in a given bed/year, it will be 1 in same bed thereafter."""
perennial_plan = plan.copy()
#what was planted the year before
plant_last_year = perennial_plan[:,bed,year-1].argmax()
#if the plant is a perennial, plant it this year and every year thereafter
if plant in perennials:
perennial_plan[:,bed,year:] = 0 # zeros out anything else that may have been planted in bed in current and subsequent years during a previous make_neighbor call
perennial_plan[plant,bed,year:] = 1 #sets plant to 1 in bed in the current and every subsequent year
#if what was planted already in this bed was a perennial, remove it from previous years
elif plant_last_year in perennials:
perennial_plan[plant_last_year,bed,:year] = 0
return perennial_plan
def enforce_disease_constraint(plan,problem_plants):
"""Creates a mask to determine if the same veg was planted in the same bed over multiple years.
Multiplies the plan for problem plants by 0 in subsequent years where we planned to put them in the same bed
"""
disease_plan = plan.copy()
#mask to determine cases where same thing was planted in the same bed yoy
same_veg_in_bed_yoy = disease_plan.cumsum(axis=year_axis)>1
#multiply plan for specific problem plants by 0
disease_plan[problem_plants] = disease_plan[problem_plants]*(abs(1-same_veg_in_bed_yoy)[problem_plants])
return disease_plan
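# Editor's note: a self-contained numpy sketch of the cumsum mask above, on a
# hypothetical 1-plant x 1-bed x 3-year plan; illustrative only.
def _example_disease_mask():
    plan = np.zeros((1, 1, 3))
    plan[0, 0, :] = 1  # same plant in the same bed three years running
    same_veg_in_bed_yoy = plan.cumsum(axis=2) > 1  # [False, True, True]
    plan = plan * abs(1 - same_veg_in_bed_yoy)  # zero out the repeat years
    return plan[0, 0]  # array([1., 0., 0.])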
##### Objectives #####
#the most satisfied you could be (planting fruit or vegetable with highest preference in all beds every year)
max_yums = num_beds*num_years*np.max(preferences)
def compute_yummy_score(plan,preferences,max_yums):
"""Takes the weighted average of the preferences of each plant, weighted by the total qty of plants
in the current plan for each plant. Maximization encourages plants with higher preferences to be planted in higher quantities."""
plan_yummy = plan.copy()
plan_by_plant = plan_yummy.sum(axis=(bed_axis,year_axis))
yums = round(np.dot(preferences,plan_by_plant)/max_yums*100,1)
return yums
def compute_variety_score(plan,num_plants):
"""Sums the number of unique plants that are actually planted in the garden. Counts the number of plants that are being planted across all beds.
Then counts the number of plants with non-zero planting plan.
Maximization encourages more unique plants to be planted."""
plan_variety = plan.copy()
num_plants_in_plan = (plan_variety.sum(axis=(bed_axis,year_axis)) > 0).sum()
variety_score = round(num_plants_in_plan/num_plants*100,1)
return variety_score
#### Analysis & Visualization ####
def visualize_garden(bed_info):
garden_layout = bed_info.sun.map({'Full sun':1,'Partial sun':2,'Partial shade':3}).to_numpy().reshape(14,3)
palette = ["#ffa200","#fcbd53","#ffd58f"]
f, ax = plt.subplots(figsize=(10, 6))
ax = sb.heatmap(garden_layout,linewidths=5,linecolor='white',cmap=sb.color_palette(palette),cbar=False)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
plt.rcParams.update({'font.size': 13})
return ax
def visualize_plan(bed_info,bed_index,years):
for year in years:
garden_viz = visualize_garden(bed_info)
garden_viz.set_title(f'Year {year}')
for bed in bed_index:
x = bed_info.iloc[bed].x
y = bed_info.iloc[bed].y
plt.text(x + 0.5, y + 0.5, bed_info.loc[(bed_info.x==x)&(bed_info.y==y)][f'year_{year}'].iloc[0],
horizontalalignment='center',verticalalignment='center')
def annual_bed_plan(best_plan,bed_info,plant_info,bed_index,year_index):
for t in year_index:
bed_plan = []
for b in bed_index:
plant_idx = np.argmax(best_plan[:,b,t])
plant = plant_info.iloc[plant_idx]['name']
bed_plan.append(plant)
bed_info[f'year_{t+1}'] = pd.Series(bed_plan)
return bed_info
def visualize_obj_iters(current_plan_obj_values):
objectives = []
yummy_scores = []
variety_scores = []
for i in current_plan_obj_values:
objectives.append(i[1]['objective'])
yummy_scores.append(i[1]['yummy_score'])
variety_scores.append(i[1]['variety_score'])
df = pd.DataFrame([objectives, yummy_scores, variety_scores]).T
df.columns = ['obj_value', 'yummy_scores', 'variety_scores']
df.reset_index(inplace=True)
df = df.melt(id_vars=['index'],var_name='objective')
fig, ax = plt.subplots(figsize=(20,8))
sb.scatterplot(data=df,x='index',y='value',hue='objective',edgecolor=None,s=5)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0)
ax.set_title('Objective Values of Current Solution by Iteration')
# ax2 = plt.twinx()
# sb.scatterplot(data=df.drop_duplicates(['index','total_plants']),x='index',y='objective',edgecolor=None,ax=ax2,color='black',s=5)
| 39.662857
| 168
| 0.711713
| 1,089
| 6,941
| 4.341598
| 0.264463
| 0.02665
| 0.010998
| 0.020305
| 0.090102
| 0.018613
| 0
| 0
| 0
| 0
| 0
| 0.013751
| 0.172309
| 6,941
| 175
| 169
| 39.662857
| 0.809225
| 0.285406
| 0
| 0.018182
| 0
| 0
| 0.077034
| 0.004544
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081818
| false
| 0
| 0.072727
| 0
| 0.218182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86fca5740e3caf795c7b7090059ab5992cec0e59
| 9,453
|
py
|
Python
|
adv_lib/utils/attack_utils.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 55
|
2020-11-25T10:47:48.000Z
|
2022-03-21T12:11:31.000Z
|
adv_lib/utils/attack_utils.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 4
|
2021-03-10T19:25:31.000Z
|
2021-08-06T00:10:49.000Z
|
adv_lib/utils/attack_utils.py
|
Daulbaev/adversarial-library
|
6f979a511ad78908374cd55855a9e2c5a874be7d
|
[
"BSD-3-Clause"
] | 8
|
2020-11-26T08:42:04.000Z
|
2022-01-13T02:55:47.000Z
|
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
Generates, for each label, one random target drawn from the (num_classes - 1)
classes that differ from the original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
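# Editor's note: a self-contained sketch of the scatter_ trick above: zeroing
# each row's true-label column means argmax picks a different class (with
# probability 1, since torch.rand draws from [0, 1)). Values are hypothetical.
def _example_random_targets():
    labels = torch.tensor([0, 2])
    random = torch.rand(2, 3)
    random.scatter_(1, labels.unsqueeze(-1), 0)  # zero the true-label column
    targets = random.argmax(1)
    assert (targets != labels).all()
    return targets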
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random targets for each label. shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
def run_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
attack: Callable,
targets: Optional[Tensor] = None,
batch_size: Optional[int] = None) -> dict:
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
targeted, adv_labels = False, labels
if targets is not None:
targeted, adv_labels = True, targets
batch_size = batch_size or len(inputs)
# run attack only on non already adversarial samples
already_adv = []
chunks = [tensor.split(batch_size) for tensor in [inputs, adv_labels]]
for (inputs_chunk, label_chunk) in zip(*chunks):
batch_chunk_d, label_chunk_d = [to_device(tensor) for tensor in [inputs_chunk, label_chunk]]
preds = model(batch_chunk_d).argmax(1)
is_adv = (preds == label_chunk_d) if targeted else (preds != label_chunk_d)
already_adv.append(is_adv.cpu())
not_adv = ~torch.cat(already_adv, 0)
start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
forward_counter, backward_counter = ForwardCounter(), BackwardCounter()
model.register_forward_pre_hook(forward_counter)
if LooseVersion(torch.__version__) >= LooseVersion('1.8'):
model.register_full_backward_hook(backward_counter)
else:
model.register_backward_hook(backward_counter)
average_forwards, average_backwards = [], [] # number of forward and backward calls per sample
advs_chunks = []
chunks = [tensor.split(batch_size) for tensor in [inputs[not_adv], adv_labels[not_adv]]]
total_time = 0
for (inputs_chunk, label_chunk) in tqdm.tqdm(zip(*chunks), ncols=80, total=len(chunks[0])):
batch_chunk_d, label_chunk_d = [to_device(tensor.clone()) for tensor in [inputs_chunk, label_chunk]]
start.record()
advs_chunk_d = attack(model, batch_chunk_d, label_chunk_d, targeted=targeted)
# performance monitoring
end.record()
torch.cuda.synchronize()
total_time += (start.elapsed_time(end)) / 1000 # times for cuda Events are in milliseconds
average_forwards.append(forward_counter.num_samples_called / len(batch_chunk_d))
average_backwards.append(backward_counter.num_samples_called / len(batch_chunk_d))
forward_counter.reset(), backward_counter.reset()
advs_chunks.append(advs_chunk_d.cpu())
if isinstance(attack, partial) and (callback := attack.keywords.get('callback')) is not None:
callback.reset_windows()
adv_inputs = inputs.clone()
adv_inputs[not_adv] = torch.cat(advs_chunks, 0)
data = {
'inputs': inputs,
'labels': labels,
'targets': adv_labels if targeted else None,
'adv_inputs': adv_inputs,
'time': total_time,
'num_forwards': sum(average_forwards) / len(chunks[0]),
'num_backwards': sum(average_backwards) / len(chunks[0]),
}
return data
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
def compute_attack_metrics(model: nn.Module,
attack_data: Dict[str, Union[Tensor, float]],
batch_size: Optional[int] = None,
metrics: Dict[str, Callable] = _default_metrics) -> Dict[str, Union[Tensor, float]]:
inputs, labels, targets, adv_inputs = map(attack_data.get, ['inputs', 'labels', 'targets', 'adv_inputs'])
if adv_inputs.min() < 0 or adv_inputs.max() > 1:
warnings.warn('Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].')
adv_inputs.clamp_(min=0, max=1)
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
batch_size = batch_size or len(inputs)
chunks = [tensor.split(batch_size) for tensor in [inputs, labels, adv_inputs]]
all_predictions = [[] for _ in range(6)]
distances = {k: [] for k in metrics.keys()}
metrics = {k: v().to(device) if (isclass(v.func) if isinstance(v, partial) else False) else v for k, v in
metrics.items()}
append = lambda list, data: list.append(data.cpu())
for inputs_chunk, labels_chunk, adv_chunk in zip(*chunks):
inputs_chunk, adv_chunk = map(to_device, [inputs_chunk, adv_chunk])
clean_preds, adv_preds = [predict_inputs(model, chunk.to(device)) for chunk in [inputs_chunk, adv_chunk]]
list(map(append, all_predictions, [*clean_preds, *adv_preds]))
for metric, metric_func in metrics.items():
distances[metric].append(metric_func(adv_chunk, inputs_chunk).detach().cpu())
logits, probs, preds, logits_adv, probs_adv, preds_adv = [torch.cat(l) for l in all_predictions]
for metric in metrics.keys():
distances[metric] = torch.cat(distances[metric], 0)
accuracy_orig = (preds == labels).float().mean().item()
if targets is not None:
success = (preds_adv == targets)
labels = targets
else:
success = (preds_adv != labels)
prob_orig = probs.gather(1, labels.unsqueeze(1)).squeeze(1)
prob_adv = probs_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
labels_infhot = torch.zeros_like(logits_adv).scatter_(1, labels.unsqueeze(1), float('inf'))
real = logits_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
other = (logits_adv - labels_infhot).max(1).values
diff_vs_max_adv = (real - other)
nll = F.cross_entropy(logits, labels, reduction='none')
nll_adv = F.cross_entropy(logits_adv, labels, reduction='none')
data = {
'time': attack_data['time'],
'num_forwards': attack_data['num_forwards'],
'num_backwards': attack_data['num_backwards'],
'targeted': targets is not None,
'preds': preds,
'adv_preds': preds_adv,
'accuracy_orig': accuracy_orig,
'success': success,
'probs_orig': prob_orig,
'probs_adv': prob_adv,
'logit_diff_adv': diff_vs_max_adv,
'nll': nll,
'nll_adv': nll_adv,
'distances': distances,
}
return data
def print_metrics(metrics: dict) -> None:
    np.set_printoptions(formatter={'float': '{:0.3f}'.format}, threshold=16, edgeitems=3,
                        linewidth=120)  # to print arrays with less precision
    print('Original accuracy: {:.2%}'.format(metrics['accuracy_orig']))
    print('Attack done in: {:.2f}s with {:.4g} forwards and {:.4g} backwards.'.format(
        metrics['time'], metrics['num_forwards'], metrics['num_backwards']))
    success = metrics['success'].numpy()
    fail = bool(success.mean() != 1)  # only print per-sample details when at least one attack failed
    print('Attack success: {:.2%}'.format(success.mean()) + fail * ' - {}'.format(success))
    for distance, values in metrics['distances'].items():
        data = values.numpy()
        print('{}: {} - Average: {:.3f} - Median: {:.3f}'.format(distance, data, data.mean(), np.median(data)) +
              fail * ' | Avg over success: {:.3f}'.format(data[success].mean()))
    attack_type = 'targets' if metrics['targeted'] else 'correct'
    print('Logit({} class) - max_Logit(other classes): {} - Average: {:.2f}'.format(
        attack_type, metrics['logit_diff_adv'].numpy(), metrics['logit_diff_adv'].numpy().mean()))
    print('NLL of target/pred class: {:.3f}'.format(metrics['nll_adv'].numpy().mean()))
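
# Usage sketch: the metrics dict returned above plugs straight into print_metrics. The names
# `compute_attack_metrics`, `run_attack`, `model` and `attack` below are assumptions for
# illustration; only the code defined in this file is confirmed by the source.
#
#   attack_data = run_attack(model, inputs, labels, attack)   # hypothetical attack driver
#   metrics = compute_attack_metrics(model, attack_data)      # the function defined above
#   print_metrics(metrics)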
# --- File: thawSlumpChangeDet/polygons_compare.py | Repo: Summer0328/ChangeDet_DL-1 @ f2474ee4200d9ad093c0e5a27a94bfbd3bd038e7 | License: MIT ---
#!/usr/bin/env python
# Filename: polygons_cd
"""
introduction: compare the polygons in two shape files

authors: Huang Lingcao
email: huanglingcao@gmail.com
add time: 26 February, 2020
"""

import sys, os
from optparse import OptionParser

# add the path of DeeplabforRS
sys.path.insert(0, os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))

import basic_src.io_function as io_function
import basic_src.basic as basic
import basic_src.map_projection as map_projection
import parameters
import polygons_cd_multi
import polygons_cd


def main(options, args):
    old_shp_path = args[0]
    new_shp_path = args[1]

    # check that the files exist
    assert io_function.is_file_exist(new_shp_path)
    assert io_function.is_file_exist(old_shp_path)

    # the projections of the two shape files should be the same
    old_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(old_shp_path)
    new_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(new_shp_path)
    if old_shp_proj4 != new_shp_proj4:
        raise ValueError('error, projection inconsistency between %s and %s' % (old_shp_proj4, new_shp_proj4))

    main_shp_name = polygons_cd_multi.get_main_shp_name(old_shp_path, new_shp_path)

    # conduct change detection
    if options.output is not None:
        main_shp_name = options.output

    # get the expanding and shrinking parts
    output_path_expand = 'expand_' + main_shp_name
    output_path_shrink = 'shrink_' + main_shp_name
    polygons_cd.polygons_change_detection(old_shp_path, new_shp_path, output_path_expand, output_path_shrink)


if __name__ == "__main__":
    usage = "usage: %prog [options] old_shape_file new_shape_file "
    parser = OptionParser(usage=usage, version="1.0 2020-02-26")
    parser.description = 'Introduction: compare two groups of polygons '

    parser.add_option("-p", "--para",
                      action="store", dest="para_file",
                      help="the parameters file")
    parser.add_option('-o', '--output',
                      action="store", dest='output',
                      help='the path to save the change detection results')

    (options, args) = parser.parse_args()
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    # # set parameters files
    # if options.para_file is None:
    #     print('error, no parameters file')
    #     parser.print_help()
    #     sys.exit(2)
    # else:
    #     parameters.set_saved_parafile_path(options.para_file)

    basic.setlogfile('polygons_changeDetection.log')
    main(options, args)
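
# Usage sketch (hypothetical file names): compare an older delineation against a newer one;
# the expanding and shrinking polygons are written to shape files prefixed 'expand_'/'shrink_':
#
#   python polygons_compare.py slumps_2019.shp slumps_2020.shp -o slump_change.shp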
# --- File: andela_labs/Car Class Lab (OOP)/car.py | Repo: brotich/andela_bootcamp_X @ 19fc5bb66d3c930d4e6b9afeb45abc00bbc4c2ea | License: MIT ---
class Car(object):
    """
    Car class that can be used to instantiate various vehicles.

    It takes in arguments that depict the type, model, and name
    of the vehicle.
    """

    def __init__(self, name="General", model="GM", car_type="saloon"):
        num_of_wheels = 4
        num_of_doors = 4
        if car_type == "trailer":
            num_of_wheels = 8
        if name == "Porsche" or name == "Koenigsegg":
            num_of_doors = 2  # sports cars only get two doors
        self.name = name
        self.model = model
        self.type = car_type
        self.num_of_doors = num_of_doors
        self.num_of_wheels = num_of_wheels
        self.speed = 0

    def drive(self, gear):
        # speed scales with the gear; trailers are much slower than saloon cars
        if self.type == "trailer":
            self.speed = gear * 77 / 7
        elif self.type == "saloon":
            self.speed = gear * 1000 / 3
        return self

    def is_saloon(self):
        return self.type == 'saloon'
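
# Usage sketch: exercising the class above (the specific vehicles are illustrative examples).
koenigsegg = Car("Koenigsegg", "Agera R")
assert koenigsegg.num_of_doors == 2        # two-door sports car
assert koenigsegg.is_saloon()              # car_type defaults to "saloon"
assert koenigsegg.drive(3).speed == 1000   # saloon: gear * 1000 / 3

trailer = Car("MAN", "CLA", "trailer")
assert trailer.num_of_wheels == 8          # trailers get eight wheels
assert trailer.drive(7).speed == 77        # trailer: gear * 77 / 7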
# --- File: basicapp/cron.py | Repo: shivamsinghal212/Url-Shortener @ 4127a993272744f6f8592415314c8e8514d43153 | License: MIT ---
from django_cron import CronJobBase, Schedule
from django.utils import timezone

from .models import Link


class MyCronJob(CronJobBase):
    RUN_EVERY_MINS = 1  # run every minute
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'basicapp.cron'  # a unique code identifying this job

    def do(self):
        # delete links that have not been hit for more than 2 days
        current_time = timezone.now()
        links = Link.objects.all()
        for obj in links:
            print("Checking last hit date for: ", obj.shortenURL)
            delta = current_time - obj.last_hit
            if delta.days > 2:
                print('link is older than 2 days, DELETING!')
                obj.delete()
            else:
                print("link was recently hit, won't delete.")
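
# Usage sketch: django_cron jobs only run once registered in settings and triggered by the
# `runcrons` management command (typically invoked from a system crontab). Assuming the
# Django project layout implied by the `basicapp.cron` code above:
#
#   # settings.py
#   INSTALLED_APPS = [..., 'django_cron', 'basicapp']
#   CRON_CLASSES = ['basicapp.cron.MyCronJob']
#
#   # shell, e.g. from crontab every few minutes
#   python manage.py runcrons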