hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf50431e85bfe1d1fea256dbe1c77558789f9eb | 550 | py | Python | protlearn/preprocessing/tests/test_remove_duplicates.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 24 | 2020-09-17T10:35:44.000Z | 2022-03-09T19:19:01.000Z | protlearn/preprocessing/tests/test_remove_duplicates.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 14 | 2020-08-09T18:23:01.000Z | 2020-11-19T05:48:14.000Z | protlearn/preprocessing/tests/test_remove_duplicates.py | tadorfer/ProtClass | da1a01ea9abd3c367b3389dfed683c6a9dfa6afd | [
"MIT"
] | 3 | 2021-03-07T23:41:17.000Z | 2022-02-25T18:48:37.000Z | import pytest
from ..remove_duplicates import remove_duplicates
def test_remove_duplicates():
"Test duplicate sequences"
# define data
x0 = ['ARKLY', 'LYLPGG', 'EECCKHR']
x1 = ['ARKLY', 'LYLPGG', 'ARKLY', 'EECCKHR', 'LYLPGG']
x2 = ['ARKLY', 'LYLPGG', 'ARKLY', 'ARKLY']
# test for duplicates
y0 = remove_duplicates(x0)
y1 = remove_duplicates(x1)
y2 = remove_duplicates(x2, verbose=2) # checking verbosity
assert set(y0) == set(x0)
assert set(y1) == set(x0)
assert set(y2) == set(['ARKLY', 'LYLPGG']) | 28.947368 | 62 | 0.634545 |
acf504b9e7f4ca0d78fc71b16cf853f31e20a2bb | 34 | py | Python | eaglestitch/storage/__init__.py | ardihikaru/eaglestitch | b388f0c3b78b0539812985850905c78830e871aa | [
"MIT"
] | null | null | null | eaglestitch/storage/__init__.py | ardihikaru/eaglestitch | b388f0c3b78b0539812985850905c78830e871aa | [
"MIT"
] | null | null | null | eaglestitch/storage/__init__.py | ardihikaru/eaglestitch | b388f0c3b78b0539812985850905c78830e871aa | [
"MIT"
] | null | null | null | from .module import StorageModule
| 17 | 33 | 0.852941 |
acf504d41ec821243e8e2339b06b7087bfc84564 | 6,041 | gyp | Python | chrome/app/policy/cloud_policy_codegen.gyp | leiferikb/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-11-15T15:17:43.000Z | 2021-11-15T15:17:43.000Z | chrome/app/policy/cloud_policy_codegen.gyp | houseoflifeproperty/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/app/policy/cloud_policy_codegen.gyp | houseoflifeproperty/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-04T07:24:02.000Z | 2020-11-04T07:24:02.000Z | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'policy_out_dir': '<(SHARED_INTERMEDIATE_DIR)/policy',
'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
'generate_policy_source_script_path':
'<(DEPTH)/chrome/tools/build/generate_policy_source.py',
'policy_constant_header_path':
'<(policy_out_dir)/policy/policy_constants.h',
'policy_constant_source_path':
'<(policy_out_dir)/policy/policy_constants.cc',
'protobuf_decoder_path':
'<(policy_out_dir)/policy/cloud_policy_generated.cc',
# This is the "full" protobuf, which defines one protobuf message per
# policy. It is also the format currently used by the server.
'chrome_settings_proto_path':
'<(policy_out_dir)/policy/chrome_settings.proto',
# This protobuf is equivalent to chrome_settings.proto but shares messages
    # for policies of the same type, so that fewer classes have to be generated
# and compiled.
'cloud_policy_proto_path': '<(policy_out_dir)/policy/cloud_policy.proto',
'proto_path_substr': 'chrome/browser/policy/proto',
'proto_rel_path': '<(DEPTH)/<(proto_path_substr)',
},
'targets': [
{
'target_name': 'cloud_policy_code_generate',
'type': 'none',
'actions': [
{
'inputs': [
'policy_templates.json',
'<(generate_policy_source_script_path)',
],
'outputs': [
'<(policy_constant_header_path)',
'<(policy_constant_source_path)',
'<(protobuf_decoder_path)',
'<(chrome_settings_proto_path)',
'<(cloud_policy_proto_path)',
],
'action_name': 'generate_policy_source',
'action': [
'python',
'<@(generate_policy_source_script_path)',
'--policy-constants-header=<(policy_constant_header_path)',
'--policy-constants-source=<(policy_constant_source_path)',
'--chrome-settings-protobuf=<(chrome_settings_proto_path)',
'--cloud-policy-protobuf=<(cloud_policy_proto_path)',
'--cloud-policy-decoder=<(protobuf_decoder_path)',
'<(OS)',
'<(chromeos)',
'policy_templates.json',
],
'message': 'Generating policy source',
},
],
'direct_dependent_settings': {
'include_dirs': [
'<(policy_out_dir)',
'<(protoc_out_dir)',
],
},
},
{
'target_name': 'cloud_policy_proto_compile',
'type': 'static_library',
'sources': [
'<(cloud_policy_proto_path)',
],
'variables': {
'proto_in_dir': '<(policy_out_dir)/policy',
'proto_out_dir': '<(proto_path_substr)',
},
'dependencies': [
'cloud_policy_code_generate',
],
'includes': [ '../../../build/protoc.gypi' ],
},
{
# This target builds the "full" protobuf, used for tests only.
'target_name': 'chrome_settings_proto_compile',
'type': 'static_library',
'sources': [
'<(chrome_settings_proto_path)',
],
'variables': {
'proto_in_dir': '<(policy_out_dir)/policy',
'proto_out_dir': '<(proto_path_substr)',
},
'dependencies': [
'cloud_policy_code_generate',
'cloud_policy_proto_compile',
],
'includes': [ '../../../build/protoc.gypi' ],
},
{
'target_name': 'policy_proto_compile',
'type': 'static_library',
'sources': [
'<(proto_rel_path)/chrome_device_policy.proto',
'<(proto_rel_path)/device_management_backend.proto',
'<(proto_rel_path)/device_management_local.proto',
'<(proto_rel_path)/install_attributes.proto',
'<(proto_rel_path)/old_generic_format.proto',
],
'variables': {
'proto_in_dir': '<(proto_rel_path)',
'proto_out_dir': '<(proto_path_substr)',
},
'dependencies': [
'cloud_policy_code_generate',
],
'includes': [ '../../../build/protoc.gypi' ],
},
{
'target_name': 'policy',
'type': 'static_library',
'hard_dependency': 1,
'direct_dependent_settings': {
'include_dirs': [
'<(policy_out_dir)',
'<(protoc_out_dir)',
],
},
'sources': [
'<(policy_constant_header_path)',
'<(policy_constant_source_path)',
'<(protobuf_decoder_path)',
],
'include_dirs': [
'<(DEPTH)',
],
'dependencies': [
'cloud_policy_code_generate',
'cloud_policy_proto_compile',
'policy_proto_compile',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/third_party/protobuf/protobuf.gyp:protobuf_lite',
],
},
{
'target_name': 'policy_test_support',
'type': 'none',
'hard_dependency': 1,
'direct_dependent_settings': {
'include_dirs': [
'<(policy_out_dir)',
'<(protoc_out_dir)',
],
},
'dependencies': [
'policy',
'chrome_settings_proto_compile',
],
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'policy_win64',
'type': 'static_library',
'hard_dependency': 1,
'sources': [
'<(policy_constant_header_path)',
'<(policy_constant_source_path)',
],
'include_dirs': [
'<(DEPTH)',
],
'direct_dependent_settings': {
'include_dirs': [
'<(policy_out_dir)'
],
},
'dependencies': [
'cloud_policy_code_generate',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
],
}],
], # 'conditions'
}
| 31.139175 | 78 | 0.555703 |
acf504e6c7c89334d5855d2090fcdb630dea376c | 290 | py | Python | cookiecutter/__init__.py | Vauxoo/cookiecutter | 422f7d75da11bcaa7f933959b62d0cda2de09545 | [
"BSD-3-Clause"
] | 2 | 2019-06-03T12:44:14.000Z | 2021-09-01T08:28:45.000Z | cookiecutter/__init__.py | Vauxoo/cookiecutter | 422f7d75da11bcaa7f933959b62d0cda2de09545 | [
"BSD-3-Clause"
] | 1 | 2021-02-24T05:31:46.000Z | 2021-02-24T05:31:46.000Z | cookiecutter/__init__.py | Vauxoo/cookiecutter | 422f7d75da11bcaa7f933959b62d0cda2de09545 | [
"BSD-3-Clause"
] | 1 | 2021-01-29T10:04:27.000Z | 2021-01-29T10:04:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter
------------
Main package for Cookiecutter.
"""
from .compat import OLD_PY2
__version__ = '1.0.0'
if OLD_PY2:
msg = 'Python 2.6 support was removed from cookiecutter in release 1.0.0.'
raise DeprecationWarning(msg)
| 17.058824 | 78 | 0.658621 |
acf505cbbed3ec78d7c0769cbca238a9df78cdf2 | 3,380 | py | Python | test/functional/test_framework/coverage.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | test/functional/test_framework/coverage.py | bvbfan/ain | 71e3b3456f90a858d1325f612bd44393789d74d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `defi-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| 30.727273 | 87 | 0.660651 |
acf506dd79bd6cf21450741669d78a9ad1084ce9 | 9,483 | py | Python | StrandModels/extractingNodesIdsOnFaces/pullCylinder.py | MarcoMagl/mySofaDevs | e918a21773268e1008ceb24a398fe694b99f2380 | [
"MIT"
] | 2 | 2020-05-23T12:53:22.000Z | 2020-09-14T00:17:11.000Z | StrandModels/extractingNodesIdsOnFaces/pullCylinder.py | MarcoMagl/mySofaDevs | e918a21773268e1008ceb24a398fe694b99f2380 | [
"MIT"
] | 1 | 2020-06-19T05:23:19.000Z | 2020-06-19T05:23:19.000Z | StrandModels/extractingNodesIdsOnFaces/pullCylinder.py | MarcoMagl/mySofaDevs | e918a21773268e1008ceb24a398fe694b99f2380 | [
"MIT"
] | 1 | 2020-05-07T12:44:21.000Z | 2020-05-07T12:44:21.000Z | import sys
import Sofa
import pickle
import numpy as np
import gmsh
def save_obj(obj, name ):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
class Cyl (Sofa.PythonScriptController):
def __init__(self, node, commandLineArguments) :
self.commandLineArguments = commandLineArguments
print "Command line arguments for python : "+str(commandLineArguments)
self.createGraph(node)
return None;
def createGraph(self,rootNode):
FileMshBase="cylinder.msh"
gmsh.initialize()
gmsh.option.setNumber("General.Terminal", 1)
gmsh.open(FileMshBase)
entities = gmsh.model.getEntities()
FaceFixedFound = 0
FacePulledFound= 0
import numpy as np
for e in entities:
dim = e[0]
tag = e[1]
# Get the mesh nodes for the entity (dim, tag):
nodeTags, nodeCoords, nodeParams = gmsh.model.mesh.getNodes(dim, tag)
# * Type and name of the entity:
type = gmsh.model.getType(e[0], e[1])
name = gmsh.model.getEntityName(e[0], e[1])
if len(name): name += ' '
print("Entity " + name + str(e) + " of type " + type)
physicalTags = gmsh.model.getPhysicalGroupsForEntity(dim, tag)
if len(physicalTags):
for p in physicalTags:
n = gmsh.model.getPhysicalName(dim, p)
if n: n += ' '
if n == "FaceFixed ":
print "\n" + "FaceFixed found" + "\n"
FaceFixedFound = 1
idsBlockedNodes=np.array(nodeTags)
np.savetxt("nodesFaceFixed.txt", idsBlockedNodes ,fmt="%d")
elif n == "FacePulled ":
print "\n" + "FacePulled found" + "\n"
idsMovingNodes=np.array(nodeTags)
np.savetxt("nodesFacePulled.txt", idsMovingNodes, fmt="%d")
FacePulledFound = 1
assert FaceFixedFound and FacePulledFound, "Faces were not found"
# We can use this to clear all the model data:
gmsh.clear()
gmsh.finalize()
a_file = open("cylinder.msh", "r")
lines = a_file.readlines()
a_file.close()
nlines = len(lines)
i = 0
linesToDel = []
content = []
while i < nlines:
if "$PhysicalNames" in lines[i]:
while not "$EndPhysicalNames" in lines[i]:
linesToDel.append(i)
content.append(lines[i])
i += 1
linesToDel.append(i)
content.append(lines[i])
break
i+=1
for l in linesToDel:
print(lines[l])
del lines[linesToDel[0]:linesToDel[-1]+1]
FileMsh=FileMshBase.split('.')[0] + "NoHead.msh"
new_file = open(FileMsh, "w+")
for line in lines:
new_file.write(line)
new_file.close()
self.rootNode = rootNode
rootNode.createObject('RequiredPlugin', name='SofaOpenglVisual')
rootNode.createObject('RequiredPlugin', name='SofaPython')
rootNode.createObject('RequiredPlugin', name='SofaExporter')
rootNode.createObject('VisualStyle', displayFlags='showForceFields')
E = 188E3
nu=0.33
Wire = rootNode.createChild('Wire')
self.Wire= Wire
Wire.gravity = '0 0 0'
Wire.createObject('EulerImplicitSolver', printLog='0', rayleighStiffness='0.1', name='cg_odesolver', rayleighMass='0.1')
Wire.createObject('CGLinearSolver', threshold='1e-09', tolerance='1e-09', name='linear solver', iterations='100') #, template='GraphScattered')
Wire.createObject('MeshGmshLoader', name='meshLoader0', filename=FileMsh)
Wire.createObject('MeshTopology', src='@meshLoader0', name='Topo')
Wire.createObject('MechanicalObject', name='dofs', template='Vec3d')
Wire.createObject('TetrahedronFEMForceField', template='Vec3d', youngModulus=E, poissonRatio=nu)
Wire.createObject('UniformMass', name='mass', template='Vec3d', totalMass='0.5')
import numpy as np
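        # gmsh node tags are 1-based, so subtract 1 below to get the 0-based
        # indices expected by SOFA's FixedConstraint/LinearMovementConstraint.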
idsBlockedNodes=(np.loadtxt("nodesFaceFixed.txt").astype(int) -1) .tolist()
idsMovingdNodes=(np.loadtxt("nodesFacePulled.txt").astype(int)-1).tolist()
print("\n Ids of nodes blocked" )
print(idsBlockedNodes)
print("\n Ids of nodes Pulled" )
print(idsMovingdNodes)
Wire.createObject('FixedConstraint', indices=idsBlockedNodes, name='HomogeneousBCs', template='Vec3d')
disp = 10
self.endTime = 5
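        # Each row of `movements` is the imposed [dx, dy, dz, rx, ry, rz] at the
        # matching entry of `keyTimes`; LinearMovementConstraint interpolates
        # linearly between key times, so the pulled face is displaced by `disp`
        # along z and held there until endTime.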
keyTimes = np.zeros(3)
keyTimes[0] = 0
keyTimes[1] = 0
keyTimes[2] = self.endTime
movements = np.zeros((keyTimes.shape[0], 6), dtype=float)
movements[0] = [0, 0, 0, 0, 0, 0]
movements[1] = [0, 0, disp, 0, 0, 0]
movements[2] = [0, 0, disp, 0, 0, 0]
Wire.createObject('LinearMovementConstraint', keyTimes=keyTimes.ravel().tolist(), template='Vec3d', movements=movements.ravel().tolist(), indices=idsMovingdNodes)
return 0;
def onMouseButtonLeft(self, mouseX,mouseY,isPressed):
## usage e.g.
#if isPressed :
# print "Control+Left mouse button pressed at position "+str(mouseX)+", "+str(mouseY)
return 0;
def onKeyReleased(self, c):
## usage e.g.
#if c=="A" :
# print "You released a"
return 0;
def initGraph(self, node):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onKeyPressed(self, c):
## usage e.g.
#if c=="A" :
# print "You pressed control+a"
return 0;
def onMouseWheel(self, mouseX,mouseY,wheelDelta):
## usage e.g.
#if isPressed :
# print "Control button pressed+mouse wheel turned at position "+str(mouseX)+", "+str(mouseY)+", wheel delta"+str(wheelDelta)
return 0;
def storeResetState(self):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def cleanup(self):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onGUIEvent(self, strControlID,valueName,strValue):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onEndAnimationStep(self, deltaTime):
## Please feel free to add an example for a simple usage in /Users/marco.magliulo/mySofaCodes/myScriptsToGetStarted//Users/marco.magliulo/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
t = self.rootNode.time
print "t="
print str(t)
print "\n"
if t > self.endTime:
import numpy as np
self.rootNode.animate = False
self.cleanup()
# quit()
return 0;
def onLoaded(self, node):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def reset(self):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onMouseButtonMiddle(self, mouseX,mouseY,isPressed):
## usage e.g.
#if isPressed :
# print "Control+Middle mouse button pressed at position "+str(mouseX)+", "+str(mouseY)
return 0;
def bwdInitGraph(self, node):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onScriptEvent(self, senderNode, eventName,data):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def onMouseButtonRight(self, mouseX,mouseY,isPressed):
## usage e.g.
#if isPressed :
# print "Control+Right mouse button pressed at position "+str(mouseX)+", "+str(mouseY)
return 0;
def onBeginAnimationStep(self, deltaTime):
## Please feel free to add an example for a simple usage in /home/marcomag/WireModels//home/marcomag/Software/sofa/src/applications/plugins/SofaPython/scn2python.py
return 0;
def createScene(rootNode):
rootNode.findData('dt').value = 0.05
rootNode.findData('gravity').value = '0 -9.81 0'
try :
sys.argv[0]
except :
commandLineArguments = []
else :
commandLineArguments = sys.argv
myCyl = Cyl(rootNode,commandLineArguments)
return 0;
| 38.706122 | 209 | 0.608984 |
acf507dc78178a9fbaee4dba233a890b53dc1cbb | 1,353 | py | Python | helper.py | YanickT/NMR | a326b08d66e86d6545edc54ce558afddd4782495 | [
"MIT"
] | null | null | null | helper.py | YanickT/NMR | a326b08d66e86d6545edc54ce558afddd4782495 | [
"MIT"
] | null | null | null | helper.py | YanickT/NMR | a326b08d66e86d6545edc54ce558afddd4782495 | [
"MIT"
] | null | null | null | import numpy as np
import skimage.draw as draw
import matplotlib.pyplot as plt
LAYERS = ["Density", "T1", "T2"]
PHASES = ["$Grad_x$", "$Grad_y$", "$Grad_z$", "Pulse", "Read"]
def create_example(size=100, radius=10, sep=0):
half_size = size // 2
image = np.zeros((size, size, 3), dtype=np.uint16)
# first sample (oil)
rs, cs = draw.disk((half_size, half_size - radius - sep), radius, shape=None)
image[rs, cs, 0] = 1 # density layer
image[rs, cs, 1] = 114 # t1 layer ms
image[rs, cs, 2] = 112 # t2 layer ms
# second sample (water)
rs, cs = draw.disk((half_size, half_size + radius + sep), radius, shape=None)
image[rs, cs, 0] = 1 # density layer
image[rs, cs, 1] = 3269 # t1 layer ms
image[rs, cs, 2] = 1214 # t2 layer ms
return image
def view(np_array):
f, axs = plt.subplots(1, 3)
for i in range(3):
axs[i].set_title(LAYERS[i])
axs[i].imshow(np_array[:, :, i], cmap="hot")
plt.show()
def view_phase(np_array, tfactor):
f, axs = plt.subplots(4, 1, sharex=True)
ts = np.arange(np_array.shape[0]) / tfactor
for i in range(4):
axs[i].set_title(PHASES[i])
axs[i].plot(ts, np_array[:, i])
plt.show()
if __name__ == "__main__":
img = create_example(size=300, radius=30, sep=1)
view(img)
np.save("sample2_modi", img)
| 27.06 | 81 | 0.597191 |
acf507ec426613a00264272fff14f7f5eeacd1d1 | 5,002 | py | Python | lambda_local/main.py | ksmoore17/python-lambda-local | cc566f52529acdc18a7d7fb4431b1aab97bc5e1b | [
"MIT"
] | null | null | null | lambda_local/main.py | ksmoore17/python-lambda-local | cc566f52529acdc18a7d7fb4431b1aab97bc5e1b | [
"MIT"
] | null | null | null | lambda_local/main.py | ksmoore17/python-lambda-local | cc566f52529acdc18a7d7fb4431b1aab97bc5e1b | [
"MIT"
] | null | null | null | '''
Copyright 2015-2020 HENNGE K.K. (formerly known as HDE, Inc.)
Licensed under MIT.
'''
import sys
import traceback
import json
import logging
import os
import timeit
import multiprocessing
from . import event
from . import context
from .environment_variables import set_environment_variables, export_variables
from .timeout import time_limit
from .timeout import TimeoutException
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(name)s - %(levelname)s - %(asctime)s] %(message)s')
ERR_TYPE_EXCEPTION = 0
ERR_TYPE_TIMEOUT = 1
EXITCODE_ERR = 1
class ContextFilter(logging.Filter):
def __init__(self, context):
super(ContextFilter, self).__init__()
self.context = context
def filter(self, record):
record.aws_request_id = self.context.aws_request_id
return True
class FunctionLoader():
def __init__(self,
request_id=None,
source=None,
function_name=None,
library_path=None,
func=None):
self.request_id = request_id
self.source = source
self.function_name = function_name
self.library_path = library_path
self.func = func
def load(self):
if self.library_path is not None:
load_lib(self.library_path)
self.func = load_source(
self.request_id, self.source, self.function_name)
def call(func, event, context, environment_variables={}):
export_variables(environment_variables)
loader = FunctionLoader(func=func)
return _runner(loader, event, context)
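# Illustrative sketch (not part of the library): call() can be used to invoke a
# handler directly, e.g. from a test, assuming context.Context accepts the
# timeout in seconds as its first argument (as run() below does). The handler
# and the values passed to it are made up for demonstration only.
def _example_handler(event, ctx):
    # Echo the event back so the result is easy to inspect.
    return {"echo": event}
def _example_local_invoke():
    # Returns (result, err_type); err_type is None on success, otherwise
    # ERR_TYPE_TIMEOUT or ERR_TYPE_EXCEPTION.
    return call(_example_handler, {"answer": 42}, context.Context(3))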
def run(args):
# set env vars if path to json file was given
set_environment_variables(args.environment_variables)
e = event.read_event(args.event)
c = context.Context(
args.timeout,
invoked_function_arn=args.arn_string,
function_version=args.version_name)
loader = FunctionLoader(
request_id=c.aws_request_id,
source=args.file,
function_name=args.function,
library_path=args.library)
(result, err_type) = _runner(loader, e, c)
if err_type is not None:
sys.exit(EXITCODE_ERR)
def _runner(loader, event, context):
logger = logging.getLogger()
logger.info("Event: {}".format(event))
logger.info("START RequestId: {} Version: {}".format(
context.aws_request_id, context.function_version))
queue = multiprocessing.Queue()
p = multiprocessing.Process(
target=execute_in_process,
args=(queue, loader, event, context,))
p.start()
(result, err_type, duration) = queue.get()
p.join()
logger.info("END RequestId: {}".format(context.aws_request_id))
duration = "{0:.2f} ms".format(duration)
logger.info("REPORT RequestId: {}\tDuration: {}".format(
context.aws_request_id, duration))
if type(result) is TimeoutException:
logger.error("RESULT:\n{}".format(result))
else:
logger.info("RESULT:\n{}".format(result))
return (result, err_type)
def load_lib(path):
sys.path.append(os.path.abspath(path))
def load_source(request_id, path, function_name):
mod_name = 'request-' + str(request_id)
file_path = os.path.abspath(path)
file_directory = os.path.dirname(file_path)
sys.path.append(file_directory)
if sys.version_info.major == 2:
import imp
mod = imp.load_source(mod_name, path)
elif sys.version_info.major == 3 and sys.version_info.minor >= 5:
import importlib
spec = importlib.util.spec_from_file_location(mod_name, path)
mod = importlib.util.module_from_spec(spec)
sys.modules[mod_name] = mod
spec.loader.exec_module(mod)
else:
raise Exception("unsupported python version")
object_names = function_name.split('.')
object = mod
for name in object_names:
object = getattr(object, name)
return object
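# Note: function_name may be a dotted path (for example "MyClass.handler");
# load_source() resolves it above by walking getattr() through each component.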
def execute(func, event, context):
err_type = None
logger = logging.getLogger()
log_filter = ContextFilter(context)
logger.addFilter(log_filter)
try:
with time_limit(context._timeout_in_seconds):
result = func(event, context._activate())
except TimeoutException as err:
result = err
err_type = ERR_TYPE_TIMEOUT
except:
err = sys.exc_info()
result = json.dumps({
"errorMessage": str(err[1]),
"stackTrace": traceback.format_tb(err[2]),
"errorType": err[0].__name__
}, indent=4, separators=(',', ': '))
err_type = ERR_TYPE_EXCEPTION
return result, err_type
def execute_in_process(queue, loader, event, context):
if loader.func is None:
loader.load()
start_time = timeit.default_timer()
result, err_type = execute(loader.func, event, context)
end_time = timeit.default_timer()
duration = (end_time - start_time) * 1000
queue.put((result, err_type, duration))
| 27.483516 | 82 | 0.656337 |
acf5084de43c4b5bba70381b22e78e413d59cbcc | 1,104 | py | Python | .ci/tagger.py | yoyowz/openvino_notebooks | bdc23d8ea116586205d59338499b8eb2db59dea5 | [
"Apache-2.0"
] | 1 | 2022-01-27T14:10:26.000Z | 2022-01-27T14:10:26.000Z | .ci/tagger.py | rageshhajela16/openvino_notebooks | 73a30443f7a631fa8ecb63f04a4b659c3701bc94 | [
"Apache-2.0"
] | 1 | 2022-03-02T21:42:32.000Z | 2022-03-02T21:42:32.000Z | .ci/tagger.py | rageshhajela16/openvino_notebooks | 73a30443f7a631fa8ecb63f04a4b659c3701bc94 | [
"Apache-2.0"
] | null | null | null | import json
import glob
import mmap
def get_notebooks(path: str):
return glob.glob(f"{path}/*/[0-9]*.ipynb")
def get_tags(path: str):
return json.load(open(path))
def find_tags_for_notebook(notebook_path: str, tags: dict):
nb_tags = []
with open(notebook_path) as file:
f = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
for tag, keywords in tags.items():
for keyword in keywords:
if f.find(bytes(keyword, 'utf-8')) != -1:
nb_tags.append(tag)
break
return nb_tags
def find_tags_for_all_notebooks(notebooks: list, tags: dict):
notebooks_tags = {}
for notebook in notebooks:
nb_tags = sorted(find_tags_for_notebook(notebook, tags))
if nb_tags:
notebooks_tags[notebook.split('/')[-1].split('.')[0]] = nb_tags
return notebooks_tags
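# The keywords file is expected to have this shape (tag and keyword names here
# are placeholders, inferred from get_tags() and find_tags_for_notebook above):
#
#     {"tags": {"<tag name>": ["<keyword 1>", "<keyword 2>"]}}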
notebooks_paths = sorted(get_notebooks("notebooks"))
tags = get_tags(".ci/keywords.json")['tags']
all_notebooks_tags = find_tags_for_all_notebooks(notebooks_paths, tags)
print(json.dumps(all_notebooks_tags, indent=4))
| 30.666667 | 75 | 0.657609 |
acf50859fe683aa4d8ce11765f7330d99618a1e5 | 1,667 | py | Python | sunpy/map/sources/tests/test_euvi_source.py | DShivansh/sunpy | 94b0cbd4336d5c2af35a77d11bdff729634aa1d9 | [
"BSD-2-Clause"
] | null | null | null | sunpy/map/sources/tests/test_euvi_source.py | DShivansh/sunpy | 94b0cbd4336d5c2af35a77d11bdff729634aa1d9 | [
"BSD-2-Clause"
] | 2 | 2015-06-15T17:16:11.000Z | 2016-11-23T17:12:07.000Z | sunpy/map/sources/tests/test_euvi_source.py | DShivansh/sunpy | 94b0cbd4336d5c2af35a77d11bdff729634aa1d9 | [
"BSD-2-Clause"
] | 2 | 2016-11-22T01:35:15.000Z | 2017-04-26T19:37:53.000Z | """Test cases for STEREO Map subclasses.
This particular test file pertains to EUVIMap.
@Author: Pritish C. (VaticanCameos)
"""
import os
import glob
import pytest
from sunpy.map.sources.stereo import EUVIMap
from sunpy.map import Map
from sunpy.coordinates import sun
import sunpy.data.test
from sunpy.util.exceptions import SunpyUserWarning
path = sunpy.data.test.rootdir
fitspath = glob.glob(os.path.join(path, "euvi_20090615_000900_n4euA_s.fts"))
euvi = Map(fitspath)
# EUVI Tests
def test_fitstoEUVI():
"""Tests the creation of EUVIMap using FITS."""
assert isinstance(euvi, EUVIMap)
def test_is_datasource_for():
"""Test the is_datasource_for method of EUVIMap.
Note that header data to be provided as an argument
can be a MetaDict object."""
assert euvi.is_datasource_for(euvi.data, euvi.meta)
def test_measurement():
"""Tests the measurement property of the EUVIMap object."""
assert euvi.measurement.value == 171
def test_observatory():
"""Tests the observatory property of the EUVIMap object."""
assert euvi.observatory == "STEREO A"
def test_rsun_obs():
"""Tests the rsun_obs property"""
assert euvi.rsun_obs.value == euvi.meta['rsun']
def test_rsun_missing():
"""Tests output if 'rsun' is missing"""
euvi_no_rsun = Map(fitspath)
euvi_no_rsun.meta['rsun'] = None
with pytest.warns(SunpyUserWarning, match='Missing metadata for solar radius'):
assert euvi_no_rsun.rsun_obs.value == sun.angular_radius(euvi.date).to('arcsec').value
def test_norm_clip():
# Tests that the default normalizer has clipping disabled
assert euvi.plot_settings['norm'].clip == False
| 26.887097 | 94 | 0.733653 |
acf5088785c6f06655057edb0c495d78fce0fca9 | 33 | py | Python | tests/__init__.py | lakshayarora476/TSIClient | 8d911d8beac3259d0fe86446e6526c1c8d53b74f | [
"MIT"
] | null | null | null | tests/__init__.py | lakshayarora476/TSIClient | 8d911d8beac3259d0fe86446e6526c1c8d53b74f | [
"MIT"
] | null | null | null | tests/__init__.py | lakshayarora476/TSIClient | 8d911d8beac3259d0fe86446e6526c1c8d53b74f | [
"MIT"
] | null | null | null | from TSIClient import TSIClient
| 16.5 | 32 | 0.848485 |
acf508e3c1845772cabb15c23cc51cb1bd8ccb21 | 23,486 | py | Python | pysnmp-with-texts/H3C-RAID-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/H3C-RAID-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/H3C-RAID-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module H3C-RAID-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-RAID-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:23:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
H3cStorageOwnerType, H3cRaidIDType, H3cStorageActionType, h3cStorageRef, H3cStorageEnableState = mibBuilder.importSymbols("H3C-STORAGE-REF-MIB", "H3cStorageOwnerType", "H3cRaidIDType", "H3cStorageActionType", "h3cStorageRef", "H3cStorageEnableState")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, iso, NotificationType, Counter32, Unsigned32, Counter64, IpAddress, Gauge32, Bits, ModuleIdentity, Integer32, TimeTicks, MibIdentifier, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "NotificationType", "Counter32", "Unsigned32", "Counter64", "IpAddress", "Gauge32", "Bits", "ModuleIdentity", "Integer32", "TimeTicks", "MibIdentifier", "ObjectIdentity")
TruthValue, DateAndTime, DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DateAndTime", "DisplayString", "RowStatus", "TextualConvention")
h3cRaid = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4))
if mibBuilder.loadTexts: h3cRaid.setLastUpdated('200709041452Z')
if mibBuilder.loadTexts: h3cRaid.setOrganization('H3C Technologies Co., Ltd.')
if mibBuilder.loadTexts: h3cRaid.setContactInfo('Platform Team H3C Technologies Co., Ltd. Hai-Dian District Beijing P.R. China Http://www.h3c.com Zip:100085')
if mibBuilder.loadTexts: h3cRaid.setDescription('This MIB describes the general information of raid.')
h3cRaidMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1))
h3cRaidCapacityTable = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 1))
h3cPrimaryRaidCount = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPrimaryRaidCount.setStatus('current')
if mibBuilder.loadTexts: h3cPrimaryRaidCount.setDescription('This object identifies the maximal number of primary raids supported.')
h3cRaidTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2), )
if mibBuilder.loadTexts: h3cRaidTable.setStatus('current')
if mibBuilder.loadTexts: h3cRaidTable.setDescription('This table containing some property information of the array.')
h3cRaidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3cRaidName"))
if mibBuilder.loadTexts: h3cRaidEntry.setStatus('current')
if mibBuilder.loadTexts: h3cRaidEntry.setDescription('An entry containing management information applicable to create array resource.')
h3cRaidName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cRaidName.setStatus('current')
if mibBuilder.loadTexts: h3cRaidName.setDescription('This object identifies the name of the array.')
h3cRaidId = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidId.setStatus('current')
if mibBuilder.loadTexts: h3cRaidId.setDescription('This object describes the identification of the array.')
h3cRaidUuid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 3), H3cRaidIDType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidUuid.setStatus('current')
if mibBuilder.loadTexts: h3cRaidUuid.setDescription('This object describes the UUID of the array.')
h3cRaidLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("jbod", 1), ("raid0", 2), ("raid1", 3), ("raid2", 4), ("raid3", 5), ("raid4", 6), ("raid5", 7), ("raid6", 8), ("raid10", 9), ("raid50", 10)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidLevel.setStatus('current')
if mibBuilder.loadTexts: h3cRaidLevel.setDescription("This object identifies the type of array. The value 'jbod' means just a bunch of disks, the value 'raid0' means RAID Level 0, and so on.")
h3cRaidTimestamp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 5), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidTimestamp.setStatus('current')
if mibBuilder.loadTexts: h3cRaidTimestamp.setDescription('This object identifies the system time when the array is created.')
h3cRaidDiskList = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 256))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidDiskList.setStatus('current')
if mibBuilder.loadTexts: h3cRaidDiskList.setDescription('This object describes a list of the disk composed the array, each two bytes with network-byte order means a single entity index of the disk.')
h3cRaidOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 7), H3cStorageOwnerType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidOwner.setStatus('current')
if mibBuilder.loadTexts: h3cRaidOwner.setDescription('This object identifies the owner of the array.')
h3cRaidSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidSize.setStatus('current')
if mibBuilder.loadTexts: h3cRaidSize.setDescription("This object describes the size of array's total space. The units is million bytes.")
h3cRaidFreeSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 9), Integer32()).setUnits('MB').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidFreeSize.setStatus('current')
if mibBuilder.loadTexts: h3cRaidFreeSize.setDescription("This object describes the size of array's vacancy space. The units is million bytes.")
h3cRaidAutoSync = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 10), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidAutoSync.setStatus('current')
if mibBuilder.loadTexts: h3cRaidAutoSync.setDescription('This object identifies if the raid should be synchro automatically when created.')
h3cRaidRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 2, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRowStatus.setDescription('This object describes the actions to create or delete an array.')
h3cRaidManageTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3), )
if mibBuilder.loadTexts: h3cRaidManageTable.setStatus('current')
if mibBuilder.loadTexts: h3cRaidManageTable.setDescription('This table containing some management information of the array.')
h3cRaidManageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3cRaidUuid"))
if mibBuilder.loadTexts: h3cRaidManageEntry.setStatus('current')
if mibBuilder.loadTexts: h3cRaidManageEntry.setDescription('An entry containing management information applicable to particular array resource.')
h3cRaidLocationState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 1), H3cStorageEnableState().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidLocationState.setStatus('current')
if mibBuilder.loadTexts: h3cRaidLocationState.setDescription('This object identifies if the array can be located.')
h3cRaidAction = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("run", 1), ("pause", 2), ("rebuild", 3), ("invalid", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidAction.setStatus('current')
if mibBuilder.loadTexts: h3cRaidAction.setDescription("This object identifies the actions to operating the array. When read, this object always returns the value 'invalid'.")
h3cRaidRunState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("degraded", 2), ("failed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidRunState.setStatus('current')
if mibBuilder.loadTexts: h3cRaidRunState.setDescription("This object identifies the state of the array. The value 'normal' means array works well. The value 'degraded' means array has degraded. The value 'failed' means some disks met failure and array has failed.")
h3cRaidAutoRebuild = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 4), H3cStorageEnableState().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidAutoRebuild.setStatus('current')
if mibBuilder.loadTexts: h3cRaidAutoRebuild.setDescription('This object identifies if the array can be auto rebuild.')
h3cRaidSyncPercentage = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidSyncPercentage.setStatus('current')
if mibBuilder.loadTexts: h3cRaidSyncPercentage.setDescription("This object describes the percentage of progress when the array is synchronizing. The value is equal to '100' when the progress has finished.")
h3cRaidHideState = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 6), H3cStorageEnableState().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidHideState.setStatus('current')
if mibBuilder.loadTexts: h3cRaidHideState.setDescription("This object identifies if the array is hidden. When the value of this object is equal to 'enable', the array will be hidden and the files on it can not be accessed by any user.")
h3cRaidLvRestore = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 7), H3cStorageActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidLvRestore.setStatus('current')
if mibBuilder.loadTexts: h3cRaidLvRestore.setDescription('This object identifies an action to restore all the logic volume resources.')
h3cRaidType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("virtualDevice", 1), ("directDevice", 2), ("serviceEnabledDevice", 3), ("unassigned", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidType.setStatus('current')
if mibBuilder.loadTexts: h3cRaidType.setDescription("This object identifies the physical type of an array. The value 'virtualDevice' means if a array is prepared for virtual device, it can be used to create LV, for exampe, it is can be split into a lot of LV, or be a part of LV. The value 'directDevice' means if a array is prepared for direct device, it can be used to create a LV which type is direct. Unlike virtual devices, they cannot be combined or split into multiple logical devices. The value 'serviceEnabledDevice' means if a array is prepared for service-enabled device, it can be used to create a LV which type is service-enabled. Unlike virtual devices, they cannot be combined or split into multiple logical devices. The value 'unassigned' means it is the original status of a array, if a array is unassigned, it can't be used to create LV.")
h3cRaidCacheTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4), )
if mibBuilder.loadTexts: h3cRaidCacheTable.setStatus('current')
if mibBuilder.loadTexts: h3cRaidCacheTable.setDescription('This table containing some property information and management information of the array cache.')
h3cRaidCacheEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3cRaidUuid"))
if mibBuilder.loadTexts: h3cRaidCacheEntry.setStatus('current')
if mibBuilder.loadTexts: h3cRaidCacheEntry.setDescription('An entry containing management information applicable to particular array cache resource.')
h3cRaidReadCache = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 1), H3cStorageEnableState().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidReadCache.setStatus('current')
if mibBuilder.loadTexts: h3cRaidReadCache.setDescription("This object identifies the state of read cache. If the value of this object is set to 'disable', the associated percentage of cache hit may be invalid.")
h3cRaidReadCacheHitPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setUnits('minute').setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidReadCacheHitPeriod.setStatus('current')
if mibBuilder.loadTexts: h3cRaidReadCacheHitPeriod.setDescription('This object describes the period over which the read cache hit ratio is measured. The units is minute.')
h3cRaidReadCacheAverageRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidReadCacheAverageRate.setStatus('current')
if mibBuilder.loadTexts: h3cRaidReadCacheAverageRate.setDescription('This object describes the average percentage of cache hit when read.')
h3cRaidReadCachePhaseRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidReadCachePhaseRate.setStatus('current')
if mibBuilder.loadTexts: h3cRaidReadCachePhaseRate.setDescription('This object describes the percentage of cache hit when read during the period set by the h3cRaidReadCacheHitPeriod object.')
h3cRaidWriteCache = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 5), H3cStorageEnableState().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidWriteCache.setStatus('current')
if mibBuilder.loadTexts: h3cRaidWriteCache.setDescription("This object identifies the state of write cache. If the value of this object is set to 'disable', the associated percentage of cache hit may be invalid.")
h3cRaidWriteCacheHitPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setUnits('minute').setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidWriteCacheHitPeriod.setStatus('current')
if mibBuilder.loadTexts: h3cRaidWriteCacheHitPeriod.setDescription('This object describes the period over which the write cache hit ratio is measured. The units is minute.')
h3cRaidWriteCacheAverageRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidWriteCacheAverageRate.setStatus('current')
if mibBuilder.loadTexts: h3cRaidWriteCacheAverageRate.setDescription('This object describes the average percentage of cache hit when writed.')
h3cRaidWriteCachePhaseRate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cRaidWriteCachePhaseRate.setStatus('current')
if mibBuilder.loadTexts: h3cRaidWriteCachePhaseRate.setDescription('This object describes the percentage of cache hit when write during the period set by the h3cRaidWriteCacheHitPeriod object.')
h3cRaidWriteCacheFlush = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 4, 1, 9), H3cStorageActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cRaidWriteCacheFlush.setStatus('current')
if mibBuilder.loadTexts: h3cRaidWriteCacheFlush.setDescription('This object identifies an action to flushing the write cache.')
h3cRaidSpareDiskTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 5), )
if mibBuilder.loadTexts: h3cRaidSpareDiskTable.setStatus('current')
if mibBuilder.loadTexts: h3cRaidSpareDiskTable.setDescription('This table described the spare hot disk information.')
h3cRaidSpareDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 5, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3cRaidUuid"), (0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: h3cRaidSpareDiskEntry.setStatus('current')
if mibBuilder.loadTexts: h3cRaidSpareDiskEntry.setDescription('An entry containing management information applicable to a spare hot disk.')
h3cRaidSpareDiskRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 5, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cRaidSpareDiskRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cRaidSpareDiskRowStatus.setDescription('This object describes the actions to create or delete spare hot disk.')
h3cFreezeRaidTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 6), )
if mibBuilder.loadTexts: h3cFreezeRaidTable.setStatus('current')
if mibBuilder.loadTexts: h3cFreezeRaidTable.setDescription('This table described the freezed raid management.')
h3cFreezeRaidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 6, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3cFreezeRaidUuid"))
if mibBuilder.loadTexts: h3cFreezeRaidEntry.setStatus('current')
if mibBuilder.loadTexts: h3cFreezeRaidEntry.setDescription('An entry containing management information applicable to manage the freezed raid.')
h3cFreezeRaidUuid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 6, 1, 1), H3cRaidIDType())
if mibBuilder.loadTexts: h3cFreezeRaidUuid.setStatus('current')
if mibBuilder.loadTexts: h3cFreezeRaidUuid.setDescription('This object describes the uuid of the freezed raid.')
h3cFreezeRaidName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 6, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cFreezeRaidName.setStatus('current')
if mibBuilder.loadTexts: h3cFreezeRaidName.setDescription('This object describes the name of the freezed raid.')
h3cFreezeRaidRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cFreezeRaidRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3cFreezeRaidRowStatus.setDescription("This object describes the actions to manage the freezed raid. Only the value 'destroy' is supported.")
h3c3rdRaidTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7), )
if mibBuilder.loadTexts: h3c3rdRaidTable.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidTable.setDescription('This table described the 3rd raid management.')
h3c3rdRaidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1), ).setIndexNames((0, "H3C-RAID-MIB", "h3c3rdRaidUuid"))
if mibBuilder.loadTexts: h3c3rdRaidEntry.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidEntry.setDescription('An entry containing management information applicable to manage the 3rd raid.')
h3c3rdRaidUuid = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1, 1), H3cRaidIDType())
if mibBuilder.loadTexts: h3c3rdRaidUuid.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidUuid.setDescription('This object describes the uuid of the 3rd raid.')
h3c3rdRaidName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3c3rdRaidName.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidName.setDescription('This object describes the name of the 3rd raid.')
h3c3rdRaidOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3c3rdRaidOwner.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidOwner.setDescription('This object describes the owner of the 3rd raid.')
h3c3rdRaidImport = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1, 4), H3cStorageOwnerType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3c3rdRaidImport.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidImport.setDescription("This object describes the owner for the 3rd raid. When read, the value 'none' will be returned. If the action is executed successfully, the 3rd raid will become a primary raid, and this entry will be deleted automatically.")
h3c3rdRaidRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 10, 4, 1, 7, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3c3rdRaidRowStatus.setStatus('current')
if mibBuilder.loadTexts: h3c3rdRaidRowStatus.setDescription("This object describes the actions to manage the 3rd raid. Only the value 'destroy' is supported.")
mibBuilder.exportSymbols("H3C-RAID-MIB", h3cRaidLvRestore=h3cRaidLvRestore, h3cFreezeRaidTable=h3cFreezeRaidTable, h3cRaidOwner=h3cRaidOwner, h3cRaid=h3cRaid, h3cRaidAutoSync=h3cRaidAutoSync, h3cRaidHideState=h3cRaidHideState, h3cRaidSpareDiskTable=h3cRaidSpareDiskTable, h3cRaidRunState=h3cRaidRunState, h3cRaidWriteCacheFlush=h3cRaidWriteCacheFlush, h3cRaidRowStatus=h3cRaidRowStatus, h3cRaidSpareDiskRowStatus=h3cRaidSpareDiskRowStatus, h3cRaidFreeSize=h3cRaidFreeSize, h3cRaidTable=h3cRaidTable, h3cRaidMibObjects=h3cRaidMibObjects, h3cRaidWriteCache=h3cRaidWriteCache, h3cRaidCapacityTable=h3cRaidCapacityTable, h3cRaidWriteCacheAverageRate=h3cRaidWriteCacheAverageRate, h3c3rdRaidImport=h3c3rdRaidImport, h3c3rdRaidTable=h3c3rdRaidTable, h3cRaidReadCache=h3cRaidReadCache, h3c3rdRaidOwner=h3c3rdRaidOwner, h3cRaidReadCacheAverageRate=h3cRaidReadCacheAverageRate, h3cRaidReadCachePhaseRate=h3cRaidReadCachePhaseRate, h3cRaidSpareDiskEntry=h3cRaidSpareDiskEntry, h3cRaidLocationState=h3cRaidLocationState, h3cRaidUuid=h3cRaidUuid, h3cRaidId=h3cRaidId, h3cPrimaryRaidCount=h3cPrimaryRaidCount, h3cRaidLevel=h3cRaidLevel, h3cFreezeRaidEntry=h3cFreezeRaidEntry, PYSNMP_MODULE_ID=h3cRaid, h3cRaidTimestamp=h3cRaidTimestamp, h3cRaidDiskList=h3cRaidDiskList, h3cRaidReadCacheHitPeriod=h3cRaidReadCacheHitPeriod, h3c3rdRaidName=h3c3rdRaidName, h3cRaidManageEntry=h3cRaidManageEntry, h3cFreezeRaidRowStatus=h3cFreezeRaidRowStatus, h3cRaidWriteCacheHitPeriod=h3cRaidWriteCacheHitPeriod, h3cRaidManageTable=h3cRaidManageTable, h3cRaidEntry=h3cRaidEntry, h3cRaidType=h3cRaidType, h3cRaidWriteCachePhaseRate=h3cRaidWriteCachePhaseRate, h3c3rdRaidUuid=h3c3rdRaidUuid, h3cRaidCacheTable=h3cRaidCacheTable, h3cRaidCacheEntry=h3cRaidCacheEntry, h3c3rdRaidEntry=h3c3rdRaidEntry, h3c3rdRaidRowStatus=h3c3rdRaidRowStatus, h3cRaidName=h3cRaidName, h3cFreezeRaidName=h3cFreezeRaidName, h3cFreezeRaidUuid=h3cFreezeRaidUuid, h3cRaidSyncPercentage=h3cRaidSyncPercentage, h3cRaidAction=h3cRaidAction, h3cRaidAutoRebuild=h3cRaidAutoRebuild, h3cRaidSize=h3cRaidSize)
| 134.977011 | 2,043 | 0.780508 |
acf509bdc83bf5481cf6ca3506ee0de4160eea99 | 6,292 | py | Python | python/opae.admin/opae/admin/tools/rsu.py | rchriste1/opae-sdk | a2b08cab139ebfb320a720bab98a6168748561a4 | [
"BSD-3-Clause"
] | null | null | null | python/opae.admin/opae/admin/tools/rsu.py | rchriste1/opae-sdk | a2b08cab139ebfb320a720bab98a6168748561a4 | [
"BSD-3-Clause"
] | null | null | null | python/opae.admin/opae/admin/tools/rsu.py | rchriste1/opae-sdk | a2b08cab139ebfb320a720bab98a6168748561a4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright(c) 2019-2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import argparse
import fcntl
import logging
import os
import re
import sys
from opae.admin.fpga import fpga
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path # noqa
RSU_LOCK_DIR = '/var/lib/opae'
RSU_LOCK_FILE = os.path.join(RSU_LOCK_DIR, 'rsu_lock')
USER_BOOT_PAGE = 'user'
FACTORY_BOOT_PAGE = 'factory'
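# Maps PCIe device ID -> image type -> flash page index. Note that the
# user/factory page assignment is swapped between the two device IDs below.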
BOOT_PAGE = {0x0b30: {'bmcimg': {'user': 0,
'factory': 1}},
0x0b2b: {'bmcimg': {'user': 1,
'factory': 0}}}
logger = logging.getLogger('rsu')
DESCRIPTION = '''
Perform RSU (remote system update) operation on PAC device
given its PCIe address.
An RSU operation sends an instruction to the device to trigger
a power cycle of the card only. This will force reconfiguration
from flash for the BMC image.
'''
EPILOG = '''
Example usage:
%(prog)s bmcimg 25:00.0
This will trigger a boot of the BMC image for a device with a pci address
of 25:00.0.
NOTE: Both BMC and FPGA images will be reconfigured from user bank.
%(prog)s bmcimg 25:00.0 -f
This will trigger a factory boot of the BMC image for a device with a
pci address of 25:00.0.
NOTE: The BMC image will be reconfigured from the factory bank and the
FPGA image will be reconfigured from the user bank.
'''
def parse_args():
fc_ = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG,
formatter_class=fc_)
parser.add_argument('type', help='type of operation',
choices=fpga.BOOT_TYPES)
parser.add_argument('bdf', nargs='?',
help=('PCIe address of device to do rsu '
'(e.g. 04:00.0 or 0000:04:00.0)'))
parser.add_argument('-f', '--factory', action='store_true',
help='reload from factory bank')
parser.add_argument('-d', '--debug', action='store_true',
help='log debug statements')
return parser.parse_args()
def normalize_bdf(bdf):
pat = r'[0-9a-fA-F]{4}:[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$'
if re.match(pat, bdf):
return bdf
if re.match(r'[0-9a-fA-F]{2}:[0-9a-fA-F]{2}\.[0-9a-fA-F]$', bdf):
return "0000:{}".format(bdf)
    logger.warning('invalid bdf: {}'.format(bdf))
raise SystemExit(os.EX_USAGE)
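# normalize_bdf() illustration (hypothetical addresses): '04:00.0' becomes
# '0000:04:00.0', a fully qualified '0000:04:00.0' is returned unchanged,
# and anything else logs a warning and exits with EX_USAGE.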
def do_rsu(rsu_type, device, factory):
dev_id = device.pci_node.pci_id
region = fpga.BOOT_PAGES[dev_id].get(rsu_type, {})
if not region:
logger.error('%s not supported by device', rsu_type)
raise SystemExit(os.EX_SOFTWARE)
device.safe_rsu_boot(factory, type=rsu_type)
def main():
args = parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level,
format='%(asctime)s - %(message)s')
compatible = fpga.enum([{'supports_rsu': True}])
if not compatible:
sys.stderr.write('No compatible devices found\n')
raise SystemExit(os.EX_USAGE)
if args.bdf is None:
if len(compatible) == 1:
args.bdf = compatible[0].pci_node.pci_address
elif len(compatible) > 1:
prog = os.path.basename(sys.argv[0])
sys.stderr.write(('Please specify PCIe address as '
'[<segment>:]<bus>:<device>.<function>\n'))
sys.stderr.write('Acceptable commands:\n')
for dev in compatible:
sys.stderr.write('>{} {} {}\n'.format(prog,
args.type,
dev.pci_node.bdf))
raise SystemExit(os.EX_USAGE)
bdf = normalize_bdf(args.bdf)
Path(RSU_LOCK_DIR).mkdir(parents=True, exist_ok=True)
for device in compatible:
if device.pci_node.pci_address == bdf:
exit_code = os.EX_IOERR
with open(RSU_LOCK_FILE, 'w') as flock:
fcntl.flock(flock.fileno(), fcntl.LOCK_EX)
try:
do_rsu(args.type, device, args.factory)
except IOError:
logging.error('RSU operation failed')
else:
exit_code = os.EX_OK
logging.info('RSU operation complete')
finally:
fcntl.flock(flock.fileno(), fcntl.LOCK_UN)
raise SystemExit(exit_code)
    logging.error(('PCIe address (%s) is invalid or does not identify a '
'compatible device'), args.bdf)
if __name__ == "__main__":
main()
| 37.230769 | 78 | 0.629847 |
acf50a5b4febf73521fd521fd9a6483a1e4e1c3b | 60,680 | py | Python | surfcity/ui/urwid.py | cn-uofbasel/SurfCity | fba5ca457741520e4e62486eb83c223050348bf1 | [
"MIT"
] | 16 | 2019-04-08T11:32:22.000Z | 2022-03-19T05:17:45.000Z | surfcity/ui/urwid.py | cn-uofbasel/SurfCity | fba5ca457741520e4e62486eb83c223050348bf1 | [
"MIT"
] | null | null | null | surfcity/ui/urwid.py | cn-uofbasel/SurfCity | fba5ca457741520e4e62486eb83c223050348bf1 | [
"MIT"
] | 2 | 2020-01-16T01:41:05.000Z | 2020-04-28T12:37:00.000Z | # ssb/surfcity/ui/urwid.py
import asyncio
from asyncio import get_event_loop, ensure_future
import base64
import copy
import hashlib
import json
import logging
import os
import random
import re
import sys
import time
import traceback
import urwid
app = None
import surfcity.app.net as net
import surfcity.app.db as db
logger = logging.getLogger('surfcity_ui_urwid')
ui_descr = " (urwid ui, v2019-04-06)"
the_loop = None # urwid loop
back_stack = []
urwid_counter = None
urwid_title = None
urwid_footer = None
urwid_frame = None
urwid_threadList = None
urwid_convoList = None
urwid_msgList = None
urwid_privMsgList = None
urwid_userList = None
screen_size = None
widgets4convoList = []
widgets4threadList = []
# refresh_requested = False
refresh_focus = None
refresh_focus_pos = 0
# new_friends_flag = False
show_extended_network = False
error_message = None
arrow_up = ['up', 'k']
arrow_down = ['down', 'j']
arrow_left = ['left', '<', 'h']
arrow_right = ['enter', 'right', '>', 'l']
arrow_pg_up = ['-']
arrow_pg_down = [' ']
key_quit = ['q', 'Q']
draft_text = None
draft_private_text = None
draft_private_recpts = []
vacuum_intervall = 60*60*24*7 # once a week
# ----------------------------------------------------------------------
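# The activate_* helpers below swap the body of the main frame between the
# public thread list, the private conversation list, the help screen and the
# user directory, restoring the previous focus position where possible.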
def activate_threadList(secr, clear_focus=False):
global urwid_frame, urwid_threadList, widgets4threadList
global refresh_focus, refresh_focus_pos, show_extended_network
wl = copy.copy(widgets4threadList)
urwid_threadList = urwid.AttrMap(ThreadListBox(secr, wl,
show_extended_network),
'fill')
if clear_focus:
refresh_focus = None
refresh_focus_pos = 0
else:
if len(wl) > 0:
i = 0
for w in wl:
if w.key == refresh_focus:
break
i += 1
if i >= len(wl):
i = 0 # refresh_focus_pos
urwid_threadList._original_widget.set_focus(i)
urwid_frame.contents['body'] = (urwid_threadList, None)
output_log("")
def activate_convoList(secr, clearFocus = False):
global urwid_frame, urwid_convoList, widgets4convoList
global refresh_focus, refresh_focus_pos
wl = copy.copy(widgets4convoList)
urwid_convoList = urwid.AttrMap(PrivateConvoListBox(secr, wl), 'fill')
if clearFocus:
refresh_focus = None
refresh_focus_pos = 0
else:
if len(wl) > 0:
i = 0
for w in wl:
if w.key == refresh_focus:
break
i += 1
if i >= len(wl):
i = refresh_focus_pos
urwid_convoList._original_widget.set_focus(i)
urwid_frame.contents['body'] = (urwid_convoList, None)
output_log("")
def activate_help(old_focus = None):
global urwid_helpList
urwid_helpList = urwid.AttrMap(HelpListBox(old_focus), 'fill')
urwid_frame.contents['body'] = (urwid_helpList, None)
output_log("")
back_stack.append(old_focus)
def activate_user(old_focus = None):
global urwid_userList
urwid_userList = urwid.AttrMap(UserListBox(old_focus), 'fill')
urwid_frame.contents['body'] = (urwid_userList, None)
output_log("")
back_stack.append(old_focus)
# ----------------------------------------------------------------------
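# construct_threadList()/construct_convoList() build the widget lists for the
# public and private views; main() connects to the pub, scans the logs and
# periodically rebuilds whichever list is currently on screen.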
async def construct_threadList(secr, args,
cache_only=False, extended_network=False):
# public threads
widgets = []
lst = app.mk_thread_list(secr, args, cache_only = cache_only,
extended_network = extended_network)
blocked = app.the_db.get_following(secr.id, 2)
odd = True
logger.info(str(lst))
for t in lst:
logger.info(f"thread {t}")
msgs, txt, _ = await app.expand_thread(secr, t, args, cache_only, blocked)
logger.info(str(msgs))
logger.info(str(txt))
widgets.append(ThreadEntry(t, msgs, txt, 'odd' if odd else 'even'))
odd = not odd
return widgets
async def construct_convoList(secr, args, cache_only=False):
# private conversations
widgets = []
convos = await app.mk_convo_list(secr, args, cache_only)
odd = True
for c in convos:
msgs, txt, new_count = await app.expand_convo(secr, c, args, cache_only)
# 'oddBold' if odd else 'evenBold')
#logger.info('convolist')
#for m in msgs:
# logger.info(m)
widgets.append(ConvoEntry(c, msgs, txt, new_count,
'odd' if odd else 'even'))
odd = not odd
return widgets
async def main(secr, args):
global widgets4threadList, widgets4convoList, error_message
global draft_text, draft_private_text, draft_private_recpts
# global refresh_requested #, new_friends_flag
draft_text = app.the_db.get_config('draft_post')
priv = app.the_db.get_config('draft_private_post')
if priv != None:
try:
priv = json.loads(priv)
draft_private_text, draft_private_recpts = priv
except:
pass
try:
last_vacuum = app.the_db.get_config('last_vacuum')
now = int(time.time())
if not last_vacuum or int(last_vacuum) + vacuum_intervall < now:
logger.info("removing old posts and compacting database")
app.the_db.forget_posts(app.frontier_window);
app.the_db.set_config('last_vacuum', now)
logger.info("database vacuumed")
except Exception as e:
logger.info(f"*** {str(e)}")
logger.info(traceback.format_exc())
try:
host = args.pub.split(':')
port = 8008 if len(host) < 2 else int(host[1])
pubID = secr.id if len(host) < 3 else host[2]
host = host[0]
'''
async def watchdog(host, port, pubID, keypair):
await asyncio.sleep(20)
output_log("disconnect")
logger.info("disconnect")
try:
net.disconnect()
cor = await net.connect(host, port, pubID, keypair)
output_log("connected 2 ...")
ensure_future(cor)
except Exception as e:
s = traceback.format_exc()
output_log(s)
logger.info("watchdog %s", s)
ensure_future(watchdog(host, port, pubID, secr.keypair))
'''
if not args.offline:
send_queue = asyncio.Queue(loop=asyncio.get_event_loop())
net.init(secr.id, send_queue)
try:
api = await net.connect(host, port, pubID, secr.keypair)
output_log("connected, scanning will start soon ...")
except OSError as e:
error_message = str(e) # traceback.format_exc()
logger.exception("exc while connecting")
# print(e)
raise urwid.ExitMainLoop()
return
except Exception as e:
error_message = str(e) # traceback.format_exc()
# urwid.ExitMainLoop()
logger.exception("exc while connecting")
return # raise e
output_log("connected, scanning will start soon ...")
ensure_future(api)
await app.scan_my_log(secr, args, output_log, output_counter)
if not args.noextend:
await app.process_new_friends(secr, output_log, output_counter)
widgets4threadList = await construct_threadList(secr, args,
cache_only=True)
activate_threadList(secr)
widgets4convoList = await construct_convoList(secr, args)
while True:
if not args.offline:
logger.info(f"surfcity {str(time.ctime())} before wavefront")
await app.scan_wavefront(secr.id, secr, args,
output_log, output_counter)
logger.info(f"surfcity {str(time.ctime())} after wavefront")
if app.refresh_requested:
if urwid_frame.contents['body'][0] == urwid_threadList:
output_log("Preparing public content list...")
widgets4threadList = await construct_threadList(secr, args,
extended_network = show_extended_network)
activate_threadList(secr)
widgets4convoList = await construct_convoList(secr, args)
elif urwid_frame.contents['body'][0] == urwid_convoList:
output_log("Preparing private content list...")
widgets4convoList = await construct_convoList(secr, args)
activate_convoList(secr)
widgets4threadList = await construct_threadList(secr, args,
extended_network = show_extended_network)
app.refresh_requested = False
app.counter_reset(output_counter)
else:
# construct the *other* list (that is not being displayed)
if urwid_frame.contents['body'][0] == urwid_threadList:
widgets4convoList = await construct_convoList(secr, args)
elif urwid_frame.contents['body'][0] == urwid_convoList:
widgets4threadList = await construct_threadList(secr, args)
if app.new_friends_flag:
await app.process_new_friends(secr, output_log, output_counter)
app.new_friends_flag = False
if not args.offline:
if (urwid_frame.contents['body'][0] == urwid_threadList or \
urwid_frame.contents['body'][0] == urwid_convoList) and \
app.new_back+app.new_forw > 0:
output_log("Type '!' to refresh screen")
else:
output_log("")
logger.info("%s", f"surfcity {str(time.ctime())} before sleeping")
for i in range(50):
await asyncio.sleep(0.1)
if app.refresh_requested:
break
# if not args.offline:
# fu.cancel()
except Exception as e:
logger.info("exception in main()")
if not error_message:
# s = '\n'.join(traceback.format_exc(-1).split('\n')[1:-2])
s = traceback.format_exc()
logger.error(s)
print(s)
output_log(f"Exception: {str(e)}\n{s}\n\nuse CTRL-C to terminate")
error_message = str(e)
else:
pass
# logger.error(error_message)
# print('x' + error_message)
# output_log(error_message)
raise urwid.ExitMainLoop()
# ----------------------------------------------------------------------
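# Status callbacks handed to the app core (output_log, output_counter) plus
# the fallback keyboard/mouse handler and the ESC popup menu trigger.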
def output_log(txt='', end=None, flush=None):
# print(txt)
if len(txt) > 0 and txt[0] == '\r':
txt = txt[1:]
if len(txt) > 0 and txt[-1] == '\r':
txt = txt[:-1]
urwid_footer.set_text(txt)
pass
def start_menu():
a = 'Q'
b = urwid_frame.contents['body'][0]
if b == urwid_threadList: a = '!CU?Q'
elif b == urwid_convoList: a = '!CU?Q'
elif b == urwid_msgList: a = 'RC?Q'
elif b == urwid_privMsgList: a = 'RC?Q'
elif b == urwid_userList: a = '?Q'
Menu(a).open()
def on_unhandled_input(ev):
screen_size = urwid.raw_display.Screen().get_cols_rows()
if ev == 'esc':
return start_menu()
if type(ev) == tuple and ev[0] == 'mouse press':
if type(the_loop.widget) is Menu:
the_loop.widget.close()
return
if ev[3] < 3:
if screen_size[0] - ev[2] < 16:
start_menu()
else:
if len(back_stack) > 0:
set_frame(back_stack[-1])
elif urwid_frame.contents['body'][0] in [urwid_threadList,
urwid_convoList]:
urwid_frame.keypress(screen_size, 'p')
# else: output_log('?? empty back_stack ??')
elif ev[3] == screen_size[1] - 1 and screen_size[0] - ev[2] < 16:
if type(urwid_frame.contents['body'][0]) is HelpListBox:
activate_help(urwid_frame.contents['body'][0])
return
output_log(f"unhandled event: {str(ev)}")
def output_counter():
urwid_counter.set_text(f" FWD={app.new_forw} BWD={app.new_back} ")
# ----------------------------------------------------------------------
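# Shared scrolling helpers: mouse wheel buttons 4/5 are translated to up/down
# keypresses, and smooth_scroll moves the list focus one entry at a time while
# keeping the focused entry visible.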
def mouse_scroll(obj, size, button):
if button == 4:
return obj.keypress(size, 'up')
if button == 5:
return obj.keypress(size, 'down')
return False
def smooth_scroll(obj, size, key):
lw = obj.body
if key in arrow_up:
pos = lw.get_focus()[1]
try:
p = lw.get_prev(pos)[1]
lw.set_focus(p)
obj.shift_focus(size, 5)
except:
pass
return True
if key in arrow_down:
pos = lw.get_focus()[1]
n = lw.get_next(pos)[1]
if n:
lw.set_focus(n)
obj.shift_focus(size, size[1]-10)
return True
return False
def list_mouse_event(obj, size, event, button, col, row, focus):
if mouse_scroll(obj, size, button) == True:
return True
if event == 'mouse press':
obj._mouse_down = (col, row)
return True
if event != 'mouse release' or (col,row) != obj._mouse_down:
obj._mouse_down = (-1,-1)
return True
(maxcol, maxrow) = size
middle, top, bottom = obj.calculate_visible((maxcol, maxrow),
focus=True)
if middle is None:
return False
_ignore, focus_widget, focus_pos, focus_rows, cursor = middle
trim_top, fill_above = top
_ignore, fill_below = bottom
fill_above.reverse() # fill_above is in bottom-up order
w_list = ( fill_above +
[ (focus_widget, focus_pos, focus_rows) ] +
fill_below )
wrow = -trim_top
for w, w_pos, w_rows in w_list:
if wrow + w_rows > row:
break
wrow += w_rows
else:
return False
obj.set_focus(w_pos)
obj.keypress(size, 'enter')
return True
# ----------------------------------------------------------------------
help = [
'''Welcome to SurfCity
Below you find a table with the keyboard bindings followed
by a description of SurfCity's philosophy and an explanation
of the command line options.
You can leave this screen by typing '<' or left-arrow.
Enjoy!
Santa Cruz, Feb 2019
ssb: @AiBJDta+4boyh2USNGwIagH/wKjeruTcDX2Aj1r/haM=.ed25519
email: <christian.tschudin@unibas.ch>''',
'''Keyboard bindings:
? this help screen
q quit
ESC open menu / exit a popup window
! refresh Private or Public screen
p toggle between Private and Public screen
e toggle extended network (when in Public screen)
u simple user directory
c compose new message
r reply in a thread
>, l, rght-arrow, ret enter detail page
<, h, left-arrow leave detail page
down/up-arrow, j/k move upwards/downwards in the list
page-down, page-up scroll through the list''',
'''Mouse support:
The mouse can be used to scroll up and down in the list and text
panels. In the list of public threads, as well as the list of private
conversations, entries can be clicked on in order to expand them.
Clicking in the UPPER LEFT corner is equivalent to 'back', or toggles
between public threads and private conversations.
Clicking in the UPPER RIGHT corner opens a menu with clickable
options to trigger a refresh, do a reply, etc. without having to
type any key.''',
'''About SurfCity
Secure Scuttlebutt (SSB) brings to you a deluge of information,
all appended to the message logs of the respective authors:
SurfCity is the tool to ride this wavefront.
It does so (a) in forward as well as (b) in backward direction
and (c) widens its scan range dynamically, but WITHOUT having
to store all the participants' huge log files.
Typically, the storage footprint of SurfCity is in the range of tens
of MBytes, while a full SSB client easily requires several hundreds of
MegaBytes of disk storage. Also, when booting freshly into SurfCity,
you will immediately have messages to display: no need to wait for
long download times and indexing pauses. In that sense SurfCity is
sustainable, riding the wave with roughly constant storage space - at
least if YOU behave sustainably, e.g. block or un-follow peers if the
list becomes too large ;-)
What does "riding the wavefront" mean?
a) By this we mean that SurfCity's most important task is to
scan the Scuttleverse for new content in the forward direction.
SurfCity will process these fresh messages and store them for a
few weeks only. It will also take note of a discussion thread's
first post and keep this information around for a few months
so it can later display the thread's "title". Finally, SurfCity
keeps track of the SSB crypto peer identifiers and the human-
readable names that have been assigned to them.
b) SurfCity is also able to scan content in backwards direction.
From these "historic" messages, SurfCity collects essential
information e.g. the name that a peer has assigned to him/herself,
or the other peers that a peer follows or blocks. Eventually this
background scan bottoms out when the logs of all followed peers
have been scanned entirely.
c) Finally, the breadth of the wavefront is enlarged as SurfCity
learns about whom you are following. In this case, these peers
are added to your "following list" and are also scanned. This is
part of the SSB concept that messages sent by a peer are only
accessible in that peer's log, hence the need to scan it. The
width of the wavefront is even larger than this, as the followed
peers of a followed peer (FOAF, "friends of a friend") are also
scanned. SurfCity scans these FOAF peers less frequently by
randomly picking some of them, in each round. But it's all
fine because SSB is based on eventual consistency, and random
selection will eventually lead SurfCity to visit every peer
within the wavefront's current breadth.
Prototype and Future Work
Beware, this is experimental software! Its main purpose is to
validate the concept of wavefront riding for SSB and to prepare
the ground for a SSB browser that can run on a smartphone but
does not come with a huge storage requirement.
''',
'''Explanation of command-line options
-offline prevents SurfCity from doing any scans, but also
from downloading any message content. This means
that only cached messages can be displayed (at
most a few weeks old) and that threads look
less complete when activating this option.
-narrow      prevents SurfCity from scanning the logs of FOAF
(friends of a friend): Only peers that you
decided to follow will be considered in the
scans. This only affects scanning and is not
a censoring option: All content that SurfCity
already collected will be used and displayed.
-nocatchup "do not scan backwards": prevents SurfCity from
scanning historic messages.
-noextend "do not scan forwards": prevents SurfCity from
probing for new messages that extend the peers'
logs.
This user interface supports five different color modes:
-ui urwid dark mode (default)
-ui urwid_light light mode
-ui urwid_amber monochrome, using a warm amber color on black
-ui urwid_green monochrome, using classic green on black
-ui urwid_mono monochrome, using the terminal's default colors'''
]
class HelpListBox(urwid.ListBox):
_selectable = True
def __init__(self, goback, lst=[]):
self.goback = goback
self.title = "Help Text\n"
urwid_title.set_text(self.title)
lst = [urwid.Text('v--- H E L P ---v', 'center')]
for h in help:
t = urwid.AttrMap(urwid.Text(h), 'even')
p = urwid.Pile([urwid.Text(''), t, urwid.Text('')])
lst.append(urwid.Padding(p, left=2, right=2))
lst.append(urwid.Text('^--- H E L P ---^','center'))
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if not key in arrow_left:
return key
set_frame(self.goback)
def mouse_event(self, size, event, button, x, y, focus):
mouse_scroll(self, size, button)
return True
# ----------------------------------------------------------------------
class UserListBox(urwid.ListBox):
_selectable = True
def _user2line(self, feedID, isFriend = False):
prog = '0'
front,_ = app.the_db.get_id_front(feedID)
if front > 0:
low = app.the_db.get_id_low(feedID)
if low > 0:
prog = str((front - low + 1)*100 // front)
prog = f" {prog}%"[-4:]
n = app.feed2name(feedID)
if not n:
n = '?'
fr = '* ' if isFriend else ' '
return f"{fr}{(n+10*' ')[:10]} {feedID} {prog}"
def _lines2widget(self, lns):
t = urwid.AttrMap(urwid.Text('\n'.join(lns)), 'even')
p = urwid.Pile([urwid.Text(''), t, urwid.Text('')])
return urwid.Padding(p, left=2, right=2)
def __init__(self, goback, lst=[]):
self.goback = goback
self.title = "User Directory\n"
urwid_title.set_text(self.title)
lst = [urwid.Text('v--- U S E R S ---v', 'center')]
me = app.the_db.get_config('id')
lst.append(self._lines2widget([f"My feedID:\n\n{self._user2line(me)}\n"]))
pubs = app.the_db.list_pubs()
frnd = app.the_db.get_friends(me)
fol = app.the_db.get_following(me)
t = []
for f in fol:
if f in pubs:
t.append(self._user2line(f, f in frnd))
t.sort(key=lambda x:x[2:].lower())
t = [f"Accredited pubs: {len(pubs)}\n"] + t
if len(t) > 1:
t.append('')
lst.append(self._lines2widget(t))
fol = app.the_db.get_following(me)
t1, t2 = [], []
for f in fol:
if f in pubs:
continue
ln = self._user2line(f, f in frnd)
if ln[2:12] == '? ':
t2.append(ln)
else:
t1.append(ln)
t1.sort(key=lambda x:x[2:].lower())
t2.sort(key=lambda x:x[2:].lower())
t = [f"Followed feeds (* =friend/following back): {len(fol)-len(pubs)}\n"] + t1 + t2
if len(t) > 1:
t.append('')
lst.append(self._lines2widget(t))
folr = app.the_db.get_followers(me)
t = []
for f in folr:
if f in frnd:
continue
t.append(self._user2line(f))
t.sort(key=lambda x:x[2:].lower())
t = [f"Follower feeds (other than friends): {len(t)}\n"] + t
if len(t) > 1:
t.append('')
lst.append(self._lines2widget(t))
blk = app.the_db.get_following(me, 2)
t = []
for f in blk:
t.append(self._user2line(f))
t.sort(key=lambda x:x.lower())
if len(t) > 0:
t.append('')
t = [f"Blocked feeds: {len(blk)}\n"] + t
lst.append(self._lines2widget(t))
ffol = app.the_db.get_follofollowing(me)
t = []
for f in ffol:
if f in fol:
continue
t.append(self._user2line(f))
t.sort(key=lambda x: '~~~~~'+x[2:].lower() if x[2:3]=='?' else x[2:].lower())
if len(t) > 0:
t.append('')
t = [f"Number of feeds followed by the feeds I follow: {len(ffol)}\n"] + t
lst.append(self._lines2widget(t))
lst.append(urwid.Text('^--- U S E R S ---^', 'center'))
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if key in ['?']:
return activate_help(urwid_userList)
if not key in arrow_left:
return key
set_frame(self.goback)
def mouse_event(self, size, event, button, x, y, focus):
mouse_scroll(self, size, button)
return True
# ----------------------------------------------------------------------
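# Private side of the UI: ConvoEntry renders one conversation summary,
# PrivateConvoListBox lists all conversations, and PrivateMessageBox shows a
# single conversation and lets the user compose a reply.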
class ConvoEntry(urwid.AttrMap):
_selectable = True
def __init__(self, convo, msgs, txt, new_count, attr=None):
self.convo = convo
self.msgs = msgs
self.key = self.convo # for the jump_to_last_entry_after_refresh logic
self.star = urwid.Text('*' if new_count > 0 else ' ')
self.count = urwid.Text(('selected', f"({new_count} new)" \
if new_count > 0 else ""), 'right')
self.title = txt[0][1]
m = " (1 msg)" if len(msgs) == 1 else f" ({len(msgs)} msgs)"
lines = [ urwid.Columns([urwid.Text((attr+'Bold', f"{self.title[:75]}"), wrap='clip'),
('pack', urwid.Text(m))]) ]
for ln in txt[1:]:
lines.append(urwid.Columns([
(12, urwid.Text(ln[1][:10]+' ', 'left', wrap='clip')),
urwid.Text(ln[2], 'left', wrap='clip'),
(16, urwid.Text(' '+ln[0]+' ','right', wrap='clip'))
]))
lines.append(self.count)
pile = urwid.AttrMap(urwid.Pile(lines), attr)
cols = urwid.Columns([(2,self.star),pile])
super().__init__(cols, None, focus_map='selectedPrivate')
class PrivateConvoListBox(urwid.ListBox):
# the list of private conversations
_selectable = True
_mouse_down = (-1,-1)
def __init__(self, secr, lst=[]):
self.secr = secr
self.title = "PRIVATE conversations"
urwid_title.set_text(self.title)
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
global urwid_privMsgList
global refresh_focus, refresh_focus_pos # refresh_requested,
if smooth_scroll(self, size, key):
return
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if key in ['!']:
app.refresh_requested = True
if self.focus:
refresh_focus = self.focus.key
refresh_focus_pos = self.get_focus()[1]
else:
refresh_focus = None
refresh_focus_pos = 0
return
        if key in ['p', 'P']:
return activate_threadList(self.secr, True)
if key in ['?']:
return activate_help(urwid_convoList)
if key in ['u', 'U']:
return activate_user(urwid_convoList)
if key in ['c']:
r = RecptsDialog()
e = EditDialog('Compose new PRIVATE message', is_private=True)
c = ConfirmTextDialog(True)
r.open(draft_private_recpts, lambda recpts:
e.open(draft_private_text, lambda x: c.open(x, recpts,
lambda : e.reopen(),
lambda y: app.submit_private_post(self.secr,y,recpts))
)
)
return
if not key in ['enter', '>', 'right']:
return key
self.focus.star.set_text('')
self.focus.count.set_text('')
back_stack.append(urwid_convoList)
for t in self.focus.convo['threads']:
app.the_db.update_thread_lastread(t)
lst = [urwid.Text('---oldest---', 'center')]
root, branch = (None, None) # we only want the last one
for m in self.focus.msgs:
branch = m['key']
root = m['content']['root'] if 'root' in m['content'] else branch
a = m['author']
n = app.feed2name(m['author'])
if not n:
n = m['author']
n = urwid.Columns([urwid.Text(n),
(13, urwid.Text(app.utc2txt(m['timestamp'])))])
t = m['content']['text']
t = re.sub(r'\[([^\]]*)\]\([^\)]*\)', r'[\1]', t)
t = urwid.AttrMap(urwid.Text(t), 'even')
r = urwid.Text(m['key'], 'right')
p = urwid.Pile([urwid.Text(''), n, t, r, urwid.Text('')])
lst.append(urwid.Padding(p, left=2, right=2))
lst.append(urwid.Text('---newest---', 'center'))
# nms = []
# for r in self.focus.convo['recps']:
# n = app.feed2name(r)
# if not n:
# n = r[:10]
# nms.append(n)
## if len(nms) == 0:
## nms = [app.feed2name(secr.id)]
# title = f"Private conversation with <{', '.join(nms)[:50]}>:"
title = "Private conversation\n" + f"with {self.focus.title[:50]}"
urwid_privMsgList = urwid.AttrMap(PrivateMessageBox(self.secr,
urwid_convoList,
self.focus.convo['recps'],
title, lst, root, branch),
'fill')
urwid_privMsgList._original_widget.set_focus(len(lst)-1)
urwid_frame.contents['body'] = (urwid_privMsgList, None)
def mouse_event(self, size, event, button, col, row, focus):
return list_mouse_event(self, size, event, button, col, row, focus)
class PrivateMessageBox(urwid.ListBox):
# a single private convo message
_selectable = True
def __init__(self, secr, goback, recpts, title,
lst=[], root=None, branch=None):
self.secr = secr
self.recpts = recpts
self.goback = goback
self.title = title
urwid_title.set_text(title)
self.root = root
self.branch = branch
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
global screen_size, draft_private_text
screen_size = (size[0], size[1]+3)
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if key in ['?']:
return activate_help(urwid_privMsgList)
if key in ['c']:
r = RecptsDialog()
e = EditDialog('Compose new PRIVATE message', is_private=True)
c = ConfirmTextDialog(True)
r.open(draft_private_recpts, lambda recpts:
e.open(draft_private_text, lambda txt: c.open(txt, recpts,
lambda : e.reopen(),
lambda y: app.submit_private_post(self.secr,y,recpts))
)
)
return
if key in ['r']:
dest = self.title
e = EditDialog(f"Compose PRIVATE reply to {dest[dest.index('<'):]}",
is_private=True)
c = ConfirmTextDialog(True)
e.open(draft_private_text, lambda txt: c.open(txt, self.recpts,
lambda : e.reopen(),
lambda y: app.submit_private_post(self.secr,y,
self.root,
self.branch))
)
return
if not key in arrow_left:
return key
set_frame(self.goback)
def mouse_event(self, size, event, button, col, row, focus):
mouse_scroll(self, size, button)
return True
# ----------------------------------------------------------------------
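# Public side of the UI: ThreadEntry renders one thread summary, ThreadListBox
# lists the threads, and MessageBox shows the messages of a selected thread.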
class ThreadEntry(urwid.AttrMap):
_selectable = True
def __init__(self, key, msgs, txt, attr=None):
self.key = key
self.msgs = msgs
new_count = txt[0][0]
self.star = urwid.Text('*' if new_count > 0 else ' ')
self.count = urwid.Text(('selected', f"({new_count} new)" \
if new_count > 0 else ""), 'right')
# lines = [ urwid.Text((attr+'Bold',f"'{txt[0][1][:75]}'")) ]
lines = [ urwid.Text((attr+'Bold',f"'{txt[0][1][:75]}'"), wrap='clip') ]
for ln in txt[1:]:
lines.append(urwid.Columns([
(12, urwid.Text(ln[1][:10]+' ', 'left', wrap='clip')),
urwid.Text(ln[2], 'left', wrap='clip'),
(16, urwid.Text(' '+ln[0]+' ','right', wrap='clip'))
]))
lines.append(self.count)
pile = urwid.AttrMap(urwid.Pile(lines), attr)
cols = urwid.Columns([(2,self.star),pile])
super().__init__(cols, None, focus_map='selected')
# def mouse_event(self, size, event, button, x, y, focus):
# output_log('hello 1')
# return True
class MessageBox(urwid.ListBox):
# public thread's messages
_selectable = True
def __init__(self, secr, goback, title, lst=[], root=None, branch=None):
self.secr = secr
self.goback = goback
self.title = title
urwid_title.set_text(title[:screen_size[0]-1])
self.root = root
self.branch = branch
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if key in ['?']:
return activate_help(urwid_msgList)
if key in ['c', 'r']:
if key == 'c':
e = EditDialog(f"Compose PUBLIC message in new thread")
root, branch = (None, None)
else:
e = EditDialog("Compose PUBLIC reply in chat\n" + \
f"{self.title[8:50]}'")
root, branch = (self.root, self.branch)
c = ConfirmTextDialog(False)
e.open(draft_text, lambda txt: c.open(txt, None,
lambda : e.reopen(),
lambda y: app.submit_public_post(self.secr, y,
root, branch))
)
return
if not key in arrow_left:
return key
set_frame(self.goback)
def mouse_event(self, size, event, button, col, row, focus):
mouse_scroll(self, size, button)
return True
def set_frame(goback):
global back_stack
back_stack.pop()
urwid_frame.contents['body'] = (goback, None)
# urwid_title.set_text(goback.title)
urwid_title.set_text(goback._original_widget.title)
class ThreadListBox(urwid.ListBox):
# list of public threads
_selectable = True
_mouse_down = (-1,-1)
def __init__(self, secr, lst=[], show_extended_network=False):
self.secr = secr
self.title = "PUBLIC chats (extended network)" \
if show_extended_network else \
"PUBLIC chats (with or from people I follow)"
urwid_title.set_text(self.title)
sflw = urwid.SimpleFocusListWalker(lst)
sflw.title = self.title
super().__init__(sflw)
def keypress(self, size, key):
global urwid_back, urwid_msgList, show_extended_network
global refresh_focus, refresh_focus_pos # refresh_requested,
global screen_size
screen_size = (size[0], size[1]+3)
if smooth_scroll(self, size, key):
return
key = super().keypress(size, key)
if key in key_quit:
raise urwid.ExitMainLoop()
if key in arrow_pg_down:
return self.keypress(size, 'page down')
if key in arrow_pg_up:
return self.keypress(size, 'page up')
if key in ['e', 'E']:
show_extended_network = not show_extended_network
# output_log(f"show_extended_network now= {show_extended_network}")
key = '!'
if key in ['!']:
app.refresh_requested = True
if self.focus:
refresh_focus = self.focus.key
refresh_focus_pos = self.get_focus()[1]
else:
refresh_focus = None
refresh_focus_pos = 0
return
if key in ['?']:
return activate_help(urwid_threadList)
if key in ['u', 'U']:
return activate_user(urwid_threadList)
if key in ['p', 'P']:
return activate_convoList(self.secr, True)
if key in ['c']:
e = EditDialog(f"Compose PUBLIC message in new thread")
c = ConfirmTextDialog(False)
e.open(draft_text, lambda txt: c.open(txt, None,
lambda : e.reopen(),
lambda y: app.submit_public_post(self.secr, y))
)
return
if not key in arrow_right:
return key
self.focus.star.set_text('')
self.focus.count.set_text('')
back_stack.append(urwid_threadList)
app.the_db.update_thread_lastread(self.focus.key)
lst = [urwid.Text('---oldest---', 'center')]
if len(self.focus.msgs) > 0 and 'root' in self.focus.msgs[0]['content']:
lst.append(urwid.Text('[some older messages out of reach]', 'center'))
root, branch = (None, None) # we only want the last one
for m in self.focus.msgs:
branch = m['key']
root = m['content']['root'] if 'root' in m['content'] else branch
a = m['author']
n = app.feed2name(m['author'])
if not n:
n = m['author']
n = urwid.Columns([urwid.Text(n),
(13, urwid.Text(app.utc2txt(m['timestamp'])))])
t = m['content']['text']
t = re.sub(r'\[([^\]]*)\]\([^\)]*\)', r'[\1]', t)
t = urwid.AttrMap(urwid.Text(t), 'even')
r = urwid.Text(m['key'], 'right')
p = urwid.Pile([urwid.Text(''),n,t,r,urwid.Text('')])
lst.append(urwid.Padding(p, left=2, right=2))
lst.append(urwid.Text('---newest---', 'center'))
title = app.the_db.get_thread_title(self.focus.key)
if title:
title = "Public:\n" + f"'{app.text2synopsis(title)}'"
else:
title = "Public:\n<unknown first post>"
urwid_msgList = urwid.AttrMap(MessageBox(self.secr,
urwid_threadList, title, lst,
root, branch),
'fill')
urwid_msgList._original_widget.set_focus(len(lst)-1)
urwid_frame.contents['body'] = (urwid_msgList, None)
def mouse_event(self, size, event, button, col, row, focus):
return list_mouse_event(self, size, event, button, col, row, focus)
def save_draft(txt, recpts):
global draft_text, draft_private_text, draft_private_recpts
if recpts != None:
draft_private_text = txt
draft_private_recpts = recpts
app.the_db.set_config('draft_private_post', json.dumps((txt, recpts)))
else:
draft_text = txt
app.the_db.set_config('draft_post', txt)
# ----------------------------------------------------------------------
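# Compose flow: EditDialog collects the message text, ConfirmTextDialog shows
# a preview (with cypherlinks shortened) before handing the text to the app's
# submit_* functions; drafts are persisted across sessions via save_draft().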
class EditDialog(urwid.Overlay):
def __init__(self, bannerTxt, is_private=False):
self.is_private = is_private
header = urwid.Text( bannerTxt + '\n(use TAB to select buttons)',
align = 'center')
self.edit = urwid.Edit(multiline=True)
body_filler = urwid.Filler(self.edit, valign = 'top')
body_padding = urwid.Padding(
body_filler,
left = 1,
right = 1
)
body = urwid.LineBox(body_padding)
w = the_loop.widget
footer1 = urwid.AttrMap(urwid.Button('Cancel ',
lambda x:self.close()),
None, focus_map = 'selected')
footer2 = urwid.AttrMap(urwid.Button('Preview',
lambda x: self._callback()),
None, focus_map = 'selected')
footer = urwid.GridFlow([footer1,footer2], 11, 1, 1, 'center')
lb = urwid.LineBox(urwid.Frame(body, header = header, footer = footer))
super().__init__(urwid.AttrMap(lb, 'fill'), w,
align = 'center', valign = 'middle',
width = screen_size[0]-2,
height = screen_size[1]-2)
def keypress(self, size, key):
# if key in key_quit:
# raise urwid.ExitMainLoop()
if key in ['esc']:
self.close()
if key in ['tab', 'shift tab']:
paths = [[1, 'body'], [1, 'footer', 0], [1, 'footer', 1]]
fp = self.get_focus_path()
if key == 'tab':
i = (paths.index(fp) + 1) % len(paths)
else:
i = (paths.index(fp) + len(paths) - 1) % len(paths)
self.set_focus_path(paths[i])
return
key = super().keypress(size, key)
def open(self, txt, ok_callback):
if txt:
self.edit.set_edit_text(txt)
self.edit.set_edit_pos(len(txt))
elif self.is_private and draft_private_recpts:
if not draft_private_text or draft_private_text == '':
t = ', '.join(draft_private_recpts) + '\n\n'
self.edit.set_edit_text(t)
self.edit.set_edit_pos(len(t))
self.callback = ok_callback
self.set_focus_path([1, 'body'])
the_loop.widget = self
def reopen(self):
self.set_focus_path([1, 'body'])
the_loop.widget = self
def _callback(self):
self.close()
self.callback(self.edit.get_edit_text())
def close(self):
recpts = None
if self.is_private:
global draft_private_recpts
recpts = draft_private_recpts
save_draft(self.edit.get_edit_text(), recpts)
the_loop.widget = urwid_frame
the_loop.draw_screen()
class ConfirmTextDialog(urwid.Overlay):
def __init__(self, is_private=False):
self.is_private = is_private
txt = 'PRIVATE' if is_private else 'PUBLIC'
header = urwid.Text(('selected',
f" Really post this {txt} message? " + \
"\n(use up/down arrows to scroll, " + \
"TAB to select buttons)"),
align = 'center')
self.body_text = urwid.Text('', align = 'left')
self.recpts_text = urwid.Text('', align = 'left')
if self.is_private:
lst = [ urwid.AttrMap(self.recpts_text, 'selected'),
urwid.Divider() ]
else:
lst = []
lst.append( urwid.AttrMap(self.body_text, 'even') )
body_filler = urwid.ListBox(urwid.SimpleFocusListWalker(lst))
body_padding = urwid.Padding(body_filler, left = 1, right = 1)
body = urwid.LineBox(body_padding)
w = the_loop.widget
footer1 = urwid.AttrMap(urwid.Button(' back ',
lambda x:self._back_callback()),
None, focus_map = 'selected')
footer2 = urwid.AttrMap(urwid.Button('cancel',
lambda x:self.close()),
None, focus_map = 'selected')
footer3 = urwid.AttrMap(urwid.Button(' send!',
lambda x:self._send_callback()),
None, focus_map = 'selected')
footer = urwid.GridFlow([footer1,footer2,footer3], 10, 1, 1, 'center')
lb = urwid.LineBox(urwid.Frame(body, header = header, footer = footer))
super().__init__(urwid.AttrMap(lb, 'fill'), w,
align = 'center', valign = 'middle',
width = screen_size[0]-2,
height = screen_size[1]-2)
def keypress(self, size, key):
if key in ['esc']:
self.close()
if key in ['tab', 'shift tab']:
paths = [[1, 'body', 0], [1, 'footer', 0],
[1, 'footer', 1], [1, 'footer', 2]]
fp = self.get_focus_path()
if fp[1] == 'body':
fp = [1, 'body', 0]
if key == 'tab':
i = (paths.index(fp) + 1) % len(paths)
else:
i = (paths.index(fp) + len(paths) - 1) % len(paths)
self.set_focus_path(paths[i])
return
key = super().keypress(size, key)
def mouse_event(self, size, event, button, x, y, focus):
if mouse_scroll(self, size, button):
return True
return super().mouse_event(size, event, button, x, y, focus)
def open(self, text, recpts, back_callback, send_callback):
self.back_callback = back_callback
self.send_callback = send_callback
r = r"(#[a-zA-Z0-9\-_\.]+)|((\&|%).{44}\.sha256)|(@.{44}.ed25519)|(\(([^\)]+)\)\[[^\]]+\])|(\[[^\]]+\]\([^\)]+\))"
all = []
pos = 0
for i in re.finditer(r, text):
s = i.span()
if s[0] > pos:
all.append(i.string[pos:s[0]])
m = i.string[i.start(0):i.end(0)]
if m[0] in ['@', '%', '&']:
m = f"{m[:8]}.."
elif m[0] in ['(']:
m = re.match(r"\(([^\)]+)\)\[([^\]]+)\]", m)
m = m.group(1)
elif m[0] in ['[']:
m = re.match(r"\[([^\]]+)\]\(([^\)]+)\)", m)
m = m.group(1)
all.append(('cypherlink', m))
pos = s[1]
if pos < len(text):
all.append(text[pos:len(text)])
self.body_text.set_text(all)
if recpts:
lst = ['Recipients:']
for r in recpts:
nm = app.feed2name(r)
if nm:
r = f"[@{nm}]({r})"
lst.append(' ' + r)
self.recpts_text.set_text('\n'.join(lst))
self.set_focus_path([1, 'body'])
the_loop.widget = self
def _back_callback(self):
self.close()
self.back_callback()
def _send_callback(self):
logger.info("send_callback")
self.close()
self.send_callback(str(self.body_text.get_text()[0]))
save_draft(None, [] if self.is_private else None)
def close(self):
the_loop.widget = urwid_frame
the_loop.draw_screen()
# ----------------------------------------------------------------------
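# RecptsDialog collects the recipient list for a private message, accepting
# full feed IDs or names known from 'about' messages, and rejects overly long
# recipient lists ('max 7 recipients').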
class RecptsDialog(urwid.Overlay):
def __init__(self):
header = urwid.Text("Enter the recipients for a private msg, " + \
"one per line\n(use TAB to select buttons)",
align = 'center')
self.edit = urwid.Edit(multiline=True)
self.edit.set_edit_pos(0)
body_filler = urwid.Filler(self.edit, valign = 'top')
body_padding = urwid.Padding(body_filler, left = 1, right = 1)
body = urwid.LineBox(body_padding)
w = the_loop.widget
footer1 = urwid.AttrMap(urwid.Button('Cancel',
lambda x:self.close()),
None, focus_map='selected')
footer2 = urwid.AttrMap(urwid.Button(' Done ',
lambda x: self._callback()),
None, focus_map='selected')
footer = urwid.GridFlow([footer1,footer2], 11, 1, 1, 'center')
lb = urwid.LineBox(urwid.Frame(body, header = header, footer = footer))
super().__init__(urwid.AttrMap(lb, 'fill'), w,
align = 'center', valign = 'middle',
width = screen_size[0]-2,
height = screen_size[1]-2)
def keypress(self, size, key):
# if key in key_quit:
# raise urwid.ExitMainLoop()
if key in ['esc']:
self.close()
if key in ['tab', 'shift tab']:
paths = [[1, 'body'], [1, 'footer', 0], [1, 'footer', 1]]
fp = self.get_focus_path()
output_log(str(fp))
if key == 'tab':
i = (paths.index(fp) + 1) % len(paths)
else:
i = (paths.index(fp) + len(paths) - 1) % len(paths)
self.set_focus_path(paths[i])
return
key = super().keypress(size, key)
def open(self, recpts, ok_callback):
if recpts:
self.edit.set_edit_text('\n'.join(recpts))
self.callback = ok_callback
self.set_focus_path([1, 'body'])
the_loop.widget = self
def reopen(self):
self.set_focus_path([1, 'body'])
the_loop.widget = self
def _callback(self):
recpts = self.edit.get_edit_text().replace(',', '\n').split('\n')
good, bad = [], []
addr = re.compile(r"(@.{44}.ed25519)")
for r in recpts:
r = r.strip()
if len(r) == 0:
continue
for i in addr.findall(r):
good.append(i)
break
else:
users = app.the_db.match_about_name(f"^{r[1:]}$"
if r[0] == '@' else r)
logger.info(f"users: <{r}> {str(users)}")
if len(users) == 1:
good.append(users[0])
else:
bad.append(f"? {r}" if len(users) == 0 else f"?+ {r}")
lst = []
for r in list(set(good)):
nm = app.feed2name(r)
if nm:
r = f"[@{nm}]({r})"
lst.append(r)
good = lst
if len(good) + len(bad) == 0:
bad = ['add one recipient']
if len(good) + len(bad) >= 7:
bad = ['max 7 recipients'] + bad
if len(bad) > 0:
self.edit.set_edit_text('\n'.join(bad + lst))
self.edit.set_edit_pos(len(bad[0]))
self.set_focus_path([1, 'body'])
else:
self.edit.set_edit_text('\n'.join(good))
self.close()
self.callback(good)
def close(self):
global draft_private_text
save_draft(draft_private_text, self.edit.get_edit_text().split('\n'))
the_loop.widget = urwid_frame
the_loop.draw_screen()
# ---------------------------------------------------------------------------
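# Menu is the ESC / upper-right-click popup: it lists the key bindings as
# buttons and simply replays the corresponding keypress on the main frame.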
class Menu(urwid.Overlay):
_labels = [ '',
'refresh !',
'',
'reply R',
'compose C',
'',
'directory U',
'help ?',
'',
'quit Q'
]
def cb(self, button):
screen_size = urwid.raw_display.Screen().get_cols_rows()
l = button.get_label()[-1].lower()
self.close()
urwid_frame.keypress(screen_size, l)
def open(self):
the_loop.widget = self
def close(self):
the_loop.widget = urwid_frame
the_loop.draw_screen()
def __init__(self, active=None):
self._active = active
body = [ urwid.Text(" M E N U") ]
for l in self._labels:
if l == '':
body.append( urwid.Text('---------------') )
else:
if not active or l[-1] in active:
body.append( urwid.AttrMap(urwid.Button(l, self.cb),
None, 'selected') )
else:
body.append(urwid.Text(('even', f" {l} ")))
lb = urwid.ListBox(urwid.SimpleFocusListWalker(body))
super().__init__(urwid.AttrMap(urwid.LineBox(lb), 'fill'),
the_loop.widget, align = 'right', valign = 'top',
width=('relative',5), height=('relative',5),
min_width=17, min_height=13)
def keypress(self, size, key):
if key in ['shift tab']:
return super().keypress(size, 'up')
elif key in ['tab']:
return super().keypress(size, 'down')
elif key in ['esc'] + arrow_left:
self.close()
return None
k = key.upper()
for l in self._labels:
if l == '':
continue
if k == l[-1] and (not self._active or k in self._active):
self.close()
urwid_frame.keypress(screen_size, key)
return None
return super().keypress(size, key)
# ---------------------------------------------------------------------------
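# launch() is the UI entry point: it picks one of the palettes below based on
# args.style, builds the urwid frame and starts the asyncio-driven main loop.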
def launch(app_core, secr, args):
global app, the_loop
global urwid_counter, urwid_back, urwid_title, urwid_header
global urwid_footer, urwid_threadList, urwid_convoList, urwid_frame
app = app_core
print(ui_descr)
amber_palette = [
('fill', 'dark gray', 'black', 'default', '#d80', '#000'),
('even', 'black', 'dark gray', 'standout', '#000', '#d80'),
('evenBold', 'black,underline', 'dark gray', 'standout,underline', '#000,underline', '#d80'),
('odd', 'dark gray', 'black', 'default', '#d80', '#000'),
('oddBold', 'dark gray,underline', 'black', 'underline', '#d80,underline', '#000'),
('header', 'black', 'light gray', 'standout', '#000', '#fa0'),
('selected', 'black', 'light gray', 'standout', '#000', '#fa0'),
('selectedPrivate', 'black', 'light gray', 'standout', '#000', '#fa0'),
('cypherlink', 'dark red,underline', 'black', 'standout', '#fa0,underline', '#d80')
]
green_palette = [
('fill', 'dark green', 'black'),
('even', 'black', 'dark green'),
('evenBold', 'black,underline', 'dark green'),
('odd', 'dark green', 'black'),
('oddBold', 'dark green,underline', 'black'),
('header', 'black', 'light green'),
('selected', 'black', 'light green'),
('selectedPrivate', 'black', 'light green'),
('cypherlink', 'black,underline', 'dark green')
]
mono_palette = [
('fill', 'default', 'default'),
('even', 'standout', 'default'),
('evenBold', 'standout,underline', 'default'),
('odd', 'default', 'default'),
('oddBold', 'underline', 'default'),
('header', 'standout', 'default'),
('selected', 'standout', 'default'),
('selectedPrivate', 'standout', 'default'),
('cypherlink', 'standout,underline', 'default')
]
light_palette = [
('fill', 'black', 'white'),
('even', 'black', 'light gray', 'standout'),
('evenBold', 'black,underline', 'light gray', 'standout'),
('odd', 'black', 'white'),
('oddBold', 'black,underline', 'white'),
('header', 'white', 'light blue', 'underline'),
('selected', 'white', 'light red', 'standout'),
('selectedPrivate', 'white', 'light green', 'standout'),
('cypherlink', 'light blue,underline', 'light gray', 'standout')
]
dark_palette = [
('fill', 'white', 'black'),
('even', 'white', 'dark gray', 'standout'),
('evenBold', 'white,underline', 'dark gray', 'standout'),
('odd', 'white', 'black', 'standout'),
('oddBold', 'white,underline', 'black', 'standout'),
('header', 'black', 'dark green', 'underline'),
('selected', 'black', 'light red', 'standout'),
('selectedPrivate', 'black', 'light blue', 'standout'),
('cypherlink', 'light blue,underline', 'dark gray', 'standout')
]
palette = { 'mono': mono_palette,
'green': green_palette,
'amber': amber_palette,
'light': light_palette,
'dark': dark_palette } [args.style]
screen = urwid.raw_display.Screen()
screen.set_terminal_properties(256)
screen.register_palette(palette)
urwid_counter = urwid.Text(' FWD=0 BWD=0 ', 'right', wrap='clip')
urwid_title = urwid.Text('PUBLIC chats:', wrap='clip')
urwid_header = urwid.Pile([
urwid.Columns([urwid.Text(f"SurfCity - a log-less SSB client{ui_descr}", wrap='clip'),
('pack', urwid_counter)
]),
urwid_title
])
urwid_hdrmap = urwid.AttrMap(urwid_header, 'header')
if args.offline:
urwid_footer = urwid.Text('Offline') # , wrap='clip')
else:
urwid_footer = urwid.Text('Welcome, please stand by ...', wrap='clip')
urwid_ftrmap = urwid.AttrMap(urwid.Columns([
urwid_footer,
('pack', urwid.Text(" Type '?' for help.", 'right'))
]), 'header')
urwid_threadList = urwid.ListBox([urwid.Text('Almost there ...')])
urwid_convoList = PrivateConvoListBox(secr, [urwid.Text('Just a moment...')])
urwid_frame = urwid.Frame(urwid_threadList, header=urwid_hdrmap,
footer=urwid_ftrmap,
focus_part = 'body')
logger.info("%s", f"surfcity {str(time.ctime())} starting")
evl = urwid.AsyncioEventLoop(loop=asyncio.get_event_loop())
ensure_future(main(secr, args))
the_loop = urwid.MainLoop(urwid.AttrMap(urwid_frame, 'fill'),
screen=screen, event_loop=evl,
unhandled_input=on_unhandled_input)
try:
the_loop.run()
except Exception as e:
s = traceback.format_exc()
logger.info("main exc %s", s)
print(s)
if error_message:
print(error_message)
# eof
| 37.204169 | 122 | 0.534608 |
acf50a5c429d6bbf9c8f54a0ffd055e8d8ae83ff | 8,787 | py | Python | ddsp/training/data_preparation/prepare_tfrecord_lib.py | xcyoloxcy/ddsp | f4493924034860bed704140c86d58f4118b03d60 | [
"Apache-2.0"
] | 1 | 2022-01-30T17:47:54.000Z | 2022-01-30T17:47:54.000Z | ddsp/training/data_preparation/prepare_tfrecord_lib.py | mekuto/ddsp | a260cdeb591bf30e0efab83f4865ebf398012231 | [
"Apache-2.0"
] | null | null | null | ddsp/training/data_preparation/prepare_tfrecord_lib.py | mekuto/ddsp | a260cdeb591bf30e0efab83f4865ebf398012231 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Apache Beam pipeline for computing TFRecord dataset from audio files."""
from absl import logging
import apache_beam as beam
from ddsp import spectral_ops
import numpy as np
import pydub
import tensorflow.compat.v2 as tf
def _load_audio_as_array(audio_path: str, sample_rate: int) -> np.ndarray:
"""Load audio file at specified sample rate and return an array.
When `sample_rate` > original SR of audio file, Pydub may miss samples when
reading file defined in `audio_path`. Must manually zero-pad missing samples.
Args:
audio_path: path to audio file
sample_rate: desired sample rate (can be different from original SR)
Returns:
audio: audio in np.float32
"""
with tf.io.gfile.GFile(audio_path, 'rb') as f:
# Load audio at original SR
audio_segment = (pydub.AudioSegment.from_file(f).set_channels(1))
# Compute expected length at given `sample_rate`
expected_len = int(audio_segment.duration_seconds * sample_rate)
# Resample to `sample_rate`
audio_segment = audio_segment.set_frame_rate(sample_rate)
sample_arr = audio_segment.get_array_of_samples()
audio = np.array(sample_arr).astype(np.float32)
# Zero pad missing samples, if any
audio = spectral_ops.pad_or_trim_to_expected_length(audio, expected_len)
# Convert from int to float representation.
audio /= np.iinfo(sample_arr.typecode).max
return audio
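# Illustrative example (hypothetical path): a 2 s clip loaded at 16 kHz yields
# a float32 array of 32000 samples scaled to roughly [-1, 1]:
#   audio = _load_audio_as_array('/tmp/clip.wav', sample_rate=16000)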
def _load_audio(audio_path, sample_rate):
"""Load audio file."""
logging.info("Loading '%s'.", audio_path)
beam.metrics.Metrics.counter('prepare-tfrecord', 'load-audio').inc()
audio = _load_audio_as_array(audio_path, sample_rate)
return {'audio': audio}
def add_loudness(ex, sample_rate, frame_rate, n_fft=2048):
"""Add loudness in dB."""
beam.metrics.Metrics.counter('prepare-tfrecord', 'compute-loudness').inc()
audio = ex['audio']
mean_loudness_db = spectral_ops.compute_loudness(audio, sample_rate,
frame_rate, n_fft,
use_tf=False)
ex = dict(ex)
ex['loudness_db'] = mean_loudness_db.astype(np.float32)
return ex
def _add_f0_estimate(ex, sample_rate, frame_rate):
"""Add fundamental frequency (f0) estimate using CREPE."""
beam.metrics.Metrics.counter('prepare-tfrecord', 'estimate-f0').inc()
audio = ex['audio']
f0_hz, f0_confidence = spectral_ops.compute_f0(audio, sample_rate, frame_rate)
ex = dict(ex)
ex.update({
'f0_hz': f0_hz.astype(np.float32),
'f0_confidence': f0_confidence.astype(np.float32)
})
return ex
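# Worked example for split_example() below (illustrative numbers): a 10.5 s
# clip with window_secs=4 and hop_secs=1 gives ceil((10.5 - 4) / 1) + 1 = 8
# windows, and the signal is zero-padded to 11 s so the last window is full.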
def split_example(ex, sample_rate, frame_rate, window_secs, hop_secs):
"""Splits example into windows, padding final window if needed."""
def get_windows(sequence, rate):
window_size = int(window_secs * rate)
hop_size = int(hop_secs * rate)
n_windows = int(np.ceil((len(sequence) - window_size) / hop_size)) + 1
n_samples_padded = (n_windows - 1) * hop_size + window_size
n_padding = n_samples_padded - len(sequence)
sequence = np.pad(sequence, (0, n_padding), mode='constant')
for window_end in range(window_size, len(sequence) + 1, hop_size):
yield sequence[window_end - window_size:window_end]
for audio, loudness_db, f0_hz, f0_confidence in zip(
get_windows(ex['audio'], sample_rate),
get_windows(ex['loudness_db'], frame_rate),
get_windows(ex['f0_hz'], frame_rate),
get_windows(ex['f0_confidence'], frame_rate)):
beam.metrics.Metrics.counter('prepare-tfrecord', 'split-example').inc()
yield {
'audio': audio,
'loudness_db': loudness_db,
'f0_hz': f0_hz,
'f0_confidence': f0_confidence
}
def float_dict_to_tfexample(float_dict):
"""Convert dictionary of float arrays to tf.train.Example proto."""
return tf.train.Example(
features=tf.train.Features(
feature={
k: tf.train.Feature(float_list=tf.train.FloatList(value=v))
for k, v in float_dict.items()
}))
def add_key(example):
"""Add a key to this example by taking the hash of the values."""
return hash(example['audio'].tobytes()), example
def eval_split_partition_fn(example, num_partitions, eval_fraction, all_ids):
"""Partition function to split into train/eval based on the hash ids."""
del num_partitions
example_id = example[0]
eval_range = int(len(all_ids) * eval_fraction)
for i in range(eval_range):
if all_ids[i] == example_id:
return 0
return 1
def prepare_tfrecord(input_audio_paths,
output_tfrecord_path,
num_shards=None,
sample_rate=16000,
frame_rate=250,
window_secs=4,
hop_secs=1,
eval_split_fraction=0.0,
coarse_chunk_secs=20.0,
pipeline_options=''):
"""Prepares a TFRecord for use in training, evaluation, and prediction.
Args:
input_audio_paths: An iterable of paths to audio files to include in
TFRecord.
output_tfrecord_path: The prefix path to the output TFRecord. Shard numbers
will be added to actual path(s).
num_shards: The number of shards to use for the TFRecord. If None, this
number will be determined automatically.
sample_rate: The sample rate to use for the audio.
frame_rate: The frame rate to use for f0 and loudness features. If set to
None, these features will not be computed.
window_secs: The size of the sliding window (in seconds) to use to split the
audio and features. If 0, they will not be split.
hop_secs: The number of seconds to hop when computing the sliding windows.
eval_split_fraction: Fraction of the dataset to reserve for eval split. If
set to 0, no eval split is created.
coarse_chunk_secs: Chunk size in seconds used to split the input audio
files. This is used to split large audio files into manageable chunks
for better parallelization and to enable non-overlapping train/eval
splits.
pipeline_options: An iterable of command line arguments to be used as
options for the Beam Pipeline.
"""
pipeline_options = beam.options.pipeline_options.PipelineOptions(
pipeline_options)
with beam.Pipeline(options=pipeline_options) as pipeline:
examples = (
pipeline
| beam.Create(input_audio_paths)
| beam.Map(_load_audio, sample_rate))
if frame_rate:
examples = (
examples
| beam.Map(_add_f0_estimate, sample_rate, frame_rate)
| beam.Map(add_loudness, sample_rate, frame_rate))
if coarse_chunk_secs:
examples |= beam.FlatMap(split_example, sample_rate, frame_rate,
coarse_chunk_secs, coarse_chunk_secs)
def postprocess_pipeline(examples, output_path, stage_name=''):
if stage_name:
stage_name = f'_{stage_name}'
if window_secs:
examples |= f'create_batches{stage_name}' >> beam.FlatMap(
split_example, sample_rate, frame_rate, window_secs, hop_secs)
_ = (
examples
| f'reshuffle{stage_name}' >> beam.Reshuffle()
| f'make_tfexample{stage_name}' >> beam.Map(float_dict_to_tfexample)
| f'write{stage_name}' >> beam.io.tfrecordio.WriteToTFRecord(
output_path,
num_shards=num_shards,
coder=beam.coders.ProtoCoder(tf.train.Example)))
if eval_split_fraction:
examples |= beam.Map(add_key)
keys = examples | beam.Keys()
splits = examples | beam.Partition(eval_split_partition_fn, 2,
eval_split_fraction,
beam.pvalue.AsList(keys))
# Remove ids.
eval_split = splits[0] | 'remove_id_eval' >> beam.Map(lambda x: x[1])
train_split = splits[1] | 'remove_id_train' >> beam.Map(lambda x: x[1])
postprocess_pipeline(eval_split, f'{output_tfrecord_path}-eval', 'eval')
postprocess_pipeline(train_split, f'{output_tfrecord_path}-train',
'train')
else:
postprocess_pipeline(examples, output_tfrecord_path)
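# Illustrative usage sketch (not part of the original module). The audio paths
# and output location below are hypothetical placeholders; the keyword
# arguments simply mirror the prepare_tfrecord() signature defined above.
if __name__ == '__main__':
  prepare_tfrecord(
      input_audio_paths=['/tmp/example_01.wav', '/tmp/example_02.wav'],
      output_tfrecord_path='/tmp/ddsp_train.tfrecord',
      num_shards=2,
      sample_rate=16000,
      frame_rate=250,
      window_secs=4,
      hop_secs=1,
      eval_split_fraction=0.1)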
| 38.880531 | 80 | 0.677933 |
acf50a96400748faebd004dc6667a47e3ace23fd | 8,184 | py | Python | src/python/pants/backend/shell/dependency_inference.py | patricklaw/pants | 906099f5c0d2f9e6adcac23e030270c64ca519ae | [
"Apache-2.0"
] | 1 | 2016-04-27T15:35:42.000Z | 2016-04-27T15:35:42.000Z | src/python/pants/backend/shell/dependency_inference.py | patricklaw/pants | 906099f5c0d2f9e6adcac23e030270c64ca519ae | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/shell/dependency_inference.py | patricklaw/pants | 906099f5c0d2f9e6adcac23e030270c64ca519ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import DefaultDict
from pants.backend.shell.lint.shellcheck.subsystem import Shellcheck
from pants.backend.shell.shell_setup import ShellSetup
from pants.backend.shell.target_types import ShellSourcesField
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.engine.addresses import Address
from pants.engine.collection import DeduplicatedCollection
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
SourcesPaths,
SourcesPathsRequest,
Targets,
WrappedTarget,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ShellMapping:
"""A mapping of Shell file names to their owning file address."""
mapping: FrozenDict[str, Address]
ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
@rule(desc="Creating map of Shell file names to Shell targets", level=LogLevel.DEBUG)
async def map_shell_files() -> ShellMapping:
all_targets = await Get(Targets, AddressSpecs([DescendantAddresses("")]))
shell_tgts = tuple(tgt for tgt in all_targets if tgt.has_field(ShellSourcesField))
sources_per_target = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[ShellSourcesField])) for tgt in shell_tgts
)
files_to_addresses: dict[str, Address] = {}
files_with_multiple_owners: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt, sources in zip(shell_tgts, sources_per_target):
for f in sources.files:
if f in files_to_addresses:
files_with_multiple_owners[f].update({files_to_addresses[f], tgt.address})
else:
files_to_addresses[f] = tgt.address
# Remove files with ambiguous owners.
for ambiguous_f in files_with_multiple_owners:
files_to_addresses.pop(ambiguous_f)
return ShellMapping(
mapping=FrozenDict(sorted(files_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(files_with_multiple_owners.items())
),
)
class ParsedShellImports(DeduplicatedCollection):
sort_input = True
@dataclass(frozen=True)
class ParseShellImportsRequest:
# NB: We parse per-file, rather than per-target. This is necessary so that we can have each
# file in complete isolation without its sibling files present so that Shellcheck errors when
# trying to source a sibling file, which then allows us to extract that path.
digest: Digest
fp: str
PATH_FROM_SHELLCHECK_ERROR = re.compile(r"Not following: (.+) was not specified as input")
@rule
async def parse_shell_imports(
request: ParseShellImportsRequest, shellcheck: Shellcheck
) -> ParsedShellImports:
# We use Shellcheck to parse for us by running it against each file in isolation, which means
# that all `source` statements will error. Then, we can extract the problematic paths from the
# JSON output.
downloaded_shellcheck = await Get(
DownloadedExternalTool, ExternalToolRequest, shellcheck.get_request(Platform.current)
)
input_digest = await Get(Digest, MergeDigests([request.digest, downloaded_shellcheck.digest]))
process_result = await Get(
FallibleProcessResult,
Process(
# NB: We do not load up `[shellcheck].{args,config}` because it would risk breaking
# determinism of dependency inference in an unexpected way.
[downloaded_shellcheck.exe, "--format=json", request.fp],
input_digest=input_digest,
description=f"Detect Shell imports for {request.fp}",
level=LogLevel.DEBUG,
# We expect this to always fail, but it should still be cached because the process is
# deterministic.
cache_scope=ProcessCacheScope.ALWAYS,
),
)
try:
output = json.loads(process_result.stdout)
except json.JSONDecodeError:
logger.error(
f"Parsing {request.fp} for dependency inference failed because Shellcheck's output "
f"could not be loaded as JSON. Please open a GitHub issue at "
f"https://github.com/pantsbuild/pants/issues/new with this error message attached.\n\n"
f"\nshellcheck version: {shellcheck.version}\n"
f"process_result.stdout: {process_result.stdout.decode()}"
)
return ParsedShellImports()
paths = set()
for error in output:
if not error.get("code", "") == 1091:
continue
msg = error.get("message", "")
matches = PATH_FROM_SHELLCHECK_ERROR.match(msg)
if matches:
paths.add(matches.group(1))
else:
logger.error(
f"Parsing {request.fp} for dependency inference failed because Shellcheck's error "
f"message was not in the expected format. Please open a GitHub issue at "
f"https://github.com/pantsbuild/pants/issues/new with this error message "
f"attached.\n\n\nshellcheck version: {shellcheck.version}\n"
f"error JSON entry: {error}"
)
return ParsedShellImports(paths)
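# Illustrative sketch of the Shellcheck JSON this rule consumes (not part of
# the original module). The exact message wording is an assumption inferred
# from PATH_FROM_SHELLCHECK_ERROR above; real `shellcheck --format=json`
# output may differ slightly.
#
#   example_entry = {"code": 1091,
#                    "message": "Not following: ./lib.sh was not specified as input"}
#   PATH_FROM_SHELLCHECK_ERROR.match(example_entry["message"]).group(1)
#   # -> "./lib.sh", the sourced path that is then mapped via ShellMapping.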
class InferShellDependencies(InferDependenciesRequest):
infer_from = ShellSourcesField
@rule(desc="Inferring Shell dependencies by analyzing imports")
async def infer_shell_dependencies(
request: InferShellDependencies, shell_mapping: ShellMapping, shell_setup: ShellSetup
) -> InferredDependencies:
if not shell_setup.dependency_inference:
return InferredDependencies([])
address = request.sources_field.address
wrapped_tgt = await Get(WrappedTarget, Address, address)
explicitly_provided_deps, hydrated_sources = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),
Get(HydratedSources, HydrateSourcesRequest(request.sources_field)),
)
per_file_digests = await MultiGet(
Get(Digest, DigestSubset(hydrated_sources.snapshot.digest, PathGlobs([f])))
for f in hydrated_sources.snapshot.files
)
all_detected_imports = await MultiGet(
Get(ParsedShellImports, ParseShellImportsRequest(digest, f))
for digest, f in zip(per_file_digests, hydrated_sources.snapshot.files)
)
result: OrderedSet[Address] = OrderedSet()
for detected_imports in all_detected_imports:
for import_path in detected_imports:
unambiguous = shell_mapping.mapping.get(import_path)
ambiguous = shell_mapping.ambiguous_modules.get(import_path)
if unambiguous:
result.add(unambiguous)
elif ambiguous:
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
ambiguous,
address,
import_reference="file",
context=f"The target {address} sources `{import_path}`",
)
maybe_disambiguated = explicitly_provided_deps.disambiguated(ambiguous)
if maybe_disambiguated:
result.add(maybe_disambiguated)
return InferredDependencies(sorted(result))
def rules():
return (*collect_rules(), UnionRule(InferDependenciesRequest, InferShellDependencies))
| 40.315271 | 99 | 0.711388 |
acf50becd8a31bf02b0c6f31400afbf0227d69d6 | 3,961 | py | Python | e2edet/utils/meter.py | kienduynguyen/BoxeR | a7d9456141e9fb4f6da53c961bda54886024ee75 | [
"MIT"
] | 18 | 2022-03-24T16:15:09.000Z | 2022-03-31T18:17:41.000Z | e2edet/utils/meter.py | kienduynguyen/BoxeR | a7d9456141e9fb4f6da53c961bda54886024ee75 | [
"MIT"
] | 3 | 2022-03-28T12:34:34.000Z | 2022-03-30T06:21:21.000Z | e2edet/utils/meter.py | kienduynguyen/BoxeR | a7d9456141e9fb4f6da53c961bda54886024ee75 | [
"MIT"
] | 2 | 2022-03-29T08:29:11.000Z | 2022-03-30T03:06:17.000Z | # ------------------------------------------------------------------------
# BoxeR
# Copyright (c) 2022. All Rights Reserved.
# Licensed under the MIT License [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from detectron2 (https://github.com/facebookresearch/detectron2)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from collections import deque
import torch
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20):
self.window_size = window_size
self.reset()
def reset(self):
self.deque = deque(maxlen=self.window_size)
self.averaged_value_deque = deque(maxlen=self.window_size)
self.batch_sizes = deque(maxlen=self.window_size)
self.total_samples = 0
self.total = 0.0
self.count = 0
def update(self, value, batch_size=1):
self.count += 1
self.deque.append(value * batch_size)
self.averaged_value_deque.append(value)
self.batch_sizes.append(batch_size)
self.total_samples += batch_size
self.total += value * batch_size
@property
def median(self):
d = torch.tensor(list(self.averaged_value_deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
s = torch.tensor(list(self.batch_sizes))
return d.sum().item() / s.sum().item()
@property
def global_avg(self):
return self.total / self.total_samples
def get_latest(self):
return self.averaged_value_deque[-1]
class Meter:
def __init__(self, window_size=20, delimiter=", "):
self.meters = {}
self.delimiter = delimiter
self.window_size = window_size
def update(self, update_dict, batch_size=1):
for k, v in update_dict.items():
if isinstance(v, torch.Tensor):
if v.dim() != 0:
v = v.mean()
v = v.item()
assert isinstance(v, (float, int))
if k not in self.meters:
self.meters[k] = SmoothedValue(self.window_size)
self.meters[k].update(v, batch_size)
def update_from_meter(self, meter):
for key, value in meter.meters.items():
assert isinstance(value, SmoothedValue)
if key not in self.meters:
self.meters[key] = SmoothedValue(self.window_size)
self.meters[key] = value
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def get_scalar_dict(self):
scalar_dict = {}
for k, v in self.meters.items():
scalar_dict[k] = v.avg
return scalar_dict
def get_log_dict(self, split):
log_dict = {}
for k, v in self.meters.items():
if "train" == split:
log_dict[k] = "{:.4f}".format(v.median)
log_dict["{}/avg".format(k)] = "{:.4f}".format(v.avg)
else:
log_dict[k] = "{:.4f}".format(v.global_avg)
return log_dict
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
if "train" in name:
loss_str.append(
"{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
)
else:
# In case of val print global avg
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
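# Minimal usage sketch (illustrative, not part of the original module); the
# metric name and values below are arbitrary placeholders.
if __name__ == "__main__":
    meter = Meter(window_size=20)
    for step in range(5):
        meter.update({"train/total_loss": 1.0 / (step + 1)}, batch_size=32)
    print(meter.get_log_dict("train"))
    print(meter)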
| 32.467213 | 86 | 0.548346 |
acf50c539371f46c270893c4e95529307a316c47 | 2,570 | py | Python | qa/rpc-tests/wallet_addresses.py | newtl/litecoinz | 801cd1386ae325e041bc2b66b2d066c62d4d2832 | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet_addresses.py | newtl/litecoinz | 801cd1386ae325e041bc2b66b2d066c62d4d2832 | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet_addresses.py | newtl/litecoinz | 801cd1386ae325e041bc2b66b2d066c62d4d2832 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2018 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, start_nodes
# Test wallet address behaviour across network upgrades
class WalletAddressesTest(BitcoinTestFramework):
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, [[
'-nuparams=5ba81b19:202', # Overwinter
'-nuparams=76b809bb:204', # Sapling
]] * 4)
def run_test(self):
def addr_checks(default_type):
# Check default type, as well as explicit types
types_and_addresses = [
(default_type, self.nodes[0].z_getnewaddress()),
('sprout', self.nodes[0].z_getnewaddress('sprout')),
('sapling', self.nodes[0].z_getnewaddress('sapling')),
]
all_addresses = self.nodes[0].z_listaddresses()
for addr_type, addr in types_and_addresses:
res = self.nodes[0].z_validateaddress(addr)
assert(res['isvalid'])
assert(res['ismine'])
assert_equal(res['type'], addr_type)
assert(addr in all_addresses)
# Sanity-check the test harness
assert_equal(self.nodes[0].getblockcount(), 200)
# Current height = 200 -> Sprout
# Default address type is Sapling
print "Testing height 200 (Sprout)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 201 -> Sprout
# Default address type is Sapling
print "Testing height 201 (Sprout)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 202 -> Overwinter
# Default address type is Sapling
print "Testing height 202 (Overwinter)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 203 -> Overwinter
# Default address type is Sapling
print "Testing height 203 (Overwinter)"
addr_checks('sapling')
self.nodes[0].generate(1)
self.sync_all()
# Current height = 204 -> Sapling
# Default address type is Sapling
print "Testing height 204 (Sapling)"
addr_checks('sapling')
if __name__ == '__main__':
WalletAddressesTest().main()
| 32.948718 | 70 | 0.617121 |
acf50dad07af82bf7d93651290cea047d3a2b78f | 2,267 | py | Python | slider_status_ui.py | me701/gui_designer | 0115f05250631b806cbf2f634ab8c3c848f9fe94 | [
"MIT"
] | null | null | null | slider_status_ui.py | me701/gui_designer | 0115f05250631b806cbf2f634ab8c3c848f9fe94 | [
"MIT"
] | null | null | null | slider_status_ui.py | me701/gui_designer | 0115f05250631b806cbf2f634ab8c3c848f9fe94 | [
"MIT"
] | 2 | 2021-11-03T14:56:20.000Z | 2021-11-03T16:23:16.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'slider_status.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(392, 621)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(120, 140, 131, 311))
self.widget.setObjectName("widget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalSlider = QtWidgets.QSlider(self.widget)
self.verticalSlider.setOrientation(QtCore.Qt.Vertical)
self.verticalSlider.setObjectName("verticalSlider")
self.horizontalLayout.addWidget(self.verticalSlider)
self.progressBar = QtWidgets.QProgressBar(self.widget)
self.progressBar.setProperty("value", 24)
self.progressBar.setOrientation(QtCore.Qt.Vertical)
self.progressBar.setObjectName("progressBar")
self.horizontalLayout.addWidget(self.progressBar)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 392, 27))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.verticalSlider.valueChanged['int'].connect(self.progressBar.setValue)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
| 44.45098 | 82 | 0.723864 |
acf50e55f8ab58452e5e0ebea3d8fdb2fdff6d95 | 3,992 | py | Python | homework_5/scripts/train.py | marcosicklinger/deep_learning | 54628065eb84aeb890672f82f7f206edc68bc451 | [
"MIT"
] | 2 | 2021-04-24T17:44:36.000Z | 2022-03-23T21:19:46.000Z | homework_5/scripts/train.py | marcosicklinger/deep_learning | 54628065eb84aeb890672f82f7f206edc68bc451 | [
"MIT"
] | null | null | null | homework_5/scripts/train.py | marcosicklinger/deep_learning | 54628065eb84aeb890672f82f7f206edc68bc451 | [
"MIT"
] | null | null | null | import torch
import os
from .train_utils import AverageMeter, accuracy
from .torch_utils import use_gpu_if_possible
def train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance, device, lr_scheduler): # note: I've added a generic performance to replace accuracy
for X, y in dataloader:
X = X.to(device)
y = y.to(device)
# 1. reset the gradients previously accumulated by the optimizer
# this will avoid re-using gradients from previous loops
optimizer.zero_grad()
# 2. get the predictions from the current state of the model
# this is the forward pass
y_hat = model(X)
# 3. calculate the loss on the current mini-batch
loss = loss_fn(y_hat, y)
# 4. execute the backward pass given the current loss
loss.backward()
# 5. update the value of the params
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
# 6. calculate the accuracy for this mini-batch
acc = performance(y_hat, y)
# 7. update the loss and accuracy AverageMeter
loss_meter.update(val=loss.item(), n=X.shape[0])
performance_meter.update(val=acc, n=X.shape[0])
def train_model(model, dataloader, loss_fn, optimizer, num_epochs, checkpoint_loc=None, checkpoint_name="checkpoint.pt", performance=accuracy, lr_scheduler=None, device=None, lr_scheduler_step_on_epoch=True):
# create the folder for the checkpoints (if it's not None)
if checkpoint_loc is not None:
os.makedirs(checkpoint_loc, exist_ok=True)
if device is None:
device = use_gpu_if_possible()
model = model.to(device)
model.train()
# epoch loop
for epoch in range(num_epochs):
loss_meter = AverageMeter()
performance_meter = AverageMeter()
# added print for LR
print(f"Epoch {epoch+1} --- learning rate {optimizer.param_groups[0]['lr']:.5f}")
lr_scheduler_batch = lr_scheduler if not lr_scheduler_step_on_epoch else None
train_epoch(model, dataloader, loss_fn, optimizer, loss_meter, performance_meter, performance, device, lr_scheduler_batch)
print(f"Epoch {epoch+1} completed. Loss - total: {loss_meter.sum} - average: {loss_meter.avg}; Performance: {performance_meter.avg}")
# produce checkpoint dictionary -- but only if the name and folder of the checkpoint are not None
if checkpoint_name is not None and checkpoint_loc is not None:
checkpoint_dict = {
"parameters": model.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch
}
torch.save(checkpoint_dict, os.path.join(checkpoint_loc, checkpoint_name))
if lr_scheduler is not None and lr_scheduler_step_on_epoch:
lr_scheduler.step()
return loss_meter.sum, performance_meter.avg
def test_model(model, dataloader, performance=accuracy, loss_fn=None, device=None):
# create an AverageMeter for the loss if passed
if loss_fn is not None:
loss_meter = AverageMeter()
if device is None:
device = use_gpu_if_possible()
model = model.to(device)
performance_meter = AverageMeter()
model.eval()
with torch.no_grad():
for X, y in dataloader:
X = X.to(device)
y = y.to(device)
y_hat = model(X)
loss = loss_fn(y_hat, y) if loss_fn is not None else None
acc = performance(y_hat, y)
if loss_fn is not None:
loss_meter.update(loss.item(), X.shape[0])
performance_meter.update(acc, X.shape[0])
# get final performances
fin_loss = loss_meter.sum if loss_fn is not None else None
fin_perf = performance_meter.avg
print(f"TESTING - loss {fin_loss if fin_loss is not None else '--'} - performance {fin_perf}")
return fin_loss, fin_perf
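# Illustrative wiring sketch (not part of the original script). The model and
# dataloaders are hypothetical placeholders; the calls simply follow the
# signatures of train_model and test_model defined above.
def example_run(model, trainloader, testloader):
    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    train_model(model, trainloader, loss_fn, optimizer, num_epochs=2,
                checkpoint_loc="checkpoints", performance=accuracy)
    return test_model(model, testloader, performance=accuracy, loss_fn=loss_fn)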
| 39.524752 | 208 | 0.656062 |
acf50e5a347c8d27493e79e2eb265230b2e95ef9 | 2,094 | py | Python | version.py | salewski/zulip | cc5ac302ab1720a0981e595cb7ddf9ee17ce3acb | [
"Apache-2.0"
] | null | null | null | version.py | salewski/zulip | cc5ac302ab1720a0981e595cb7ddf9ee17ce3acb | [
"Apache-2.0"
] | null | null | null | version.py | salewski/zulip | cc5ac302ab1720a0981e595cb7ddf9ee17ce3acb | [
"Apache-2.0"
] | null | null | null | import os
ZULIP_VERSION = "5.0-dev+git"
# Add information on number of commits and commit hash to version, if available
zulip_git_version_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
)
lines = [ZULIP_VERSION, ""]
if os.path.exists(zulip_git_version_file):
with open(zulip_git_version_file) as f:
lines = f.readlines() + ["", ""]
ZULIP_VERSION = lines.pop(0).strip()
ZULIP_MERGE_BASE = lines.pop(0).strip()
LATEST_MAJOR_VERSION = "4.0"
LATEST_RELEASE_VERSION = "4.10"
LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2021/05/13/zulip-4-0-released/"
# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
# prevented from connecting to the Zulip server. Versions above
# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
# a banner at the top of the page asking the user to upgrade.
DESKTOP_MINIMUM_VERSION = "5.2.0"
DESKTOP_WARNING_VERSION = "5.4.3"
# Bump the API_FEATURE_LEVEL whenever an API change is made
# that clients might want to condition on. If we forget at
# the time we make the change, then bump it later as soon
# as we notice; clients using API_FEATURE_LEVEL will just not
# use the new feature/API until the bump.
#
# Changes should be accompanied by documentation explaining what the
# new level means in templates/zerver/api/changelog.md, as well as
# "**Changes**" entries in the endpoint's documentation in `zulip.yaml`.
API_FEATURE_LEVEL = 117
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
# the major version to indicate that folks should provision in both
# directions.
# Typically,
# * adding a dependency only requires a minor version bump;
# * removing a dependency requires a major version bump;
# * upgrading a dependency requires a major version bump, unless the
# upgraded dependency is backwards compatible with all of our
# historical commits sharing the same major version, in which case a
# minor version bump suffices.
PROVISION_VERSION = "179.0"
| 40.269231 | 85 | 0.766476 |
acf50e950c02cafdea390502bfc228d27385f127 | 228 | py | Python | python/lib/behaviors/throw_exception.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | python/lib/behaviors/throw_exception.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | python/lib/behaviors/throw_exception.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | from . import behavior
class ThrowException(behavior.Behavior):
def __init__(self, value = None):
super().__init__("THROW", value)
def execute(self):
super().execute()
        # Intentionally dereference None so that an AttributeError is raised,
        # matching this behavior's purpose of throwing an exception.
        reference = None
        reference.execute()
| 19 | 40 | 0.679825 |
acf50f4404f5e68a555cd66c01b1257133f25a16 | 670 | py | Python | main.py | Mansouroopi/local-library | f8026813994218084afa3e25520b6c2ee53138e9 | [
"MIT"
] | null | null | null | main.py | Mansouroopi/local-library | f8026813994218084afa3e25520b6c2ee53138e9 | [
"MIT"
] | null | null | null | main.py | Mansouroopi/local-library | f8026813994218084afa3e25520b6c2ee53138e9 | [
"MIT"
] | null | null | null | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
# Todo https://www.vertabelo.com/blog/the-doctor-will-see-you-soon-a-data-model-for-a-medical-appointment-booking-app/
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
print_hi('Mansour')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 37.222222 | 118 | 0.731343 |
acf50f4b7a4da690f01240b4d0b9891d8c65c339 | 21,912 | py | Python | components/solver.py | xdr940/TSLa | 97f8626cb29e956f77fc34d84100771ab84bea52 | [
"MIT"
] | null | null | null | components/solver.py | xdr940/TSLa | 97f8626cb29e956f77fc34d84100771ab84bea52 | [
"MIT"
] | null | null | null | components/solver.py | xdr940/TSLa | 97f8626cb29e956f77fc34d84100771ab84bea52 | [
"MIT"
] | null | null | null | import itertools
import networkx as nx
import pandas as pd
import math
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from components.AcTik import Tik
#using tk2acc,acc2tk,crossLog, inter_tks
from utils.tool import get_now,time_stat
class TimeStamp:
def __init__(self):
pass
class Solver:
def __init__(self,data,alg_base="Max - Range (km)",terminal=False):
self.data = data
self.terminal = terminal
#tk2acc
#acc2tk
#data.access_name
#df_align
        #is_equal
self.alg_base =alg_base
self.position=None
def s_prev(self,si):
return self.__s_prev(si)
def __s_prev(self,i_access,tj=None):
'''
        Equivalent to finding the parent nodes (predecessor accesses) of a node.
        :param i_access:
        :param tj: usually not needed; only relevant when the time span is long
                   enough for a satellite to complete more than one orbit.
:return:
'''
def tks_half( i_access, half):
try:
mid_tk = self.__si_max_tk(i_access)
except:
print(i_access)
return
half_list = []
for item in self.data.acc2tk[i_access]:
if half == 'last':
if item > mid_tk:
half_list.append(item)
elif half == 'first':
if item < mid_tk:
half_list.append(item)
return half_list
prevs = []
first_half = tks_half(i_access,'first')
for tk in first_half:
tik = self.data.tiks[tk]
if tik.class_id == 'I':
continue
elif tik.class_id =='II':
inters = list( tik.passInter[0])
                inters.remove(i_access)  # there may be more than one intersection?
for inter_one in inters:
if self.__si_max_tk(inter_one) < tk and self.data.is_equal(i_access,inter_one,tk):
prevs.append(inter_one)
elif tik.class_id =='III':
for inters in tik.passInter:
inters = list(inters)
if i_access in inters:
                        inters.remove(i_access)  # there may be more than one intersection?
for inter_one in inters:
if self.__si_max_tk(inter_one) < tk and self.data.is_equal(i_access, inter_one, tk):
prevs.append(inter_one)
else:
continue
# if len(prevs) ==0:
# return ['none']
# else:
return prevs
def __s_next(self,si):
return list(self.G.succ[si].keys())
def __trave(self):
'''
        Min-max depth traversal of the access graph.
:return:
'''
# print('->trave')
# for depth in dfs_depth(self.G):
# print(depth)
# for head in self.roots:
# trave_list.append( head)
# # trave_list.extend(list(dict(nx.bfs_successors(self.G,head)).values()))
# listoflist = dict(nx.bfs_successors(self.G,head)).values()
# for ls in listoflist:
# trave_list.extend(ls)
if self.terminal:
print("trave as :{}".format(self.data.accs))
return self.data.accs
def __integ(self,i_access,tj,tj_next):
if i_access not in self.data.access_names:
print(" sat num error")
return
else:
tmp = self.data.df_align.query("time >= {} and time <= {}".format(tj, tj_next))[i_access]
if pd.isnull(tmp).sum():
print(" section error")
return
else:
return tmp.sum()
def __si_max_tk(self,si,tj=None):
        try:  # the maximum may occur at more than one time value
max_time = self.data.df_align.query("{} == {}".format(si, self.data.df_align[si].max())).index
ret = math.ceil(max_time[0])
return ret
except:
print(si)
def build_graph(self,weights='1'):
assert(weights in ['1','tk'])
if self.terminal:
print("\n-> GRAPH BUILDING")
#build the graph whose access as the node
self.G = nx.DiGraph(date='2022-3-3', name='handover')
self.G.add_nodes_from(self.data.access_names)
        for tk in tqdm(self.data.all_tks[1:]):  # build the graph from the tks
            tik = self.data.tiks[tk]
            if tik.class_id =='I' :  # start or end point
                continue
            elif tik.class_id =='II':  # ordinary intersection
                try:
                    s1,s2 = tik.passInter[0]
                except Exception:
                    print('error unpacking passInter at tk {}'.format(tk))
                    continue
if self.__si_max_tk(s1) < tk and s1 in self.__s_prev(s2):
if weights=='tk':
self.G.add_weighted_edges_from([(s1, s2, tk)])
elif weights=='1':
self.G.add_weighted_edges_from([(s1, s2, 1)])
elif self.__si_max_tk(s1) > tk and s2 in self.__s_prev(s1):
if weights=='tk':
self.G.add_weighted_edges_from([(s2, s1, tk)])
elif weights == '1':
self.G.add_weighted_edges_from([(s2, s1, 1)])
            elif tik.class_id =='III':  # class-III tik
                # handle intersection points
inters = tik.getPass('Inter')
for inter in inters:
for s1,s2 in itertools.combinations(inter, 2) :
if self.__si_max_tk(s1) < tk:
if weights=='tk':
self.G.add_weighted_edges_from([(s1, s2, tk)])
elif weights=='1':
self.G.add_weighted_edges_from([(s1, s2, 1)])
else:
if weights=='tk':
self.G.add_weighted_edges_from([(s2, s1, tk)])
elif weights=='1':
self.G.add_weighted_edges_from([(s2, s1, 1)])
                # handle passIn
                # handle passOut
                # covers cases where more than two satellite curves intersect,
                # or where different curves intersect at exactly the same instant
        # use the curve vertices and midpoints as node positions
#from none
#sub graph heads
self.get_roots_and_leaves2()
if self.terminal:
print("--> graph finished")
print(self.G)
def get_roots_and_leaves2(self):
pass
roots=[]
leaves=[]
for acc in self.data.access_names:
if self.data.acc2tk[acc][0] == self.data.all_tks[0]:
roots.append(acc)
if self.data.acc2tk[acc][-1] == self.data.all_tks[-1]:
leaves.append(acc)
self.roots = roots
self.leaves = leaves
if self.terminal:
print("--> graph roots num:{} {}".format(len(self.roots), self.roots))
print("--> graph leaves num:{} {}".format(len(self.leaves), self.leaves))
def get_roots_and_leaves(self):
roots = []
leaves =[]
for si in self.__trave():
if len(self.__s_prev(si))==0 and self.G.out_degree[si] !=0:
            # this inequality is not equivalent to the number of subgraphs;
            # it breaks if two initial accesses share the same phase
            # filter out single-node subgraphs
roots.append(si)
if len(self.__s_prev(si))!=0 and self.G.out_degree[si] ==0:
leaves.append(si)
        # bubble sort for each sub-graph
for i in range(len(roots)):
for j in range(i+1,len(roots)):
if self.data.crossLog[roots[i]][0][0] >self.data.crossLog[roots[j]][0][0]:
tmp = roots[i]
roots[i] = roots[j]
roots[j]= tmp
for i in range(len(leaves)):
for j in range(i + 1, len(leaves)):
if self.data.crossLog[leaves[i]][0][0] > self.data.crossLog[leaves[j]][0][0]:
tmp = leaves[i]
leaves[i] = leaves[j]
leaves[j] = tmp
self.roots = roots
self.leaves = leaves
if self.terminal:
print("-> graph roots num:{}\n {}".format(len(self.roots),self.roots))
print("-> graph leaves num:{} \n{}".format(len(self.leaves),self.leaves))
def mst_run(self):
# nx.draw(self.G,with_labels=True)
# plt.show()
paths={}
for root in self.roots:
for leaf in self.leaves:
try:
path = nx.dijkstra_path(self.G, source=root, target=leaf)
paths[root,leaf]=path
except:
continue
pass
if self.terminal:
print('roots:{}'.format(self.roots))
print('leaves:{}'.format(self.leaves))
root_leaf = sorted(paths.values(),reverse=True,key=lambda x:len(x))
        final_solution = self.max_cover(paths, mode='mst')
        root_leaf[0].insert(0, 'none')
        return final_solution
# print(nx.shortest_path(tmp_graph, source='s2520', target='s2519'))
# path = nx.all_pairs_shortest_path(self.G)
# print(path)
def gd_run(self):
final_solution=[]
x_ks = self.roots
while x_ks:
max_intg =0
max_x =None
for x in x_ks:
if len(final_solution):
tj, tj_next = self.data.getInterTk(final_solution[-1],x), self.data.acc2tk[x][-1]
else:
tj, tj_next = self.data.acc2tk[x][0], self.data.acc2tk[x][-1]
tmp_intg = self.__integ(x, tj, tj_next)
if max_intg<tmp_intg:
max_intg = tmp_intg
max_x = x
final_solution.append(max_x)
x_ks = list(self.G.succ[max_x].keys())
final_solution.insert(0,'none')
return final_solution
def dp_run(self):
def check_start_stamp(access):
pass
if self.data.acc2tk[access][0] == self.data.all_tks[0]:
return self.data.all_tks[0]
elif route_dict[access]=='none':
return self.data.acc2tk[access][0]
else:
return check_start_stamp(route_dict[access])
# self.get_roots_and_leaves()
if self.terminal:
            print("\nPROBLEM SOLVING BY DP")
hop_dict={}
opt_dict={}
hop_dict['none'] = 0
opt_dict['none']=0
route_dict={}
        for root in self.roots:  # preprocess all root nodes (in-degree == 0) first
tj,tj_next = self.data.acc2tk[root][0],self.data.acc2tk[root][-1]
opt_dict[root] = (hop_dict['none']* opt_dict['none'] + self.__integ(root,tj,tj_next))/(hop_dict['none']+1)
hop_dict[root] = hop_dict['none'] + 1
route_dict[root] = 'none'
for si in tqdm(self.__trave()):
# if si =='s3017':
# pass
pre_sis = self.__s_prev(si)
tj_next = self.data.acc2tk[si][-1]
            if len(pre_sis)==0 :  # no predecessor si; si is the access with the earliest phase
# if si not in self.roots:
# continue
tj = self.data.acc2tk[si][0]
opt_dict[si] =(hop_dict['none']* opt_dict['none'] + self.__integ(si,tj,tj_next))/(hop_dict['none']+1)
hop_dict[si] = hop_dict['none']+1
route_dict[si] = 'none'
else:
acc_opt ={}
for pre_si in pre_sis:
                    if (pre_si ,si)not in self.G.edges:  # a different traversal order here may break this
print('error in( {},{})'.format(pre_si,si))
continue
tj = self.G.edges[pre_si,si]['weight']
si_last_integ = self.__integ(pre_si,tj,self.data.acc2tk[pre_si][-1])
integ = self.__integ(si,tj,tj_next)
acc_opt[pre_si] = (hop_dict[pre_si]*opt_dict[pre_si] - si_last_integ + integ)/(hop_dict[pre_si]+1)
# if tmp > tmp_opt :
# tmp_opt = tmp
# arg_opt = pre_si
# if acc_opt:
acc_opt = sorted(acc_opt.items(),reverse=True, key=lambda x: x[1])
for item in acc_opt:
if route_dict[item[0]]=='none' and self.data.acc2tk[item[0]][0]!=self.data.all_tks[0]:
continue
opt_dict[si] =item[1]
hop_dict[si] = hop_dict[item[0]]+1
route_dict[si] = item[0]
break
self.opt_dict=opt_dict
#post process
paths = {} # all paths in all sub graphs
for leaf in self.leaves:
paths[leaf] = []
paths[leaf].append(leaf)
end_node = leaf
while end_node != 'none':
paths[leaf].append(route_dict[end_node])
end_node = route_dict[end_node]
paths[leaf].reverse()
paths[leaf]=paths[leaf][1:]
paths[paths[leaf][0],leaf] = paths[leaf]
del paths[leaf]
if self.terminal:
print('--> paths:')
for path in paths.values():
print(path)
        final_solution = self.max_cover(paths,mode='dp')  # filter with the maximum-coverage rule
return final_solution
def __keep(self,path_i,path_j,mode):
'''
        Decide whether to keep both paths or only one of them (three outcomes).
        :param path_i:
        :param path_j:
        :return:
            0: keep path_i (drop path_j)
            1: keep path_j (drop path_i)
            2: keep both
'''
test_list = [path_i[0],path_i[1],path_j[0],path_j[1]]
ret = list(np.argsort(test_list))
if len(set(test_list))==2:
if mode=='dp' and path_i[2]>path_j[2] :
return 0
elif mode =='mst' and path_i[3]<path_j[3] :
return 0
else:
return 1
            # the two coverage intervals are identical
elif len(set(test_list))==3:
if test_list[1]==test_list[2]:
# if ret ==[0,1,2,3]:
return 2
# 0|--|1
# 2|--|3
#[0,1,1,2]-->[0,1,2,3]
# elif ret ==[2,0,3,1]:
if test_list[0]==test_list[3]:
return 2
# 0|--|1
#2|--|3
# [1,2,0,1]--> [2,0,3,1]
# if ret ==[0,2,1,3]:
if test_list[1]==test_list[3]:
# |----|
# |--|
# [0 ,2, 1, 2]
return 0
if test_list[0]==test_list[2] and test_list[3]>test_list[1]:
# 0|--|1
# 2|----|3
# [0,1,0,2]
return 1
if test_list[0]==test_list[2] and test_list[1]>test_list[3]:
# |----|
# |--|
# [0,2,0,1] -->[0,2,3,1]
return 0
elif len(set(test_list))==4:
pass
if ret == [2, 0, 1, 3] :
# |--|
# |----|
# [1,2,0,3]--> [2,0,1,3]
return 1
elif ret == [0, 2, 3, 1]:
#
# |----|
# |--|
# [0,3,1,2] -> [0,2,3,1]
return 0
if ret == [0, 2, 1, 3] :
# |---|
# |---|
if (path_i[1]-path_i[0]) >(path_j[1]-path_j[0]):
return 0
else:
return 1
# return 2
if ret ==[1, 3, 0, 2]:
# |---|
# |---|
if (path_i[1]-path_i[0]) >(path_j[1]-path_j[0]):
return 0
else:
return 1
if ret == [0, 1, 2 , 3 ] or [2,3,0,1]:
# |--|
# |--|
#
# |--|
# |--|
return 2
        # the intervals do not overlap
def max_cover(self,paths_dict,mode):
        # sort by end time, in descending order
# for path in paths
#
paths_list_detail=[]
if mode=='dp':
for (root,leaf), path in paths_dict.items():
paths_list_detail.append(
(
(root,leaf),#table[i][0]
(
self.data.acc2tk[path[0]][0], #table[i][1][0] start
self.data.acc2tk[path[-1]][-1], #table[i][1][1] end
self.opt_dict[leaf], #table[i][1][2] opt value
len(path)
)
)
)
elif mode =='mst':
for (root,leaf), path in paths_dict.items():
paths_list_detail.append(
(
(root,leaf),#table[i][0]
(
self.data.acc2tk[path[0]][0], #table[i][1][0] start
self.data.acc2tk[path[-1]][-1], #table[i][1][1] end
1 , #table[i][1][2] opt value
len(path)
)
)
)
        # sort by coverage span, ascending
        # drop any path whose span is completely covered by another
        # if two paths cover exactly the same span, drop the one with the lower opt value
        # keep the n paths giving maximum coverage
        # sort1: sort by end time, descending
# path_table=[
# (('1','2'), (7, 10, 23)),
# (('3', '4'), (4, 10, 4)),
# (('6', '5'), (2, 10, 12)),
# (('7', '8'), (9, 12, 3)),
# (('9', '10'),(9, 13, 3)),
# (('11', '12'),(1, 2, 3)),
# (('13', '14'),(7, 10, 3)),
#
# ]
# path_dict={}
# path_table = sorted(path_table,reverse=True,key=lambda x:(x[1][1]-x[1][0]))
# for tab in path_table:
# print('\n', tab)
# print('-->')
del_set = set([])
for path_i,path_j in itertools.combinations(paths_list_detail,2):
if path_i[0] in del_set or path_j[0] in del_set:
continue
sta = self.__keep(path_i[1],path_j[1],mode)
if sta ==0:
del_set.add(path_j[0])
elif sta ==1 :
del_set.add(path_i[0])
filted_paths_dict={}
for item,value in paths_dict.items():
if item not in del_set:
value.insert( 0,'none')
filted_paths_dict[item] = value
filted_paths_list = sorted(filted_paths_dict.values(),reverse=False,key=lambda x:self.data.acc2tk[x[1]][0])
final_solution=[]
for path in filted_paths_list:
final_solution+=path
return final_solution
def mea_run(self):
'''
        Strongest-received-signal (RSS) selection.
:return:
'''
if self.terminal:
            print("\n-> PROBLEM SOLVING BY RSS")
start = get_now()
tmp_list = []
for idx,row in self.data.df_align.iterrows():
row = row.replace(np.nan, 0)
if row.sum()==0:
tmp_list.append('none')
else:
max_acc = np.argmax(row)
tmp_list.append(self.data.df_align.columns[max_acc])
final_solution = ['none']
        # drop consecutive duplicate access names
i=0
final_solution.append(tmp_list[0])
while True:
if tmp_list[i] == final_solution[-1]:
pass
else:
final_solution.append(tmp_list[i])
i+=1
if i >= len(tmp_list):
break
time_stat(start)
return final_solution
def get_inter_tks(self,final_solution):
inter_tk_dict = {}
inter_tk_list = []
for s_prev, s_next in zip(final_solution[1:-1], final_solution[2:]):
tk = self.data.getInterTk(s_prev, s_next)
inter_tk_dict[s_prev, s_next] = tk
inter_tk_list.append(tk)
return inter_tk_dict,inter_tk_list
def get_selected_alg_base(self,inter_tk_dict,final_solution):
data = self.data
nan_assign = 0
solution_length = len(final_solution)
ret_tks = {}
for cnt in range(1, solution_length - 1):
s_pre, s, s_next = final_solution[cnt - 1], final_solution[cnt], final_solution[cnt + 1]
if s_pre == 'none':
ret_tks[s] = (self.data.acc2tk[s][0], inter_tk_dict[(s, s_next)])
continue
if s_next == 'none':
ret_tks[s] = (inter_tk_dict[(s_pre, s)], self.data.acc2tk[s][-1])
continue
if s == 'none':
continue
ret_tks[s] = (inter_tk_dict[(s_pre, s)], inter_tk_dict[(s, s_next)])
        # append the last satellite (access), which the loop above skips
ret_tks[final_solution[-1]] = (
inter_tk_dict[(final_solution[-2], final_solution[-1])], self.data.acc2tk[final_solution[-1]][-1])
arrs=[]
for access_name in ret_tks.keys():
# for line in data.get_sublines(access_name, [alg_base], with_time=True):
line = data.df_align[access_name]
# if access_name in ret_tks.keys():
best_mask = (line.index >= ret_tks[access_name][0]) * (line.index <= ret_tks[access_name][1])
if len(arrs) > 0:
start = arrs[-1].index[-1]
stop = line[best_mask].index[0]
                if start+1 != stop:  # fill the gap (broken segment) with zeros
time_index = np.linspace(start= start+1,stop=stop,num=stop-start)
arrs.append(pd.Series(np.ones_like(time_index)*nan_assign,index=time_index))
# print("--> {} to {} is null".format(arrs[-1].index[-1],line[best_mask].index[0]))
arrs.append(line[best_mask][:-1])
return pd.concat(arrs,axis=0)
# pass
def __path_tk(self,path,headOrTail):
if headOrTail=='head':
return self.data.acc2tk[path[1]][0]
if headOrTail =="tail":
return self.data.acc2tk[path[-1]][-1]
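def example_dp_pipeline(data):
    """Illustrative driver (not part of the original module): build the access
    graph and solve with DP, then recover the handover instants. The `data`
    argument is assumed to be the preprocessed access/tik container that
    Solver expects (see the attributes referenced in Solver.__init__)."""
    solver = Solver(data, alg_base="Max - Range (km)", terminal=True)
    solver.build_graph(weights='tk')
    solution = solver.dp_run()  # alternatives: gd_run(), mst_run(), mea_run()
    inter_tk_dict, inter_tk_list = solver.get_inter_tks(solution)
    return solution, inter_tk_dict, inter_tk_list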
class GreedySolver:
def __init__(self):
pass
| 32.176211 | 118 | 0.461482 |
acf50fbeef3da773a00a45591348e4bc1d428c4f | 5,977 | py | Python | gpm.py | kimlab/GPyM | 34814be0176c31f1e85326750d187cbc2df41942 | [
"MIT"
] | 9 | 2016-09-02T14:13:10.000Z | 2020-08-31T11:57:49.000Z | gpm.py | kimlab/GPyM | 34814be0176c31f1e85326750d187cbc2df41942 | [
"MIT"
] | 1 | 2018-12-14T18:17:02.000Z | 2019-01-08T18:00:34.000Z | gpm.py | kimlab/GPyM | 34814be0176c31f1e85326750d187cbc2df41942 | [
"MIT"
] | 6 | 2017-05-21T14:06:44.000Z | 2020-06-26T08:45:08.000Z | #! /usr/bin/python
#--------------------------------------------------------------------
# PROGRAM : gpm.py
# CREATED BY : hjkim @IIS.2015-01-14 11:52:17.992599
# MODIFIED BY :
#
# USAGE : $ ./gpm.py
#
# DESCRIPTION:
#------------------------------------------------------cf0.2@20120401
import os, sys, importlib
import time
import cPickle as pickle
from optparse import OptionParser
from ConfigParser import SafeConfigParser
from numpy import empty
from alien.dtrange import dtrange
from alien.GridCoordinates import GridCoordinates
#from alien.read_hdf4 import read_hdf4
#from alien.read_hdf5 import read_hdf5
from alien.TimeSeries import bin_bytbound
from gpm_data import GPM_data
from search_granules import SearchGranules
from granule2map import granule2map
class GPM( SearchGranules ):
def __init__(self, prjName, prdLv, prdVer, **kwargs):
'''
prjName : e.g.) 'GPM.KuPR'
prdLv : e.g.) 'L2'
prdVer : e.g.) '02'
'''
modroot = os.path.dirname(__file__)
self.cfg = SafeConfigParser( os.environ )
self.cfg.read( os.path.join(modroot, 'config') )
self.cfg._sections['Defaults'].update( kwargs )
if self.cfg.get( 'Defaults','dataroot') == '':
self.cfg.set('Defaults','dataroot', os.environ['PWD'])
self.dataDir = self.cfg.get('Defaults','dataroot')
self.prjName = prjName
self.prdLv = prdLv
self.prdVer = prdVer
self.prdDir = os.path.join( self.dataDir,
self.prjName,
self.prdLv,
self.prdVer)
self.cached = self.cfg.get('Defaults', 'cached')
self.cacheDir = self.cfg.get('Defaults', 'cache_dir')
fnPath = {'TRMM': self.cfg.get('Defaults','hdf4_module'),
'GPM' : self.cfg.get('Defaults','hdf5_module')}[prjName.split('.')[0]]
fnName = fnPath.split('.')[-1]
modPath = '.'.join( fnPath.split('.')[:-1] )
self.func_read = getattr( importlib.import_module( modPath ), fnName )
'''
self.cacheDir = os.path.join( self.dataDir,
'cache.dim',
self.prjName,
self.prdLv,
self.prdVer)
self.prdDir = '%s/%s/%s/%s'%(self.dataDir,
self.prjName,
self.prdLv,
self.prdVer)
self.cacheDir = '%s/cache.dim/%s/%s/%s'%(self.dataDir,
self.prjName,
self.prdLv,
self.prdVer)
self.func_read = {'TRMM': read_hdf4,
'GPM' : read_hdf5}[ prjName.split('.')[0] ]
'''
'''
dictGrp = {'GPM.GMI':'S1',
'GPM.DPR':'NS', # HS, MS, NS
'GPM.KaPR':'MS', # HS, MS
'GPM.KuPR':'NS',}
grpCode = dictGrp[ self.prjName ]
'''
def __call__(self, varName, sDTime, eDTime, BBox=None, res=None, delT=None):
'''
res : spa. res. of 2d-array
sDTime : DTime bound left
eDTime : DTime bound right
'''
mapCode = '^' + ''.join( str(res).split('.') )
gpmData = GPM_data()
srcDir = os.path.join( self.dataDir, self.prdDir )
        assert os.path.exists( srcDir ), '{} does not exist.'.format( srcDir )
Granule = self.search_granules( srcDir, sDTime, eDTime, BBox )
if len(Granule) == 0:
print '! Warning ! no data extracted'
return None
outSize = sum( [ len(gra[2]) for gra in Granule ] ), Granule[0][2].shape[1]
Lat = empty( outSize, 'float32')
Lon = empty( outSize, 'float32')
aOut = empty( outSize, 'float32' )
DTime = []
prvI = 0
for granule in Granule:
srcPath, dtime, lat, lon, idx = granule
gpmData.srcPath.append(srcPath)
gpmData.recLen.append( len(dtime) ) # number of data record for each file
nxtI = prvI + len(dtime)
aOut[prvI:nxtI] = self.func_read( srcPath, varName, idx.tolist() )
Lat[prvI:nxtI] = lat
Lon[prvI:nxtI] = lon
DTime.extend(dtime)
if res != None and delT == None:
gpmData.griddata.append( granule2map( lat, lon, aOut[prvI:nxtI], BBox, res ) )
gpmData.grid = GridCoordinates(mapCode, BBox=BBox)
prvI = nxtI
if delT != None:
dtBnd = dtrange(sDTime, eDTime, delT)
gpmData.tbound = map( None, dtBnd[:-1], dtBnd[1:] )
gpmData.dtime = bin_bytbound( DTime, dtBnd, DTime )
gpmData.lat = bin_bytbound( DTime, dtBnd, Lat )
gpmData.lon = bin_bytbound( DTime, dtBnd, Lon )
gpmData.data = bin_bytbound( DTime, dtBnd, aOut )
if res != None:
gpmData.griddata = [ granule2map(lat, lon, a, BBox, res)
for lat, lon, a in map(None, gpmData.lat, gpmData.lon, gpmData.data) ]
gpmData.grid = GridCoordinates(mapCode, BBox=BBox)
else:
gpmData.dtime = DTime
gpmData.lat = Lat
gpmData.lon = Lon
gpmData.data = aOut
return gpmData
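# Illustrative usage sketch (not part of the original module). The variable
# name, bounding box and resolution below are placeholder assumptions, and
# 'Defaults'->'dataroot' in the config must point at an existing GPM archive
# laid out as <dataroot>/<prjName>/<prdLv>/<prdVer>.
def example_extract(varName, sDTime, eDTime):
    gpm = GPM('GPM.KuPR', 'L2', '02')
    return gpm(varName, sDTime, eDTime, BBox=[[20., 118.], [48., 150.]], res=0.1)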
| 32.483696 | 123 | 0.464949 |
acf50fc1065bd5f0f78f56325579d47563fc9b6b | 10,591 | py | Python | astroML/plotting/multiaxes.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | 1 | 2018-09-13T05:13:04.000Z | 2018-09-13T05:13:04.000Z | astroML/plotting/multiaxes.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | 1 | 2018-05-18T17:43:38.000Z | 2018-05-18T19:34:01.000Z | astroML/plotting/multiaxes.py | aragilar/astroML | d3f6279eb632957662338761cb559a1dcd541fb0 | [
"BSD-2-Clause"
] | 1 | 2022-02-26T14:15:02.000Z | 2022-02-26T14:15:02.000Z | """
Multi-panel plotting
"""
from copy import deepcopy
import numpy as np
class MultiAxes(object):
"""Visualize Multiple-dimensional data
This class enables the visualization of multi-dimensional data, using
a triangular grid of 2D plots.
Parameters
----------
ndim : integer
Number of data dimensions
inner_labels : bool
If true, then label the inner axes. If false, then only the outer
axes will be labeled
fig : matplotlib.Figure
if specified, draw the plot on this figure. Otherwise, use the
current active figure.
left, bottom, right, top, wspace, hspace : floats
these parameters control the layout of the plots. They behave have
an identical effect as the arguments to plt.subplots_adjust. If not
specified, default values from the rc file will be used.
Examples
--------
A grid of scatter plots can be created as follows::
x = np.random.normal((4, 1000))
R = np.random.random((4, 4)) # projection matrix
x = np.dot(R, x)
ax = MultiAxes(4)
ax.scatter(x)
ax.set_labels(['x1', 'x2', 'x3', 'x4'])
Alternatively, the scatter plot can be visualized as a density::
ax = MultiAxes(4)
ax.density(x, bins=[20, 20, 20, 20])
"""
def __init__(self, ndim, inner_labels=False,
fig=None,
left=None, bottom=None,
right=None, top=None,
wspace=None, hspace=None):
# Import here so that testing with Agg will work
from matplotlib import pyplot as plt
if fig is None:
fig = plt.gcf()
self.fig = fig
self.ndim = ndim
self.inner_labels = inner_labels
self._update('left', left)
self._update('bottom', bottom)
self._update('right', right)
self._update('top', top)
self._update('wspace', wspace)
self._update('hspace', hspace)
self.axes = self._draw_panels()
def _update(self, s, val):
# Import here so that testing with Agg will work
from matplotlib import rcParams
if val is None:
val = getattr(self, s, None)
if val is None:
key = 'figure.subplot.' + s
val = rcParams[key]
setattr(self, s, val)
def _check_data(self, data):
data = np.asarray(data)
if data.ndim != 2:
raise ValueError("data dimension should be 2")
if data.shape[1] != self.ndim:
raise ValueError("leading dimension of data should match ndim")
return data
def _draw_panels(self):
# Import here so that testing with Agg will work
from matplotlib import pyplot as plt
if self.top <= self.bottom:
raise ValueError('top must be larger than bottom')
if self.right <= self.left:
raise ValueError('right must be larger than left')
ndim = self.ndim
panel_width = ((self.right - self.left)
/ (ndim - 1 + self.wspace * (ndim - 2)))
panel_height = ((self.top - self.bottom)
/ (ndim - 1 + self.hspace * (ndim - 2)))
full_panel_width = (1 + self.wspace) * panel_width
full_panel_height = (1 + self.hspace) * panel_height
axes = np.empty((ndim, ndim), dtype=object)
axes.fill(None)
for j in range(1, ndim):
for i in range(j):
left = self.left + i * full_panel_width
right = self.bottom + (ndim - 1 - j) * full_panel_height
ax = self.fig.add_axes([left, right,
panel_width, panel_height])
axes[i, j] = ax
if not self.inner_labels:
# remove unneeded x labels
for i in range(ndim):
for j in range(ndim - 1):
ax = axes[i, j]
if ax is not None:
ax.xaxis.set_major_formatter(plt.NullFormatter())
# remove unneeded y labels
for i in range(1, ndim):
for j in range(ndim):
ax = axes[i, j]
if ax is not None:
ax.yaxis.set_major_formatter(plt.NullFormatter())
return np.asarray(axes, dtype=object)
def set_limits(self, limits):
"""Set the axes limits
Parameters
----------
limits : list of tuples
a list of plot limits for each dimension, each in the form
(xmin, xmax). The length of `limits` should match the data
dimension.
"""
if len(limits) != self.ndim:
raise ValueError("limits do not match number of dimensions")
for i in range(self.ndim):
for j in range(self.ndim):
ax = self.axes[i, j]
if ax is not None:
ax.set_xlim(limits[i])
ax.set_ylim(limits[j])
def set_labels(self, labels):
"""Set the axes labels
Parameters
----------
labels : list of strings
a list of plot limits for each dimension. The length of `labels`
should match the data dimension.
"""
if len(labels) != self.ndim:
raise ValueError("labels do not match number of dimensions")
for i in range(self.ndim):
ax = self.axes[i, self.ndim - 1]
if ax is not None:
ax.set_xlabel(labels[i])
for j in range(self.ndim):
ax = self.axes[0, j]
if ax is not None:
ax.set_ylabel(labels[j])
def set_locators(self, locators):
"""Set the tick locators for the plots
Parameters
----------
locators : list or plt.Locator object
If a list, then the length should match the data dimension. If
a single Locator instance, then each axes will be given the
same locator.
"""
# Import here so that testing with Agg will work
from matplotlib import pyplot as plt
if isinstance(locators, plt.Locator):
locators = [deepcopy(locators) for i in range(self.ndim)]
elif len(locators) != self.ndim:
raise ValueError("locators do not match number of dimensions")
for i in range(self.ndim):
for j in range(self.ndim):
ax = self.axes[i, j]
if ax is not None:
ax.xaxis.set_major_locator(locators[i])
ax.yaxis.set_major_locator(locators[j])
def set_formatters(self, formatters):
"""Set the tick formatters for the outer edge of plots
Parameters
----------
formatterss : list or plt.Formatter object
If a list, then the length should match the data dimension. If
a single Formatter instance, then each axes will be given the
same locator.
"""
# Import here so that testing with Agg will work
from matplotlib import pyplot as plt
if isinstance(formatters, plt.Formatter):
formatters = [deepcopy(formatters) for i in range(self.ndim)]
elif len(formatters) != self.ndim:
raise ValueError("formatters do not match number of dimensions")
for i in range(self.ndim):
ax = self.axes[i, self.ndim - 1]
if ax is not None:
ax.xaxis.set_major_formatter(formatters[i])
for j in range(self.ndim):
ax = self.axes[0, j]
if ax is not None:
ax.xaxis.set_major_formatter(formatters[i])
def plot(self, data, *args, **kwargs):
"""Plot data
This function calls plt.plot() on each axes. All arguments or
keyword arguments are passed to the plt.plot function.
Parameters
----------
data : ndarray
shape of data is [n_samples, ndim], and ndim should match that
passed to the MultiAxes constructor.
"""
data = self._check_data(data)
for i in range(self.ndim):
for j in range(self.ndim):
ax = self.axes[i, j]
if ax is None:
continue
ax.plot(data[:, i], data[:, j], *args, **kwargs)
def scatter(self, data, *args, **kwargs):
"""Scatter plot data
This function calls plt.scatter() on each axes. All arguments or
keyword arguments are passed to the plt.scatter function.
Parameters
----------
data : ndarray
shape of data is [n_samples, ndim], and ndim should match that
passed to the MultiAxes constructor.
"""
data = self._check_data(data)
for i in range(self.ndim):
for j in range(self.ndim):
ax = self.axes[i, j]
if ax is None:
continue
ax.scatter(data[:, i], data[:, j], *args, **kwargs)
def density(self, data, bins=20, **kwargs):
"""Density plot of data
This function calls np.histogram2D to bin the data in each axes, then
calls plt.imshow() on the result. All extra arguments or
keyword arguments are passed to the plt.imshow function.
Parameters
----------
data : ndarray
shape of data is [n_samples, ndim], and ndim should match that
passed to the MultiAxes constructor.
bins : int, array, list of ints, or list of arrays
specify the bins for each dimension. If bins is a list, then the
length must match the data dimension
"""
data = self._check_data(data)
if not hasattr(bins, '__len__'):
bins = [bins for i in range(self.ndim)]
elif len(bins) != self.ndim:
bins = [bins for i in range(self.ndim)]
for i in range(self.ndim):
for j in range(self.ndim):
ax = self.axes[i, j]
if ax is None:
continue
H, xbins, ybins = np.histogram2d(data[:, i], data[:, j],
(bins[i], bins[j]))
ax.imshow(H.T, origin='lower', aspect='auto',
extent=(xbins[0], xbins[-1], ybins[0], ybins[-1]),
**kwargs)
ax.set_xlim(xbins[0], xbins[-1])
ax.set_ylim(ybins[0], ybins[-1])
| 34.838816 | 77 | 0.541592 |
acf51039042557cef2b8189d6ceb79aa91b64967 | 1,621 | py | Python | tests/unit/api_v1/test_answers.py | briank254/stackoverflow--lite | a95a180bf5104fff17696872e7d088c6d12afae0 | [
"MIT"
] | 1 | 2019-04-10T08:45:48.000Z | 2019-04-10T08:45:48.000Z | tests/unit/api_v1/test_answers.py | briank254/stackoverflow--lite | a95a180bf5104fff17696872e7d088c6d12afae0 | [
"MIT"
] | null | null | null | tests/unit/api_v1/test_answers.py | briank254/stackoverflow--lite | a95a180bf5104fff17696872e7d088c6d12afae0 | [
"MIT"
] | 1 | 2019-04-10T08:48:09.000Z | 2019-04-10T08:48:09.000Z | import unittest
import json
from app.id_gen import id_generator
from app import app
class TestAnswer(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
self.question = {
"title": "Be you",
"question": "How do I refactor tests with database?"
}
self.answer = {
"answer": "You could first of all seperate your tests?"
}
def post_question(self, data):
"""post question helper"""
response = self.app.post('/api/v1/questions',
data=json.dumps(data),
content_type='application/json'
)
return response
def post_answer(self, question_id, data):
"""post question helper"""
res = self.app.post('/api/v1/questions/' +str(question_id) +'/answers',
data=json.dumps(data),
content_type='application/json'
)
return res
def test_post_answer(self):
"""method to test post answer to a question"""
self.post_question(self.question)
question_id = id_generator("Be you")
res = self.post_answer(question_id, self.answer)
self.assertEqual(res.status_code, 201)
def test_get_all_answers(self):
"""method to test get all answers to a question"""
question_id = id_generator("Be you")
res = self.app.get('/api/v1/questions/' +str(question_id) +'/answers')
self.assertEqual(res.status_code, 200)
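# Standard unittest entry point so the module can also be run directly with
# ``python test_answers.py``.
if __name__ == '__main__':
    unittest.main()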
| 29.472727 | 79 | 0.556447 |
acf511defb27c616588dab3b6220047845a81a8d | 4,287 | py | Python | src/apiClient.py | Fuzzbawls/PIVX-SPMT | 4d5157e452dc43894805c7149b897849dc900029 | [
"MIT"
] | null | null | null | src/apiClient.py | Fuzzbawls/PIVX-SPMT | 4d5157e452dc43894805c7149b897849dc900029 | [
"MIT"
] | null | null | null | src/apiClient.py | Fuzzbawls/PIVX-SPMT | 4d5157e452dc43894805c7149b897849dc900029 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from random import choice
from misc import getCallerName, getFunctionName, printException, printDbg
api_keys = ["b62b40b5091e", "f1d66708a077", "ed85c85c0126", "ccc60d06f737"]
class ApiClient:
def __init__(self):
self.url = "http://chainz.cryptoid.info/pivx/api.dws"
self.parameters = {}
def checkResponse(self, parameters):
key = choice(api_keys)
parameters['key'] = key
resp = requests.get(self.url, params=parameters)
if resp.status_code == 200:
data = resp.json()
return data
else:
print("Invalid response from API provider\n")
print("Status code: %s\n" % str(resp.status_code))
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
return None
def getAddressUtxos(self, address):
try:
self.parameters = {}
self.parameters['q'] = 'unspent'
self.parameters['active'] = address
return self.checkResponse(self.parameters)
except Exception as e:
err_msg = "error in getAddressUtxos"
printException(getCallerName(), getFunctionName(), err_msg, e.args)
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
def getBalance(self, address):
try:
self.parameters = {}
self.parameters['q'] = 'getbalance'
self.parameters['a'] = address
return self.checkResponse(self.parameters)
except Exception as e:
err_msg = "error in getBalance"
printException(getCallerName(), getFunctionName(), err_msg, e.args)
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
def getStatus(self):
try:
self.parameters = {}
self.parameters['q'] = 'getblockcount'
resp = requests.get(self.url, self.parameters)
return resp.status_code
except Exception as e:
err_msg = "Unable to connect to API provider"
printException(getCallerName(), getFunctionName(), err_msg, e.args)
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
return 0
def getStatusMess(self, statusCode):
message = {
0: "No response from server",
200: "OK! Connected"}
if statusCode in message:
return message[statusCode]
return "Not Connected! Status: %s" % str(statusCode)
def getBlockCount(self):
try:
self.parameters = {}
self.parameters['q'] = 'getblockcount'
return self.checkResponse(self.parameters)
except Exception as e:
err_msg = "error in getBlockCount"
printException(getCallerName(), getFunctionName(), err_msg, e.args)
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
def getBlockHash(self, blockNum):
try:
self.parameters = {}
self.parameters['q'] = 'getblockhash'
self.parameters['height'] = str(blockNum)
return self.checkResponse(self.parameters)
except Exception as e:
err_msg = "error in getBlockHash"
printException(getCallerName(), getFunctionName(), err_msg, e.args)
try:
self.client.close()
self.client = requests.session()
except Exception:
pass
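# Minimal usage sketch (illustrative only): exercises the public helpers above.
# Running it hits the live cryptoid API; the address below is a placeholder,
# not a real PIVX address.
if __name__ == "__main__":
    client = ApiClient()
    status = client.getStatus()
    print(client.getStatusMess(status))
    if status == 200:
        print("block count:", client.getBlockCount())
        print("balance:", client.getBalance("<some-pivx-address>"))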
| 31.065217 | 80 | 0.49965 |
acf51264320f1c0a1a65f9088bd1f9a0fe8bf819 | 12,145 | py | Python | training.py | hamzajaved05/text2map | c1d824896cf3b9e658687d77762766471b176bc8 | [
"MIT"
] | null | null | null | training.py | hamzajaved05/text2map | c1d824896cf3b9e658687d77762766471b176bc8 | [
"MIT"
] | null | null | null | training.py | hamzajaved05/text2map | c1d824896cf3b9e658687d77762766471b176bc8 | [
"MIT"
] | null | null | null | """
Author: Hamza
Dated: 20.04.2019
Project: texttomap
"""
import argparse
import pickle
import torch.optim as optim
from math import ceil
from tensorboardX import SummaryWriter
import pandas as pd
from torch.utils.data import DataLoader
from util.loaders import *
from util.models import *
import os
from sklearn.model_selection import train_test_split
import logging
from util.valid_training import validation
import random
parser = argparse.ArgumentParser(description='Text to map - Training with image patches and text')
parser.add_argument("--impath", type=str, help="Path for Image patches")
parser.add_argument("--inpickle", type=str, help="Path of pickle file")
parser.add_argument("--epoch", type=int, help="no of epochs")
parser.add_argument("--batch", type=int, help="batch_size")
parser.add_argument("--lr", default=0.001, type=float, help="learning rate")
parser.add_argument("--logid", type=str, help="logid")
parser.add_argument("--write", default=True, type=bool, help="Write on tensorboard")
parser.add_argument("--limit", default=-1, type=int, help="Limit dataset")
parser.add_argument("--earlystopping", default=True, type=bool, help="Enable or disable early stopping")
parser.add_argument("--decay_freq", default=None, type=int, help="Decay freq")
parser.add_argument("--embed_size", default=256, type=int, help="Size of embedding")
parser.add_argument("--model", default="ti", type=str, help="Model type")
parser.add_argument("--dropout", default=0.4, type=float, help="Dropout before")
parser.add_argument("--decay_value", default = 0.95, type = float, help = "decay by value")
parser.add_argument("--margin", default = 0.1, type = float, help = "decay by value")
parser.add_argument("--save_embeds", default = True, type = bool, help = "decay by value")
parser.add_argument("--maxperclass", default = 30, type = int, help="maximum items per class")
parser.add_argument("--soft_positive", default = True, type = bool, help = "Soft positive mining")
args = parser.parse_args()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
with open(args.inpickle, "rb") as a:
[klass, words_sparse, words, jpgs, enc, modes] = pickle.load(a)
if not args.limit == -1:
klass = klass[:args.limit]
words_sparse = words_sparse[:args.limit]
words = words[:args.limit]
jpgs = jpgs[:args.limit]
modes = modes[:args.limit]
print("Limited inputs of length {} with total classes {}".format(len(klass), klass[-1]))
else:
print("Original inputs of length {} with total classes {}".format(len(klass), klass[-1]))
def seq_klass(klass):
x = -1
arr = []
klass2 = []
for i in klass:
if i not in arr:
x+=1
arr.append(i)
klass2.append(x)
return klass2
print("Processing inputs")
def limitklass(klas, word, word_sparse, jpg):
klas = np.array(klas)
klass2, klass2v = [], []
word2, word2v = [], []
word_sparse2, word_sparse2v = [] ,[]
jpgs2, jpgs2v = [], []
label = -1
for j in list(set(klas)):
x = np.sum(np.array(klas)==j)
if x >= args.maxperclass:
label+=1
indices = random.sample(list(np.argwhere(np.array(klas)==j).squeeze()), args.maxperclass)
            v_indices = list(set(np.argwhere(np.asarray(klas)==j).squeeze()).difference(set(indices)))
if len(v_indices) >args.itemsperclass:
v_indices = random.sample(v_indices, args.itemsperclass)
for i in v_indices:
klass2v.append(label)
word2v.append(word[i])
word_sparse2v.append(word_sparse[i])
jpgs2v.append(jpg[i])
elif x >= args.itemsperclass:
label+=1
indices = random.sample(list(np.argwhere(np.array(klas)==j).squeeze()), args.itemsperclass)
            v_indices = list(set(np.argwhere(np.asarray(klas)==j).squeeze()).difference(set(indices)))
if len(v_indices) >args.itemsperclass:
v_indices = random.sample(v_indices, args.itemsperclass)
for i in v_indices:
klass2v.append(label)
word2v.append(word[i])
word_sparse2v.append(word_sparse[i])
jpgs2v.append(jpg[i])
else:
continue
for i in indices:
klass2.append(label)
word2.append(word[i])
word_sparse2.append(word_sparse[i])
jpgs2.append(jpg[i])
return klass2, word2, word_sparse2, jpgs2, klass2v, word2v, word_sparse2v, jpgs2v
klass, words, words_sparse, jpgs, klass_v, words_v, words_sparse_v, jpgs_v= limitklass(klass, words, words_sparse, jpgs)
print("length of train is {}, test is {}".format(len(klass), len(klass_v)))
no_classes = klass[-1] + 1
data_size = len(klass)
if args.model == 'ti':
Model = p_embed_net
elif args.model == 't':
Model = ModelT
elif args.model == 'i':
Model = ModelI
else:
    raise ValueError("Unidentified model specified")
Inter = Model(embedding= args.embed_size, do = args.dropout).float().to(device)
Network = TripletNet(Inter).float().to(device)
criterion = TripletLoss(margin= args.margin).to(device)
logging.basicConfig(filename='models/' + args.logid + args.model + '.log', filemode='w', format='%(message)s')
logger = logging.getLogger('dummy')
logger.addHandler(logging.FileHandler("testing.log"))
complete_dataset = image_word_triplet_loader_allhard(jpgs, words, words_sparse, klass, args.impath, args.soft_positive)
train_loader = DataLoader(complete_dataset, batch_size=args.batch, shuffle=True)
print("Dataloaders done")
if args.write:
Writer = SummaryWriter("models/tbx/" + args.logid + args.model)
Writer.add_scalars("Metadata" + args.model, {"Batch_size": args.batch,
"learning_rate": args.lr,
"logid": int(args.logid),
"No_of_classes": no_classes,
"dropout": args.dropout,
"embed_size": args.embed_size,
})
valid_class = validation(klass_v, words_v, words_sparse_v,jpgs_v, args.impath, Network, Writer)
optimizer = optim.Adam(Network.parameters(), lr=args.lr)
epochs = args.epoch
train_accuracy = []
train_loss = []
validation_accuracy = []
validation_loss = []
epoch_metric = {"Training_loss": [], "Training_acc": [], "Validation_loss": [], "Validation_acc": []}
logs = {}
logs["training_batch_pdis"] = []
logs["training_batch_ndis"] = []
logs["training_batch_loss"] = []
logs["validation_batch_pdis"] = []
logs["validation_batch_loss"] = []
logs["validation_batch_ndis"] = []
trainingcounter = 0
validationcounter = 0
for epoch in range(1, epochs + 1):
# print("epoch {}".format(epoch))
Network.train()
for batch_idx, data in enumerate(train_loader):
ai, ap, aw, a_index,\
pi, pp, pw, \
ni1, np1, nw1,\
ni2, np2, nw2,\
ni3, np3, nw3,\
ni4, np4, nw4,\
ni5, np5, nw5,\
ni6, np6, nw6,\
ni7, np7, nw7,\
ni8, np8, nw8,\
ni9, np9, nw9,\
ni10, np10, nw10, \
x,\
first_pos, last_pos, len_pos = data
optimizer.zero_grad()
ao, po, no1 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni1.to(device), np1.to(device)])
_, _, no2 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni2.to(device), np2.to(device)])
_, _, no3 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni3.to(device), np3.to(device)])
_, _, no4 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni4.to(device), np4.to(device)])
_, _, no5 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni5.to(device), np5.to(device)])
# _, _, no6 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni6.to(device), np6.to(device)])
# _, _, no7 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni7.to(device), np7.to(device)])
# _, _, no8 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni8.to(device), np8.to(device)])
# _, _, no9 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni9.to(device), np9.to(device)])
# _, _, no10 = Network([ai.to(device), ap.to(device), pi.to(device), pp.to(device), ni10.to(device), np10.to(device)])
# print('e')
loss1, p1, n1 = criterion(ao.to(device), po.to(device), no1.to(device))
loss2, _, n2 = criterion(ao.to(device), po.to(device), no2.to(device))
loss3, _, n3 = criterion(ao.to(device), po.to(device), no3.to(device))
loss4, _, n4 = criterion(ao.to(device), po.to(device), no4.to(device))
loss5, _, n5 = criterion(ao.to(device), po.to(device), no5.to(device))
# loss6, _, n6 = criterion(ao.to(device), po.to(device), no6.to(device))
# loss7, _, n7 = criterion(ao.to(device), po.to(device), no7.to(device))
# loss8, _, n8 = criterion(ao.to(device), po.to(device), no8.to(device))
# loss9, _, n9 = criterion(ao.to(device), po.to(device), no9.to(device))
# loss10, _, n10 = criterion(ao.to(device), po.to(device), no10.to(device))
# print(p, n)
loss = loss1+loss2+loss3+loss4+loss5
# loss6+loss7+loss8+loss9+loss10
n = (n1.item()+n2.item()+n3.item()+n4.item()+n5.item())/5
# +n6.item()+n7.item()+n8.item()+n9.item()+n10.item())/10
loss.backward()
# print(loss)
optimizer.step()
print("epoch {}, batch {}/ {}, train_loss {:.5f}".format(epoch, batch_idx, train_loader.__len__(), loss.item()))
complete_dataset.result_update(ao.cpu().detach().numpy(), a_index.cpu().detach().numpy())
logs["training_batch_pdis"].append(p1.item())
logs["training_batch_ndis"].append(n)
logs["training_batch_loss"].append(loss.item())
Writer.add_scalars("Training_data",{"batch_loss": loss.item(),
"batch_pdis": p1.item(),
"batch_ndis": n
}, trainingcounter)
Writer.add_scalars("Indices", {"n_indices": sum(x) / len(x),
"max_p_index": sum(last_pos)/len(last_pos),
"min_p_index": sum(first_pos)/len(first_pos),
"len_indices": sum(len_pos)/len(len_pos)
}, trainingcounter)
trainingcounter+=1
if not epoch == 1:
complete_dataset.update()
complete_dataset.increasebatch()
complete_dataset.increaseepoch()
if epoch ==1:
complete_dataset.update()
ps, ns = valid_class.evaluate(np.asarray(complete_dataset.values),np.asarray(complete_dataset.labels))
if args.save_embeds:
with open("models/"+args.logid+"embeds.pickle", "wb") as q:
pickle.dump([complete_dataset.libs, complete_dataset.labels], q)
with open('models/' + args.logid + 'logfile.pickle', 'wb') as q:
pickle.dump([logs, args], q)
torch.save(Network.state_dict(), "models/"+args.logid+"timodeldict.pt")
torch.save(Network, "models/"+args.logid+"timodelcom.pt")
if args.decay_freq is not None:
if epoch % args.decay_freq == 0:
for g in optimizer.param_groups:
g['lr'] = g["lr"] * args.decay_value
# mode(words[np.argwhere(np.asarray(klass) == 0)].squeeze())
# import pickle
# import statistics
# import numpy as np
#
# with open("training_data_pytorch04.pickle", "rb") as a:
# [klass, words_sparse, words, jpgs, enc, modes] = pickle.load(a)
#
#
# klass, words = np.array(klass), np.array(words)
# mod = []
# for i in list(set(klass)):
# dum = list(words[np.argwhere(np.asarray(klass) == i)].squeeze())
# mod.append(max(set(dum), key = dum.count)) | 42.764085 | 126 | 0.610951 |
acf513dd69342df0b2d9298b03a5ced2d63d2b30 | 4,675 | py | Python | src/cosalib/cli.py | kelvinfan001/coreos-assembler | 32e55462b9c24fdeb9ce3fad7e840ca4128c8492 | [
"Apache-2.0"
] | 2 | 2019-02-12T00:46:11.000Z | 2020-03-02T15:40:16.000Z | src/cosalib/cli.py | kelvinfan001/coreos-assembler | 32e55462b9c24fdeb9ce3fad7e840ca4128c8492 | [
"Apache-2.0"
] | 1 | 2021-02-23T22:46:44.000Z | 2021-02-23T22:46:44.000Z | src/cosalib/cli.py | kelvinfan001/coreos-assembler | 32e55462b9c24fdeb9ce3fad7e840ca4128c8492 | [
"Apache-2.0"
] | null | null | null | # NOTE: PYTHONUNBUFFERED is set in the entrypoint for unbuffered output
# pylint: disable=C0103
import argparse
import logging as log
import os
from cosalib import (
aliyun,
aws,
azure,
digitalocean,
gcp,
vultr,
exoscale
)
CLOUD_CLI_TARGET = {
"aws": (aws.aws_cli,
aws.aws_run_ore,
aws.aws_run_ore_replicate),
"aliyun": (aliyun.aliyun_cli,
aliyun.aliyun_run_ore,
aliyun.aliyun_run_ore_replicate),
"azure": (azure.azure_cli,
azure.azure_run_ore,
azure.azure_run_ore_replicate),
"digitalocean": (digitalocean.digitalocean_cli,
digitalocean.digitalocean_run_ore,
digitalocean.digitalocean_run_ore_replicate),
"gcp": (gcp.gcp_cli,
gcp.gcp_run_ore,
gcp.gcp_run_ore_replicate),
"vultr": (vultr.vultr_cli,
vultr.vultr_run_ore,
vultr.vultr_run_ore_replicate),
"exoscale": (exoscale.exoscale_cli,
exoscale.exoscale_run_ore,
exoscale.exoscale_run_ore_replicate),
}
def cloud_clis():
return CLOUD_CLI_TARGET.keys()
def get_cloud_ore_cmds(target):
_, orecmd, orerep = CLOUD_CLI_TARGET[target]
return orecmd, orerep
def get_cloud_cli(target, parser=None):
if parser is None:
parser = BuildCli()
cli_func, _, _ = CLOUD_CLI_TARGET[target]
return cli_func(parser)
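# Example of how these lookups compose (illustrative):
#
#   cli_parser = get_cloud_cli("aws")                 # aws_cli(BuildCli())
#   run_ore, run_ore_replicate = get_cloud_ore_cmds("aws")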
class Cli(argparse.ArgumentParser):
"""
Abstraction for executing commands from the cli.
"""
def __init__(self, *args, **kwargs):
"""
Initializes the Cli instance.
        :param kwargs: All keyword arguments, which are passed through to ArgumentParser
:type kwargs: dict
"""
argparse.ArgumentParser.__init__(self, *args, **kwargs)
self.add_argument(
'--log-level', env_var='COSA_LOG_LEVEL', default='INFO',
choices=log._nameToLevel.keys(), help='Set the log level')
def add_argument(self, *args, **kwargs):
"""
Overloads the add_argument to be able to also read from
the environment. To read from the environment provide
the keyword arugment env_var.
:param args: Non keyword arguments to pass to add_argument
:type args: list
:param kwargs: Keyword arguments to pass to add_argument
:type kwargs: dict
"""
env_var = kwargs.pop('env_var', None)
if env_var is not None:
if not env_var.startswith('COSA_'):
env_var = f"COSA_{env_var}"
ka = kwargs.get("help", '')
kwargs['help'] = f"{ka} (Env: {env_var})"
default = kwargs.pop('default', None)
super().add_argument(
*args, default=os.environ.get(env_var, default), **kwargs)
else:
super().add_argument(*args, **kwargs)
def parse_args(self, **kwargs):
"""
Parses the arguments passed in, verifies inputs, sets the logger,
and returns the arguments.
:returns: The parsed arguments
        :rtype: argparse.Namespace
"""
args = super().parse_args()
self._set_logger(args.log_level)
return args
def _set_logger(self, level):
"""
Set the log level
:param level: set the log level
:type level: str
"""
log.basicConfig(
format='[%(levelname)s]: %(message)s',
level=log._nameToLevel.get(level.upper(), log.DEBUG))
class BuildCli(Cli):
"""
Cli class that adds in reusable build specific arguments.
"""
def __init__(self, *args, **kwargs):
"""
Initializes the BuildCli instance.
        :param kwargs: All keyword arguments, which are passed through to ArgumentParser
:type kwargs: dict
"""
Cli.__init__(self, *args, **kwargs)
# Set common arguments
self.add_argument(
'--build', env_var="BUILD", default='latest',
help='Override build id, defaults to latest')
self.add_argument(
'--buildroot', env_var="BUILD_ROOT", default='builds',
            help='Build directory')
self.add_argument(
'--dump', default=False, action='store_true',
            help='Dump the manifest and exit')
self.add_argument(
'--schema', env_var="META_SCHEMA",
default='/usr/lib/coreos-assembler/schema/v1.json',
help='Schema to use. Set to NONE to skip all validation')
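# Illustrative sketch of how these helpers are meant to be used; the --bucket
# option below is an example, not part of cosalib:
#
#   parser = BuildCli(description="upload an image")
#   parser.add_argument('--bucket', env_var="BUCKET", help='target bucket')
#   args = parser.parse_args()   # --bucket falls back to $COSA_BUCKET if unset
#   log.info("build %s under %s", args.build, args.buildroot)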
| 31.166667 | 78 | 0.58139 |
acf51459f4eb0e067fee6d7a9968fa4785de42c0 | 6,846 | py | Python | tools/nntool/graph/matches/match_gap_conv.py | coWorkr-InSights/gap_sdk | a934747441481ea3d9c029719d721780cdff9e46 | [
"Apache-2.0"
] | null | null | null | tools/nntool/graph/matches/match_gap_conv.py | coWorkr-InSights/gap_sdk | a934747441481ea3d9c029719d721780cdff9e46 | [
"Apache-2.0"
] | null | null | null | tools/nntool/graph/matches/match_gap_conv.py | coWorkr-InSights/gap_sdk | a934747441481ea3d9c029719d721780cdff9e46 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from graph.types import (ActivationParameters, Conv2DParameters,
ConvFusionParameters, PoolingParameters)
from quantization.symmetric.symmetric_quantization import (
SymmetricQuantizationRecord, SymmetricScalableFilterQuantizationRecord)
from quantization.multiplicative.mult_quantization import (
MultQuantizationRecord, MultScalableFilterQuantizationRecord)
from quantization.float32.float32_quantization import (
Float32QuantizationRecord, Float32ScalableFilterQuantizationRecord)
from utils.graph import Edge, GraphView, MatchNode
from utils.node_id import NodeId
from .matcher import DefaultMatcher, DontReplaceError, MatchGroup
LOG = logging.getLogger("nntool." + __name__)
class MatchGapConv(DefaultMatcher):
def __init__(self, *args, match_activation=True, match_pool=False, pool_after_activation=False, **kwargs):
super(MatchGapConv, self).__init__(*args, **kwargs)
assert match_activation or match_pool, "not very interesting to just match conv"
self.match_activation = match_activation
self.match_pool = match_pool
self.pool_after_activation = pool_after_activation
if match_activation and match_pool:
if pool_after_activation:
self.fusion_type = "conv_active_pool"
else:
self.fusion_type = "conv_pool_active"
elif match_activation:
self.fusion_type = "conv_active"
else:
self.fusion_type = "conv_pool"
def valid_convolution(self, node):
del node
# TODO - Add specific convolution parameter checking here
return True
def valid_pooling(self, node):
del node
# TODO - Add specific pool parameter checking here
return True
def valid_activation(self, node):
del node
        # TODO - Add specific activation parameter checking here
return True
def validate_match(self, subgraph: GraphView):
nodes = list(subgraph.nodes())
if self.match_pool and self.match_activation and len(nodes) == 3:
if self.pool_after_activation:
pool_node = nodes[2]
# standard fusion puts relu after the pool operation
if pool_node.pool_type == "average":
return False
else:
pool_node = nodes[1]
act_node = nodes[2]
if pool_node.pool_type == "average" and act_node.activation != "relu":
return False
return True
def add_pooling(self, i, sub):
sub.add_node(MatchNode(str(i),
matcher=lambda node:\
isinstance(node, PoolingParameters) and\
self.valid_pooling(node)))
def add_activation(self, i, sub):
sub.add_node(MatchNode(str(i),
matcher=lambda node:\
isinstance(node, ActivationParameters) and\
self.valid_activation(node)))
def match_function(self, G: GraphView):
sub = GraphView()
sub.add_node(MatchNode('0', matcher=lambda node:\
isinstance(node, Conv2DParameters) and\
self.valid_convolution(node)))
if self.match_activation and self.match_pool:
if self.pool_after_activation:
self.add_activation('1', sub)
self.add_pooling('2', sub)
else:
self.add_pooling('1', sub)
self.add_activation('2', sub)
sub.add_edge(Edge('0', '1'))
sub.add_edge(Edge('1', '2'))
elif self.match_activation:
self.add_activation('1', sub)
sub.add_edge(Edge('0', '1'))
elif self.match_pool:
self.add_pooling('1', sub)
sub.add_edge(Edge('0', '1'))
return G.match_fragment(sub)
def replace_function(self, G: GraphView, subgraph: GraphView):
if not self.validate_match(subgraph):
raise DontReplaceError()
step = 0
for node in subgraph.nodes():
node.step_idx = step
step = step + 1
if isinstance(node, Conv2DParameters):
conv_name = node.name + "_fusion"
break
LOG.debug("fused nodes %s", ",".join((node.name for node in subgraph.nodes())))
# simple node order is necessary because nodes() will not necessarily
# be in order
pnode = ConvFusionParameters(conv_name, fusion_type=self.fusion_type, subgraph=subgraph)
if G.quantization:
qrecs = G.quantization.get_all(pnode.contained_nodes())
if qrecs:
if isinstance(qrecs[0], (SymmetricQuantizationRecord, SymmetricScalableFilterQuantizationRecord)):
prec = SymmetricQuantizationRecord(in_qs=qrecs[0].in_qs, out_qs=qrecs[-1].out_qs)
elif isinstance(qrecs[0], (MultQuantizationRecord, MultScalableFilterQuantizationRecord)):
prec = MultQuantizationRecord(in_qs=qrecs[0].in_qs, out_qs=qrecs[-1].out_qs)
elif isinstance(qrecs[0], (Float32QuantizationRecord, Float32ScalableFilterQuantizationRecord)):
prec = Float32QuantizationRecord(in_qs=qrecs[0].in_qs, out_qs=qrecs[-1].out_qs)
for node in pnode.contained_nodes():
G.quantization.move_to_fusion(node, pnode)
G.quantization[NodeId(pnode)] = prec
return pnode, None, None
class MatchAllGapConv(MatchGroup):
NAME = 'fuse_gap_convs'
DESCRIPTION = 'Fuse convolutions, pools and activations to match GAP AutoTiler operations'
def __init__(self):
super().__init__(\
MatchGapConv(match_activation=True, match_pool=True, pool_after_activation=True),\
MatchGapConv(match_activation=True, match_pool=True, pool_after_activation=False),\
MatchGapConv(match_activation=True, match_pool=False),\
MatchGapConv(match_activation=False, match_pool=True)\
)
| 43.884615 | 114 | 0.642419 |
acf514775779083c0814d10e597b211d19b58a0b | 17,464 | py | Python | pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py | rsalmond/google-cloud-python | 120bf628220227bb4dd905c86feda93180e14970 | [
"Apache-2.0"
] | null | null | null | pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py | rsalmond/google-cloud-python | 120bf628220227bb4dd905c86feda93180e14970 | [
"Apache-2.0"
] | 1 | 2018-04-06T19:51:23.000Z | 2018-04-06T19:51:23.000Z | pubsub/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py | rsalmond/google-cloud-python | 120bf628220227bb4dd905c86feda93180e14970 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import collections
import functools
import logging
import threading
import grpc
import six
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import histogram
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import requests
import google.cloud.pubsub_v1.subscriber.message
import google.cloud.pubsub_v1.subscriber.scheduler
_LOGGER = logging.getLogger(__name__)
_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated"
_RETRYABLE_STREAM_ERRORS = (
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
exceptions.InternalServerError,
exceptions.Unknown,
exceptions.GatewayTimeout,
exceptions.Aborted,
)
def _maybe_wrap_exception(exception):
"""Wraps a gRPC exception class, if needed."""
if isinstance(exception, grpc.RpcError):
return exceptions.from_grpc_error(exception)
return exception
def _wrap_callback_errors(callback, message):
"""Wraps a user callback so that if an exception occurs the message is
nacked.
Args:
callback (Callable[None, Message]): The user callback.
message (~Message): The Pub/Sub message.
"""
try:
callback(message)
except Exception:
# Note: the likelihood of this failing is extremely low. This just adds
# a message to a queue, so if this doesn't work the world is in an
# unrecoverable state and this thread should just bail.
_LOGGER.exception(
"Top-level exception occurred in callback while processing a " "message"
)
message.nack()
class StreamingPullManager(object):
"""The streaming pull manager coordinates pulling messages from Pub/Sub,
leasing them, and scheduling them to be processed.
Args:
client (~.pubsub_v1.subscriber.client): The subscriber client used
to create this instance.
subscription (str): The name of the subscription. The canonical
format for this is
``projects/{project}/subscriptions/{subscription}``.
flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow
control settings.
scheduler (~google.cloud.pubsub_v1.scheduler.Scheduler): The scheduler
to use to process messages. If not provided, a thread pool-based
scheduler will be used.
"""
_UNARY_REQUESTS = True
"""If set to True, this class will make requests over a separate unary
RPC instead of over the streaming RPC."""
def __init__(
self, client, subscription, flow_control=types.FlowControl(), scheduler=None
):
self._client = client
self._subscription = subscription
self._flow_control = flow_control
self._ack_histogram = histogram.Histogram()
self._last_histogram_size = 0
self._ack_deadline = 10
self._rpc = None
self._callback = None
self._closing = threading.Lock()
self._closed = False
self._close_callbacks = []
if scheduler is None:
self._scheduler = (
google.cloud.pubsub_v1.subscriber.scheduler.ThreadScheduler()
)
else:
self._scheduler = scheduler
# The threads created in ``.open()``.
self._dispatcher = None
self._leaser = None
self._consumer = None
self._heartbeater = None
@property
def is_active(self):
"""bool: True if this manager is actively streaming.
Note that ``False`` does not indicate this is complete shut down,
just that it stopped getting new messages.
"""
return self._consumer is not None and self._consumer.is_active
@property
def flow_control(self):
"""google.cloud.pubsub_v1.types.FlowControl: The active flow control
settings."""
return self._flow_control
@property
def dispatcher(self):
"""google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher:
The dispatcher helper.
"""
return self._dispatcher
@property
def leaser(self):
"""google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser:
The leaser helper.
"""
return self._leaser
@property
def ack_histogram(self):
"""google.cloud.pubsub_v1.subscriber._protocol.histogram.Histogram:
The histogram tracking time-to-acknowledge.
"""
return self._ack_histogram
@property
def ack_deadline(self):
"""Return the current ack deadline based on historical time-to-ack.
This method is "sticky". It will only perform the computations to
check on the right ack deadline if the histogram has gained a
significant amount of new information.
Returns:
int: The ack deadline.
"""
target = min([self._last_histogram_size * 2, self._last_histogram_size + 100])
if len(self.ack_histogram) > target:
self._ack_deadline = self.ack_histogram.percentile(percent=99)
return self._ack_deadline
@property
def load(self):
"""Return the current load.
The load is represented as a float, where 1.0 represents having
hit one of the flow control limits, and values between 0.0 and 1.0
represent how close we are to them. (0.5 means we have exactly half
of what the flow control setting allows, for example.)
There are (currently) two flow control settings; this property
computes how close the manager is to each of them, and returns
whichever value is higher. (It does not matter that we have lots of
running room on setting A if setting B is over.)
Returns:
float: The load value.
"""
if self._leaser is None:
return 0
return max(
[
self._leaser.message_count / self._flow_control.max_messages,
self._leaser.bytes / self._flow_control.max_bytes,
]
)
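    # Worked example (illustrative numbers): with flow control limits of
    # max_messages=100 and max_bytes=100 * 1024 * 1024, a leaser holding 80
    # messages totalling 10 MiB yields
    #     max(80 / 100, 10485760 / 104857600) == 0.8
    # i.e. 80% of the message limit; the consumer is paused only once either
    # ratio reaches 1.0.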
def add_close_callback(self, callback):
"""Schedules a callable when the manager closes.
Args:
callback (Callable): The method to call.
"""
self._close_callbacks.append(callback)
def maybe_pause_consumer(self):
"""Check the current load and pause the consumer if needed."""
if self.load >= 1.0:
if self._consumer is not None and not self._consumer.is_paused:
_LOGGER.debug("Message backlog over load at %.2f, pausing.", self.load)
self._consumer.pause()
def maybe_resume_consumer(self):
"""Check the current load and resume the consumer if needed."""
# If we have been paused by flow control, check and see if we are
# back within our limits.
#
# In order to not thrash too much, require us to have passed below
# the resume threshold (80% by default) of each flow control setting
# before restarting.
if self._consumer is None or not self._consumer.is_paused:
return
if self.load < self.flow_control.resume_threshold:
self._consumer.resume()
else:
_LOGGER.debug("Did not resume, current load is %s", self.load)
def _send_unary_request(self, request):
"""Send a request using a separate unary request instead of over the
stream.
Args:
request (types.StreamingPullRequest): The stream request to be
mapped into unary requests.
"""
if request.ack_ids:
self._client.acknowledge(
subscription=self._subscription, ack_ids=list(request.ack_ids)
)
if request.modify_deadline_ack_ids:
# Send ack_ids with the same deadline seconds together.
deadline_to_ack_ids = collections.defaultdict(list)
for n, ack_id in enumerate(request.modify_deadline_ack_ids):
deadline = request.modify_deadline_seconds[n]
deadline_to_ack_ids[deadline].append(ack_id)
for deadline, ack_ids in six.iteritems(deadline_to_ack_ids):
self._client.modify_ack_deadline(
subscription=self._subscription,
ack_ids=ack_ids,
ack_deadline_seconds=deadline,
)
_LOGGER.debug("Sent request(s) over unary RPC.")
def send(self, request):
"""Queue a request to be sent to the RPC."""
if self._UNARY_REQUESTS:
try:
self._send_unary_request(request)
except exceptions.GoogleAPICallError:
_LOGGER.debug(
"Exception while sending unary RPC. This is typically "
"non-fatal as stream requests are best-effort.",
exc_info=True,
)
else:
self._rpc.send(request)
def heartbeat(self):
"""Sends an empty request over the streaming pull RPC.
This always sends over the stream, regardless of if
``self._UNARY_REQUESTS`` is set or not.
"""
if self._rpc is not None and self._rpc.is_active:
self._rpc.send(types.StreamingPullRequest())
def open(self, callback):
"""Begin consuming messages.
Args:
callback (Callable[None, google.cloud.pubsub_v1.message.Messages]):
A callback that will be called for each message received on the
stream.
"""
if self.is_active:
raise ValueError("This manager is already open.")
if self._closed:
raise ValueError("This manager has been closed and can not be re-used.")
self._callback = functools.partial(_wrap_callback_errors, callback)
# Create the RPC
self._rpc = bidi.ResumableBidiRpc(
start_rpc=self._client.api.streaming_pull,
initial_request=self._get_initial_request,
should_recover=self._should_recover,
)
self._rpc.add_done_callback(self._on_rpc_done)
# Create references to threads
self._dispatcher = dispatcher.Dispatcher(self, self._scheduler.queue)
self._consumer = bidi.BackgroundConsumer(self._rpc, self._on_response)
self._leaser = leaser.Leaser(self)
self._heartbeater = heartbeater.Heartbeater(self)
# Start the thread to pass the requests.
self._dispatcher.start()
# Start consuming messages.
self._consumer.start()
# Start the lease maintainer thread.
self._leaser.start()
# Start the stream heartbeater thread.
self._heartbeater.start()
def close(self, reason=None):
"""Stop consuming messages and shutdown all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. This is passed to the callbacks
specified via :meth:`add_close_callback`.
"""
with self._closing:
if self._closed:
return
# Stop consuming messages.
if self.is_active:
_LOGGER.debug("Stopping consumer.")
self._consumer.stop()
self._consumer = None
# Shutdown all helper threads
_LOGGER.debug("Stopping scheduler.")
self._scheduler.shutdown()
self._scheduler = None
_LOGGER.debug("Stopping leaser.")
self._leaser.stop()
self._leaser = None
_LOGGER.debug("Stopping dispatcher.")
self._dispatcher.stop()
self._dispatcher = None
_LOGGER.debug("Stopping heartbeater.")
self._heartbeater.stop()
self._heartbeater = None
self._rpc = None
self._closed = True
_LOGGER.debug("Finished stopping manager.")
for callback in self._close_callbacks:
callback(self, reason)
def _get_initial_request(self):
"""Return the initial request for the RPC.
This defines the initial request that must always be sent to Pub/Sub
immediately upon opening the subscription.
Returns:
google.cloud.pubsub_v1.types.StreamingPullRequest: A request
suitable for being the first request on the stream (and not
suitable for any other purpose).
"""
# Any ack IDs that are under lease management need to have their
# deadline extended immediately.
if self._leaser is not None:
# Explicitly copy the list, as it could be modified by another
# thread.
lease_ids = list(self._leaser.ack_ids)
else:
lease_ids = []
# Put the request together.
request = types.StreamingPullRequest(
modify_deadline_ack_ids=list(lease_ids),
modify_deadline_seconds=[self.ack_deadline] * len(lease_ids),
stream_ack_deadline_seconds=self.ack_histogram.percentile(99),
subscription=self._subscription,
)
# Return the initial request.
return request
def _on_response(self, response):
"""Process all received Pub/Sub messages.
For each message, send a modified acknowledgment request to the
server. This prevents expiration of the message due to buffering by
gRPC or proxy/firewall. This makes the server and client expiration
timer closer to each other thus preventing the message being
redelivered multiple times.
After the messages have all had their ack deadline updated, execute
the callback for each message using the executor.
"""
_LOGGER.debug(
"Scheduling callbacks for %s messages.", len(response.received_messages)
)
# Immediately modack the messages we received, as this tells the server
# that we've received them.
items = [
requests.ModAckRequest(message.ack_id, self._ack_histogram.percentile(99))
for message in response.received_messages
]
self._dispatcher.modify_ack_deadline(items)
for received_message in response.received_messages:
message = google.cloud.pubsub_v1.subscriber.message.Message(
received_message.message, received_message.ack_id, self._scheduler.queue
)
# TODO: Immediately lease instead of using the callback queue.
self._scheduler.schedule(self._callback, message)
def _should_recover(self, exception):
"""Determine if an error on the RPC stream should be recovered.
If the exception is one of the retryable exceptions, this will signal
to the consumer thread that it should "recover" from the failure.
This will cause the stream to exit when it returns :data:`False`.
Returns:
bool: Indicates if the caller should recover or shut down.
Will be :data:`True` if the ``exception`` is "acceptable", i.e.
in a list of retryable / idempotent exceptions.
"""
exception = _maybe_wrap_exception(exception)
# If this is in the list of idempotent exceptions, then we want to
# recover.
if isinstance(exception, _RETRYABLE_STREAM_ERRORS):
_LOGGER.info("Observed recoverable stream error %s", exception)
return True
_LOGGER.info("Observed non-recoverable stream error %s", exception)
return False
def _on_rpc_done(self, future):
"""Triggered whenever the underlying RPC terminates without recovery.
This is typically triggered from one of two threads: the background
consumer thread (when calling ``recv()`` produces a non-recoverable
error) or the grpc management thread (when cancelling the RPC).
This method is *non-blocking*. It will start another thread to deal
with shutting everything down. This is to prevent blocking in the
background consumer and preventing it from being ``joined()``.
"""
_LOGGER.info("RPC termination has signaled streaming pull manager shutdown.")
future = _maybe_wrap_exception(future)
thread = threading.Thread(
name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future}
)
thread.daemon = True
thread.start()
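# Illustrative lifecycle sketch. The manager is normally driven by the
# subscriber client; the subscription path and callbacks below are examples:
#
#   manager = StreamingPullManager(
#       subscriber_client, 'projects/my-project/subscriptions/my-sub')
#   manager.add_close_callback(lambda mgr, reason: print('closed:', reason))
#   manager.open(callback=lambda message: message.ack())
#   ...                      # messages are scheduled onto the callback
#   manager.close()          # stops the stream and all helper threads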
| 37 | 88 | 0.645385 |
acf514969fd079cb3775f48f215f71a4fa64b225 | 156 | py | Python | testData/quickfix/addPantsTargetDependency/basic_expected.py | jsirois/intellij-pants-plugin | 06c4bd415374290d4877ab30013b140264cd6945 | [
"Apache-2.0"
] | null | null | null | testData/quickfix/addPantsTargetDependency/basic_expected.py | jsirois/intellij-pants-plugin | 06c4bd415374290d4877ab30013b140264cd6945 | [
"Apache-2.0"
] | null | null | null | testData/quickfix/addPantsTargetDependency/basic_expected.py | jsirois/intellij-pants-plugin | 06c4bd415374290d4877ab30013b140264cd6945 | [
"Apache-2.0"
] | null | null | null | java_library(name='test',
dependencies =[
'bar/baz1',
'bar/baz2',
'bar/baz3:baz3',
'foo:scala',
],
sources=rglobs('*.java'),
) | 17.333333 | 27 | 0.525641 |
acf514d3e333a166d7a0ba98160f2245b83e1af0 | 4,240 | py | Python | services/codespeed/fabfile.py | graingert/braid | 1df63c5d8b44e079487be2f0bf099108a77872e5 | [
"MIT"
] | 8 | 2015-10-18T11:02:54.000Z | 2019-03-29T18:33:18.000Z | services/codespeed/fabfile.py | graingert/braid | 1df63c5d8b44e079487be2f0bf099108a77872e5 | [
"MIT"
] | 214 | 2015-01-19T06:58:36.000Z | 2022-02-10T10:22:30.000Z | services/codespeed/fabfile.py | graingert/braid | 1df63c5d8b44e079487be2f0bf099108a77872e5 | [
"MIT"
] | 12 | 2015-02-08T17:32:13.000Z | 2020-10-25T22:22:59.000Z | """
Support for benchmark reporting.
"""
from StringIO import StringIO
import os
import random
from fabric.api import execute, run, settings, env, put, cd
from braid import git, cron, archive, utils
from braid.twisted import service
from braid.tasks import addTasks
from braid import config
__all__ = [ 'config' ]
class Codespeed(service.Service):
python = "python"
def task_install(self):
"""
Install codespeed, a benchmark reporting tool
"""
# Bootstrap a new service environment
self.bootstrap()
self.update()
with settings(user=self.serviceUser):
run('/bin/ln -nsf {}/start {}/start'.format(self.configDir, self.binDir))
run('mkdir -p ~/data')
execute(self.update)
cron.install(self.serviceUser, '{}/crontab'.format(self.configDir))
self.task_generateSecretKey()
def task_generateSecretKey(self):
"""
Generate a new C{SECRET_KEY} and save it in the settings file.
"""
with settings(user=self.serviceUser):
if utils.succeeds('ls {}/secret_key.py'.format(self.configDir)):
                overwrite = utils.confirm('This will replace the current secret '
                                          'key with a newly generated one.')
            else:
                overwrite = True
            if overwrite:
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
secret = ''.join([random.choice(chars) for i in range(50)])
setting = StringIO("SECRET_KEY = '{}'\n".format(secret))
put(setting, '{}/secret_key.py'.format(self.configDir),
mode=0o600)
def update(self):
"""
Update config.
"""
with settings(user=self.serviceUser):
run('mkdir -p ' + self.configDir)
put(
os.path.dirname(__file__) + '/*', self.configDir,
mirror_local_mode=True)
git.branch('https://github.com/tobami/codespeed.git', '~/codespeed')
with cd("~/codespeed"):
run("git checkout 0.12.0")
run("git reset --hard")
self.venv.install_twisted()
self.venv.install('-U -r ~/codespeed/requirements.txt')
if env.get('installTestData'):
execute(self.task_installTestData)
def djangoAdmin(self, args):
"""
Run django-admin with proper settings.
"""
with settings(user=self.serviceUser):
path = '~/config:~/codespeed/'
run('PYTHONPATH={} '
'DJANGO_SETTINGS_MODULE=twistedcodespeed.local_settings '
'~/virtualenv/bin/django-admin.py {}'.format(path, ' '.join(args)))
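    # For reference, djangoAdmin(['migrate']) runs the equivalent of this shell
    # command on the service host:
    #   PYTHONPATH=~/config:~/codespeed/ \
    #   DJANGO_SETTINGS_MODULE=twistedcodespeed.local_settings \
    #   ~/virtualenv/bin/django-admin.py migrate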
def task_installTestData(self):
"""
Create test db.
"""
self.djangoAdmin(['migrate'])
def task_createSuperuser(self):
"""
Reset the admin password.
"""
self.djangoAdmin(['createsuperuser'])
def task_update(self):
"""
Update config and restart.
"""
self.update()
self.task_restart()
def task_dump(self, localfile):
"""
Dump codespeed database and download it to the given C{localfile}.
"""
with settings(user=self.serviceUser):
with utils.tempfile() as temp:
run('/usr/bin/sqlite3 ~/data/codespeed.db .dump >{}'.format(temp))
archive.dump({
'db.dump': temp,
}, localfile)
def task_restore(self, localfile):
"""
Restore codespeed database from the given C{localfile}.
"""
msg = 'The whole database will be replaced with the backup.'
if utils.confirm(msg):
with settings(user=self.serviceUser):
with utils.tempfile() as temp:
archive.restore({
'db.dump': temp,
}, localfile)
run('/bin/rm -f ~/data/codespeed.db')
run('/usr/bin/sqlite3 ~/data/codespeed.db ".read {}"'.format(temp))
addTasks(globals(), Codespeed('codespeed').getTasks())
| 31.641791 | 87 | 0.547877 |
acf516bcc96559558a6506cb087211a1d03e43b3 | 4,014 | py | Python | bart_summarizer.py | Shaumik-Ashraf/BART-MIMIC-CXR | 573b702344f8bd1d05aa2ba151818b0677233c60 | [
"MIT"
] | 4 | 2021-03-31T23:10:26.000Z | 2021-05-18T22:44:41.000Z | bart_summarizer.py | Shaumik-Ashraf/BART-MIMIC-CXR | 573b702344f8bd1d05aa2ba151818b0677233c60 | [
"MIT"
] | null | null | null | bart_summarizer.py | Shaumik-Ashraf/BART-MIMIC-CXR | 573b702344f8bd1d05aa2ba151818b0677233c60 | [
"MIT"
] | null | null | null | # summarizer.py
"""
Does abstractive summarization on MIMIC CXR Radiology reports, uses BART transformer
Requirements:
data/test.csv exists
data/summaries.csv can be created or overwritten
transformers==3.3.1
"""
#import torch
from transformers import BartModel, BartTokenizer, BartForConditionalGeneration, BartConfig
from transformers import Trainer, TrainingArguments
from transformers.modeling_bart import shift_tokens_right
import csv
import numpy as np
import pandas as pd
import os
ROOT = os.path.dirname( os.path.abspath(__file__) );
#TRAIN_FILE = os.path.join(ROOT, 'data', 'train.csv');
#VALIDATION_FILE = os.path.join(ROOT, 'data', 'validation.csv');
TEST_FILE = os.path.join(ROOT, 'data', 'test.csv');
LIMIT = 11; #set limit to -1 to do all data
SUMMARIES_FILE = os.path.join(ROOT, 'data', f"summaries_{LIMIT}.csv");
def load_file(filename):
"""
loads csv data and returns it as np matrix
param: filename - path to csv file
return: 2D numpy of csv data with text
"""
print(f"Loading data from {filename}...");
df = pd.read_csv(filename)
print(f"Done.");
return( np.array(df) );
def load_bart(model_name='facebook/bart-large-cnn', tokenizer_name='facebook/bart-large'):
"""
loads pretrained BART model and tokenizer
params: model_name - pretrained BART huggingface transformer download path, default: facebook/bart-large-cnn
tokenizer_name - pretrained BART huggingface tokenizer download path, default: facebook/bart-large
return: (model, tokenizer)
"""
print(f"Loading pretrained model {model_name}...");
    model = BartForConditionalGeneration.from_pretrained(model_name)
print("Done.");
print(f"Loading pretrained tokenizer {tokenizer_name}...");
    tokenizer = BartTokenizer.from_pretrained(tokenizer_name)
print("Done.");
return((model, tokenizer));
def baseBart(article_to_summarize, model, tokenizer):
"""
runs BART summarization
params: model - from load_bart()
tokenizer - from load_bart()
article_to_summarize - text (string)
return: generated abstractive summary (string)
"""
inputs = tokenizer([article_to_summarize], max_length=1024, truncation='do_not_truncate', return_tensors='pt')
summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=25, early_stopping=True)
return [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids][0]
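# Example (illustrative): summarising one snippet of report text once the model
# and tokenizer are loaded; the FINDINGS string is made up, not MIMIC data.
#
#   model, tokenizer = load_bart()
#   print(baseBart("FINDINGS: Lungs are clear. No pleural effusion.",
#                  model, tokenizer))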
def write_csv_row(opened_file, row, model, tokenizer):
"""
generates abstractive summary and writes it to a file in csv format, 1 summary per row
params: opened_file - open File object, actually any IO stream implementing write() works
row - a list/array containing [<subject id>, <study id>, <text to summarize>, <ground truth summary>]
model - trained BART model
tokenizer - BART tokenizer
returns: generated summary
"""
comp_summary = baseBart(row[2], model, tokenizer)
opened_file.write(f"\"{row[0]}\",\"{row[1]}\",\"{comp_summary}\",\"{row[3]}\"\n");
return(comp_summary);
print("==================== Start abstractive summarization ======================");
data = load_file(TEST_FILE);
model, tokenizer = load_bart();
print(f"Writing {os.path.basename(SUMMARIES_FILE)}...");
f = open(SUMMARIES_FILE, 'w');
f.write(f"\"subject_id\",\"study_id\",\"prediction\",\"actual\"\n");
i = 0;
if LIMIT==-1: # based on the limit, print progress messages appropriately
for row in data:
write_csv_row(f, row, model, tokenizer);
if( (i%1000 == 0) or (i+1 == LIMIT) ):
print(f"Computed {i+1} summaries");
i += 1;
elif LIMIT < 100:
for row in data[:LIMIT]:
write_csv_row(f, row, model, tokenizer);
if( (i%(int(LIMIT/4)) == 0) or (i+1 == LIMIT)):
print(f"Computed {i+1} summaries");
i += 1;
else:
for row in data[:LIMIT]:
write_csv_row(f, row, model, tokenizer);
if( (i%(int(LIMIT/8)) == 0) or (i+1 == LIMIT) ):
print(f"Computed {i+1} summaries");
i += 1;
f.close();
print("Done.\n");
print("==================== End abstractive summarization ======================");
| 34.904348 | 115 | 0.705032 |
acf516e24d1f7fa8d0ba0fdee6d61b6acba8a787 | 112,620 | py | Python | gpMgmt/bin/gpload.py | paul-guo-/gpdb | 9ac1602373a13667afd7ea8cff378a39c4e3d55b | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gpload.py | paul-guo-/gpdb | 9ac1602373a13667afd7ea8cff378a39c4e3d55b | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gpload.py | paul-guo-/gpdb | 9ac1602373a13667afd7ea8cff378a39c4e3d55b | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# gpload - load file(s) into Greenplum Database
# Copyright Greenplum 2008
'''gpload [options] -f configuration file
Options:
-h hostname: host to connect to
-p port: port to connect to
-U username: user to connect as
-d database: database to connect to
-W: force password authentication
-q: quiet mode
-D: do not actually load data
-v: verbose
-V: very verbose
-l logfile: log output to logfile
--no_auto_trans: do not wrap gpload in transaction
--gpfdist_timeout timeout: gpfdist timeout value
    --max_retries retry_times: maximum number of retries when the GPDB connection times out. 0 means disabled, -1 means retry forever
--version: print version number and exit
-?: help
'''
import sys
if sys.hexversion<0x2040400:
sys.stderr.write("gpload needs python 2.4.4 or higher\n")
sys.exit(2)
try:
import yaml
except ImportError:
sys.stderr.write("gpload needs pyyaml. You can get it from http://pyyaml.org.\n")
sys.exit(2)
import platform
try:
import pg
except ImportError:
try:
from pygresql import pg
except Exception as e:
pass
except Exception as e:
print(repr(e))
errorMsg = "gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e)
sys.stderr.write(str(errorMsg))
errorMsg = "Please check if you have the correct Visual Studio redistributable package installed.\n"
sys.stderr.write(str(errorMsg))
sys.exit(2)
import hashlib
import datetime,getpass,os,signal,socket,threading,time,traceback,re
import subprocess
import uuid
try:
from gppylib.gpversion import GpVersion
except ImportError:
sys.stderr.write("gpload can't import gpversion, will run in GPDB5 compatibility mode.\n")
noGpVersion = True
else:
noGpVersion = False
thePlatform = platform.system()
if thePlatform in ['Windows', 'Microsoft']:
windowsPlatform = True
else:
windowsPlatform = False
if windowsPlatform == False:
import select
from sys import version_info
if version_info.major == 2 :
import __builtin__
long = __builtin__.long
else:
long = int
EXECNAME = 'gpload'
NUM_WARN_ROWS = 0
# Mapping for validating our configuration file. We're only concerned with
# keys -- stuff left of ':'. It gets complex in two cases: firstly when
# we handle blocks which have keys which are not keywords -- such as under
# COLUMNS:. Secondly, we want to detect when users put keywords in the wrong
# place. To that end, the mapping is structured such that:
#
# key -> { 'parse_children' -> [ True | False ],
# 'parent' -> <parent name> }
#
# Each key is a keyword in the configuration file. parse_children tells us
# whether children are expected to be keywords. parent tells us the parent
# keyword or None
valid_tokens = {
"version": {'parse_children': True, 'parent': None},
"database": {'parse_children': True, 'parent': None},
"user": {'parse_children': True, 'parent': None},
"host": {'parse_children': True, 'parent': None},
"port": {'parse_children': True, 'parent': [None, "source"]},
"password": {'parse_children': True, 'parent': None},
"gpload": {'parse_children': True, 'parent': None},
"input": {'parse_children': True, 'parent': "gpload"},
"source": {'parse_children': True, 'parent': "input"},
"local_hostname": {'parse_children': False, 'parent': "source"},
"port_range": {'parse_children': False, 'parent': "source"},
"file": {'parse_children': False, 'parent': "source"},
"ssl": {'parse_children': False, 'parent': "source"},
"certificates_path": {'parse_children': False, 'parent': "source"},
"columns": {'parse_children': False, 'parent': "input"},
"transform": {'parse_children': True, 'parent': "input"},
"transform_config": {'parse_children': True, 'parent': "input"},
"max_line_length": {'parse_children': True, 'parent': "input"},
"format": {'parse_children': True, 'parent': "input"},
"delimiter": {'parse_children': True, 'parent': "input"},
"escape": {'parse_children': True, 'parent': "input"},
"null_as": {'parse_children': True, 'parent': "input"},
"quote": {'parse_children': True, 'parent': "input"},
"encoding": {'parse_children': True, 'parent': "input"},
"force_not_null": {'parse_children': False, 'parent': "input"},
"fill_missing_fields": {'parse_children': False, 'parent': "input"},
"error_limit": {'parse_children': True, 'parent': "input"},
"error_percent": {'parse_children': True, 'parent': "input"},
"error_table": {'parse_children': True, 'parent': "input"},
"log_errors": {'parse_children': False, 'parent': "input"},
"header": {'parse_children': True, 'parent': "input"},
"fully_qualified_domain_name": {'parse_children': False, 'parent': 'input'},
"output": {'parse_children': True, 'parent': "gpload"},
"table": {'parse_children': True, 'parent': "output"},
"mode": {'parse_children': True, 'parent': "output"},
"match_columns": {'parse_children': False, 'parent': "output"},
"update_columns": {'parse_children': False, 'parent': "output"},
"update_condition": {'parse_children': True, 'parent': "output"},
"mapping": {'parse_children': False, 'parent': "output"},
"preload": {'parse_children': True, 'parent': 'gpload'},
"truncate": {'parse_children': False, 'parent': 'preload'},
"reuse_tables": {'parse_children': False, 'parent': 'preload'},
"fast_match": {'parse_children': False, 'parent': 'preload'},
"staging_table": {'parse_children': False, 'parent': 'preload'},
"sql": {'parse_children': True, 'parent': 'gpload'},
"before": {'parse_children': False, 'parent': 'sql'},
"after": {'parse_children': False, 'parent': 'sql'},
"external": {'parse_children': True, 'parent': 'gpload'},
"schema": {'parse_children': False, 'parent': 'external'}}
_abbrevs = [
(long(1<<50), ' PB'),
(long(1<<40), ' TB'),
(long(1<<30), ' GB'),
(long(1<<20), ' MB'),
(long(1<<10), ' kB'),
(1, ' bytes')
]
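# _abbrevs is ordered largest-unit-first: a formatter walking it top-down can
# take the first factor that fits, e.g. 3 * (1 << 20) bytes matches the
# (1 << 20, ' MB') entry and would be reported as "3 MB".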
received_kill = False
keywords = {
"abort": True,
"absolute": True,
"access": True,
"action": True,
"active": True,
"add": True,
"admin": True,
"after": True,
"aggregate": True,
"all": True,
"also": True,
"alter": True,
"analyse": True,
"analyze": True,
"and": True,
"any": True,
"array": True,
"as": True,
"asc": True,
"assertion": True,
"assignment": True,
"asymmetric": True,
"at": True,
"authorization": True,
"backward": True,
"before": True,
"begin": True,
"between": True,
"bigint": True,
"binary": True,
"bit": True,
"boolean": True,
"both": True,
"by": True,
"cache": True,
"called": True,
"cascade": True,
"cascaded": True,
"case": True,
"cast": True,
"chain": True,
"char": True,
"character": True,
"characteristics": True,
"check": True,
"checkpoint": True,
"class": True,
"close": True,
"cluster": True,
"coalesce": True,
"collate": True,
"column": True,
"comment": True,
"commit": True,
"committed": True,
"concurrently": True,
"connection": True,
"constraint": True,
"constraints": True,
"conversion": True,
"convert": True,
"coordinator": True,
"copy": True,
"cost": True,
"create": True,
"createdb": True,
"createrole": True,
"createuser": True,
"cross": True,
"csv": True,
"cube": True,
"current": True,
"current_date": True,
"current_role": True,
"current_time": True,
"current_timestamp": True,
"current_user": True,
"cursor": True,
"cycle": True,
"database": True,
"day": True,
"deallocate": True,
"dec": True,
"decimal": True,
"declare": True,
"default": True,
"defaults": True,
"deferrable": True,
"deferred": True,
"definer": True,
"delete": True,
"delimiter": True,
"delimiters": True,
"desc": True,
"disable": True,
"distinct": True,
"distributed": True,
"do": True,
"domain": True,
"double": True,
"drop": True,
"each": True,
"else": True,
"enable": True,
"encoding": True,
"encrypted": True,
"end": True,
"errors": True,
"escape": True,
"every": True,
"except": True,
"exchange": True,
"exclude": True,
"excluding": True,
"exclusive": True,
"execute": True,
"exists": True,
"explain": True,
"external": True,
"extract": True,
"false": True,
"fetch": True,
"fields": True,
"fill": True,
"filter": True,
"first": True,
"float": True,
"following": True,
"for": True,
"force": True,
"foreign": True,
"format": True,
"forward": True,
"freeze": True,
"from": True,
"full": True,
"function": True,
"global": True,
"grant": True,
"granted": True,
"greatest": True,
"group": True,
"group_id": True,
"grouping": True,
"handler": True,
"hash": True,
"having": True,
"header": True,
"hold": True,
"host": True,
"hour": True,
"if": True,
"ignore": True,
"ilike": True,
"immediate": True,
"immutable": True,
"implicit": True,
"in": True,
"including": True,
"inclusive": True,
"increment": True,
"index": True,
"indexes": True,
"inherit": True,
"inherits": True,
"initially": True,
"inner": True,
"inout": True,
"input": True,
"insensitive": True,
"insert": True,
"instead": True,
"int": True,
"integer": True,
"intersect": True,
"interval": True,
"into": True,
"invoker": True,
"is": True,
"isnull": True,
"isolation": True,
"join": True,
"keep": True,
"key": True,
"lancompiler": True,
"language": True,
"large": True,
"last": True,
"leading": True,
"least": True,
"left": True,
"level": True,
"like": True,
"limit": True,
"list": True,
"listen": True,
"load": True,
"local": True,
"localtime": True,
"localtimestamp": True,
"location": True,
"lock": True,
"log": True,
"login": True,
"match": True,
"maxvalue": True,
"merge": True,
"minute": True,
"minvalue": True,
"mirror": True,
"missing": True,
"mode": True,
"modify": True,
"month": True,
"move": True,
"names": True,
"national": True,
"natural": True,
"nchar": True,
"new": True,
"next": True,
"no": True,
"nocreatedb": True,
"nocreaterole": True,
"nocreateuser": True,
"noinherit": True,
"nologin": True,
"none": True,
"noovercommit": True,
"nosuperuser": True,
"not": True,
"nothing": True,
"notify": True,
"notnull": True,
"nowait": True,
"null": True,
"nullif": True,
"numeric": True,
"object": True,
"of": True,
"off": True,
"offset": True,
"oids": True,
"old": True,
"on": True,
"only": True,
"operator": True,
"option": True,
"or": True,
"order": True,
"others": True,
"out": True,
"outer": True,
"over": True,
"overcommit": True,
"overlaps": True,
"overlay": True,
"owned": True,
"owner": True,
"partial": True,
"partition": True,
"partitions": True,
"password": True,
"percent": True,
"placing": True,
"position": True,
"preceding": True,
"precision": True,
"prepare": True,
"prepared": True,
"preserve": True,
"primary": True,
"prior": True,
"privileges": True,
"procedural": True,
"procedure": True,
"queue": True,
"quote": True,
"randomly": True,
"range": True,
"read": True,
"real": True,
"reassign": True,
"recheck": True,
"references": True,
"reindex": True,
"reject": True,
"relative": True,
"release": True,
"rename": True,
"repeatable": True,
"replace": True,
"reset": True,
"resource": True,
"restart": True,
"restrict": True,
"returning": True,
"returns": True,
"revoke": True,
"right": True,
"role": True,
"rollback": True,
"rollup": True,
"row": True,
"rows": True,
"rule": True,
"savepoint": True,
"schema": True,
"scroll": True,
"second": True,
"security": True,
"segment": True,
"select": True,
"sequence": True,
"serializable": True,
"session": True,
"session_user": True,
"set": True,
"setof": True,
"sets": True,
"share": True,
"show": True,
"similar": True,
"simple": True,
"smallint": True,
"some": True,
"split": True,
"stable": True,
"start": True,
"statement": True,
"statistics": True,
"stdin": True,
"stdout": True,
"storage": True,
"strict": True,
"subpartition": True,
"subpartitions": True,
"substring": True,
"superuser": True,
"symmetric": True,
"sysid": True,
"system": True,
"table": True,
"tablespace": True,
"temp": True,
"template": True,
"temporary": True,
"then": True,
"threshold": True,
"ties": True,
"time": True,
"timestamp": True,
"to": True,
"trailing": True,
"transaction": True,
"transform": True,
"treat": True,
"trigger": True,
"trim": True,
"true": True,
"truncate": True,
"trusted": True,
"type": True,
"unbounded": True,
"uncommitted": True,
"unencrypted": True,
"union": True,
"unique": True,
"unknown": True,
"unlisten": True,
"until": True,
"update": True,
"user": True,
"using": True,
"vacuum": True,
"valid": True,
"validation": True,
"validator": True,
"values": True,
"varchar": True,
"varying": True,
"verbose": True,
"view": True,
"volatile": True,
"web": True,
"when": True,
"where": True,
"window": True,
"with": True,
"without": True,
"work": True,
"write": True,
"year": True,
"zone": True
}
def is_keyword(tab):
    return tab in keywords
def caseInsensitiveDictLookup(key, dictionary):
"""
Do a case insensitive dictionary lookup. Return the dictionary value if found,
or None if not found.
"""
for entry in dictionary:
if entry.lower() == key.lower():
return dictionary[entry]
return None
def sqlIdentifierCompare(x, y):
"""
Compare x and y as SQL identifiers. Use SQL rules for comparing delimited
and non-delimited identifiers. Return True if they are equivalent or False
if they are not equivalent.
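    For example:
    >>> sqlIdentifierCompare('FOO', 'foo')
    True
    >>> sqlIdentifierCompare('"FOO"', 'foo')
    False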
"""
if x is None or y is None:
return False
if isDelimited(x):
x = quote_unident(x)
else:
x = x.lower()
if isDelimited(y):
y = quote_unident(y)
else:
y = y.lower()
    return x == y
def isDelimited(value):
"""
This method simply checks to see if the user supplied value has delimiters.
That is, if it starts and ends with double-quotes, then it is delimited.
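    For example:
    >>> isDelimited('"Tab"')
    True
    >>> isDelimited('tab')
    False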
"""
if len(value) < 2:
return False
if value[0] == '"' and value[-1] == '"':
return True
else:
return False
def convertListToDelimited(identifiers):
"""
This method will convert a list of identifiers, which may be a mix of
delimited and non-delimited identifiers, and return a list of
delimited identifiers.
"""
returnList = []
for id in identifiers:
if isDelimited(id) == False:
id = id.lower()
returnList.append(quote_ident(id))
else:
returnList.append(id)
return returnList
def splitUpMultipartIdentifier(id):
"""
Given a sql identifier like sch.tab, return a list of its
    individual elements (e.g. sch.tab would return ['sch','tab'])
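    For example:
    >>> splitUpMultipartIdentifier('"My Schema".tab')
    ['"My Schema"', 'tab']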
"""
returnList = []
elementList = splitIntoLiteralsAndNonLiterals(id, quoteValue='"')
    # splitIntoLiteralsAndNonLiterals may prepend a single-space placeholder
    # element; remove it if present.
    if elementList[0] == ' ':
elementList.pop(0)
# Remove the dots, and split up undelimited multipart names
for e in elementList:
if e != '.':
if e[0] != '"':
subElementList = e.split('.')
else:
subElementList = [e]
for se in subElementList:
# remove any empty elements
if se != '':
returnList.append(se)
return returnList
def splitIntoLiteralsAndNonLiterals(str1, quoteValue="'"):
"""
Break the string (str1) into a list of literals and non-literals where every
even number element is a non-literal and every odd number element is a literal.
The delimiter between literals and non-literals is the quoteValue, so this
function will not take into account any modifiers on a literal (e.g. E'adf').
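    For example:
    >>> splitIntoLiteralsAndNonLiterals("a = 'b''c' or d")
    ['a = ', "'b''c'", ' or d']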
"""
returnList = []
if len(str1) > 1 and str1[0] == quoteValue:
# Always start with a non-literal
str1 = ' ' + str1
inLiteral = False
i = 0
tokenStart = 0
while i < len(str1):
if str1[i] == quoteValue:
if inLiteral == False:
# We are at start of literal
inLiteral = True
returnList.append(str1[tokenStart:i])
tokenStart = i
            elif i + 1 < len(str1) and str1[i+1] == quoteValue:
                # We are in a literal and found a doubled (escaped) quote,
                # so skip over it
i = i + 1
else:
# We are at the end of a literal or end of str1
returnList.append(str1[tokenStart:i+1])
tokenStart = i + 1
inLiteral = False
i = i + 1
if tokenStart < len(str1):
returnList.append(str1[tokenStart:])
return returnList
def quote_ident(val):
"""
This method returns a new string replacing " with "",
and adding a " at the start and end of the string.
"""
return '"' + val.replace('"', '""') + '"'
def quote_unident(val):
"""
This method returns a new string replacing "" with ",
and removing the " at the start and end of the string.
"""
if val != None and len(val) > 0:
val = val.replace('""', '"')
if val != None and len(val) > 1 and val[0] == '"' and val[-1] == '"':
val = val[1:-1]
return val
def notice_processor(notice):
if windowsPlatform == True:
# We don't have a pygresql with our notice fix, so skip for windows.
# This means we will not get any warnings on windows (MPP10989).
return
theNotices = notice
r = re.compile("^NOTICE: found (\d+) data formatting errors.*")
messageNumber = 0
m = None
while messageNumber < len(theNotices) and m is None:
aNotice = theNotices[messageNumber]
m = r.match(aNotice)
messageNumber = messageNumber + 1
if m:
global NUM_WARN_ROWS
NUM_WARN_ROWS = int(m.group(1))
def handle_kill(signum, frame):
# already dying?
global received_kill
if received_kill:
return
received_kill = True
g.log(g.INFO, "received signal %d" % signum)
g.exitValue = 2
sys.exit(2)
def bytestr(size, precision=1):
"""Return a string representing the greek/metric suffix of a size"""
if size==1:
return '1 byte'
for factor, suffix in _abbrevs:
if size >= factor:
break
float_string_split = "size/float(factor)".split('.')
integer_part = float_string_split[0]
decimal_part = float_string_split[1]
if int(decimal_part[0:precision]):
float_string = '.'.join([integer_part, decimal_part[0:precision]])
else:
float_string = integer_part
return float_string + suffix
class CatThread(threading.Thread):
"""
Simple threading wrapper to read a file descriptor and put the contents
in the log file.
    The fd is assumed to be stdout or stderr from gpfdist. We must use select.select
    and a shared lock to ensure the two threads never read at the same time; a
    deadlock could occur if they did. communicate() is not used since it blocks.
    We wait 1 second between read attempts.
"""
def __init__(self,gpload,fd, sharedLock = None):
threading.Thread.__init__(self)
self.gpload = gpload
self.fd = fd
self.theLock = sharedLock
def run(self):
try:
if windowsPlatform == True:
while 1:
                    # Windows select does not support select on non-file fd's, so we cannot use the select/lock fix. Deadlock is possible here.
# We need to look into the Python windows module to see if there is another way to do this in Windows.
line = self.fd.readline().decode()
if line=='':
break
self.gpload.log(self.gpload.DEBUG, 'gpfdist: ' + line.strip('\n'))
else:
while 1:
retList = select.select( [self.fd]
, []
, []
, 1
)
if retList[0] == [self.fd]:
self.theLock.acquire()
line = self.fd.readline().decode()
self.theLock.release()
else:
continue
if line=='':
break
self.gpload.log(self.gpload.DEBUG, 'gpfdist: ' + line.strip('\n'))
except Exception as e:
# close fd so that not block the worker thread because of stdout/stderr pipe not finish/closed.
self.fd.close()
sys.stderr.write("\n\nWarning: gpfdist log halt because Log Thread '%s' got an exception: %s \n" % (self.getName(), str(e)))
self.gpload.log(self.gpload.WARN, "gpfdist log halt because Log Thread '%s' got an exception: %s" % (self.getName(), str(e)))
raise
class Progress(threading.Thread):
"""
Determine our progress from the gpfdist daemon
"""
def __init__(self,gpload,ports):
threading.Thread.__init__(self)
self.gpload = gpload
self.ports = ports
self.number = 0
self.condition = threading.Condition()
def get(self,port):
"""
Connect to gpfdist and issue an HTTP query. No need to do this with
httplib as the transaction is extremely simple
"""
addrinfo = socket.getaddrinfo('localhost', port)
s = socket.socket(addrinfo[0][0],socket.SOCK_STREAM)
s.connect(('localhost',port))
        s.sendall(b'GET gpfdist/status HTTP/1.0\r\n\r\n')
f = s.makefile()
read_bytes = -1
total_bytes = -1
total_sessions = -1
for line in f:
self.gpload.log(self.gpload.DEBUG, "gpfdist stat: %s" % \
line.strip('\n'))
a = line.split(' ')
if not a:
continue
if a[0]=='read_bytes':
read_bytes = int(a[1])
elif a[0]=='total_bytes':
total_bytes = int(a[1])
elif a[0]=='total_sessions':
total_sessions = int(a[1])
s.close()
f.close()
return read_bytes,total_bytes,total_sessions
def get1(self):
"""
Parse gpfdist output
"""
read_bytes = 0
total_bytes = 0
for port in self.ports:
a = self.get(port)
if a[2]<1:
return
if a[0]!=-1:
read_bytes += a[0]
if a[1]!=-1:
total_bytes += a[1]
self.gpload.log(self.gpload.INFO,'transferred %s of %s' % \
(bytestr(read_bytes),bytestr(total_bytes)))
def run(self):
"""
Thread worker
"""
while 1:
try:
self.condition.acquire()
n = self.number
self.condition.release()
self.get1()
if n:
self.gpload.log(self.gpload.DEBUG, "gpfdist status thread told to stop")
self.condition.acquire()
self.condition.notify()
self.condition.release()
break
except socket.error as e:
self.gpload.log(self.gpload.DEBUG, "got socket exception: %s" % e)
break
time.sleep(1)
def cli_help():
    help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help',
                             EXECNAME + '_help')
f = None
try:
try:
            f = open(help_path)
return f.read(-1)
except:
return ''
finally:
if f: f.close()
#============================================================
def usage(error = None):
print (cli_help() or __doc__)
sys.stdout.flush()
if error:
sys.stderr.write('ERROR: ' + error + '\n')
sys.stderr.write('\n')
sys.stderr.flush()
sys.exit(2)
def quote(a):
"""
SQLify a string
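    For example:
    >>> quote("O'Brien")
    "'O''Brien'"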
"""
return "'"+a.replace("'","''").replace('\\','\\\\')+"'"
def quote_no_slash(a):
"""
    SQLify a string without doubling backslashes
"""
return "'"+a.replace("'","''")+"'"
def splitPgpassLine(a):
"""
If the user has specified a .pgpass file, we'll have to parse it. We simply
split the string into arrays at :. We could just use a native python
function but we need to escape the ':' character.
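    For example:
    >>> splitPgpassLine('localhost:5432:mydb:gpadmin:secret')
    ['localhost', '5432', 'mydb', 'gpadmin', 'secret']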
"""
b = []
escape = False
d = ''
for c in a:
if not escape and c=='\\':
escape = True
elif not escape and c==':':
b.append(d)
d = ''
else:
d += c
escape = False
if escape:
d += '\\'
b.append(d)
return b
def test_key(gp, key, crumb):
"""
Make sure that a key is a valid keyword in the configuration grammar and
that it appears in the configuration file where we expect -- that is, where
it has the parent we expect
"""
val = valid_tokens.get(key)
if val is None:
gp.log(gp.ERROR, 'unrecognized key: "%s"' % key)
p = val['parent']
# simplify for when the same keyword can appear in multiple places
if type(p) != list:
p = [p]
c = None
if len(crumb):
c = crumb[-1]
found = False
for m in p:
if m == c:
found = True
break
if not found:
gp.log(gp.ERROR, 'unexpected key: "%s"' % key)
return val
def yaml_walk(gp, node, crumb):
if type(node) == list:
for a in node:
if type(a) == tuple:
key = a[0].value.lower()
val = test_key(gp, key, crumb)
if (len(a) > 1 and val['parse_children'] and
(isinstance(a[1], yaml.nodes.MappingNode) or
isinstance(a[1], yaml.nodes.SequenceNode))):
crumb.append(key)
yaml_walk(gp, a[1], crumb)
crumb.pop()
elif isinstance(a, yaml.nodes.ScalarNode):
test_key(gp, a.value, crumb)
else:
yaml_walk(gp, a, crumb)
elif isinstance(node, yaml.nodes.MappingNode):
yaml_walk(gp, node.value, crumb)
elif isinstance(node, yaml.nodes.ScalarNode):
pass
elif isinstance(node, yaml.nodes.SequenceNode):
yaml_walk(gp, node.value, crumb)
elif isinstance(node, yaml.nodes.CollectionNode):
pass
def changeToUnicode(a):
"""
Change every entry in a list or dictionary to a unicode item
"""
if type(a) == list:
return list(map(changeToUnicode,a))
if type(a) == dict:
b = dict()
for key,value in list(a.items()):
if type(key) == str:
key = str(key)
b[key] = changeToUnicode(value)
return b
if type(a) == str:
a = str(a)
return a
def dictKeyToLower(a):
"""
    down-case all dictionary keys in a nested list/dict structure
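    For example:
    >>> dictKeyToLower({'VERSION': '1.0.0.1'})
    {'version': '1.0.0.1'}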
"""
if type(a) == list:
return list(map(dictKeyToLower,a))
if type(a) == dict:
b = dict()
for key,value in list(a.items()):
if type(key) == str:
key = str(key.lower())
b[key] = dictKeyToLower(value)
return b
if type(a) == str:
a = str(a)
return a
#
# MPP-13348
#
'''Jenkins hash - http://burtleburtle.net/bob/hash/doobs.html'''
def jenkinsmix(a, b, c):
a &= 0xffffffff; b &= 0xffffffff; c &= 0xffffffff
a -= b; a -= c; a ^= (c>>13); a &= 0xffffffff
b -= c; b -= a; b ^= (a<<8); b &= 0xffffffff
c -= a; c -= b; c ^= (b>>13); c &= 0xffffffff
a -= b; a -= c; a ^= (c>>12); a &= 0xffffffff
b -= c; b -= a; b ^= (a<<16); b &= 0xffffffff
c -= a; c -= b; c ^= (b>>5); c &= 0xffffffff
a -= b; a -= c; a ^= (c>>3); a &= 0xffffffff
b -= c; b -= a; b ^= (a<<10); b &= 0xffffffff
c -= a; c -= b; c ^= (b>>15); c &= 0xffffffff
return a, b, c
def jenkins(data, initval = 0):
length = lenpos = len(data)
if length == 0:
return 0
a = b = 0x9e3779b9
c = initval
p = 0
while lenpos >= 12:
a += (ord(data[p+0]) + (ord(data[p+1])<<8) + (ord(data[p+2])<<16) + (ord(data[p+3])<<24))
b += (ord(data[p+4]) + (ord(data[p+5])<<8) + (ord(data[p+6])<<16) + (ord(data[p+7])<<24))
c += (ord(data[p+8]) + (ord(data[p+9])<<8) + (ord(data[p+10])<<16) + (ord(data[p+11])<<24))
a, b, c = jenkinsmix(a, b, c)
p += 12
lenpos -= 12
c += length
if lenpos >= 11: c += ord(data[p+10])<<24
if lenpos >= 10: c += ord(data[p+9])<<16
if lenpos >= 9: c += ord(data[p+8])<<8
if lenpos >= 8: b += ord(data[p+7])<<24
if lenpos >= 7: b += ord(data[p+6])<<16
if lenpos >= 6: b += ord(data[p+5])<<8
if lenpos >= 5: b += ord(data[p+4])
if lenpos >= 4: a += ord(data[p+3])<<24
if lenpos >= 3: a += ord(data[p+2])<<16
if lenpos >= 2: a += ord(data[p+1])<<8
if lenpos >= 1: a += ord(data[p+0])
a, b, c = jenkinsmix(a, b, c)
return c
# MPP-20927: gpload external table name problem
# Not sure if it is used by other components, just leave it here.
def shortname(name):
"""
Returns a 10 character string formed by concatenating the first two characters
of the name with another 8 character string computed using the Jenkins hash
    function of the table name. When the original name has one or zero non-space
    ascii characters, we return '00' followed by the 8 char hash.
For example:
>>> shortname('mytable')
'my3cbb7ba8'
>>> shortname('some_pretty_long_test_table_name')
'so9068664a'
>>> shortname('t')
'006742be70'
@param name: the input tablename
@returns: a string 10 characters or less built from the table name
"""
# Remove spaces from original name
name = re.sub(r' ', '', name)
# Run the hash function
j = jenkins(name)
# Now also remove non ascii chars from original name.
# We do this after jenkins so that we exclude the
# (very rare) case of passing an empty string to jenkins
name = "".join(i for i in name if ord(i) < 128)
if len(name) > 1:
return '%2s%08x' % (name[0:2], j)
else:
return '00%08x' % (j) # could be len 0 or 1
class options:
pass
class gpload:
"""
Main class wrapper
"""
def __init__(self,argv):
self.threads = [] # remember threads so that we can join() against them
self.exitValue = 0
self.options = options()
self.options.h = None
self.options.gpfdist_timeout = None
self.options.p = None
self.options.U = None
self.options.W = False
self.options.D = False
self.options.no_auto_trans = False
self.options.password = None
self.options.d = None
self.DEBUG = 5
self.LOG = 4
self.INFO = 3
self.WARN = 2
self.ERROR = 1
self.options.qv = self.INFO
self.options.l = None
self.formatOpts = ""
self.startTimestamp = time.time()
self.error_table = False
self.gpdb_version = ""
self.options.max_retries = 0
seenv = False
seenq = False
# Create Temp and External table names. However external table name could
# get overwritten with another name later on (see create_external_table_name).
# MPP-20927: gpload external table name problem. We use uuid to avoid
        # external table name collisions.
self.unique_suffix = str(uuid.uuid1()).replace('-', '_')
self.staging_table_name = 'temp_staging_gpload_' + self.unique_suffix
self.extTableName = 'ext_gpload_' + self.unique_suffix
# SQL to run in order to undo our temporary work
self.cleanupSql = []
self.distkey = None
configFilename = None
while argv:
try:
try:
if argv[0]=='-h':
self.options.h = argv[1]
argv = argv[2:]
elif argv[0]=='--gpfdist_timeout':
self.options.gpfdist_timeout = argv[1]
argv = argv[2:]
elif argv[0]=='-p':
self.options.p = int(argv[1])
argv = argv[2:]
elif argv[0]=='-l':
self.options.l = argv[1]
argv = argv[2:]
elif argv[0]=='-q':
self.options.qv -= 1
argv = argv[1:]
seenq = True
elif argv[0]=='--version':
sys.stderr.write("gpload version $Revision$\n")
sys.exit(0)
elif argv[0]=='-v':
self.options.qv = self.LOG
argv = argv[1:]
seenv = True
elif argv[0]=='-V':
self.options.qv = self.DEBUG
argv = argv[1:]
seenv = True
elif argv[0]=='-W':
self.options.W = True
argv = argv[1:]
elif argv[0]=='-D':
self.options.D = True
argv = argv[1:]
elif argv[0]=='-U':
self.options.U = argv[1]
argv = argv[2:]
elif argv[0]=='-d':
self.options.d = argv[1]
argv = argv[2:]
elif argv[0]=='-f':
configFilename = argv[1]
argv = argv[2:]
elif argv[0]=='--max_retries':
self.options.max_retries = int(argv[1])
argv = argv[2:]
elif argv[0]=='--no_auto_trans':
self.options.no_auto_trans = True
argv = argv[1:]
elif argv[0]=='-?':
usage()
else:
break
except IndexError:
sys.stderr.write("Option %s needs a parameter.\n"%argv[0])
sys.exit(2)
except ValueError:
sys.stderr.write("Parameter for option %s must be an integer.\n"%argv[0])
sys.exit(2)
if configFilename==None:
usage('configuration file required')
elif argv:
a = ""
if len(argv) > 1:
a = "s"
usage('unrecognized argument%s: %s' % (a, ' '.join(argv)))
# default to gpAdminLogs for a log file, may be overwritten
if self.options.l is None:
self.options.l = os.path.join(os.environ.get('HOME', '.'),'gpAdminLogs')
if not os.path.isdir(self.options.l):
os.mkdir(self.options.l)
self.options.l = os.path.join(self.options.l, 'gpload_' + \
datetime.date.today().strftime('%Y%m%d') + '.log')
try:
self.logfile = open(self.options.l,'a')
except Exception as e:
self.log(self.ERROR, "could not open logfile %s: %s" % \
(self.options.l, e))
if seenv and seenq:
self.log(self.ERROR, "-q conflicts with -v and -V")
if self.options.D:
self.log(self.INFO, 'gpload has the -D option, so it does not actually load any data')
try:
f = open(configFilename,'r')
except IOError as e:
self.log(self.ERROR, "could not open configuration file: %s" % e)
# pull in the config file, which should be in valid YAML
try:
# do an initial parse, validating the config file
doc = f.read()
self.config = yaml.safe_load(doc)
self.configOriginal = changeToUnicode(self.config)
self.config = dictKeyToLower(self.config)
ver = self.getconfig('version', str, extraStuff = ' tag')
if ver != '1.0.0.1':
self.control_file_error("gpload configuration schema version must be 1.0.0.1")
# second parse, to check that the keywords are sensible
y = yaml.compose(doc)
# first should be MappingNode
if not isinstance(y, yaml.MappingNode):
self.control_file_error("configuration file must begin with a mapping")
yaml_walk(self, y.value, [])
except yaml.scanner.ScannerError as e:
self.log(self.ERROR, "configuration file error: %s, line %s" % \
(e.problem, e.problem_mark.line))
except yaml.reader.ReaderError as e:
es = ""
if isinstance(e.character, str):
es = "'%s' codec can't decode byte #x%02x: %s position %d" % \
(e.encoding, ord(e.character), e.reason,
e.position)
else:
es = "unacceptable character #x%04x at byte %d: %s" \
% (ord(e.character), e.position, e.reason)
self.log(self.ERROR, es)
except yaml.error.MarkedYAMLError as e:
self.log(self.ERROR, "configuration file error: %s, line %s" % \
(e.problem, e.problem_mark.line))
f.close()
self.subprocesses = []
self.log(self.INFO,'gpload session started ' + \
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
def control_file_warning(self, msg):
self.log(self.WARN, "A gpload control file processing warning occurred. %s" % msg)
def control_file_error(self, msg):
self.log(self.ERROR, "A gpload control file processing error occurred. %s" % msg)
def elevel2str(self, level):
if level == self.DEBUG:
return "DEBUG"
elif level == self.LOG:
return "LOG"
elif level == self.INFO:
return "INFO"
elif level == self.ERROR:
return "ERROR"
elif level == self.WARN:
return "WARN"
else:
self.log(self.ERROR, "unknown log type %i" % level)
def log(self, level, a):
"""
Level is either DEBUG, LOG, INFO, ERROR. a is the message
"""
try:
log = '|'.join(
[datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
self.elevel2str(level), a]) + '\n'
except Exception as e:
# log even if contains non-utf8 data and pass this exception
self.logfile.write("\nWarning: Log() threw an exception: %s \n" % (e))
if level <= self.options.qv:
sys.stdout.write(log)
if level <= self.options.qv or level <= self.INFO:
try:
self.logfile.write(log)
self.logfile.flush()
except AttributeError as e:
pass
if level == self.ERROR:
            self.exitValue = 2
sys.exit(self.exitValue)
def getconfig(self, a, typ=None, default='error', extraStuff='', returnOriginal=False):
"""
        Look for a config entry, via a colon-delimited string. a:b:c points to
a:
b:
c
Make sure that end point is of type 'typ' when not set to None.
If returnOriginal is False, the return value will be in lower case,
else the return value will be in its original form (i.e. the case that
the user specified in their yaml file).
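        A path element may carry a 1-based index, e.g. 'gpload:input:source(2)'
        selects the second SOURCE block (see the index handling below).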
"""
self.log(self.DEBUG, "getting config for " + a)
if returnOriginal == True:
config = self.configOriginal
else:
config = self.config
for s in a.split(':'):
self.log(self.DEBUG, "trying " + s)
index = 1
if s[-1:]==')':
j = s.index('(')
index = int(s[j+1:-1])
s = s[:j]
if type(config)!=list:
config = [config]
for c in config:
if type(c)==dict:
temp = caseInsensitiveDictLookup(s, c)
if temp != None:
index -= 1
if not index:
self.log(self.DEBUG, "found " + s)
config = temp
break
else:
if default=='error':
self.control_file_error("The configuration must contain %s%s"%(a,extraStuff))
sys.exit(2)
return default
if typ != None and type(config) != typ:
if typ == list:
self.control_file_error("The %s entry must be a YAML sequence %s"% (a ,extraStuff))
elif typ == dict:
self.control_file_error("The %s entry must be a YAML mapping %s"% (a, extraStuff))
            elif typ == str:
self.control_file_error("%s must be a string %s" % (a, extraStuff))
elif typ == int:
self.control_file_error("The %s entry must be a YAML integer %s" % (a, extraStuff))
else:
assert 0
self.control_file_error("Encountered unknown configuration type %s"% type(config))
sys.exit(2)
return config
def read_config(self):
"""
Configure ourselves
"""
# ensure output is of type list
self.getconfig('gpload:output', list)
# The user supplied table name can be completely or partially delimited,
# and it can be a one or two part name. Get the originally supplied name
# and parse it into its delimited one or two part name.
self.schemaTable = self.getconfig('gpload:output:table', str, returnOriginal=True)
schemaTableList = splitUpMultipartIdentifier(self.schemaTable)
schemaTableList = convertListToDelimited(schemaTableList)
if len(schemaTableList) == 2:
self.schema = schemaTableList[0]
self.table = schemaTableList[1]
else:
self.schema = None
self.table = schemaTableList[0]
# Precedence for configuration: command line > config file > env
# variable
# host to connect to
if not self.options.h:
self.options.h = self.getconfig('host', str, None)
if self.options.h:
self.options.h = str(self.options.h)
if not self.options.h:
self.options.h = os.environ.get('PGHOST')
if not self.options.h or len(self.options.h) == 0:
self.log(self.INFO, "no host supplied, defaulting to localhost")
self.options.h = "localhost"
# Port to connect to
if not self.options.p:
self.options.p = self.getconfig('port',int,None)
if not self.options.p:
try:
self.options.p = int(os.environ.get('PGPORT'))
except (ValueError, TypeError):
pass
if not self.options.p:
self.options.p = 5432
# User to connect as
if not self.options.U:
self.options.U = self.getconfig('user', str, None)
if not self.options.U:
self.options.U = os.environ.get('PGUSER')
if not self.options.U:
self.options.U = getpass.getuser()
self.log(self.INFO, "no user supplied, defaulting to "+self.options.U)
#self.options.U = os.environ.get('USER') or \
# os.environ.get('LOGNAME') or \
# os.environ.get('USERNAME')
if not self.options.U or len(self.options.U) == 0:
self.log(self.ERROR,
"You need to specify your username with the -U " +
"option or in your configuration or in your " +
"environment as PGUSER")
# database to connect to
if not self.options.d:
self.options.d = self.getconfig('database', str, None)
if not self.options.d:
self.options.d = os.environ.get('PGDATABASE')
if not self.options.d:
# like libpq, just inherit USER
self.options.d = self.options.U
if self.getconfig('gpload:input:error_table', str, None):
self.error_table = True
self.log(self.WARN,
"ERROR_TABLE is not supported. " +
"We will set LOG_ERRORS and REUSE_TABLES to True for compatibility.")
def gpfdist_port_options(self, name, availablePorts, popenList):
"""
Adds gpfdist -p / -P port options to popenList based on port and port_range in YAML file.
Raises errors if options are invalid or ports are unavailable.
@param name: input source name from YAML file.
@param availablePorts: current set of available ports
@param popenList: gpfdist options (updated)
"""
port = self.getconfig(name + ':port', int, None)
port_range = self.getconfig(name+':port_range', list, None)
if port:
startPort = endPort = port
endPort += 1
elif port_range:
try:
startPort = int(port_range[0])
endPort = int(port_range[1])
except (IndexError,ValueError):
self.control_file_error(name + ":port_range must be a YAML sequence of two integers")
else:
startPort = self.getconfig(name+':port',int,8000)
endPort = self.getconfig(name+':port',int,9000)
if (startPort > 65535 or endPort > 65535):
# Do not allow invalid ports
self.control_file_error("Invalid port. Port values must be less than or equal to 65535.")
elif not (set(range(startPort,endPort+1)) & availablePorts):
self.log(self.ERROR, "no more ports available for gpfdist")
popenList.append('-p')
popenList.append(str(startPort))
popenList.append('-P')
popenList.append(str(endPort))
def gpfdist_filenames(self, name, popenList):
"""
Adds gpfdist -f filenames to popenList.
Raises errors if YAML file option is invalid.
@param name: input source name from YAML file.
@param popenList: gpfdist options (updated)
@return: list of files names
"""
file = self.getconfig(name+':file',list)
for i in file:
            if type(i) != str:
                self.control_file_error(name + ":file must be a YAML sequence of strings")
popenList.append('-f')
popenList.append('"'+' '.join(file)+'"')
return file
def gpfdist_timeout_options(self, popenList):
"""
Adds gpfdist -t timeout option to popenList.
@param popenList: gpfdist options (updated)
"""
if self.options.gpfdist_timeout != None:
gpfdistTimeout = self.options.gpfdist_timeout
else:
gpfdistTimeout = 30
popenList.append('-t')
popenList.append(str(gpfdistTimeout))
def gpfdist_verbose_options(self, popenList):
"""
Adds gpfdist -v / -V options to popenList depending on logging level
@param popenList: gpfdist options (updated)
"""
if self.options.qv == self.LOG:
popenList.append('-v')
elif self.options.qv > self.LOG:
popenList.append('-V')
def gpfdist_max_line_length(self, popenList):
"""
Adds gpfdist -m option to popenList when max_line_length option specified in YAML file.
@param popenList: gpfdist options (updated)
"""
max_line_length = self.getconfig('gpload:input:max_line_length',int,None)
if max_line_length is not None:
popenList.append('-m')
popenList.append(str(max_line_length))
def gpfdist_transform(self, popenList):
"""
Compute and return url fragment if transform option specified in YAML file.
Checks for readable transform config file if transform_config option is specified.
Adds gpfdist -c option to popenList if transform_config is specified.
Validates that transform_config is present when transform option is specified.
@param popenList: gpfdist options (updated)
@returns: uri fragment for transform or "" if not appropriate.
"""
transform = self.getconfig('gpload:input:transform', str, None)
transform_config = self.getconfig('gpload:input:transform_config', str, None)
if transform_config:
try:
f = open(transform_config,'r')
except IOError as e:
self.log(self.ERROR, "could not open transform_config file: %s" % e)
f.close()
popenList.append('-c')
popenList.append(transform_config)
else:
if transform:
self.control_file_error("transform_config is required when transform is specified")
fragment = ""
if transform is not None:
fragment = "#transform=" + transform
return fragment
def gpfdist_ssl(self, popenList):
"""
Adds gpfdist --ssl option to popenList when ssl option specified as true in YAML file.
@param popenList: gpfdist options (updated)
"""
ssl = self.getconfig('gpload:input:source:ssl',bool, False)
certificates_path = self.getconfig('gpload:input:source:certificates_path', str, None)
if ssl and certificates_path:
dir_exists = os.path.isdir(certificates_path)
if dir_exists == False:
self.log(self.ERROR, "could not access CERTIFICATES_PATH directory: %s" % certificates_path)
popenList.append('--ssl')
popenList.append(certificates_path)
else:
if ssl:
self.control_file_error("CERTIFICATES_PATH is required when SSL is specified as true")
elif certificates_path: # ssl=false (or not specified) and certificates_path is specified
self.control_file_error("CERTIFICATES_PATH is specified while SSL is not specified as true")
def start_gpfdists(self):
"""
Start gpfdist daemon(s)
"""
self.locations = []
self.ports = []
sourceIndex = 0
        availablePorts = set(range(1, 65536))   # ports 1..65535 inclusive
found_source = False
while 1:
sourceIndex += 1
name = 'gpload:input:source(%d)'%sourceIndex
a = self.getconfig(name,None,None)
if not a:
break
found_source = True
local_hostname = self.getconfig(name+':local_hostname', list, False)
# do default host, the current one
if not local_hostname:
# if fully_qualified_domain_name is defined and set to true we want to
# resolve the fqdn rather than just grabbing the hostname.
fqdn = self.getconfig('gpload:input:fully_qualified_domain_name', bool, False)
if fqdn:
local_hostname = [socket.getfqdn()]
else:
local_hostname = [socket.gethostname()]
# build gpfdist parameters
popenList = ['gpfdist']
self.gpfdist_ssl(popenList)
self.gpfdist_port_options(name, availablePorts, popenList)
file = self.gpfdist_filenames(name, popenList)
self.gpfdist_timeout_options(popenList)
self.gpfdist_verbose_options(popenList)
self.gpfdist_max_line_length(popenList)
fragment = self.gpfdist_transform(popenList)
try:
self.log(self.LOG, 'trying to run %s' % ' '.join(popenList))
cfds = True
if platform.system() in ['Windows', 'Microsoft']: # not supported on win32
cfds = False
cmd = ' '.join(popenList)
needshell = False
else:
srcfile = None
if os.environ.get('GPHOME_LOADERS'):
srcfile = os.path.join(os.environ.get('GPHOME_LOADERS'),
'greenplum_loaders_path.sh')
elif os.environ.get('GPHOME'):
srcfile = os.path.join(os.environ.get('GPHOME'),
'greenplum_path.sh')
if (not (srcfile and os.path.exists(srcfile))):
self.log(self.ERROR, 'cannot find greenplum environment ' +
'file: environment misconfigured')
cmd = 'source %s ; exec ' % srcfile
cmd += ' '.join(popenList)
needshell = True
a = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=cfds, shell=needshell)
self.subprocesses.append(a)
except Exception as e:
self.log(self.ERROR, "could not run %s: %s" % \
(' '.join(popenList), str(e)))
"""
Reading from stderr and stdout on a Popen object can result in a dead lock if done at the same time.
Create a lock to share when reading stderr and stdout from gpfdist.
"""
readLock = threading.Lock()
# get all the output from the daemon(s)
t = CatThread(self,a.stderr, readLock)
t.start()
self.threads.append(t)
while 1:
readLock.acquire()
line = a.stdout.readline().decode()
readLock.release()
if line=='':
self.log(self.ERROR,'failed to start gpfdist: ' +
'gpfdist command line: ' + ' '.join(popenList))
line = line.strip('\n')
self.log(self.LOG,'gpfdist says: ' + line)
if (line.startswith('Serving HTTP on port ') or line.startswith('Serving HTTPS on port ')):
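                    # 'Serving HTTPS on port ' is one character longer than this
                    # slice accounts for; int() tolerates the leading space.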
port = int(line[21:line.index(',')])
break
self.log(self.INFO, 'started %s' % ' '.join(popenList))
self.log(self.LOG,'gpfdist is running on port %d'%port)
if port in availablePorts:
availablePorts.remove(port)
self.ports.append(port)
t = CatThread(self,a.stdout,readLock)
t.start()
self.threads.append(t)
ssl = self.getconfig('gpload:input:source:ssl', bool, False)
if ssl:
protocol = 'gpfdists'
else:
protocol = 'gpfdist'
for l in local_hostname:
                if type(l) != str:
                    self.control_file_error(name + ":local_hostname must be a YAML sequence of strings")
l = str(l)
sep = ''
if file[0] != '/':
sep = '/'
# MPP-13617
if ':' in l:
l = '[' + l + ']'
self.locations.append('%s://%s:%d%s%s%s' % (protocol, l, port, sep, '%20'.join(file), fragment))
if not found_source:
self.control_file_error("configuration file must contain source definition")
def readPgpass(self,pgpassname):
"""
        Get password from .pgpass file
"""
try:
f = open(pgpassname,'r')
except IOError:
return
for row in f:
try:
row = row.rstrip("\n")
line = splitPgpassLine(row)
if line[0]!='*' and line[0].lower()!=self.options.h.lower():
continue
if line[1]!='*' and int(line[1])!=self.options.p:
continue
if line[2]!='*' and line[2]!=self.options.d:
continue
if line[3]!='*' and line[3]!=self.options.U:
continue
self.options.password = line[4]
break
except (ValueError,IndexError):
pass
f.close()
def setup_connection(self, recurse = 0):
"""
Connect to the backend
"""
if self.db != None:
self.db.close()
self.db = None
if self.options.W:
if self.options.password==None:
self.options.password = getpass.getpass()
else:
if self.options.password==None:
self.options.password = self.getconfig('password', str,
None)
if self.options.password==None:
self.options.password = os.environ.get('PGPASSWORD')
if self.options.password==None:
self.readPgpass(os.environ.get('PGPASSFILE',
os.environ.get('HOME','.')+'/.pgpass'))
try:
self.log(self.DEBUG, "connection string:" +
" user=" + str(self.options.U) +
" host=" + str(self.options.h) +
" port=" + str(self.options.p) +
" database=" + str(self.options.d))
self.db = pg.DB( dbname=self.options.d
, host=self.options.h
, port=self.options.p
, user=self.options.U
, passwd=self.options.password
)
self.log(self.DEBUG, "Successfully connected to database")
if noGpVersion == False:
# Get GPDB version
curs = self.db.query("SELECT version()")
self.gpdb_version = GpVersion(curs.getresult()[0][0])
self.log(self.DEBUG, "GPDB version is: %s" % self.gpdb_version)
except Exception as e:
errorMessage = str(e)
if errorMessage.find("no password supplied") != -1:
self.options.password = getpass.getpass()
recurse += 1
if recurse > 10:
self.log(self.ERROR, "too many login attempt failures")
self.setup_connection(recurse)
elif errorMessage.find("Connection timed out") != -1 and self.options.max_retries != 0:
recurse += 1
if self.options.max_retries > 0:
if recurse > self.options.max_retries: # retry failed
self.log(self.ERROR, "could not connect to database after retry %d times, " \
"error message:\n %s" % (recurse-1, errorMessage))
else:
self.log(self.INFO, "retry to connect to database, %d of %d times" % (recurse,
self.options.max_retries))
else: # max_retries < 0, retry forever
self.log(self.INFO, "retry to connect to database.")
self.setup_connection(recurse)
else:
self.log(self.ERROR, "could not connect to database: %s. Is " \
"the Greenplum Database running on port %i?" % (errorMessage,
self.options.p))
def add_quote_if_not(self, col):
'''
        Check whether the column name string is already quoted.
        If not, return it wrapped in double quotes.
        pyyaml cannot preserve quotes of strings in the yaml file,
        so we need to quote the string for further comparison if it is not quoted.
'''
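        # Illustrative: col1 -> "col1"; "col1" and 'col1' are returned unchanged.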
if col[0] == '"' and col[-1] == '"':
return col
elif col[0] == "'" and col[-1] == "'":
return col
else:
return quote_ident(col)
def read_columns(self):
'''
get from columns
'''
columns = self.getconfig('gpload:input:columns',list,None, returnOriginal=True)
if columns != None:
self.from_cols_from_user = True # user specified from columns
self.from_columns = []
for d in columns:
if type(d)!=dict:
self.control_file_error("gpload:input:columns must be a sequence of YAML mappings")
tempkey = list(d.keys())[0]
value = d[tempkey]
""" remove leading or trailing spaces """
d = { tempkey.strip() : value }
key = list(d.keys())[0]
col_name = self.add_quote_if_not(key)
if d[key] is None or not d[key]:
self.log(self.DEBUG,
'getting source column data type from target')
for name, typ, mapto, hasseq in self.into_columns:
if sqlIdentifierCompare(name, key):
d[key] = typ
break
# perform the same kind of magic type change that postgres does
if d[key] == 'bigserial':
d[key] = 'bigint'
elif d[key] == 'serial':
d[key] = 'int4'
# Mark this column as having no mapping, which is important
# for do_insert()
self.from_columns.append([col_name,d[key].lower(),None, False])
else:
self.from_columns = self.into_columns
self.from_cols_from_user = False
# make sure that all columns have a type
for name, typ, map, hasseq in self.from_columns:
if typ is None:
self.log(self.ERROR, 'column "%s" has no type ' % name +
'and does not appear in target table "%s"' % self.schemaTable)
self.log(self.DEBUG, 'from columns are:')
for c in self.from_columns:
name = c[0]
typ = c[1]
self.log(self.DEBUG, '%s: %s'%(name,typ))
def read_table_metadata(self):
'''
get into columns list like: [column name, column data type, mapping target, has_sequence(bool)]
'''
# KAS Note to self. If schema is specified, then probably should use PostgreSQL rules for defining it.
        # find the schema name for this table (according to search_path)
# if it was not explicitly specified in the configuration file.
if self.schema is None:
queryString = """SELECT n.nspname
FROM pg_catalog.pg_class c
INNER JOIN pg_catalog.pg_namespace n
ON n.oid = c.relnamespace
WHERE c.relname = '%s'
AND pg_catalog.pg_table_is_visible(c.oid);""" % quote_unident(self.table)
resultList = self.db.query(queryString).getresult()
if len(resultList) > 0:
self.schema = (resultList[0])[0]
self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table)))
else:
self.log(self.ERROR, "table %s not found in any database schema" % self.table)
queryString = """select nt.nspname as table_schema,
c.relname as table_name,
a.attname as column_name,
a.attnum as ordinal_position,
format_type(a.atttypid, a.atttypmod) as data_type,
c.relkind = 'r' AS is_updatable,
a.atttypid in (23, 20) and a.atthasdef and
(select position ( 'nextval(' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0 and
position ( '::regclass)' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) as has_sequence
from pg_catalog.pg_class c join pg_catalog.pg_namespace nt on (c.relnamespace = nt.oid)
join pg_attribute a on (a.attrelid = c.oid)
where a.attnum > 0 and a.attisdropped = 'f'
and a.attrelid = (select c.oid from pg_catalog.pg_class c join pg_catalog.pg_namespace nt on (c.relnamespace = nt.oid) where c.relname = '%s' and nt.nspname = '%s')
order by a.attnum """ % (quote_unident(self.table), quote_unident(self.schema))
count = 0
self.into_columns = []
self.into_columns_dict = dict()
resultList = self.db.query(queryString).dictresult()
while count < len(resultList):
row = resultList[count]
count += 1
ct = str(row['data_type'])
if ct == 'bigserial':
ct = 'bigint'
elif ct == 'serial':
ct = 'int4'
name = row['column_name']
name = quote_ident(name)
has_seq = row['has_sequence']
            if has_seq == 'f' or has_seq == False:
                has_seq_bool = False
            if has_seq == 't' or has_seq == True:
                has_seq_bool = True
i = [name,ct,None, has_seq_bool]
# i: [column name, column data type, mapping target, has_sequence]
self.into_columns.append(i)
self.into_columns_dict[name] = i
self.log(self.DEBUG, "found input column: " + str(i))
if count == 0:
# see if it's a permissions issue or it actually doesn't exist
tableName = quote_unident(self.table)
tableSchema = quote_unident(self.schema)
sql = """select 1 from pg_class c, pg_namespace n
where c.relname = '%s' and
n.nspname = '%s' and
n.oid = c.relnamespace""" % (tableName, tableSchema)
resultList = self.db.query(sql).getresult()
if len(resultList) > 0:
self.log(self.ERROR, "permission denied for table %s.%s" % \
(tableSchema, tableName))
else:
self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d))
def read_mapping(self):
'''
        get mapping for into_columns and record the mapping at into_columns[2].
        if no mapping is given in the config file, this function will derive the mapping from from_columns
'''
mapping = self.getconfig('gpload:output:mapping',dict,None, returnOriginal=True)
if mapping:
for key,value in list(mapping.items()):
if type(key) != str or type(value) != str:
self.control_file_error("gpload:output:mapping must be a YAML type mapping from strings to strings")
found = False
for a in self.into_columns:
if sqlIdentifierCompare(a[0], key) == True:
a[2] = value
found = True
break
if found == False:
self.log(self.ERROR,'%s in mapping is not in table %s'% \
(key, self.schemaTable))
else:
# Now, map anything yet to be mapped to itself, picking up on those
# columns which are not found in the table.
for x in self.from_columns:
# Check to see if it already has a mapping value
i = [a for a in self.into_columns if a[2] == x[0]]
if not i:
# Check to see if the target column names match the input column names.
for a in self.into_columns:
if sqlIdentifierCompare(a[0], x[0]) == True:
i = a
break
if i:
if i[2] is None: i[2] = i[0]
else:
self.log(self.ERROR, 'no mapping for input column ' +
'"%s" to output table' % x[0])
for name,typ,mapto,seq in self.into_columns:
self.log(self.DEBUG,'%s: %s = %s'%(name,typ,mapto))
def get_reuse_exttable_query(self, formatType, formatOpts, limitStr, from_cols, schemaName, log_errors, encodingCode):
'''
In order to find out whether we have an existing external table in the
catalog which could be reused for this operation we need to make sure
that it has the same column names and types, the same data format, and
location specification, and single row error handling specs.
Return:
SQL to run in order to find out whether such a table exists.
'''
sqlFormat = """select attrelid::regclass
from (
select
attrelid,
row_number() over (partition by attrelid order by attnum) as attord,
attnum,
attname,
atttypid::regtype
from
pg_attribute
join
pg_class
on (pg_class.oid = attrelid)
%s
where
relkind = '%s' and
relname like 'ext_gpload_reusable_%%' and
attnum > 0 and
not attisdropped and %s
) pgattr
join
pg_exttable pgext
on(pgattr.attrelid = pgext.reloid)
"""
joinStr = ""
relkind = ""
conditionStr = ""
        # if schemaName is None, find the reusable ext table which is visible to
        # the current search path. Otherwise find the reusable ext table under the
        # specific schema, which requires a join with pg_namespace.
if schemaName is None:
joinStr = ""
conditionStr = "pg_table_is_visible(pg_class.oid)"
else:
joinStr = """join
pg_namespace pgns
on(pg_class.relnamespace = pgns.oid)
"""
conditionStr = "pgns.nspname = '%s'" % schemaName
if noGpVersion or self.gpdb_version < "7.0.0":
relkind='r'
else:
relkind='f'
sql = sqlFormat % (joinStr, relkind, conditionStr)
if noGpVersion or self.gpdb_version < "6.0.0":
if log_errors:
sql += " WHERE pgext.fmterrtbl = pgext.reloid "
else:
sql += " WHERE pgext.fmterrtbl IS NULL "
else:
if log_errors:
sql += " WHERE pgext.logerrors='t' "
else:
sql += " WHERE pgext.logerrors='f' "
for i, l in enumerate(self.locations):
sql += " and pgext.urilocation[%s] = %s\n" % (i + 1, quote(l))
sql+= """and pgext.fmttype = %s
and pgext.writable = false
and pgext.fmtopts like %s """ % (quote(formatType[0]),quote("%" + quote_unident(formatOpts.rstrip()) +"%"))
if limitStr:
sql += "and pgext.rejectlimit = %s " % limitStr
else:
sql += "and pgext.rejectlimit IS NULL "
if encodingCode:
sql += "and pgext.encoding = %s " % encodingCode
sql+= "group by attrelid "
sql+= """having
count(*) = %s and
bool_and(case """ % len(from_cols)
for i, c in enumerate(from_cols):
name = c[0]
typ = c[1]
sql+= "when attord = %s then atttypid = %s::regtype and attname = %s\n" % (i+1, quote(typ), quote(quote_unident(name)))
sql+= """else true
end)
limit 1;"""
self.log(self.DEBUG, "query used to identify reusable external relations: %s" % sql)
return sql
def get_fast_match_exttable_query(self, formatType, formatOpts, limitStr, schemaName, log_errors, encodingCode):
'''
Fast path to find out whether we have an existing external table in the
catalog which could be reused for this operation. we only make sure the
location, data format and error limit are same. we don't check column
names and types
Return: SQL to run in order to find out whether
such a table exists. The results of this SQl are table names without schema
'''
sqlFormat = """select relname from pg_class
join
pg_exttable pgext
on(pg_class.oid = pgext.reloid)
%s
where
relkind = '%s' and
relname like 'ext_gpload_reusable_%%' and
%s
"""
joinStr = ""
relkind = ""
conditionStr = ""
        # if schemaName is None, find the reusable ext table which is visible to
        # the current search path. Otherwise find the reusable ext table under the
        # specific schema, which requires a join with pg_namespace.
if schemaName is None:
joinStr = ""
conditionStr = "pg_table_is_visible(pg_class.oid)"
else:
joinStr = """join
pg_namespace pgns
on(pg_class.relnamespace = pgns.oid)"""
conditionStr = "pgns.nspname = '%s'" % schemaName
if noGpVersion or self.gpdb_version < "7.0.0":
relkind='r'
else:
relkind='f'
sql = sqlFormat % (joinStr, relkind, conditionStr)
if noGpVersion or self.gpdb_version < "6.0.0":
if log_errors:
sql += " and pgext.fmterrtbl = pgext.reloid "
else:
sql += " and pgext.fmterrtbl IS NULL "
else:
if log_errors:
sql += " and pgext.logerrors='t' "
else:
sql += " and pgext.logerrors='f' "
for i, l in enumerate(self.locations):
sql += " and pgext.urilocation[%s] = %s\n" % (i + 1, quote(l))
sql+= """and pgext.fmttype = %s
and pgext.writable = false
and pgext.fmtopts like %s """ % (quote(formatType[0]),quote("%" + quote_unident(formatOpts.rstrip()) +"%"))
if limitStr:
sql += "and pgext.rejectlimit = %s " % limitStr
else:
sql += "and pgext.rejectlimit IS NULL "
if encodingCode:
sql += "and pgext.encoding = %s " % encodingCode
sql+= "limit 1;"
self.log(self.DEBUG, "query used to fast match external relations:\n %s" % sql)
return sql
def get_staging_conditions_string(self, target_table_name, staging_cols, distribution_cols):
'''
Create a string from the following conditions to reuse staging table:
1. same target table
2. same number of columns
3. same names and types, in the same order
4. same distribution key (according to columns' names and their order)
Return:
string (target_table_name:columns_num:staging_cols_str:distribution_cols_str)
'''
columns_num = len(staging_cols)
staging_cols_str = '-'.join(['%s-%s' % (quote(quote_unident(col[0])), quote(col[1])) for col in staging_cols])
distribution_cols_str = '-'.join([quote(quote_unident(col)) for col in distribution_cols])
return '%s:%s:%s:%s' % (target_table_name, columns_num, staging_cols_str, distribution_cols_str)
def get_reuse_staging_table_query(self, encoding_conditions):
'''
This function will return the SQL to run in order to find out whether
we have an existing staging table in the catalog which could be reused for this
operation, according to the method and the encoding conditions.
return:
sql(string)
'''
sql = """SELECT oid::regclass
FROM pg_class
WHERE relname = 'staging_gpload_reusable_%s';""" % (encoding_conditions)
self.log(self.DEBUG, "query used to identify reusable temporary relations: %s" % sql)
return sql
def get_table_oid(self, tableName):
'''get oid for table from pg_class, None if not exist'''
if tableName:
sql = "select %s::regclass::oid" % quote(quote_unident(tableName))
try:
resultList = self.db.query(sql).getresult()
return resultList[0][0]
except Exception as e:
pass
return None
def get_ext_schematable(self, schemaName, tableName):
'''
        return the formatted, optionally schema-qualified, table name
'''
if schemaName is None:
return tableName
else:
schemaTable = "%s.%s" % (schemaName, tableName)
return schemaTable
def get_external_table_formatOpts(self, option, specify=''):
'''
        append the given option (using the SQL keyword from 'specify', when supplied) to self.formatOpts for creating the external table
'''
formatType = self.getconfig('gpload:input:format', str, 'text').lower()
if formatType == 'text':
valid_token = ['delimiter','escape']
elif formatType == 'csv':
valid_token = ['delimiter', 'quote', 'escape']
else:
valid_token = []
if not option in valid_token:
self.control_file_error("The option you specified doesn't support now")
return
if option == 'delimiter':
defval = ',' if formatType == 'csv' else '\t'
val = self.getconfig('gpload:input:delimiter', str, defval)
elif option == 'escape':
defval = self.getconfig('gpload:input:quote', str, '"')
val = self.getconfig('gpload:input:escape', str, defval)
elif option == 'quote':
val = self.getconfig('gpload:input:quote', str, '"')
else:
self.control_file_error("unexpected error -- backtrace " +
"written to log file")
sys.exit(2)
specify_str = str(specify) if specify else option
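        # A multi-character value may still denote a single character once escape
        # sequences are decoded (e.g. '\x1c' or E'\x1c'); try decoding both the
        # whole value and the E'...'-wrapped inner value before rejecting it.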
if len(val) != 1:
val_decoded = val.encode().decode('unicode-escape')
subval_decoded = val[2:-1].encode().decode('unicode-escape')
if val.startswith("E'") and val.endswith("'") and len(subval_decoded) == 1:
subval = val[2:-1]
if subval == "\\'":
self.formatOpts += "%s %s " % (specify_str, val)
else:
val = subval_decoded
self.formatOpts += "%s '%s' " % (specify_str, val)
elif len(val_decoded) == 1:
val = val_decoded
self.formatOpts += "%s '%s' " % (specify_str, val)
else:
                self.control_file_warning(option + ''' must be a single ASCII character; you can also use unprintable characters (for example: '\\x1c' / E'\\x1c' or '\\u001c' / E'\\u001c').''')
self.control_file_error("Invalid option, gpload quit immediately")
sys.exit(2)
else:
self.formatOpts += "%s '%s' " % (specify_str, val)
def create_external_table(self):
'''
extract all control file information and transform it accordingly,
create a new external table or find a reusable external table to use for this operation or later
'''
formatType = self.getconfig('gpload:input:format', str, 'text').lower()
locationStr = ','.join(map(quote,self.locations))
self.get_external_table_formatOpts('delimiter')
nullas = self.getconfig('gpload:input:null_as', str, False)
self.log(self.DEBUG, "null " + str(nullas))
if nullas != False: # could be empty string
self.formatOpts += "null %s " % quote_no_slash(nullas)
elif formatType=='csv':
self.formatOpts += "null '' "
else:
self.formatOpts += "null %s " % quote_no_slash("\\N")
esc = self.getconfig('gpload:input:escape', None, None)
if esc:
if type(esc) != str and type(esc) != str:
self.control_file_error("gpload:input:escape must be a string")
if esc.lower() == 'off':
if formatType == 'csv':
self.control_file_error("ESCAPE cannot be set to OFF in CSV mode")
self.formatOpts += "escape 'off' "
else:
self.get_external_table_formatOpts('escape')
else:
if formatType=='csv':
self.get_external_table_formatOpts('quote','escape')
else:
self.formatOpts += "escape '\\'"
if formatType=='csv':
self.get_external_table_formatOpts('quote')
if self.getconfig('gpload:input:header',bool,False):
self.formatOpts += "header "
### should be true or false
force_not_null_columns = self.getconfig('gpload:input:force_not_null',list,[])
if force_not_null_columns:
for i in force_not_null_columns:
                if type(i) != str:
                    self.control_file_error("gpload:input:force_not_null must be a YAML sequence of strings")
self.formatOpts += "force not null %s " % ','.join(force_not_null_columns)
encodingCode = None
encodingStr = self.getconfig('gpload:input:encoding', str, None)
if encodingStr is None:
result = self.db.query("SHOW SERVER_ENCODING").getresult()
if len(result) > 0:
encodingStr = result[0][0]
if encodingStr:
sql = "SELECT pg_char_to_encoding('%s')" % encodingStr
result = self.db.query(sql).getresult()
if len(result) > 0:
encodingCode = result[0][0]
limitStr = self.getconfig('gpload:input:error_limit',int, None)
if self.log_errors and not limitStr:
self.control_file_error("gpload:input:log_errors requires " +
"gpload:input:error_limit to be specified")
self.extSchemaName = self.getconfig('gpload:external:schema', str, None)
if self.extSchemaName == '%':
self.extSchemaName = self.schema
        # get the list of columns to use in the external table
        if not self.from_cols_from_user:
            # skip columns that have a sequence (serial) default
            from_cols = [a for a in self.from_columns if a[3] != True]
else:
from_cols = self.from_columns
if formatType == 'csv' or formatType == 'text':
if self.getconfig('gpload:input:fill_missing_fields', bool, False):
self.formatOpts += 'fill missing fields'
# If the 'reuse tables' option was specified we now try to find an
# already existing external table in the catalog which will match
# the one that we need to use. It must have identical attributes,
# external location, format, and encoding specifications.
if self.reuse_tables == True:
if self.staging_table:
if '.' in self.staging_table:
self.log(self.ERROR, "Character '.' is not allowed in staging_table parameter. Please use EXTERNAL->SCHEMA to set the schema of external table")
self.extTableName = quote_unident(self.staging_table)
sql = """SELECT n.nspname as Schema, c.relname as Name
FROM pg_catalog.pg_class c
INNER JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','S','f','')
AND c.relname = '%s'
""" % self.extTableName
if self.extSchemaName is not None:
sql += "AND n.nspname = '%s'" % quote_unident(self.extSchemaName)
else:
sql += """AND pg_catalog.pg_table_is_visible(c.oid)
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'"""
result = self.db.query(sql).getresult()
if len(result) > 0:
self.extSchemaTable = self.get_ext_schematable(quote_unident(self.extSchemaName), self.extTableName)
self.log(self.INFO, "reusing external staging table %s" % self.extSchemaTable)
return
# staging table is not specified, we need to find it manually
else:
# process the single quotes in order to successfully find an existing external table to reuse.
self.formatOpts = self.formatOpts.replace("E'\\''","'\''")
if self.fast_match:
sql = self.get_fast_match_exttable_query(formatType, self.formatOpts,
limitStr, self.extSchemaName, self.log_errors, encodingCode)
else:
sql = self.get_reuse_exttable_query(formatType, self.formatOpts,
limitStr, from_cols, self.extSchemaName, self.log_errors, encodingCode)
resultList = self.db.query(sql).getresult()
if len(resultList) > 0:
# found an external table to reuse. no need to create one. we're done here.
self.extTableName = (resultList[0])[0]
                # fast match result is only the table name, so we need to add schema info
if self.fast_match:
self.extSchemaTable = self.get_ext_schematable(quote_unident(self.extSchemaName), self.extTableName)
else:
self.extSchemaTable = self.extTableName
self.log(self.INFO, "reusing external table %s" % self.extSchemaTable)
return
# didn't find an existing external table suitable for reuse. Format a reusable
# name and issue a CREATE EXTERNAL TABLE on it. Hopefully we can use it next time
# around
self.extTableName = "ext_gpload_reusable_%s" % self.unique_suffix
self.log(self.INFO, "did not find an external table to reuse. creating %s" % self.get_ext_schematable(self.extSchemaName, self.extTableName))
# process the single quotes in order to successfully create an external table.
self.formatOpts = self.formatOpts.replace("'\''","E'\\''")
# construct a CREATE EXTERNAL TABLE statement and execute it
self.extSchemaTable = self.get_ext_schematable(self.extSchemaName, self.extTableName)
sql = "create external table %s" % self.extSchemaTable
sql += "(%s)" % ','.join(['%s %s' % (a[0], a[1]) for a in from_cols])
sql += "location(%s) "%locationStr
sql += "format%s "% quote(formatType)
if len(self.formatOpts) > 0:
sql += "(%s) "% self.formatOpts
if encodingStr:
sql += "encoding%s "%quote(encodingStr)
if self.log_errors:
sql += "log errors "
if limitStr:
if limitStr < 2:
self.control_file_error("error_limit must be 2 or higher")
sql += "segment reject limit %s "%limitStr
try:
self.db.query(sql.encode('utf-8'))
except Exception as e:
            self.log(self.ERROR, 'could not run SQL "%s": %s' % (sql, str(e)))
# set up to drop the external table at the end of operation, unless user
# specified the 'reuse_tables' option, in which case we don't drop
if self.reuse_tables == False:
self.cleanupSql.append('drop external table if exists %s'%self.extSchemaTable)
def create_staging_table(self):
'''
Create a new staging table or find a reusable staging table to use for this operation
(only valid for update/merge operations).
'''
# make sure we set the correct distribution policy
distcols = self.getconfig('gpload:output:match_columns', list)
sql = "SELECT * FROM pg_class WHERE relname LIKE 'temp_gpload_reusable_%%';"
resultList = self.db.query(sql).getresult()
if len(resultList) > 0:
self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found.
Greenplum recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""")
# If the 'reuse tables' option was specified we now try to find an
# already existing staging table in the catalog which will match
# the one that we need to use. It must meet the reuse conditions
is_temp_table = 'TEMP '
target_columns = []
for column in self.into_columns:
if column[2]:
target_columns.append([quote_unident(column[0]), column[1]])
if self.reuse_tables == True:
is_temp_table = ''
target_table_name = quote_unident(self.table)
            # create a string from all reuse conditions for staging tables and encode it
conditions_str = self.get_staging_conditions_string(target_table_name, target_columns, distcols).encode()
encoding_conditions = hashlib.md5(conditions_str).hexdigest()
sql = self.get_reuse_staging_table_query(encoding_conditions)
resultList = self.db.query(sql).getresult()
if len(resultList) > 0:
# found a temp table to reuse. no need to create one. we're done here.
self.staging_table_name = (resultList[0])[0]
self.log(self.INFO, "reusing staging table %s" % self.staging_table_name)
# truncate it so we don't use old data
self.do_truncate(self.staging_table_name)
return
# didn't find an existing staging table suitable for reuse. Format a reusable
# name and issue a CREATE TABLE on it (without TEMP!). Hopefully we can use it
# next time around
            # we no longer need the timestamp, since we will never want to create
            # multiple tables with the same encoding_conditions
self.staging_table_name = "staging_gpload_reusable_%s" % (encoding_conditions)
self.log(self.INFO, "did not find a staging table to reuse. creating %s" % self.staging_table_name)
# MPP-14667 - self.reuse_tables should change one, and only one, aspect of how we build the following table,
# and that is, whether it's a temp table or not. In other words, is_temp_table = '' iff self.reuse_tables == True.
sql = 'CREATE %sTABLE %s ' % (is_temp_table, self.staging_table_name)
cols = ['"%s" %s' % (a[0], a[1]) for a in target_columns]
sql += "(%s)" % ','.join(cols)
#sql += " DISTRIBUTED BY (%s)" % ', '.join(distcols)
self.log(self.LOG, sql)
if not self.options.D:
self.db.query(sql)
if not self.reuse_tables:
self.cleanupSql.append('DROP TABLE IF EXISTS %s' % self.staging_table_name)
def count_errors(self):
if self.gpdb_version < "7.0.0": # for gpdb6
notice_processor(self.db.notices())
else:
self.db.set_notice_receiver(notice_processor)
if self.log_errors and not self.options.D:
# make sure we only get errors for our own instance
if not self.reuse_tables:
queryStr = "select count(*) from gp_read_error_log('%s')" % pg.escape_string(self.extSchemaTable)
results = self.db.query(queryStr).getresult()
return (results[0])[0]
else: # reuse_tables
queryStr = "select count(*) from gp_read_error_log('%s') where cmdtime > to_timestamp(%s)" % (pg.escape_string(self.extSchemaTable), self.startTimestamp)
results = self.db.query(queryStr).getresult()
global NUM_WARN_ROWS
NUM_WARN_ROWS = (results[0])[0]
return (results[0])[0];
return 0
def report_errors(self):
errors = self.count_errors()
if errors==1:
self.log(self.WARN, '1 bad row')
elif errors:
self.log(self.WARN, '%d bad rows'%errors)
# error message is also deleted if external table is dropped.
# if reuse_table is set, error message is not deleted.
if errors and self.log_errors and self.reuse_tables:
self.log(self.WARN, "Please use following query to access the detailed error")
self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime > to_timestamp('{1}')".format(pg.escape_string(self.extSchemaTable), self.startTimestamp))
self.exitValue = 1 if errors else 0
def do_insert(self, dest):
"""
Handle the INSERT case
insert data into dest table from self external table
"""
self.log(self.DEBUG, "into columns " + str(self.into_columns))
# a[2] is mapping target
#cols = filter(lambda a:a[2]!=None, self.into_columns)
cols = [a for a in self.into_columns if a[2]!=None]
# only insert non-serial columns, unless the user told us to
# insert the serials explicitly
# a[3] is has_sequence (bool)
if not self.from_cols_from_user:
cols = [a for a in cols if a[3] == False]
sql = 'INSERT INTO %s' % dest
sql += ' (%s)' % ','.join([a[0] for a in cols])
sql += ' SELECT %s' % ','.join([a[2] for a in cols])
sql += ' FROM %s' % self.extSchemaTable
# cktan: progress thread is not reliable. revisit later.
#progress = Progress(self,self.ports)
#progress.start()
#self.threads.append(progress)
self.log(self.LOG, sql)
if not self.options.D:
try:
self.rowsInserted = self.db.query(sql.encode('utf-8'))
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
strE = e.__str__().encode().decode('unicode-escape')
strF = sql.encode().decode('unicode-escape')
self.log(self.ERROR, strE + ' encountered while running ' + strF)
#progress.condition.acquire()
#progress.number = 1
#progress.condition.wait()
#progress.condition.release()
self.report_errors()
def do_method_insert(self):
self.create_external_table()
self.do_insert(self.get_qualified_tablename())
def map_stuff(self,config,configFormat,index):
'''
        Get the config value and look each column up in into_columns_dict,
        reporting an error if a column is not found there or has no mapping.
        Return
        list: [ configFormat(into_columns[0], into_columns[index]) ]
'''
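        # Illustrative example (not taken from any real config): for
        # config='gpload:output:match_columns' and
        # configFormat=lambda x, y: 'into_table.%s=from_table.%s' % (x, y),
        # a column named "id" that maps to staging column "id" would produce
        # the string 'into_table.id=from_table.id'.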
lis = []
theList = self.getconfig(config,list)
theList = convertListToDelimited(theList)
for i in theList:
            if type(i) != str:
self.control_file_error("%s must be a YAML sequence of strings"%config)
j = self.into_columns_dict.get(i)
if not j:
self.log(self.ERROR,'column %s in %s does not exist'%(i,config))
if not j[index]:
self.log(self.ERROR,'there is no mapping from the column %s in %s'%(i,config))
# append ( j[0] = from_table.j[index])
# column_name = from_table.column_name
lis.append(configFormat(j[0],j[index]))
return lis
def fix_update_cond(self, match):
self.log(self.DEBUG, match.group(0))
return 'into_table.' + match.group(0)
def do_update(self,fromname,index):
"""
UPDATE case. Update into_table from staging_table
form the update sql from update_columns, match_columns and update_condition
"""
sql = 'update %s into_table ' % self.get_qualified_tablename()
sql += 'set %s '%','.join(self.map_stuff('gpload:output:update_columns',(lambda x,y:'%s=from_table.%s' % (x, y)),index))
sql += 'from %s from_table' % fromname
match = self.map_stuff('gpload:output:match_columns'
, lambda x,y:'into_table.%s=from_table.%s' % (x, y)
, index)
update_condition = self.getconfig('gpload:output:update_condition',
str, None)
if update_condition:
### need to optimize
#
            # Place the table alias in front of column references.
#
# The following logic is not bullet proof. It may not work
# correctly if the user uses an identifier in both its
# delimited and un-delimited format (e.g. where c1 < 7 and "c1" > 2)
# Better lexing and parsing needs to be done here to fix all cases.
#
update_condition = ' ' + update_condition + ' '
for name, colType, mapto, seq in self.into_columns:
regexp = '(?<=[^\w])%s(?=[^\w])' % name
self.log(self.DEBUG, 'update_condition re: ' + regexp)
temp_update_condition = update_condition
updateConditionList = splitIntoLiteralsAndNonLiterals(update_condition)
skip = False
update_condition = """"""
for uc in updateConditionList:
if skip == False:
uc = re.sub(regexp, self.fix_update_cond, uc)
skip = True
update_condition = update_condition + uc
if update_condition == temp_update_condition:
# see if column can be undelimited, and try again.
if len(name) > 2 and name[1:-1] == name[1:-1].lower():
regexp = '(?<=[^\w])%s(?=[^\w])' % name[1:-1]
self.log(self.DEBUG, 'update_condition undelimited re: ' + regexp)
update_condition = re.sub( regexp
, self.fix_update_cond
, update_condition
)
self.log(self.DEBUG, "updated update_condition to %s" %
update_condition)
match.append(update_condition)
sql += ' where %s' % ' and '.join(match)
self.log(self.LOG, sql)
if not self.options.D:
try:
self.rowsUpdated = self.db.query(sql.encode('utf-8'))
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
                strE = str(e)
                strF = str(sql)
self.log(self.ERROR, strE + ' encountered while running ' + strF)
def get_qualified_tablename(self):
'''
return a qualified table name from self.schema and self.table
'''
tblname = "%s.%s" % (self.schema, self.table)
return tblname
def get_table_dist_key(self):
        '''
        Return the distribution key column names of the target table.
        '''
# NOTE: this query should be re-written better. the problem is that it is
# not possible to perform a cast on a table name with spaces...
if noGpVersion or self.gpdb_version < "6.0.0":
sql = "select attname from pg_attribute a, gp_distribution_policy p , pg_class c, pg_namespace n "+\
"where a.attrelid = c.oid and " + \
"a.attrelid = p.localoid and " + \
"a.attnum = any (p.attrnums) and " + \
"c.relnamespace = n.oid and " + \
"n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))
else:
sql = "select attname from pg_attribute a, gp_distribution_policy p , pg_class c, pg_namespace n "+\
"where a.attrelid = c.oid and " + \
"a.attrelid = p.localoid and " + \
"a.attnum = any (p.distkey) and " + \
"c.relnamespace = n.oid and " + \
"n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))
resultList = self.db.query(sql).getresult()
attrs = []
count = 0
while count < len(resultList):
attrs.append((resultList[count])[0])
count = count + 1
return attrs
def table_supports_update(self):
""" Check wether columns being updated are distribution key."""
distKeyList = self.get_table_dist_key()
distkey = set()
for dk in distKeyList:
distkey.add(quote_ident(dk))
self.distkey = distkey
if len(distkey) != 0:
# not randomly distributed - check that UPDATE_COLUMNS isn't part of the distribution key
updateColumnList = self.getconfig('gpload:output:update_columns',
list,
returnOriginal=True)
update_columns = convertListToDelimited(updateColumnList)
update_columns = set(update_columns)
a = distkey.intersection(update_columns)
if len(a):
self.control_file_error('update_columns cannot reference column(s) in distribution key (%s)' % ', '.join(list(distkey)))
def do_method_update(self):
"""Load the data in and update an existing table based upon it"""
self.table_supports_update()
self.create_staging_table()
self.create_external_table()
self.do_insert(self.staging_table_name)
# These rows are inserted temporarily for processing, so set inserted rows back to zero.
self.rowsInserted = 0
self.do_update(self.staging_table_name, 0)
def do_method_merge(self):
"""insert data not already in the table, update remaining items"""
self.table_supports_update()
self.create_staging_table()
self.create_external_table()
self.do_insert(self.staging_table_name)
self.rowsInserted = 0 # MPP-13024. No rows inserted yet (only to temp table).
self.do_update(self.staging_table_name, 0)
# delete the updated rows in staging table for merge
# so we can directly insert new rows left in staging table
        # and avoid a left outer join when inserting the new rows, which performs poorly
match = self.map_stuff('gpload:output:match_columns'
, lambda x,y:'staging_table.%s=into_table.%s' % (x, y)
, 0)
sql = 'DELETE FROM %s staging_table '% self.staging_table_name
sql += 'USING %s into_table WHERE '% self.get_qualified_tablename()
sql += ' %s' % ' AND '.join(match)
self.log(self.LOG, sql)
if not self.options.D:
try:
self.db.query(sql.encode('utf-8'))
except Exception as e:
                strE = str(e)
                strF = str(sql)
self.log(self.ERROR, strE + ' encountered while running ' + strF)
# insert new rows to the target table
match = self.map_stuff('gpload:output:match_columns',lambda x,y:'into_table.%s=from_table.%s'%(x,y),0)
matchColumns = self.getconfig('gpload:output:match_columns',list)
cols = [a for a in self.into_columns if a[2] != None]
sql = 'INSERT INTO %s ' % self.get_qualified_tablename()
sql += '(%s) ' % ','.join([a[0] for a in cols])
sql += '(SELECT %s ' % ','.join(['from_table.%s' % a[0] for a in cols])
sql += 'FROM (SELECT *, row_number() OVER (PARTITION BY %s) AS gpload_row_number ' % ','.join(matchColumns)
sql += 'FROM %s) AS from_table ' % self.staging_table_name
sql += 'WHERE gpload_row_number=1)'
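        # The row_number() window function above deduplicates the staging rows:
        # when several staging rows share the same match_columns values, only
        # the first row per group (gpload_row_number = 1) is inserted.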
self.log(self.LOG, sql)
if not self.options.D:
try:
self.rowsInserted = self.db.query(sql.encode('utf-8'))
except Exception as e:
# We need to be a bit careful about the error since it may contain non-unicode characters
                strE = str(e)
                strF = str(sql)
self.log(self.ERROR, strE + ' encountered while running ' + strF)
def do_truncate(self, tblname):
self.log(self.LOG, "Truncate table %s" %(tblname))
if not self.options.D:
try:
truncateSQLtext = "truncate %s" % tblname
self.db.query(truncateSQLtext.encode('utf-8'))
except Exception as e:
self.log(self.ERROR, 'could not execute truncate target %s: %s' % (tblname, str(e)))
def do_method(self):
'''
setup gpload config,
start a transaction
execute the 'before sql',
        do method (insert, update, merge) accordingly,
execute the 'after sql'
'''
# Is the table to be truncated before the load?
preload = self.getconfig('gpload:preload', list, default=None)
method = self.getconfig('gpload:output:mode', str, 'insert').lower()
self.log_errors = self.getconfig('gpload:input:log_errors', bool, False)
truncate = False
self.reuse_tables = False
if not self.options.no_auto_trans and not method=='insert':
self.db.query("BEGIN")
if preload:
truncate = self.getconfig('gpload:preload:truncate',bool,False)
self.reuse_tables = self.getconfig('gpload:preload:reuse_tables',bool,False)
self.fast_match = self.getconfig('gpload:preload:fast_match',bool,False)
if self.reuse_tables == False and self.fast_match == True:
self.log(self.WARN, 'fast_match is ignored when reuse_tables is false!')
self.staging_table = self.getconfig('gpload:preload:staging_table', str, default=None)
if self.error_table:
self.log_errors = True
self.reuse_tables = True
self.staging_table = self.getconfig('gpload:preload:staging_table', str, default=None)
self.fast_match = self.getconfig('gpload:preload:fast_match',bool,False)
if truncate == True:
if method=='insert':
self.do_truncate(self.schemaTable)
else:
self.log(self.ERROR, 'preload truncate operation should be used with insert ' +
'operation only. used with %s' % method)
# sql pre or post processing?
sql = self.getconfig('gpload:sql', list, default=None)
before = None
after = None
if sql:
before = self.getconfig('gpload:sql:before', str, default=None)
after = self.getconfig('gpload:sql:after', str, default=None)
if before:
self.log(self.LOG, "Pre-SQL from user: %s" % before)
if not self.options.D:
try:
self.db.query(before.encode('utf-8'))
except Exception as e:
self.log(self.ERROR, 'could not execute SQL in sql:before "%s": %s' %
(before, str(e)))
if method=='insert':
self.do_method_insert()
elif method=='update':
self.do_method_update()
elif method=='merge':
self.do_method_merge()
else:
self.control_file_error('unsupported method %s' % method)
        # truncate the staging table to avoid dumping its content - see MPP-15474
if method=='merge' or method=='update':
self.do_truncate(self.staging_table_name)
if after:
self.log(self.LOG, "Post-SQL from user: %s" % after)
if not self.options.D:
try:
self.db.query(after.encode('utf-8'))
except Exception as e:
self.log(self.ERROR, 'could not execute SQL in sql:after "%s": %s' %
(after, str(e)))
if not self.options.no_auto_trans and not method=='insert':
self.db.query("COMMIT")
def stop_gpfdists(self):
if self.subprocesses:
self.log(self.LOG, 'killing gpfdist')
for a in self.subprocesses:
try:
if platform.system() in ['Windows', 'Microsoft']:
# win32 API is better but hard for us
# to install, so we use the crude method
subprocess.Popen("taskkill /F /T /PID %i" % a.pid,
shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
os.kill(a.pid, signal.SIGKILL)
except OSError:
pass
self.log(self.LOG, 'terminating all threads')
for t in self.threads:
t.join()
self.log(self.LOG, 'all threads are terminated')
def run2(self):
self.log(self.DEBUG, 'config ' + str(self.config))
start = time.time()
self.read_config()
self.setup_connection()
self.read_table_metadata()
self.read_columns()
self.read_mapping()
self.start_gpfdists()
self.do_method()
self.log(self.INFO, 'running time: %.2f seconds'%(time.time()-start))
def run(self):
self.db = None
self.rowsInserted = 0
self.rowsUpdated = 0
signal.signal(signal.SIGINT, handle_kill)
signal.signal(signal.SIGTERM, handle_kill)
# win32 doesn't do SIGQUIT
if not platform.system() in ['Windows', 'Microsoft']:
signal.signal(signal.SIGQUIT, handle_kill)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
try:
try:
self.run2()
except Exception:
traceback.print_exc(file=self.logfile)
self.logfile.flush()
self.exitValue = 2
if (self.options.qv > self.INFO):
traceback.print_exc()
else:
self.log(self.ERROR, "unexpected error -- backtrace " +
"written to log file")
finally:
self.stop_gpfdists()
if self.cleanupSql:
self.log(self.LOG, 'removing temporary data')
self.setup_connection()
for a in self.cleanupSql:
try:
self.log(self.DEBUG, a)
self.db.query(a)
except (Exception, SystemExit):
traceback.print_exc(file=self.logfile)
self.logfile.flush()
traceback.print_exc()
if self.db != None:
self.db.close()
self.log(self.INFO, 'rows Inserted = ' + str(self.rowsInserted))
self.log(self.INFO, 'rows Updated = ' + str(self.rowsUpdated))
self.log(self.INFO, 'data formatting errors = ' + str(NUM_WARN_ROWS))
if self.exitValue==0:
self.log(self.INFO, 'gpload succeeded')
elif self.exitValue==1:
self.log(self.INFO, 'gpload succeeded with warnings')
else:
self.log(self.INFO, 'gpload failed')
if __name__ == '__main__':
g = gpload(sys.argv[1:])
g.run()
sys.stdout.flush()
sys.stderr.flush()
os._exit(g.exitValue)
| 36.827992 | 188 | 0.549041 |
acf51717855a12e7fff6953f05f9b55d1dae6586 | 15,082 | py | Python | veracode/api.py | fossabot/easy_sast | 1aa7dbbf340e3340fa2f70ec5bafb798294bfa7a | [
"BSD-3-Clause"
] | null | null | null | veracode/api.py | fossabot/easy_sast | 1aa7dbbf340e3340fa2f70ec5bafb798294bfa7a | [
"BSD-3-Clause"
] | null | null | null | veracode/api.py | fossabot/easy_sast | 1aa7dbbf340e3340fa2f70ec5bafb798294bfa7a | [
"BSD-3-Clause"
] | 1 | 2021-01-20T20:59:52.000Z | 2021-01-20T20:59:52.000Z | #!/usr/bin/env python3
"""
A python module to interface with the Veracode Static Analysis APIs
"""
# built-ins
from pathlib import Path
import logging
from typing import Optional, Dict, Any
from datetime import datetime
# custom
from veracode import __project_name__
from veracode.utils import is_valid_attribute, http_request
LOG = logging.getLogger(__project_name__ + "." + __name__)
class VeracodeXMLAPI:
"""
A base Veracode XML API class to inherit from
For more details, see Veracode's documentation at
https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/pd_p6JjB9PcDNH3GzWF5Ag
"""
def __init__(self):
# Hard code these to None as they should be specified in the derived classes
self._app_id = None
self._version = None
## Use the setter to apply a default to ensure it is valid
self.base_url = "https://analysiscenter.veracode.com/api/"
def http_get(
self,
*,
endpoint: str,
params: Optional[Dict] = None,
headers: Optional[Dict] = None,
):
"""
Perform a HTTP GET request to a Veracode XML API and return the
response
"""
return http_request(
verb="get",
url=self.base_url + self.version[endpoint] + "/" + endpoint,
params=params,
headers=headers,
)
def http_post(
self,
*,
endpoint: str,
data: Optional[bytes] = None,
params: Optional[Dict] = None,
headers: Optional[Dict] = None,
):
"""
Perform a HTTP POST request to a Veracode XML API and return the
response
"""
return http_request(
verb="post",
url=self.base_url + self.version[endpoint] + "/" + endpoint,
data=data,
params=params,
headers=headers,
)
@property
def base_url(self):
"""
Create the base_url property
"""
return self._base_url # pragma: no cover
@base_url.getter
def base_url(self):
"""
Create a base_url getter that validates before returning
"""
# Validate what was already stored
self._validate(key="base_url", value=self._base_url)
return self._base_url
@base_url.setter
def base_url(self, base_url):
"""
Create a base_url setter that validates before setting
"""
# Validate what was provided
self._validate(key="base_url", value=base_url)
self._base_url = base_url
@property
def version(self):
"""
Create the version property
"""
return self._version # pragma: no cover
@version.getter
def version(self):
"""
Create a version getter that validates before returning
"""
# Validate what was already stored
self._validate(key="version", value=self._version)
return self._version
@version.setter
def version(self, version):
"""
Create a version setter that validates before setting
"""
# Validate what was provided
self._validate(key="version", value=version)
self._version = version
@property
def app_id(self):
"""
Create the app_id property
"""
return self._app_id # pragma: no cover
@app_id.getter
def app_id(self):
"""
Create an app_id getter that validates before returning
"""
# Validate what was already stored
self._validate(key="app_id", value=self._app_id)
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""
Create an app_id setter that validates before setting
"""
# Validate what was provided
self._validate(key="app_id", value=app_id)
self._app_id = app_id
@staticmethod
def _validate(*, key: str, value: Any):
if is_valid_attribute(key=key, value=value):
return True
raise ValueError(f"Invalid {key}")
class UploadAPI(VeracodeXMLAPI): # pylint: disable=too-many-instance-attributes
"""
A class to interact with the Upload API
"""
def __init__(self, *, app_id: str):
# Don't forget to call the init of the parent
super().__init__()
## Use the setter to apply a default to ensure it is valid
self.app_id = app_id
# version information was pulled from
# https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/G1Nd5yH0QSlT~vPccPhtRQ
self.version = {
"beginprescan.do": "5.0",
"beginscan.do": "5.0",
"createapp.do": "5.0",
"createbuild.do": "5.0",
"deleteapp.do": "5.0",
"deletebuild.do": "5.0",
"getappinfo.do": "5.0",
"getapplist.do": "5.0",
"getbuildinfo.do": "5.0",
"getbuildlist.do": "5.0",
"getfilelist.do": "5.0",
"getpolicylist.do": "5.0",
"getprescanresults.do": "5.0",
"getvendorlist.do": "5.0",
"removefile.do": "5.0",
"updateapp.do": "5.0",
"updatebuild.do": "5.0",
"uploadfile.do": "5.0",
"uploadlargefile.do": "5.0",
}
self.build_dir = Path("/build").absolute()
self.build_id = datetime.utcnow().strftime("%F_%H-%M-%S")
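        # build_id defaults to a UTC timestamp such as "2021-01-20_20-59-52"
        # (illustrative), so each run gets a distinct build id by default.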
self.scan_all_nonfatal_top_level_modules = True
self.auto_scan = True
# sandbox_id is not meant to be set manually. Instead, configure using
# the response of a Sandbox API query using the intended sandbox name
self.sandbox_id = None
@property
def build_dir(self):
"""
Create the build_dir property
"""
return self._build_dir # pragma: no cover
@build_dir.getter
def build_dir(self):
"""
Create a build_dir getter that validates before returning
"""
# Validate what was already stored
self._validate(key="build_dir", value=self._build_dir)
return self._build_dir
@build_dir.setter
def build_dir(self, build_dir):
"""
Create a build_dir setter that validates before setting
"""
# Validate what was provided
self._validate(key="build_dir", value=build_dir)
self._build_dir = build_dir
@property
def build_id(self):
"""
Create the build_id property
"""
return self._build_id # pragma: no cover
@build_id.getter
def build_id(self):
"""
Create a build_id getter that validates before returning
"""
# Validate what was already stored
self._validate(key="build_id", value=self._build_id)
return self._build_id
@build_id.setter
def build_id(self, build_id):
"""
Create a build_id setter that validates before setting
"""
# Validate what was provided
self._validate(key="build_id", value=build_id)
self._build_id = build_id
@property
def sandbox_id(self):
"""
Create the sandbox_id property
"""
return self._sandbox_id # pragma: no cover
@sandbox_id.getter
def sandbox_id(self):
"""
Create a sandbox_id getter that validates before returning
This should only be set using the response of a Sandbox API query using
the intended sandbox name
"""
# Validate what was already stored
self._validate(key="sandbox_id", value=self._sandbox_id)
return self._sandbox_id
@sandbox_id.setter
def sandbox_id(self, sandbox_id):
"""
Create a sandbox_id setter that validates before setting
"""
# Validate what was provided
self._validate(key="sandbox_id", value=sandbox_id)
self._sandbox_id = sandbox_id
@property
def scan_all_nonfatal_top_level_modules(self):
"""
Create the scan_all_nonfatal_top_level_modules property
"""
return self._scan_all_nonfatal_top_level_modules # pragma: no cover
@scan_all_nonfatal_top_level_modules.getter
def scan_all_nonfatal_top_level_modules(self):
"""
Create a scan_all_nonfatal_top_level_modules getter that validates
before returning
"""
# Validate what was already stored
self._validate(
key="scan_all_nonfatal_top_level_modules",
value=self._scan_all_nonfatal_top_level_modules,
)
return self._scan_all_nonfatal_top_level_modules
@scan_all_nonfatal_top_level_modules.setter
def scan_all_nonfatal_top_level_modules(self, scan_all_nonfatal_top_level_modules):
"""
Create a scan_all_nonfatal_top_level_modules setter that validates before setting
"""
# Validate what was provided
self._validate(
key="scan_all_nonfatal_top_level_modules",
value=scan_all_nonfatal_top_level_modules,
)
self._scan_all_nonfatal_top_level_modules = scan_all_nonfatal_top_level_modules
@property
def auto_scan(self):
"""
Create the auto_scan property
"""
return self._auto_scan # pragma: no cover
@auto_scan.getter
def auto_scan(self):
"""
Create an auto_scan getter that validates before returning
"""
# Validate what was already stored
self._validate(key="auto_scan", value=self._auto_scan)
return self._auto_scan
@auto_scan.setter
def auto_scan(self, auto_scan):
"""
Create an auto_scan setter that validates before setting
"""
# Validate what was provided
self._validate(key="auto_scan", value=auto_scan)
self._auto_scan = auto_scan
# pylint: disable=too-many-instance-attributes
class ResultsAPI(VeracodeXMLAPI):
"""
A class to interact with the Results API
"""
def __init__(self, *, app_id: str):
# Don't forget to call the init of the parent
super().__init__()
## Use the setter to apply a default to ensure it is valid
self.app_id = app_id
self.ignore_compliance_status = False
# version information was pulled from
# https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/Mp2BEkLx6rD87k465BWqQg
self.version = {
"detailedreport.do": "5.0",
"detailedreportpdf.do": "4.0",
"getaccountcustomfieldlist.do": "5.0",
"getappbuilds.do": "4.0",
"getcallstacks.do": "5.0",
"summaryreport.do": "4.0",
"summaryreportpdf.do": "4.0",
"thirdpartyreportpdf.do": "4.0",
}
@property
def ignore_compliance_status(self):
"""
Specify whether or not to ignore the app compliance status
"""
return self._ignore_compliance_status # pragma: no cover
@ignore_compliance_status.getter
def ignore_compliance_status(self):
"""
Create an ignore_compliance_status getter that validates before returning
"""
# Validate what was already stored
self._validate(
key="ignore_compliance_status", value=self._ignore_compliance_status
)
return self._ignore_compliance_status
@ignore_compliance_status.setter
def ignore_compliance_status(self, ignore_compliance_status):
"""
Create an ignore_compliance_status setter that validates before setting
"""
# Validate what was provided
self._validate(key="ignore_compliance_status", value=ignore_compliance_status)
self._ignore_compliance_status = ignore_compliance_status
class SandboxAPI(VeracodeXMLAPI):
"""
A class to interact with the Sandbox API
"""
def __init__(self, *, app_id: str, sandbox_name: str):
# Don't forget to call the init of the parent
super().__init__()
## Use the setter to apply a default to ensure it is valid
self.app_id = app_id
# version information was pulled from
# https://help.veracode.com/reader/LMv_dtSHyb7iIxAQznC~9w/KusbW5J7EG8jEr64JEiBzw
self.version = {
"createsandbox.do": "5.0",
"getsandboxlist.do": "5.0",
"promotesandbox.do": "5.0",
"updatesandbox.do": "5.0",
"deletesandbox.do": "5.0",
}
self.build_id = datetime.utcnow().strftime("%F_%H-%M-%S")
self.sandbox_name = sandbox_name
# sandbox_id is not meant to be set manually. Instead, configure using
# the response of a Sandbox API query using the intended sandbox name
self.sandbox_id = None
@property
def build_id(self):
"""
Create the build_id property
"""
return self._build_id # pragma: no cover
@build_id.getter
def build_id(self):
"""
Create a build_id getter that validates before returning
"""
# Validate what was already stored
self._validate(key="build_id", value=self._build_id)
return self._build_id
@build_id.setter
def build_id(self, build_id):
"""
Create a build_id setter that validates before setting
"""
# Validate what was provided
self._validate(key="build_id", value=build_id)
self._build_id = build_id
@property
def sandbox_id(self):
"""
Create the sandbox_id property
"""
return self._sandbox_id # pragma: no cover
@sandbox_id.getter
def sandbox_id(self):
"""
Create a sandbox_id getter that validates before returning
"""
# Validate what was already stored
self._validate(key="sandbox_id", value=self._sandbox_id)
return self._sandbox_id
@sandbox_id.setter
def sandbox_id(self, sandbox_id):
"""
Create a sandbox_id setter that validates before setting
This should only be set using the response of a Sandbox API query using
the intended sandbox name
"""
# Validate what was provided
self._validate(key="sandbox_id", value=sandbox_id)
self._sandbox_id = sandbox_id
@property
def sandbox_name(self):
"""
Create the sandbox_name property
"""
return self._sandbox_name # pragma: no cover
@sandbox_name.getter
def sandbox_name(self):
"""
Create a sandbox_name getter that validates before returning
"""
# Validate what was already stored
self._validate(key="sandbox_name", value=self._sandbox_name)
return self._sandbox_name
@sandbox_name.setter
def sandbox_name(self, sandbox_name):
"""
Create a sandbox_name setter that validates before setting
"""
# Validate what was provided
self._validate(key="sandbox_name", value=sandbox_name)
self._sandbox_name = sandbox_name
| 30.779592 | 89 | 0.613513 |
acf5179451fecf64506eb7fdf0efdc2707d2b60c | 1,144 | py | Python | examples/add_vectors_image.py | bryantChhun/napari-gui | 05933b16a2f8531eaf34d5c2769b764d4225e482 | [
"BSD-3-Clause"
] | null | null | null | examples/add_vectors_image.py | bryantChhun/napari-gui | 05933b16a2f8531eaf34d5c2769b764d4225e482 | [
"BSD-3-Clause"
] | 1 | 2019-01-18T17:26:36.000Z | 2019-01-18T17:26:36.000Z | examples/add_vectors_image.py | AllenCellModeling/napari | 3566383e6310d02e8673b564b6f63411fa176708 | [
"BSD-3-Clause"
] | null | null | null | """
This example generates an image of vectors
Vector data is an array of shape (N, M, 2)
Each vector position is defined by an (x-proj, y-proj) element
where x-proj and y-proj are the vector projections at each center
where each vector is centered on a pixel of the NxM grid
"""
from napari import ViewerApp
from napari.util import app_context
import numpy as np
with app_context():
# create the viewer and window
viewer = ViewerApp()
n = 100
m = 200
image = 0.2*np.random.random((n, m)) + 0.5
layer = viewer.add_image(image, clim_range=[0, 1], name='background')
layer.colormap = 'gray'
# sample vector image-like data
# n x m grid of slanted lines
# random data on the open interval (-1, 1)
pos = np.zeros(shape=(n, m, 2), dtype=np.float32)
rand1 = 2*(np.random.random_sample(n * m)-0.5)
rand2 = 2*(np.random.random_sample(n * m)-0.5)
# assign projections for each vector
pos[:, :, 0] = rand1.reshape((n, m))
pos[:, :, 1] = rand2.reshape((n, m))
# add the vectors
vect = viewer.add_vectors(pos, width=0.2, length=2.5)
print(image.shape, pos.shape)
| 28.6 | 73 | 0.652972 |
acf5199a09449f4972736565db73540f66c43cd4 | 24,496 | py | Python | examples/findContours.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | examples/findContours.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | examples/findContours.py | tifuchs/silx | 4b8b9e58ecd6fd4ca0ae80f2e74b956b26bcc3f7 | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | #!/usr/bin/env python
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2021 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Find contours examples
.. note:: This module has an optional dependency with sci-kit image library.
You might need to install it if you don't already have it.
"""
import logging
import sys
import numpy
import time
logging.basicConfig()
_logger = logging.getLogger("find_contours")
from silx.gui import qt
import silx.gui.plot
from silx.gui.colors import Colormap
import silx.image.bilinear
try:
import skimage
except ImportError:
_logger.debug("Error while importing skimage", exc_info=True)
skimage = None
if skimage is not None:
try:
from silx.image.marchingsquares._skimage import MarchingSquaresSciKitImage
except ImportError:
_logger.debug("Error while importing MarchingSquaresSciKitImage", exc_info=True)
MarchingSquaresSciKitImage = None
else:
MarchingSquaresSciKitImage = None
def rescale_image(image, shape):
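    # Bilinear rescaling: map every output pixel of the requested `shape` back
    # to fractional coordinates in the source image and sample those
    # coordinates with silx's BilinearImage helper.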
y, x = numpy.ogrid[:shape[0], :shape[1]]
y, x = y * 1.0 * (image.shape[0] - 1) / (shape[0] - 1), x * 1.0 * (image.shape[1] - 1) / (shape[1] - 1)
b = silx.image.bilinear.BilinearImage(image)
# TODO: could be optimized using strides
x2d = numpy.zeros_like(y) + x
y2d = numpy.zeros_like(x) + y
result = b.map_coordinates((y2d, x2d))
return result
def create_spiral(size, nb=1, freq=100):
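    # Build an nb-armed spiral pattern by taking sin() of a combination of the
    # polar angle and the radial distance of every pixel from the image center.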
half = size // 2
y, x = numpy.ogrid[-half:half, -half:half]
coef = 1.0 / half
y, x = y * coef, x * coef + 0.0001
distance = numpy.sqrt(x * x + y * y)
angle = numpy.arctan(y / x)
data = numpy.sin(angle * nb * 2 + distance * freq * half / 100, dtype=numpy.float32)
return data
def create_magnetic_field(size, x1=0.0, y1=0.0, x2=0.0, y2=0.0):
half = size // 2
yy, xx = numpy.ogrid[-half:half, -half:half]
coef = 1.0 / half
yy1, xx1 = (yy + half * y1) * coef, (xx + half * x1) * coef
distance1 = numpy.sqrt(xx1 * xx1 + yy1 * yy1)
yy2, xx2 = (yy + half * y2) * coef, (xx + half * x2) * coef
distance2 = numpy.sqrt(xx2 * xx2 + yy2 * yy2)
return (numpy.arctan2(distance1, distance2) - numpy.pi * 0.25) * 1000
def create_gravity_field(size, objects):
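    # Sum a 1/distance potential for every (x, y, mass) object and return the
    # logarithm of the total, scaled for display.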
half = size // 2
yy, xx = numpy.ogrid[-half:half, -half:half]
coef = 1.0 / half
def distance(x, y):
yy1, xx1 = (yy + half * y) * coef, (xx + half * x) * coef
return numpy.sqrt(xx1 ** 2 + yy1 ** 2)
result = numpy.zeros((size, size), dtype=numpy.float32)
for x, y, m in objects:
result += m / distance(x, y)
return numpy.log(result) * 1000
def create_gradient(size, dx=0, dy=0, sx=1.0, sy=1.0):
half = size // 2
yy, xx = numpy.ogrid[-half:half, -half:half]
coef = 1.0 / half
yy, xx = (yy - (dy * half)) * coef, (xx - (dx * half)) * coef + 0.0001
distance = numpy.sqrt(xx * xx * sx + yy * yy * sy)
return distance
def create_composite_gradient(size, dx=0, dy=0, sx=1.0, sy=1.0):
hole = (size - 4) // 4
gap = 10
base = create_gradient(size + hole + gap * 4, dx, dy, sx, sy)
result = numpy.zeros((size, size))
width = (size - 2) // 2
half_hole = hole // 2
def copy_module(x1, y1, x2, y2, width, height):
result[y1:y1 + height, x1:x1 + width] = base[y2:y2 + height, x2:x2 + width]
y1 = 0
y2 = 0
copy_module(0, y1, half_hole, y2, width, hole)
copy_module(width + 1, y1, half_hole + width, y2, width, hole)
y1 += hole + 1
y2 += hole + gap
copy_module(0, y1, 0, y2, width, hole)
copy_module(width + 1, y1, width + hole, y2, width, hole)
y1 += hole + 1
y2 += hole + gap
copy_module(0, y1, half_hole, y2, width, hole)
copy_module(width + 1, y1, half_hole + width, y2, width, hole)
y1 += hole + 1
y2 += hole + gap
copy_module(0, y1, half_hole, y2, width, hole)
copy_module(width + 1, y1, half_hole + width, y2, width, hole)
return result
def create_value_noise(shape, octaves=8, weights=None, first_array=None):
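    # Classic value noise: start from a small random grid (2x2), rescale it to
    # the target shape and add it to the result, then double the grid
    # resolution for each successive octave while reducing its amplitude.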
data = numpy.zeros(shape, dtype=numpy.float32)
t = 2
for i in range(octaves):
if t > shape[0] and t > shape[1]:
break
if i == 0 and first_array is not None:
d = first_array
else:
if weights is None:
w = (256 >> i) - 1
else:
w = weights[i]
d = numpy.random.randint(w, size=(t, t)).astype(dtype=numpy.uint8)
d = rescale_image(d, shape)
data = data + d
t = t << 1
return data
def create_island(shape, summit, under_water):
# Force a centric shape
first_array = numpy.zeros((4, 4), dtype=numpy.uint8)
first_array[1:3, 1:3] = 255
weights = [255] + [(256 >> (i)) - 1 for i in range(8)]
data = create_value_noise(shape, octaves=7, first_array=first_array, weights=weights)
# more slops
data *= data
# normalize the height
data -= data.min()
data = data * ((summit + under_water) / data.max()) - under_water
return data
def createRgbaMaskImage(mask, color):
"""Generate an RGBA image where a custom color is apply to the location of
the mask. Non masked part of the image is transparent."""
image = numpy.zeros((mask.shape[0], mask.shape[1], 4), dtype=numpy.uint8)
color = numpy.array(color)
image[mask == True] = color
return image
class FindContours(qt.QMainWindow):
"""
    This window shows an example of finding iso-contours on an image.
    The configuration panel lets the user generate different test images, choose
    the marching-squares implementation and a custom contour level, while the
    plot displays the image together with the computed contours.
"""
def __init__(self, filenames=None):
"""
        :param filenames: Unused by this example.
"""
qt.QMainWindow.__init__(self)
self.setWindowTitle("Silx HDF5 widget example")
self.__plot = silx.gui.plot.Plot2D(parent=self)
dummy = numpy.array([[0]])
self.__plot.addImage(dummy, legend="image", z=-10, replace=False)
dummy = numpy.array([[[0, 0, 0, 0]]])
self.__plot.addImage(dummy, legend="iso-pixels", z=0, replace=False)
self.__algo = None
self.__polygons = []
self.__customPolygons = []
self.__image = None
self.__mask = None
self.__customValue = None
mainPanel = qt.QWidget(self)
layout = qt.QHBoxLayout()
layout.addWidget(self.__createConfigurationPanel(self))
layout.addWidget(self.__plot)
mainPanel.setLayout(layout)
self.setCentralWidget(mainPanel)
def __createConfigurationPanel(self, parent):
panel = qt.QWidget(parent=parent)
layout = qt.QVBoxLayout()
panel.setLayout(layout)
self.__kind = qt.QButtonGroup(self)
self.__kind.setExclusive(True)
group = qt.QGroupBox(self)
group.setTitle("Image")
layout.addWidget(group)
groupLayout = qt.QVBoxLayout(group)
button = qt.QRadioButton(parent=panel)
button.setText("Island")
button.clicked.connect(self.generateIsland)
button.setCheckable(True)
button.setChecked(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("Gravity")
button.clicked.connect(self.generateGravityField)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("Magnetic")
button.clicked.connect(self.generateMagneticField)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("Spiral")
button.clicked.connect(self.generateSpiral)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("Gradient")
button.clicked.connect(self.generateGradient)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("Composite gradient")
button.clicked.connect(self.generateCompositeGradient)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__kind.addButton(button)
button = qt.QPushButton(parent=panel)
button.setText("Generate a new image")
button.clicked.connect(self.generate)
groupLayout.addWidget(button)
# Contours
group = qt.QGroupBox(self)
group.setTitle("Contours")
layout.addWidget(group)
groupLayout = qt.QVBoxLayout(group)
button = qt.QCheckBox(parent=panel)
button.setText("Use the plot's mask")
button.setCheckable(True)
button.setChecked(True)
button.clicked.connect(self.updateContours)
groupLayout.addWidget(button)
self.__useMaskButton = button
button = qt.QPushButton(parent=panel)
button.setText("Update contours")
button.clicked.connect(self.updateContours)
groupLayout.addWidget(button)
# Implementations
group = qt.QGroupBox(self)
group.setTitle("Implementation")
layout.addWidget(group)
groupLayout = qt.QVBoxLayout(group)
self.__impl = qt.QButtonGroup(self)
self.__impl.setExclusive(True)
button = qt.QRadioButton(parent=panel)
button.setText("silx")
button.clicked.connect(self.updateContours)
button.setCheckable(True)
button.setChecked(True)
groupLayout.addWidget(button)
self.__implMerge = button
self.__impl.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("silx with cache")
button.clicked.connect(self.updateContours)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__implMergeCache = button
self.__impl.addButton(button)
button = qt.QRadioButton(parent=panel)
button.setText("skimage")
button.clicked.connect(self.updateContours)
button.setCheckable(True)
groupLayout.addWidget(button)
self.__implSkimage = button
self.__impl.addButton(button)
if MarchingSquaresSciKitImage is None:
button.setEnabled(False)
button.setToolTip("skimage is not installed or not compatible")
# Processing
group = qt.QGroupBox(self)
group.setTitle("Processing")
layout.addWidget(group)
group.setLayout(self.__createInfoLayout(group))
# Processing
group = qt.QGroupBox(self)
group.setTitle("Custom level")
layout.addWidget(group)
groupLayout = qt.QVBoxLayout(group)
label = qt.QLabel(parent=panel)
self.__value = qt.QSlider(panel)
self.__value.setOrientation(qt.Qt.Horizontal)
self.__value.sliderMoved.connect(self.__updateCustomContours)
self.__value.valueChanged.connect(self.__updateCustomContours)
groupLayout.addWidget(self.__value)
return panel
def __createInfoLayout(self, parent):
layout = qt.QGridLayout()
header = qt.QLabel(parent=parent)
header.setText("Time: ")
label = qt.QLabel(parent=parent)
label.setText("")
layout.addWidget(header, 0, 0)
layout.addWidget(label, 0, 1)
self.__timeLabel = label
header = qt.QLabel(parent=parent)
header.setText("Nb polygons: ")
label = qt.QLabel(parent=parent)
label.setText("")
layout.addWidget(header, 2, 0)
layout.addWidget(label, 2, 1)
self.__polygonsLabel = label
header = qt.QLabel(parent=parent)
header.setText("Nb points: ")
label = qt.QLabel(parent=parent)
label.setText("")
layout.addWidget(header, 1, 0)
layout.addWidget(label, 1, 1)
self.__pointsLabel = label
return layout
def __cleanCustomContour(self):
for name in self.__customPolygons:
self.__plot.removeCurve(name)
self.__customPolygons = []
dummy = numpy.array([[[0, 0, 0, 0]]])
item = self.__plot.getImage(legend="iso-pixels")
item.setData([[[0, 0, 0, 0]]])
def __cleanPolygons(self):
for name in self.__polygons:
self.__plot.removeCurve(name)
def clean(self):
self.__cleanCustomContour()
self.__cleanPolygons()
self.__polygons = []
self.__image = None
self.__mask = None
def updateContours(self):
self.__redrawContours()
self.updateCustomContours()
def __updateCustomContours(self, value):
self.__customValue = value
self.updateCustomContours()
def updateCustomContours(self):
if self.__algo is None:
return
value = self.__customValue
self.__cleanCustomContour()
if value is None:
return
# iso pixels
iso_pixels = self.__algo.find_pixels(value)
if len(iso_pixels) != 0:
mask = numpy.zeros(self.__image.shape, dtype=numpy.int8)
indexes = iso_pixels[:, 0] * self.__image.shape[1] + iso_pixels[:, 1]
mask = mask.ravel()
mask[indexes] = 1
mask.shape = self.__image.shape
mask = createRgbaMaskImage(mask, color=numpy.array([255, 0, 0, 128]))
item = self.__plot.getImage(legend="iso-pixels")
item.setData(mask)
# iso contours
polygons = self.__algo.find_contours(value)
for ipolygon, polygon in enumerate(polygons):
if len(polygon) == 0:
continue
isClosed = numpy.allclose(polygon[0], polygon[-1])
x = polygon[:, 1] + 0.5
y = polygon[:, 0] + 0.5
legend = "custom-polygon-%d" % ipolygon
self.__customPolygons.append(legend)
self.__plot.addCurve(x=x, y=y, linestyle="--", color="red", linewidth=2.0, legend=legend, resetzoom=False)
def __updateAlgo(self, image, mask=None):
if mask is None:
if self.__useMaskButton.isChecked():
mask = self.__plot.getMaskToolsDockWidget().getSelectionMask()
self.__image = image
self.__mask = mask
implButton = self.__impl.checkedButton()
if implButton == self.__implMerge:
from silx.image.marchingsquares import MarchingSquaresMergeImpl
self.__algo = MarchingSquaresMergeImpl(self.__image, self.__mask)
elif implButton == self.__implMergeCache:
from silx.image.marchingsquares import MarchingSquaresMergeImpl
self.__algo = MarchingSquaresMergeImpl(self.__image, self.__mask, use_minmax_cache=True)
elif implButton == self.__implSkimage and MarchingSquaresSciKitImage is not None:
self.__algo = MarchingSquaresSciKitImage(self.__image, self.__mask)
else:
_logger.error("No algorithm available")
self.__algo = None
def setData(self, image, mask=None, value=0.0):
self.clean()
self.__updateAlgo(image, mask=None)
# image
item = self.__plot.getImage(legend="image")
item.setData(image)
item.setColormap(self.__colormap)
self.__plot.resetZoom()
def __redrawContours(self):
self.__updateAlgo(self.__image)
if self.__algo is None:
return
self.__cleanPolygons()
self.__drawContours(self.__values, self.__lineStyleCallback)
def __drawContours(self, values, lineStyleCallback=None):
if self.__algo is None:
return
self.__values = values
self.__lineStyleCallback = lineStyleCallback
if self.__values is None:
return
nbTime = 0
nbPolygons = 0
nbPoints = 0
# iso contours
ipolygon = 0
for ivalue, value in enumerate(values):
startTime = time.time()
polygons = self.__algo.find_contours(value)
nbTime += (time.time() - startTime)
nbPolygons += len(polygons)
for polygon in polygons:
if len(polygon) == 0:
continue
nbPoints += len(polygon)
isClosed = numpy.allclose(polygon[0], polygon[-1])
x = polygon[:, 1] + 0.5
y = polygon[:, 0] + 0.5
legend = "polygon-%d" % ipolygon
if lineStyleCallback is not None:
extraStyle = lineStyleCallback(value, ivalue, ipolygon)
else:
extraStyle = {"linestyle": "-", "linewidth": 1.0, "color": "black"}
self.__polygons.append(legend)
self.__plot.addCurve(x=x, y=y, legend=legend, resetzoom=False, **extraStyle)
ipolygon += 1
self.__timeLabel.setText("%0.3fs" % nbTime)
self.__polygonsLabel.setText("%d" % nbPolygons)
self.__pointsLabel.setText("%d" % nbPoints)
def __defineDefaultValues(self, value=None):
# Do not use min and max to avoid to create iso contours on small
# and many artefacts
if value is None:
value = self.__image.mean()
self.__customValue = value
div = 12
delta = (self.__image.max() - self.__image.min()) / div
self.__value.setValue(int(numpy.round(value)))
minv = self.__image.min() + delta
maxv = self.__image.min() + delta * (div - 1)
self.__value.setRange(int(numpy.floor(minv)), int(numpy.ceil(maxv)))
self.updateCustomContours()
def generate(self):
self.__kind.checkedButton().click()
def generateSpiral(self):
shape = 512
nb_spiral = numpy.random.randint(1, 8)
freq = numpy.random.randint(2, 50)
image = create_spiral(shape, nb_spiral, freq)
image *= 1000.0
self.__colormap = Colormap("cool")
self.setData(image=image, mask=None)
self.__defineDefaultValues()
def generateIsland(self):
shape = (512, 512)
image = create_island(shape, summit=4808.72, under_water=1500)
self.__colormap = Colormap("terrain")
self.setData(image=image, mask=None)
values = range(-800, 5000, 200)
def styleCallback(value, ivalue, ipolygon):
if value == 0:
style = {"linestyle": "-", "linewidth": 1.0, "color": "black"}
elif value % 1000 == 0:
style = {"linestyle": "--", "linewidth": 0.5, "color": "black"}
else:
style = {"linestyle": "--", "linewidth": 0.1, "color": "black"}
return style
self.__drawContours(values, styleCallback)
self.__value.setValue(0)
self.__value.setRange(0, 5000)
self.__updateCustomContours(0)
def generateMagneticField(self):
shape = 512
x1 = numpy.random.random() * 2 - 1
y1 = numpy.random.random() * 2 - 1
x2 = numpy.random.random() * 2 - 1
y2 = numpy.random.random() * 2 - 1
image = create_magnetic_field(shape, x1, y1, x2, y2)
self.__colormap = Colormap("coolwarm")
self.setData(image=image, mask=None)
maximum = abs(image.max())
m = abs(image.min())
if m > maximum:
maximum = m
maximum = int(maximum)
values = range(-maximum, maximum, maximum // 20)
def styleCallback(value, ivalue, ipolygon):
if (ivalue % 2) == 0:
style = {"linestyle": "-", "linewidth": 0.5, "color": "black"}
else:
style = {"linestyle": "-", "linewidth": 0.5, "color": "white"}
return style
self.__drawContours(values, styleCallback)
self.__defineDefaultValues(value=0)
def generateGravityField(self):
shape = 512
nb = numpy.random.randint(2, 10)
objects = []
for _ in range(nb):
x = numpy.random.random() * 2 - 1
y = numpy.random.random() * 2 - 1
m = numpy.random.random() * 10 + 1.0
objects.append((x, y, m))
image = create_gravity_field(shape, objects)
self.__colormap = Colormap("inferno")
self.setData(image=image, mask=None)
delta = (image.max() - image.min()) / 30.0
values = numpy.arange(image.min(), image.max(), delta)
def styleCallback(value, ivalue, ipolygon):
return {"linestyle": "-", "linewidth": 0.1, "color": "white"}
self.__drawContours(values, styleCallback)
self.__defineDefaultValues()
def generateGradient(self):
shape = 512
dx = numpy.random.random() * 2 - 1
dy = numpy.random.random() * 2 - 1
sx = numpy.random.randint(10, 5000) / 10.0
sy = numpy.random.randint(10, 5000) / 10.0
image = create_gradient(shape, dx=dx, dy=dy, sx=sx, sy=sy)
image *= 1000.0
def styleCallback(value, ivalue, ipolygon):
colors = ["#9400D3", "#4B0082", "#0000FF", "#00FF00", "#FFFF00", "#FF7F00", "#FF0000"]
color = colors[ivalue % len(colors)]
style = {"linestyle": "-", "linewidth": 2.0, "color": color}
return style
delta = (image.max() - image.min()) / 9.0
values = numpy.arange(image.min(), image.max(), delta)
values = values[1:8]
self.__colormap = Colormap("Greys")
self.setData(image=image, mask=None)
self.__drawContours(values, styleCallback)
self.__defineDefaultValues()
def generateCompositeGradient(self):
shape = 512
hole = 1 / 4.0
dx = numpy.random.random() * hole - hole / 2.0
dy = numpy.random.random() * hole - hole * 2
sx = numpy.random.random() * 10.0 + 1
sy = numpy.random.random() * 10.0 + 1
image = create_composite_gradient(shape, dx, dy, sx, sy)
image *= 1000.0
def styleCallback(value, ivalue, ipolygon):
colors = ["#9400D3", "#4B0082", "#0000FF", "#00FF00", "#FFFF00", "#FF7F00", "#FF0000"]
color = colors[ivalue % len(colors)]
style = {"linestyle": "-", "linewidth": 2.0, "color": color}
return style
delta = (image.max() - image.min()) / 9.0
values = numpy.arange(image.min(), image.max(), delta)
values = values[1:8]
self.__colormap = Colormap("Greys")
self.setData(image=image, mask=None)
self.__drawContours(values, styleCallback)
self.__defineDefaultValues()
def main():
app = qt.QApplication([])
sys.excepthook = qt.exceptionHandler
window = FindContours()
window.generateIsland()
window.show()
result = app.exec()
# remove ending warnings relative to QTimer
app.deleteLater()
return result
if __name__ == "__main__":
result = main()
sys.exit(result)
| 34.696884 | 118 | 0.60614 |
acf51a84b46b7efc7169e14ef66c82cedf47000a | 303 | py | Python | examples/futures/market/open_interest_statistics.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | 3 | 2021-08-16T15:29:09.000Z | 2021-09-14T16:25:03.000Z | examples/futures/market/open_interest_statistics.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | examples/futures/market/open_interest_statistics.py | leozaragoza/binance-connector-python | 3311d102c9e788e3d71047f0af103c00d1ae2162 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from binance.futures import Futures as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
futures_client = Client(base_url="https://fapi.binance.com")
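# Query 1-hour open interest statistics for BTCUSDT against the futures REST
# endpoint configured above.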
logging.info(futures_client.open_interest_statistics("BTCUSDT", "1h"))
| 25.25 | 70 | 0.805281 |
acf51b35469813d31fe73751a7473535a60c17ed | 10,636 | py | Python | main/teacher_app/teacher_api.py | emerginganalytics/ualr-cyber-gym | 1156bc2c85c17af02da048f40b2be875f89db0ce | [
"MIT"
] | 3 | 2020-09-02T19:18:03.000Z | 2021-04-29T20:23:01.000Z | main/teacher_app/teacher_api.py | emerginganalytics/ualr-cyber-gym | 1156bc2c85c17af02da048f40b2be875f89db0ce | [
"MIT"
] | null | null | null | main/teacher_app/teacher_api.py | emerginganalytics/ualr-cyber-gym | 1156bc2c85c17af02da048f40b2be875f89db0ce | [
"MIT"
] | 4 | 2020-11-20T20:38:49.000Z | 2021-04-29T20:23:12.000Z | from flask import Blueprint, request, redirect
from utilities.reset_workout import reset_workout
from utilities.stop_workout import stop_workout
from utilities.child_project_manager import ChildProjectManager
from utilities.workout_spec_to_cloud import WorkoutSpecToCloud, InvalidBuildSpecification
from utilities.datastore_functions import *
from utilities.pubsub_functions import *
from utilities.globals import auth_config, dns_suffix, ds_client, log_client, logger, LOG_LEVELS, main_app_url, \
post_endpoint, workout_token, workout_globals, BuildTypes
import json
teacher_api = Blueprint('teacher_api', __name__, url_prefix='/api')
@teacher_api.route('/change_student_name/<workout_id>', methods=["POST"])
def change_student_name(workout_id):
workout = ds_client.get(ds_client.key("cybergym-workout", workout_id))
if request.values['new_name']:
workout['student_name'] = request.values['new_name']
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Workout {} student name changed to {}".format(str(workout_id), str(request.values['new_name']))
}, severity=LOG_LEVELS.INFO
)
ds_client.put(workout)
return workout['student_name']
    else:
        # Flask views cannot return a bare bool; signal the bad request instead.
        return '', 400
@teacher_api.route('/create_new_class', methods=['POST'])
def create_new_class():
    if request.method == 'POST':
teacher_email = request.form['teacher_email']
num_students = request.form['num_students']
class_name = request.form['class_name']
student_auth = request.form['student_auth']
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "User {} created a new class with {} students".format(teacher_email, num_students)
}, severity=LOG_LEVELS.INFO
)
store_class_info(teacher_email, num_students, class_name, student_auth)
return redirect('/teacher/home')
@teacher_api.route('/change_roster_name/<class_id>/<student_name>', methods=['POST'])
def change_roster_name(class_id, student_name):
if request.method == 'POST':
request_data = request.get_json(force=True)
new_name = request_data['new_name']
class_info = ds_client.get(ds_client.key('cybergym-class', int(class_id)))
if student_name in class_info['roster'] and new_name:
class_info['roster'].remove(student_name)
class_info['roster'].append(new_name)
ds_client.put(class_info)
if 'unit_list' in class_info:
for unit in class_info['unit_list']:
student_workout_query = ds_client.query(kind='cybergym-workout')
student_workout_query.add_filter('unit_id', '=', unit['unit_id'])
student_workout_query.add_filter('student_name', '=', student_name)
for workout in list(student_workout_query.fetch()):
workout['student_name'] = new_name
ds_client.put(workout)
return json.dumps(class_info)
@teacher_api.route('/change_class_roster/<class_id>', methods=['POST'])
def change_class_roster(class_id):
class_info = ds_client.get(ds_client.key('cybergym-class', int(class_id)))
if request.method == 'POST':
request_data = request.get_json(force=True)
if request_data['action'] == 'remove':
if class_info['student_auth'] == 'email':
for student in class_info['roster']:
if student['student_name'] == request_data['student_name']:
class_info['roster'].remove(student)
if request_data['student_name'] in class_info['roster']:
class_info['roster'].remove(str(request_data['student_name']))
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Student {} removed from class {}".format(request_data['student_name'], class_id)
}, severity=LOG_LEVELS.INFO
)
elif request_data['action'] == 'add':
if 'student_auth' not in class_info:
class_info['student_auth'] = 'anonymous'
ds_client.put(class_info)
if class_info['student_auth'] == 'email':
student_dict = {
'student_name': request_data['student_name'],
'student_email': request_data['student_email']
}
class_info['roster'].append(student_dict)
user_list = ds_client.get(ds_client.key('cybergym-admin-info', 'cybergym'))
if request_data['student_email'] not in user_list['students']:
user_list['students'].append(request_data['student_email'])
ds_client.put(user_list)
else:
class_info['roster'].append(str(request_data['student_name']))
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Student {} added to class {}".format(request_data['student_name'], class_id)
}, severity=LOG_LEVELS.INFO
)
ds_client.put(class_info)
return redirect('/teacher/home')
@teacher_api.route('add_multiple_students', methods=['POST'])
def add_multiple_students():
if request.method == "POST":
request_data = request.get_json(force=True)
class_id = request_data['class_id']
new_student_list = request_data['new_student_list'].splitlines()
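        # When the class authenticates by email, each roster line is expected to
        # be "student name,student email"; otherwise a line is just a name.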
user_list = ds_client.get(ds_client.key('cybergym-admin-info', 'cybergym'))
class_entity = ds_client.get(ds_client.key('cybergym-class', int(class_id)))
if class_entity['student_auth'] == 'email':
for student in new_student_list:
student_info = student.split(',')
student_name = student_info[0]
student_email = student_info[1].strip()
if len(student_name) == 0 or len(student_email) == 0:
return json.dumps({"result": "Student entry must have both a name and email address"})
if '@' not in student_email:
return json.dumps({'result': 'Must enter a valid email for student {}.\nEntered email {}'.format(student_name, student_email)})
new_student_info = {
'student_email': student_email,
'student_name': student_name
}
class_entity['roster'].append(new_student_info)
if student_email not in user_list['students']:
user_list['students'].append(student_email)
ds_client.put(user_list)
else:
for student in new_student_list:
if student == '' or student == ' ':
return json.dumps({'result': 'Student names must not be empty'})
else:
class_entity['roster'].append(student)
ds_client.put(class_entity)
return json.dumps({'result': "success"})
@teacher_api.route('/unclaim_workout/<workout_id>', methods=["POST"])
def unclaim_workout(workout_id):
workout = ds_client.get(ds_client.key("cybergym-workout", workout_id))
    if request.method == 'POST':
        if workout:
            if 'runtime_counter' in workout and workout['runtime_counter']:
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Workout {} is not new, fully rebuilding workout to unclaim".format(str(workout_id))
}, severity=LOG_LEVELS.INFO
)
pub_nuke_workout(workout_id)
else:
workout['student_name'] = ""
ds_client.put(workout)
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Workout {} is new, setting to unclaimed".format(str(workout_id))
}, severity=LOG_LEVELS.INFO
)
return workout['student_name']
@teacher_api.route('/add_team/<arena_id>', methods=['POST'])
def add_team(arena_id):
arena = ds_client.get(ds_client.key('cybergym-unit', str(arena_id)))
if request.method == "POST":
request_data = request.get_json(force=True)
new_team_name = request_data['team_name']
if 'teams' in arena:
            if new_team_name not in arena['teams']:  # check the team list, not the entity keys
arena['teams'].append(new_team_name)
else:
arena['teams'] = []
arena['teams'].append(new_team_name)
ds_client.put(arena)
return json.dumps(arena)
# Used to change the team assignment of arena workouts
@teacher_api.route('/change_team/<workout_id>', methods=['POST'])
def change_team(workout_id):
if request.method == "POST":
request_data = request.get_json(force=True)
workout = ds_client.get(ds_client.key('cybergym-workout', str(workout_id)))
if 'new_team' in request_data:
workout['team'] = request_data['new_team']
ds_client.put(workout)
return json.dumps(workout)
@teacher_api.route('/remove_unit_from_class/<class_id>/<unit_id>', methods=['GET','POST'])
def remove_unit_from_class(class_id, unit_id):
class_info = ds_client.get(ds_client.key('cybergym-class', int(class_id)))
for unit in class_info['unit_list']:
if unit['unit_id'] == unit_id:
class_info['unit_list'].remove(unit)
ds_client.put(class_info)
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Class {} removed from unit {}".format(class_id, unit_id),
"class_id": str(class_id),
"unit": str(unit_id)
}, severity=LOG_LEVELS.INFO
)
    return redirect('/teacher/home')
@teacher_api.route('/remove_class/<class_id>', methods=['GET',"POST"])
def remove_class(class_id):
class_entity = ds_client.get(ds_client.key('cybergym-class', int(class_id)))
if class_entity:
g_logger = log_client.logger('teacher-app')
g_logger.log_struct(
{
"message": "Class {} deleted".format(class_entity['class_name']),
"class_roster": class_entity['roster'],
"instructor_email": class_entity['teacher_email']
}, severity=LOG_LEVELS.INFO
)
ds_client.delete(ds_client.key('cybergym-class', int(class_id)))
return redirect('/teacher/home') | 46.445415 | 147 | 0.615927 |
acf51b647b1cdf38270a34439e594bceba13426a | 304 | py | Python | models/reco/__init__.py | grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel | 00334215b63b12284a74e26fa0fbf15f09a046a2 | [
"MIT"
] | 18 | 2021-05-10T04:10:44.000Z | 2022-02-09T14:36:08.000Z | models/reco/__init__.py | grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel | 00334215b63b12284a74e26fa0fbf15f09a046a2 | [
"MIT"
] | 4 | 2021-07-08T06:29:54.000Z | 2021-08-02T08:51:01.000Z | models/reco/__init__.py | grsgth/Offline-Chinese-Handwriting-Text-Page-Spotter-with-Text-Kernel | 00334215b63b12284a74e26fa0fbf15f09a046a2 | [
"MIT"
] | 4 | 2021-12-14T02:39:20.000Z | 2022-02-14T02:38:58.000Z |
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
from .reco_layer_new_with_tcn_big import DenseNet as DenseNet_with_TCN_big
| 33.777778 | 74 | 0.894737 |
acf51b88e48960f9018a5c92c74abb19108118b7 | 4,637 | py | Python | scripts/ur5e_ik_marker_teleope_distance.py | DavidYaonanZhu/ur5e_teleope | 934a0bf9db979996d75a7f5f105625bb2a0a8f56 | [
"MIT"
] | null | null | null | scripts/ur5e_ik_marker_teleope_distance.py | DavidYaonanZhu/ur5e_teleope | 934a0bf9db979996d75a7f5f105625bb2a0a8f56 | [
"MIT"
] | null | null | null | scripts/ur5e_ik_marker_teleope_distance.py | DavidYaonanZhu/ur5e_teleope | 934a0bf9db979996d75a7f5f105625bb2a0a8f56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
import time
# TF stuff
from geometry_msgs.msg import PoseStamped
from tf.transformations import euler_from_quaternion
from math import radians
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from trac_ik_python.trac_ik import IK
class PR2Teleop(object):
def __init__(self):
urdf = rospy.get_param('/robot_description')
self.ik_right = IK("base_link",
"ee_link",0.005,1e-5,"Distance",urdf)
#self.ik_left = IK("torso_lift_link",
# "l_wrist_roll_link")
#self.left_command = rospy.Publisher('/l_arm_controller/command',
# JointTrajectory,
# queue_size=1)
self.right_command = rospy.Publisher('/arm_controller/command',
JointTrajectory,
queue_size=1)
#self.last_left_pose = None
#self.left_pose = rospy.Subscriber('/left_controller_as_posestamped',
#PoseStamped,
#self.left_cb, queue_size=1)
self.last_right_pose = None
self.right_pose = rospy.Subscriber('/free_positioning/gripper_marker_pose',
PoseStamped,
self.right_cb, queue_size=1)
rospy.sleep(2.0)
def left_cb(self, msg):
self.last_left_pose = msg
def right_cb(self, msg):
self.last_right_pose = msg
def send_right_arm_goal(self, positions):
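        """Publish a one-point JointTrajectory that moves the arm to ``positions``."""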
jt = JointTrajectory()
jt.header.stamp = rospy.Time.now()
jt.joint_names = ["shoulder_pan_joint", "shoulder_lift_joint", "elbow_joint",
"wrist_1_joint", "wrist_2_joint", "wrist_3_joint"]
jtp = JointTrajectoryPoint()
jtp.positions = list(positions)
jtp.velocities = [0.0] * len(positions)
jtp.time_from_start = rospy.Time(0.08) #default 0.4
jt.points.append(jtp)
# print("Goal: ")
#print(jt)
self.right_command.publish(jt)
'''
def send_left_arm_goal(self, positions):
jt = JointTrajectory()
jt.header.stamp = rospy.Time.now()
jt.joint_names = ["l_shoulder_pan_joint", "l_shoulder_lift_joint", "l_upper_arm_roll_joint",
"l_elbow_flex_joint", "l_forearm_roll_joint", "l_wrist_flex_joint", "l_wrist_roll_joint"]
jtp = JointTrajectoryPoint()
jtp.positions = list(positions)
jtp.velocities = [0.0] * len(positions)
jtp.time_from_start = rospy.Time(0.1)
jt.points.append(jtp)
self.left_command.publish(jt)
'''
def run_with_ik(self):
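        """Continuously solve IK for the latest marker pose and stream joint goals.

        Each cycle (125 Hz) takes the most recent target pose, asks trac_ik for
        a solution (retrying up to 10 times), publishes the resulting joint
        positions, and seeds the next solve with the last answer.
        """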
qinit = [0., 0., 0., 0., 0., 0.]
x = y = z = 0.0
rx = ry = rz = 0.0
rw = 1.0
bx = by = bz = 0.0 #default 0.02
brx = bry = brz = 0.0 #default 0.5
r = rospy.Rate(125) #default 4
while not rospy.is_shutdown():
ps = self.last_right_pose
if ps is None:
r.sleep()
print("No last right pose...")
continue
x = self.last_right_pose.pose.position.x
y = self.last_right_pose.pose.position.y
z = self.last_right_pose.pose.position.z
rx = self.last_right_pose.pose.orientation.x
ry = self.last_right_pose.pose.orientation.y
rz = self.last_right_pose.pose.orientation.z
rw = self.last_right_pose.pose.orientation.w
# rospy.loginfo("Got pose: " + str(ps))
sol = None
retries = 0
start = time.clock()
while not sol and retries < 10:
sol = self.ik_right.get_ik(qinit,
x, y, z,
rx, ry, rz, rw,
bx, by, bz,
brx, bry, brz)
retries += 1
            end = time.clock()  # note: time.clock() was removed in Python 3.8; time.perf_counter() is the modern equivalent
            print("Execution time: " + str(1000 * (end - start)) + " ms")
            if sol:
                print("Solution found: (" + str(retries) + " retries)")
                # print(sol)
                self.send_right_arm_goal(sol)
                qinit = sol
            else:
                print("NO SOLUTION FOUND :(")
r.sleep()
if __name__ == '__main__':
rospy.init_node('ur5e_ik_marker_teleope_py')
nv = PR2Teleop()
nv.run_with_ik()
| 36.226563 | 115 | 0.52383 |
acf51c69d8be45cb2e92ad3097fc47a6f5343dca | 7,957 | py | Python | desktop/core/ext-py/thrift-0.9.1/src/transport/TSSLSocket.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | desktop/core/ext-py/thrift-0.9.1/src/transport/TSSLSocket.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | desktop/core/ext-py/thrift-0.9.1/src/transport/TSSLSocket.py | vinaymundada27/Hue | 7bffb33bbe7cfa34d340241c4ba3b19476211b2a | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import socket
import ssl
from thrift.transport import TSocket
from thrift.transport.TTransport import TTransportException
class TSSLSocket(TSocket.TSocket):
"""
SSL implementation of client-side TSocket
This class creates outbound sockets wrapped using the
python standard ssl module for encrypted connections.
The protocol used is set using the class variable
SSL_VERSION, which must be one of ssl.PROTOCOL_* and
defaults to ssl.PROTOCOL_TLSv1 for greatest security.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host='localhost',
port=9090,
validate=True,
ca_certs=None,
keyfile=None,
certfile=None,
unix_socket=None):
"""Create SSL TSocket
@param validate: Set to False to disable SSL certificate validation
@type validate: bool
@param ca_certs: Filename to the Certificate Authority pem file, possibly a
file downloaded from: http://curl.haxx.se/ca/cacert.pem This is passed to
the ssl_wrap function as the 'ca_certs' parameter.
@type ca_certs: str
@param keyfile: The private key
@type keyfile: str
@param certfile: The cert file
@type certfile: str
Raises an IOError exception if validate is True and the ca_certs file is
None, not present or unreadable.
"""
self.validate = validate
self.is_valid = False
self.peercert = None
if not validate:
self.cert_reqs = ssl.CERT_NONE
else:
self.cert_reqs = ssl.CERT_REQUIRED
self.ca_certs = ca_certs
self.keyfile = keyfile
self.certfile = certfile
if validate:
if ca_certs is None or not os.access(ca_certs, os.R_OK):
raise IOError('Certificate Authority ca_certs file "%s" '
'is not readable, cannot validate SSL '
'certificates.' % (ca_certs))
TSocket.TSocket.__init__(self, host, port, unix_socket)
def open(self):
try:
res0 = self._resolveAddr()
for res in res0:
sock_family, sock_type = res[0:2]
ip_port = res[4]
plain_sock = socket.socket(sock_family, sock_type)
# check and turn on TCP Keepalive
sockprops = plain_sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)
if (sockprops == 0):
sockprops = plain_sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.handle = ssl.wrap_socket(plain_sock,
ssl_version=self.SSL_VERSION,
do_handshake_on_connect=True,
ca_certs=self.ca_certs,
keyfile=self.keyfile,
certfile=self.certfile,
cert_reqs=self.cert_reqs)
self.handle.settimeout(self._timeout)
try:
self.handle.connect(ip_port)
except socket.error, e:
if res is not res0[-1]:
continue
else:
raise e
break
except socket.error, e:
if self._unix_socket:
message = 'Could not connect to secure socket %s: %s' \
% (self._unix_socket, e)
else:
message = 'Could not connect to %s:%d: %s' % (self.host, self.port, e)
raise TTransportException(type=TTransportException.NOT_OPEN,
message=message)
if self.validate:
self._validate_cert()
def _validate_cert(self):
"""internal method to validate the peer's SSL certificate, and to check the
commonName of the certificate to ensure it matches the hostname we
used to make this connection. Does not support subjectAltName records
in certificates.
raises TTransportException if the certificate fails validation.
"""
cert = self.handle.getpeercert()
self.peercert = cert
if 'subject' not in cert:
raise TTransportException(
type=TTransportException.NOT_OPEN,
message='No SSL certificate found from %s:%s' % (self.host, self.port))
fields = cert['subject']
for field in fields:
# ensure structure we get back is what we expect
if not isinstance(field, tuple):
continue
cert_pair = field[0]
if len(cert_pair) < 2:
continue
cert_key, cert_value = cert_pair[0:2]
if cert_key != 'commonName':
continue
certhost = cert_value
# this check should be performed by some sort of Access Manager
if certhost == self.host:
# success, cert commonName matches desired hostname
self.is_valid = True
return
else:
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Hostname we connected to "%s" doesn\'t match certificate '
'provided commonName "%s"' % (self.host, certhost))
raise TTransportException(
type=TTransportException.UNKNOWN,
message='Could not validate SSL certificate from '
'host "%s". Cert=%s' % (self.host, cert))
class TSSLServerSocket(TSocket.TServerSocket):
"""SSL implementation of TServerSocket
This uses the ssl module's wrap_socket() method to provide SSL
negotiated encryption.
"""
SSL_VERSION = ssl.PROTOCOL_TLSv1
def __init__(self,
host=None,
port=9090,
certfile='cert.pem',
unix_socket=None):
"""Initialize a TSSLServerSocket
@param certfile: filename of the server certificate, defaults to cert.pem
@type certfile: str
@param host: The hostname or IP to bind the listen socket to,
i.e. 'localhost' for only allowing local network connections.
Pass None to bind to all interfaces.
@type host: str
@param port: The port to listen on for inbound connections.
@type port: int
"""
self.setCertfile(certfile)
TSocket.TServerSocket.__init__(self, host, port)
def setCertfile(self, certfile):
"""Set or change the server certificate file used to wrap new connections.
@param certfile: The filename of the server certificate,
i.e. '/etc/certs/server.pem'
@type certfile: str
Raises an IOError exception if the certfile is not present or unreadable.
"""
if not os.access(certfile, os.R_OK):
raise IOError('No such certfile found: %s' % (certfile))
self.certfile = certfile
def accept(self):
plain_client, addr = self.handle.accept()
try:
client = ssl.wrap_socket(plain_client, certfile=self.certfile,
server_side=True, ssl_version=self.SSL_VERSION)
except ssl.SSLError, ssl_exc:
# failed handshake/ssl wrap, close socket to client
plain_client.close()
# raise ssl_exc
# We can't raise the exception, because it kills most TServer derived
# serve() methods.
# Instead, return None, and let the TServer instance deal with it in
# other exception handling. (but TSimpleServer dies anyway)
return None
result = TSocket.TSocket()
result.setHandle(client)
return result
| 36.333333 | 86 | 0.648234 |
acf51c9bed9b7ee6dde47b581c22cd349284a398 | 197 | py | Python | tests/test_cuts.py | torressa/grblogtools | 6a7783ed6514b3a60cc4cc041ee922e538571701 | [
"Apache-2.0"
] | null | null | null | tests/test_cuts.py | torressa/grblogtools | 6a7783ed6514b3a60cc4cc041ee922e538571701 | [
"Apache-2.0"
] | 39 | 2021-12-14T05:01:39.000Z | 2022-03-01T23:05:46.000Z | tests/test_cuts.py | torressa/grblogtools | 6a7783ed6514b3a60cc4cc041ee922e538571701 | [
"Apache-2.0"
] | null | null | null | from .helpers import read_single
def test_cuts():
row = read_single("912-glass4-0.log")
cuts = {key: row[key] for key in row.keys() if key.startswith("Cuts: ")}
assert len(cuts) == 8
| 24.625 | 76 | 0.649746 |
acf51e428e0f11ad54d080f5da0f1920e91e8ffb | 3,076 | py | Python | maistra/vendor/com_googlesource_chromium_v8/wee8/build/android/gyp/java_cpp_strings_tests.py | maistra-bot/proxy | 05a551df62d90e96c24afc649f2755983d020b5b | [
"Apache-2.0"
] | 1 | 2021-03-21T10:43:16.000Z | 2021-03-21T10:43:16.000Z | src/build/android/gyp/java_cpp_strings_tests.py | wclmgcd/naiveproxy | e32a3afb76fd21207c322f2d5e794c4f5505fb59 | [
"BSD-3-Clause"
] | null | null | null | src/build/android/gyp/java_cpp_strings_tests.py | wclmgcd/naiveproxy | e32a3afb76fd21207c322f2d5e794c4f5505fb59 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for java_cpp_strings.py.
This test suite contains various tests for the C++ -> Java string generator.
"""
import unittest
import java_cpp_strings
class _TestStringsParser(unittest.TestCase):
def testParseComments(self):
test_data = """
/**
* This should be ignored as well.
*/
// Comment followed by a blank line.
// Comment followed by unrelated code.
int foo() { return 3; }
// Real comment.
const char kASwitch[] = "a-value";
// Real comment that spans
// multiple lines.
const char kAnotherSwitch[] = "another-value";
// Comment followed by nothing.
""".split('\n')
strings = java_cpp_strings.StringFileParser(test_data).Parse()
self.assertEqual(2, len(strings))
self.assertEqual('A_SWITCH', strings[0].name)
self.assertEqual('"a-value"', strings[0].value)
self.assertEqual(1, len(strings[0].comments.split('\n')))
self.assertEqual('ANOTHER_SWITCH', strings[1].name)
self.assertEqual('"another-value"', strings[1].value)
self.assertEqual(2, len(strings[1].comments.split('\n')))
def testStringValues(self):
test_data = """
// Single line string constants.
const char kAString[] = "a-value";
const char kNoComment[] = "no-comment";
// Single line switch with a big space.
const char kAStringWithSpace[] = "a-value";
// Wrapped constant definition.
const char kAStringWithAVeryLongNameThatWillHaveToWrap[] =
"a-string-with-a-very-long-name-that-will-have-to-wrap";
// This is erroneous and should be ignored.
const char kInvalidLineBreak[] =
"invalid-line-break";
""".split('\n')
strings = java_cpp_strings.StringFileParser(test_data).Parse()
self.assertEqual(4, len(strings))
self.assertEqual('A_STRING', strings[0].name)
self.assertEqual('"a-value"', strings[0].value)
self.assertEqual('NO_COMMENT', strings[1].name)
self.assertEqual('"no-comment"', strings[1].value)
self.assertEqual('A_STRING_WITH_SPACE', strings[2].name)
self.assertEqual('"a-value"', strings[2].value)
self.assertEqual('A_STRING_WITH_A_VERY_LONG_NAME_THAT_WILL_HAVE_TO_WRAP',
strings[3].name)
self.assertEqual('"a-string-with-a-very-long-name-that-will-have-to-wrap"',
strings[3].value)
def testTemplateParsing(self):
test_data = """
// Copyright {YEAR} The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// {SCRIPT_NAME}
// From
// {SOURCE_PATH}, and
// {TEMPLATE_PATH}
package my.java.package;
public any sort of class MyClass {{
{NATIVE_STRINGS}
}}
""".split('\n')
package, class_name = java_cpp_strings.ParseTemplateFile(test_data)
self.assertEqual('my.java.package', package)
self.assertEqual('MyClass', class_name)
if __name__ == '__main__':
unittest.main()
| 29.018868 | 79 | 0.696359 |
acf51fa7b8bd49fecd9dde9b0e0ea9bfabb934cb | 965 | py | Python | corehq/ex-submodules/casexml/apps/phone/migrations/0001_initial.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/ex-submodules/casexml/apps/phone/migrations/0001_initial.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/ex-submodules/casexml/apps/phone/migrations/0001_initial.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='OwnershipCleanlinessFlag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(max_length=100, db_index=True)),
('owner_id', models.CharField(max_length=100, db_index=True)),
('is_clean', models.BooleanField(default=False)),
('last_checked', models.DateTimeField()),
('hint', models.CharField(max_length=100, null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='ownershipcleanlinessflag',
unique_together=set([('domain', 'owner_id')]),
),
]
| 33.275862 | 114 | 0.563731 |
acf51fd09db948a8f194cbe0d86875153bf4b3dd | 6,835 | py | Python | mmur/viz/dists.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | mmur/viz/dists.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | mmur/viz/dists.py | RUrlus/ModelMetricUncertaintyResearch | 37daa1421a3a45a6adaea3788e2d00493477ff96 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import scipy.stats as sts
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from mmur.viz import _set_plot_style
from mmu.stats import compute_hdi
COLORS = _set_plot_style()
def plot_hdis_violin(
hdi_estimates, holdout_metrics, prob=0.95, ax=None
):
"""Plot Highest Density Interval containing `prob` of the distribution
against the HDI of the holdout_metrics.
Parameters
----------
hdi_estimates : pd.DataFrame
dataframe containing the Highest Density Interval with probability equal
to `prob`. The index of ``hdi_estimates`` should be equal to the columns
of ``holdout_metrics``. The columns should contain at least: 'lb',
'ub' and 'mu'.
    ax : matplotlib.axes._subplots.AxesSubplot, default=None
        axes object to plot on
Returns
-------
fig : matplotlib.figure.Figure
the figure is returned when ``axs`` is None
ax : matplotlib.axes._subplots.AxesSubplot
the created or passed axes object
"""
holdout_metrics_moments = pd.concat(
(
holdout_metrics.apply([np.min, np.max, np.mean]).T, # type: ignore
compute_hdi(holdout_metrics, prob=prob)
), axis=1
)
target_metrics = holdout_metrics.columns.to_list()
if ax is None:
fig, ax = plt.subplots(figsize=(14, 7))
else:
fig = None
_ = sns.violinplot(
data=holdout_metrics,
saturation=0.1,
ax=ax,
color=COLORS[3],
zorder=5,
label='out-of-sample',
)
violin = mpatches.Patch(color=COLORS[3], label='out-of-sample')
for i, idx in enumerate(hdi_estimates.index):
mu = hdi_estimates.loc[idx, 'mu']
lb = hdi_estimates.loc[idx, 'lb']
ub = hdi_estimates.loc[idx, 'ub']
err = np.abs(np.array([lb, ub])[:, None] - mu)
ax.errorbar(
x=i - 0.1, y=mu, yerr=err, capsize=10, fmt='none',
color=COLORS[0], zorder=10, lw=2, label='HDI estimate'
        )
ax.scatter(
x=i - 0.1, y=mu, marker='d', s=100, color=COLORS[0],
zorder=10, label='mean estimate'
)
mu = holdout_metrics_moments.loc[idx, 'mean']
lb = holdout_metrics_moments.loc[idx, 'lb']
ub = holdout_metrics_moments.loc[idx, 'ub']
err = np.abs(np.array([lb, ub])[:, None] - mu)
ax.errorbar(
x=i + 0.1, y=mu, yerr=err, capsize=10,
fmt='none', color=COLORS[1], label='HDI out-of-sample', zorder=10, lw=2
        )
ax.scatter(
x=i + 0.1, y=mu, marker='d', s=100, color=COLORS[1], zorder=10,
label='mean out-of-sample'
)
_ = ax.set_xticks([i for i in range(len(target_metrics))])
_ = ax.set_xticklabels(target_metrics)
ax.set_title('Coverage metrics', fontsize=18)
ax.set_ylabel('value', fontsize=18)
ax.set_xlabel('metrics', fontsize=18)
ax.tick_params(labelsize=16)
ax.legend(fontsize=18)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [violin, ] + handles
labels = ['out-of-sample'] + labels
by_label = dict(zip(labels, handles))
_ = ax.legend(by_label.values(), by_label.keys())
if fig is None:
return ax
return fig, ax
def plot_ci_violin(
ci_estimates, holdout_metrics, alpha=0.95, ax=None
):
"""Plot confidence interval containing `prob` of the distribution
against the HDI of the holdout_metrics.
Parameters
----------
ci_estimates : pd.DataFrame
dataframe containing the ``alpha`` confidence interval. The index of
``ci_estimates`` should be equal to the columns
of ``holdout_metrics``. The columns should contain at least: 'lb',
'ub' and 'mu'.
    ax : matplotlib.axes._subplots.AxesSubplot, default=None
        axes object to plot on
Returns
-------
fig : matplotlib.figure.Figure
the figure is returned when ``axs`` is None
ax : matplotlib.axes._subplots.AxesSubplot
the created or passed axes object
"""
alpha_ = (1 - alpha) / 2
q = (alpha_, 1 - alpha_)
alpha_perc = round(alpha * 100, 2)
holdout_metrics_moments = holdout_metrics.apply([np.mean, np.std]).T
holdout_metrics_moments['lb'] = 0.0
holdout_metrics_moments['ub'] = 0.0
dist = sts.norm(
holdout_metrics_moments.iloc[0, 0],
holdout_metrics_moments.iloc[0, 1]
)
holdout_metrics_moments.iloc[0, 2:] = dist.ppf(q)
dist = sts.norm(
holdout_metrics_moments.iloc[1, 0],
holdout_metrics_moments.iloc[1, 1]
)
holdout_metrics_moments.iloc[1, 2:] = dist.ppf(q)
target_metrics = holdout_metrics.columns.to_list()
if ax is None:
fig, ax = plt.subplots(figsize=(14, 7))
else:
fig = None
_ = sns.violinplot(
data=holdout_metrics,
saturation=0.1,
ax=ax,
color=COLORS[3],
zorder=5,
label='observed',
)
violin = mpatches.Patch(color=COLORS[3], label='observed')
for i, idx in enumerate(ci_estimates.index):
mu = ci_estimates.loc[idx, 'mu']
lb = ci_estimates.loc[idx, 'lb']
ub = ci_estimates.loc[idx, 'ub']
err = np.abs(np.array([lb, ub])[:, None] - mu)
ax.errorbar(
x=i - 0.1, y=mu, yerr=err, capsize=10, fmt='none',
color=COLORS[0], zorder=10, lw=2, label=f'{alpha_perc}% CI estimate'
        )
ax.scatter(
x=i - 0.1, y=mu, marker='d', s=100, color=COLORS[0],
zorder=10, label='mean estimate'
)
mu = holdout_metrics_moments.loc[idx, 'mean']
lb = holdout_metrics_moments.loc[idx, 'lb']
ub = holdout_metrics_moments.loc[idx, 'ub']
err = np.abs(np.array([lb, ub])[:, None] - mu)
ax.errorbar(
x=i + 0.1, y=mu, yerr=err, capsize=10,
fmt='none', color=COLORS[1],
label=f'{alpha_perc}% CI observed', zorder=10, lw=2
        )
ax.scatter(
x=i + 0.1, y=mu, marker='d', s=100, color=COLORS[1], zorder=10,
label='mean observed'
)
_ = ax.set_xticks([i for i in range(len(target_metrics))])
_ = ax.set_xticklabels(target_metrics)
ax.set_title(f'estimated vs observed\n {alpha_perc}% Confidence intervals', fontsize=18)
ax.set_ylabel('value', fontsize=18)
ax.set_xlabel('metrics', fontsize=18)
ax.tick_params(labelsize=16)
ax.legend(fontsize=18)
handles, labels = plt.gca().get_legend_handles_labels()
handles = [violin, ] + handles
labels = ['out-of-sample'] + labels
by_label = dict(zip(labels, handles))
_ = ax.legend(by_label.values(), by_label.keys())
if fig is None:
return ax
return fig, ax
| 32.089202 | 92 | 0.603804 |
acf51fd5e2006fd2aaffe773008631d9fc9c69bb | 6,050 | py | Python | docs/userguide/introduction/mdcheatsheet.py | Mailaender/spectrochempy | d58221afeb9f78e2e3e0079b3fd6c0162a902c04 | [
"CECILL-B"
] | null | null | null | docs/userguide/introduction/mdcheatsheet.py | Mailaender/spectrochempy | d58221afeb9f78e2e3e0079b3fd6c0162a902c04 | [
"CECILL-B"
] | null | null | null | docs/userguide/introduction/mdcheatsheet.py | Mailaender/spectrochempy | d58221afeb9f78e2e3e0079b3fd6c0162a902c04 | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Markdown Cheat Sheet
# %% [markdown]
# Copied and adapted
# from __[this guide](https://www.ibm.com/support/knowledgecenter/en/SSGNPV_2.0.0/dsx/markd-jupyter.html)__!
#
# This Markdown cheat sheet provides a quick overview of all the Markdown syntax elements to format Markdown cells in
# Jupyter notebooks.
# %% [markdown]
# ## Headings
# %% [markdown]
# Use the number sign (#) followed by a blank space for notebook titles and section headings, e.g.:
# ```md
# # for titles
# ## for major headings
# ### for subheadings
# #### for 4th level subheading
# ```
# %% [markdown]
# ## Emphasis
# %% [markdown]
# Use the surrounding _ or * to emphasize text, e.g.:
# ```
# Bold text: `__string__ or **string**`
# Italic text: `_string_ or *string*`
# ```
# %% [markdown]
# ## Mathematical symbols
# %% [markdown]
# Surround mathematical symbols with a dollar sign (\$), for example:
# ```
# $ \lambda = \sqrt{2*\pi} $
# ```
# gives $ \lambda = \sqrt{2*\pi} $
# %% [markdown]
# ## Monospace font
# %% [markdown]
# Surround text with a grave accent (\`) also called a back single quotation mark, for example:
# ```
# `string`
# ```
# You can use the monospace font for `file paths`, `file names`,`message text`...
# %% [markdown]
# ## Line breaks
# %% [markdown]
# Sometimes markdown doesn’t make line breaks when you want them. To force a linebreak, use the following code: `<br>`
# %% [markdown]
# ## Indenting
# %% [markdown]
# Use the greater than sign (>) followed by a space, for example:
# ```
# > Text that will be indented when the Markdown is rendered.
# Any subsequent text is indented until the next carriage return.
# ```
# %% [markdown]
# ## Bullets
# %% [markdown]
# To create a circular bullet point, use one of the following methods. Each bullet point must be on its own line.
#
# - A hyphen (-) followed by one or two spaces, for example:
#
# ```
# - Bulleted item
# ```
#
# - A space, a hyphen (-) and a space, for example:
#
# ```
# - Bulleted item
# ```
#
# * An asterisk (*) followed by one or two spaces, for example:
#
# ```
# * Bulleted item
# ```
#
# To create a sub bullet, press Tab before entering the bullet point using one of the methods described above. For
# example:
#
# ```
# - Main bullet point
# - Sub bullet point
# ```
# %% [markdown]
# ## Numbered lists
# %% [markdown]
# To create a numbered list, enter 1. followed by a space, for example:
# ```
# 1. Numbered item
# 1. Numbered item
# ```
# For simplicity, you use 1. before each entry. The list will be numbered correctly when you run the cell.
#
# To create a substep, press Tab before entering the numbered item, for example:
# ```
# 1. Numbered item
# 1. Substep
# ```
# %% [markdown]
# ## Colored note boxes
# %% [markdown]
# Use one of the following <div> tags to display text in a colored box.
#
# **Restriction**:
# Not all Markdown code displays correctly within <div> tags, so review your colored boxes carefully.
# For example, to make a word bold, surround it with the HTML code for bold (<b>text</b> instead of the Markdown code.
#
# The color of the box is determined by the alert type that you specify:
#
# * Blue boxes (alert-info)
# * Yellow boxes (alert-warning)
# * Green boxes (alert-success)
# * Red boxes (alert-danger)
#
# ```
# <div class="alert alert-block alert-info">
# <b>Tip:</b> For example use blue boxes to highlight a tip.
# If it’s a note, you don’t have to include the word “Note”.
# </div>
# ```
#
# <div class="alert alert-block alert-info">
# <b>Tip:</b> For example use blue boxes to highlight a tip.
# If it’s a note, you don’t have to include the word “Note”.
# </div>
# %% [markdown]
# ## Graphics
# %% [markdown]
# You can attach image files directly to a notebook in Markdown cells by dragging and dropping it into the cell.
# To add images to other types of cells, you must use a graphic that is hosted on the web and use the following code
# to insert the graphic:
# ```
# <img src="url.gif" alt="Alt text that describes the graphic" title="Title text" />
#
# ```
# <img src="images/scpy.png" alt="Alt text that describes the graphic" width=100 title="Title text" />
#
# **Restriction**
# You cannot add captions to graphics.
# %% [markdown]
# ## Geometric shapes
# Use &# followed by the decimal or hex reference number for the shape, for example:
# ```
# &#reference_number;
# ```
# e.g., `&#9664;`: ◀
#
# For a list of reference numbers, see __[UTF-8 Geometric shapes](https://en.wikipedia.org/wiki/Geometric_Shapes)__.
# %% [markdown]
# ## Horizontal lines
# On a new line, enter three asterisks: ``***``
# ***
# %% [markdown]
# ## Internal links
# To link to a section within your notebook, use the following code:
# ```
# [Section title](#section-title)
# ```
#
# For the text inside the parentheses, replace any spaces and special characters with a hyphen. For example,
# if your section is called `processing_functions`, you'd enter:
# ```
# [processing_functions](#processing_functions)
# ```
# [processing_functions](#processing_functions)
#
# Alternatively, you can add an ID above the section:
# ```
# <a id="section_ID"></a>
# ```
#
# **Important**
# Each ID in the notebook must be unique.
#
# To link to a section that has an ID, use the following code:
# ```
# [Section title](#section_ID)
# ```
# [Section title](#section_ID)
# %% [markdown]
# ## External links
# %% [markdown]
# To link to an external site, use the following code:
# ```
# __[link text](https://github.com/spectrochempy/spectrochempy)__
# ```
# Surround the link with two underscores (_) on each side
# __[link text](https://github.com/spectrochempy/spectrochempy)__
# %%
| 25.744681 | 118 | 0.660661 |
acf5206a15d2d701a4f1b9f2f52dd90ecb1f64f7 | 12,160 | py | Python | autoSimulation.py | tokisamu/btcoin-ng | 1a7e9dd3ff687361753685318a732368cb88de2c | [
"MIT"
] | null | null | null | autoSimulation.py | tokisamu/btcoin-ng | 1a7e9dd3ff687361753685318a732368cb88de2c | [
"MIT"
] | null | null | null | autoSimulation.py | tokisamu/btcoin-ng | 1a7e9dd3ff687361753685318a732368cb88de2c | [
"MIT"
] | 1 | 2021-03-17T11:21:45.000Z | 2021-03-17T11:21:45.000Z | import networkx as nx
import numpy as np
import math
import subprocess
import socket
import os
import re
import sys
import threading
import time
import matplotlib.pyplot as plt
from functools import wraps
from pathlib import Path
import errno
HOME = f'{str(Path.home())}/regtest'
print(HOME)
# Decorator that adds identifying information to method calls
def identify(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
print(f'Node {self.nid} executing "{method.__name__}"')
return method(self, *args, **kwargs)
return wrapper
class Network:
def __init__(self, n_nodes=6, topology='rand', connect_nodes=True):
self.nid = "Master"
self.n_nodes = n_nodes
self.topology = topology
self.network: nx.Graph = None
self.connect_nodes = connect_nodes
self.nodes = {}
self.ports = [self._free_port() for _ in range(n_nodes*2)] # reserve two ports for each node
try:
self.setup()
except Exception as e:
print(f'ERROR! {e}')
self.close()
@identify
def setup(self):
if self.topology == 'rand':
self.network = nx.erdos_renyi_graph(self.n_nodes, 0.5)
while(not nx.is_connected(self.network)): # should not happen very often
self.network = nx.erdos_renyi_graph(self.n_nodes, 0.5)
elif self.topology == 'complete':
self.network = nx.complete_graph(self.n_nodes)
elif self.topology == 'ba':
self.network = nx.barabasi_albert_graph(self.n_nodes, 8, seed=None)
elif self.topology == 'grid':
side = math.floor(math.sqrt(self.n_nodes))
self.network = nx.grid_2d_graph(side, side)
self.network = nx.convert_node_labels_to_integers(self.network, ordering = 'sorted')
elif self.topology == 'ring':
def ring_graph(n, k=1):
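                # Build a ring lattice: node i links to its k successors
                # (i+1 .. i+k, modulo n); k=1 gives a simple connected cycle.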
G = nx.Graph()
nxk = np.arange(0, n).repeat(k)
src = nxk.reshape(n, k)
dst = np.mod(np.tile(np.arange(0, k), n) + (nxk + 1), n).reshape((n, k))
flat_pairs = np.dstack((src, dst)).flatten().tolist()
edges = list(zip(flat_pairs[::2], flat_pairs[1::2]))
G.add_edges_from(edges)
return G
self.network = ring_graph(self.n_nodes)
else:
print("Topology is not recognized!")
self.close()
#nx.draw_networkx(self.network)
#plt.show()
# create the actual network
print("Setting up the network...")
print("Starting up the nodes")
self.create_all_nodes()
if self.connect_nodes:
print("Connecting the nodes")
self.connect_all_nodes()
print("Creating wallets")
self.create_all_wallets()
print("Done!")
def create_all_nodes(self):
import time
for nid in self.network.nodes:
self.nodes[nid] = Node(nid, self.network, self.ports.pop(), self.ports.pop())
# time.sleep(1)
def connect_all_nodes(self):
for nid, node in self.nodes.items():
for neighbor_id in self.network.neighbors(nid):
node.adj_nodes.add((neighbor_id, self.nodes[neighbor_id].port))
node.add_all_nodes()
def create_all_wallets(self):
for _, node in self.nodes.items():
node.createwallet()
# TODO print an overview table
def print_table(self):
pass
@identify
def close(self):
# subprocess.run(['killall --regex bitcoin.*'], shell=True)
print("Shutting network down...")
for _, node in self.nodes.items():
node.stop()
exit()
# TODO race conditions are unlikely but still possible; improve in the future
def _free_port(self, port=1024, max_port=65535):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while port <= max_port:
try:
sock.bind(('', port))
# sock.close()
# reserve and return a bound socket to (hopefully) avoid race conditions
# close and rebind when actually needed
return sock
except OSError:
port += 1
raise IOError('no free ports')
class Node:
def __init__(self, nid, network, port, rpcport):
self.nid = nid
self.network: nx.Graph = network # TODO remove if not needed in the future
self.adj_nodes = set() # set of (nid, port) of the adjacent nodes
self.datadir = HOME + f'/Node{str(self.nid)}/'
self._port = port
self._rpcport = rpcport
self._start()
# RPC wrappers -------------------------------------------------------
# -discover ?
def addnode(self, nid, port, command='add'):
"""
Add/remove a node
https://developer.bitcoin.org/reference/rpc/addnode.html
nid: id of the node
port: port of the node
command: add|remove|onetry
"""
self._runcmd(self.cli_prefix + f'addnode "localhost:{port}" "{command}"')
print(f'Node {self.nid} connected to Node {nid}!')
@identify
def stop(self):
"""
Stop Bitcoin server.
https://developer.bitcoin.org/reference/rpc/stop.html
"""
self._runcmd(self.cli_prefix + f'stop')
@identify
def createwallet(self):
"""
Creates and loads a new wallet.
https://developer.bitcoin.org/reference/rpc/createwallet.html
"""
wallet_name = f'wallet_node{self.nid}'
if not os.path.exists(self.datadir + f'/regtest/wallets/{wallet_name}'):
self._runcmd(self.cli_prefix + f'createwallet {wallet_name}')
else:
self._runcmd(self.cli_prefix + f'loadwallet {wallet_name}')
# Helper methods -------------------------------------------------------
@property
def port(self):
"""
Close the reserved socket and return its port so that it can be bound
by the actual bitcoind process.
"""
if isinstance(self._port, socket.socket):
port = self._port.getsockname()[1]
self._port.close()
self._port = port
return self._port
@property
def rpcport(self):
"""
Close the reserved socket and return its port so that it can be bound
by the actual bitcoind process.
"""
if isinstance(self._rpcport, socket.socket):
rpcport = self._rpcport.getsockname()[1]
self._rpcport.close()
self._rpcport = rpcport
return self._rpcport
def add_all_nodes(self):
for nid, port in self.adj_nodes:
self.addnode(nid, port)
def _start(self):
create_dir(self.datadir)
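        # Launch a regtest bitcoind daemon on this node's reserved P2P and RPC
        # ports, with a private datadir and a low fallback fee for test sends.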
self._runcmd(f'bitcoind -regtest -fallbackfee=0.00001 -server -daemon -debug -listen -port={self.port} -rpcport={self.rpcport} -datadir={self.datadir}')
self.cli_prefix = f'bitcoin-cli -regtest -datadir={self.datadir} -rpcport={self.rpcport} '
def _runcmd(self, cmd: str, suppress=False):
try:
result = subprocess.run(cmd, shell=True, check=True)
if not suppress:
print(result)
except subprocess.CalledProcessError as e:
print(f'ERROR in Node {self.nid}: {e.stderr}')
# TODO graceful exit
def create_dir(path):
if not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as e:
print(f'ERROR: {e}')
if e.errno != errno.EEXIST:
raise
with open(f'{path}bitcoin.conf', "w") as f:
f.write('rpcuser=user\nrpcpassword=pass\n')
def executeCommand(command,net):
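    """Run a shell command template of the form "node<i> <args> [node<j> ...]".

    The first "node<i>" token is substituted with that node's bitcoin-cli
    prefix, any later "node<j>" tokens with the matching node's P2P port, and
    the '"address": "..."' fragment is extracted from the command's stdout.
    """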
pat = re.compile('node {0,1}[0-9]+')
nids = list(map(lambda x: x.replace('node','').strip(), pat.findall(command)))
# get first node's cli_prefix
command = re.sub(pat, str(net.nodes[int(nids[0])].cli_prefix.strip()), command, 1).strip()
# get ports of all other nodes
for nid in nids[1:]:
command = re.sub(pat, str(net.nodes[int(nid)].port), command, 1).strip()
print(f'Running: {command}')
result = subprocess.run(command, shell=True, check=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding="utf-8")
stdout_as_str = result.stdout
pat2 = re.compile('"address": "\w+"')
return pat2.findall(stdout_as_str)[0]
def executeCommand2(command,net):
pat = re.compile('node {0,1}[0-9]+')
nids = list(map(lambda x: x.replace('node','').strip(), pat.findall(command)))
# get first node's cli_prefix
command = re.sub(pat, str(net.nodes[int(nids[0])].cli_prefix.strip()), command, 1).strip()
# get ports of all other nodes
for nid in nids[1:]:
command = re.sub(pat, str(net.nodes[int(nid)].port), command, 1).strip()
print(f'Running: {command}')
result = subprocess.run(command, shell=True, check=True)
return 1
# TODO create argparser later on
def main(argv):
"""
Create and manage a local network of bitcoin nodes.
----
n_nodes: number of nodes in the network
topology: rand|grid|complete|ring
"""
"""
node0 -generate 1
...
noden -generate 1
node(n/2) -generate 101
node(n/2) generateKeyblock address publickey
nodei sendtoaddress "random" 0.0001
repeat 10times
node(n/2) -generateMicroblock address publickey private
repeat 100times
"""
nodes = 50
topo = "ba"
rounds = 1000
    if len(argv) == 3:
nodes = int(argv[0])
topo = str(argv[1])
rounds = int(argv[2])
commands = []
address = []
    net = Network(n_nodes=nodes, topology=topo, connect_nodes=True)  # use the parsed CLI arguments
centerNode = round(net.n_nodes/2)
    for i in range(0, net.n_nodes):
        command = "node" + str(i) + " -generate 1"
        result = executeCommand(command, net)
        address.append(result[12:-1])
        time.sleep(1)
    # result = executeCommand(command, net)
    # result = executeCommand(command, net)
    command = "node" + str(centerNode) + " -generate 101"
    result = executeCommand2(command, net)
    time.sleep(2)
    for i in range(0, net.n_nodes):
        command = "node" + str(i) + " -generate 1"
        result = executeCommand(command, net)
        address.append(result[12:-1])
        time.sleep(0.5)
        result = executeCommand(command, net)
        time.sleep(0.5)
    # result = executeCommand(command, net)
    command = "node" + str(centerNode) + " -generate 101"
    result = executeCommand2(command, net)
    time.sleep(2)
    command = "node" + str(centerNode) + " generateKeyblock 1 " + address[centerNode] + " 998"
    result = executeCommand2(command, net)
    cnt = rounds
    while cnt:
        for repeat in range(0, 1):
            time.sleep(1)
            for i in range(0, net.n_nodes):
                if i == centerNode:
                    continue
                target = (i + centerNode) % net.n_nodes
                if target == centerNode:
                    target += 1
                command = "node" + str(i) + " sendtoaddress " + address[i] + " 0.0001"
                result = executeCommand2(command, net)
        command = "node" + str(centerNode) + " generateMicroblock 1 " + address[centerNode] + " 998 123"
        result = executeCommand2(command, net)
        for i in range(0, 3):
            command = "node" + str(i) + " getwalletinfo"
            result = executeCommand2(command, net)
        cnt -= 1
        print(cnt)
    for i in range(0, 10):
        command = "node" + str(centerNode) + " generateMicroblock 1 " + address[centerNode] + " 998 123"
        result = executeCommand2(command, net)
time.sleep(3)
'''
for i in range(0,net.n_nodes):
command =("node"+str(i)+" -generate 1");
result = executeCommand(command,net)
address.append(result[12:-1])
time.sleep(0.5)
#result = executeCommand(command,net)
'''
print("finished")
net.close()
if __name__ == '__main__':
main(sys.argv[1:]) | 35.348837 | 160 | 0.584622 |
acf5218f93cabd6b3f39bd76d354e2d466867fe7 | 1,394 | py | Python | kde/frameworks/tier3/knotifications/knotifications.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | kde/frameworks/tier3/knotifications/knotifications.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | kde/frameworks/tier3/knotifications/knotifications.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | import info
class subinfo(info.infoclass):
def setTargets(self):
self.versionInfo.setDefaultValues()
self.patchLevel["5.82.0"] = 1
# add backported patch for 5.83.0 to allow inline-replies on Windows Notifications
if CraftCore.compiler.isWindows:
self.patchToApply["5.83.0"] = [("inline_reply_win_backported.diff", 1)]
self.patchLevel["5.83.0"] = 1
def setDependencies(self):
self.buildDependencies["virtual/base"] = None
self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = None
self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
if not CraftCore.compiler.isAndroid:
self.runtimeDependencies["libs/qt5/qtspeech"] = None
self.runtimeDependencies["kde/frameworks/tier1/kwindowsystem"] = None
self.runtimeDependencies["qt-libs/phonon"] = None
else:
self.runtimeDependencies["libs/qt5/qtandroidextras"] = None
if OsUtils.isMac():
self.runtimeDependencies["libs/qt5/qtmacextras"] = None
if OsUtils.isWin():
self.runtimeDependencies["dev-utils/snoretoast"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
| 37.675676 | 90 | 0.670014 |
acf52229f901339194c86c439e7423cdcb12b31b | 10,514 | py | Python | test.py | yuewei1002/test | ebfabcc5ce8aa354d57814a64eb8cc675549e843 | [
"Apache-2.0"
] | null | null | null | test.py | yuewei1002/test | ebfabcc5ce8aa354d57814a64eb8cc675549e843 | [
"Apache-2.0"
] | null | null | null | test.py | yuewei1002/test | ebfabcc5ce8aa354d57814a64eb8cc675549e843 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
import xlrd
import sys, os
import tarfile
# if os.environ.get("WORKSPACE"):
# sys.path.append(os.path.join(os.environ.get("WORKSPACE"), "AutoTest"))
# from ..Others import get_python_version
# if get_python_version() == 3:
from functools import reduce
# from .. import logger
# logging = logger
from files import HandleXml
class ConvertTestCaseFromExcelToXml(object):
def __init__(self, excel_file,version,sheets):
self.excel_file = excel_file
self.version = version
self.dic_testlink = {}
self.sheets = map(lambda x: int(x.strip()), sheets.split(","))
self.xml_tool = HandleXml()
def get_sheet(self, sheet_index):
sheet_obj = xlrd.open_workbook(self.excel_file).sheet_by_index(sheet_index)
return sheet_obj
def case_to_dic(self, case, sheet_name):
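        """Convert one Excel row into a testlink-style case dict.

        Column mapping (0-based): case[1] test suite, case[2] keywords,
        case[3] case name, case[4] importance, case[5] preconditions,
        case[6] action steps and case[7] expected results (the last two
        newline-separated, one step per line).
        """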
testcase = {"testsuite": "", "name": "", "node_order": "100", "externalid": "", "version": "1", "summary": "",
"preconditions": "", "execution_type": "1", "importance": "3", "steps": [], "keywords": "P1"}
# if get_python_version() == 2:
# testcase["testsuite"] = case[1].encode("utf-8")
# testcase["name"] = case[3].encode("utf-8")
# else:
testcase["testsuite"] = case[1]
testcase["name"] = case[3]
testcase["summary"] = ""
try:
testcase["importance"] = str(case[4])
except:
print(u"错误的用例优先级格式,请查看用例:%s, 对应的优先级为:%s" % (testcase["name"], case[4]))
print(u"sheet名称为:%s" % sheet_name)
sys.exit(1)
# if get_python_version() == 2:
# testcase["preconditions"] = case[5].encode("utf-8")
# testcase["keywords"] = case[2].encode("utf-8")
# actions = case[6].strip().encode("utf-8").split("\n")
# expectedresults = case[7].strip().encode("utf-8").split("\n")
# else:
testcase["preconditions"] = case[5]
testcase["keywords"] = case[2]
actions = case[6].strip().split("\n") # 去掉空格,通过换行符分隔成列表
expectedresults = case[7].strip().split("\n") # 去掉空格,通过换行符分隔成列表
count = 0
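        # Pair actions with expected results line by line; if their counts
        # differ, collapse everything into a single combined step instead.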
if len(actions) == len(expectedresults):
while count < len(actions):
step = {"execution_type": ""}
step["step_number"] = str(count + 1)
step["actions"] = actions[count]
step["expectedresults"] = expectedresults[count]
testcase["steps"].append(
step) # {"execution_type": "","step_number":"1","actions":"步骤1","expectedresults":"期望1"}
count += 1
else:
step = {"execution_type": ""}
step["step_number"] = "1"
step["actions"] = reduce(lambda x, y: x + y, actions)
step["expectedresults"] = reduce(lambda x, y: x + y, expectedresults)
testcase["steps"].append(step) # {"execution_type": "","step_number":"","actions":"","expectedresults":""}
return testcase
def get_cases(self, case_sheet_obj):
start_row = 4
cases = []
while True:
try:
                case = case_sheet_obj.row_values(start_row)  # list of all cell values in this row
cases.append(case)
except IndexError:
break
start_row += 1
return cases
def zip_file(self, zip_file, file_list):
tar = tarfile.open(zip_file, 'w')
for File in file_list:
tar.add(File, arcname=File.split(os.sep)[-1])
tar.close()
def main(self):
file_list = []
        for sheet in self.sheets:  # process one worksheet at a time
            sheet_obj = self.get_sheet(sheet - 1)
            cases = self.get_cases(case_sheet_obj=sheet_obj)  # all cases of this sheet, as a list of row lists
            split_number = 300  # testlink limits import size, so split the cases
            cases_splited = [cases[i:i + split_number] for i in range(0, len(cases), split_number)]  # chunks of 300 cases each
cases_splited_count = 0
while cases_splited_count < len(cases_splited):
for case in cases_splited[cases_splited_count]:
                    case_datails = self.case_to_dic(case, sheet_obj.name)  # case converted to its dict form
                    test_suite = case_datails["testsuite"].replace("\n", "")  # second-level module name; strip stray newlines
if not test_suite:
print("suite is empty for cases:%s" % case_datails)
sys.exit("suite is empty for cases:%s" % case_datails)
if sheet_obj.name not in self.dic_testlink.keys():
                        self.dic_testlink[sheet_obj.name] = {}  # nested dict: one sub-dict per worksheet, keyed by sheet name
if test_suite not in self.dic_testlink[sheet_obj.name].keys():
self.dic_testlink[sheet_obj.name][test_suite] = {"testcase": [case_datails]}
else:
self.dic_testlink[sheet_obj.name][test_suite]["testcase"].append(case_datails)
# {"表名":{"二级模块1":{"测试用例":[{用例1},{用例2}]},"二级模块2":{"测试用例":[{用例1},{用例2}]}...}}
node_version = self.xml_tool.add_node("testsuite", {"name": self.version}) # 根节点,版本
# node_order
self.xml_tool.add_child_node(node_version, "node_order", content="4")
# details
self.xml_tool.add_child_node(node_version, "details")
                node_sheet = self.xml_tool.add_child_node(node_version, "testsuite", {"name": sheet_obj.name})  # worksheet-name node
# node_order
self.xml_tool.add_child_node(node_sheet, "node_order", content="4")
# details
self.xml_tool.add_child_node(node_sheet, "details")
for sheet_name_from_dict in self.dic_testlink.keys():
if sheet_name_from_dict == sheet_obj.name:
for test_suite in self.dic_testlink[sheet_name_from_dict].keys():
node_modlue = self.xml_tool.add_child_node(node_sheet, "testsuite",
{"name": test_suite})
                            testcase_list = self.dic_testlink[sheet_name_from_dict][test_suite]["testcase"]  # list of case dicts
# print(testcase_list)
for testcase in testcase_list:
# testcase
node_case = self.xml_tool.add_child_node(node_modlue, "testcase",
{"name": testcase["name"]})
# node_order
self.xml_tool.add_child_node(node_case, "node_order", content=testcase["node_order"])
# externalid
self.xml_tool.add_child_node(node_case, "externalid", content=testcase["externalid"])
# version
self.xml_tool.add_child_node(node_case, "version", content=testcase["version"])
# summary
self.xml_tool.add_child_node(node_case, "summary", content=testcase["summary"])
# preconditions
self.xml_tool.add_child_node(node_case, "preconditions",
content=testcase["preconditions"])
# execution_type
self.xml_tool.add_child_node(node_case, "execution_type",
content=testcase["execution_type"])
# importance
self.xml_tool.add_child_node(node_case, "importance", content=testcase["importance"])
# steps
node_steps = self.xml_tool.add_child_node(node_case, "steps")
for step in testcase["steps"]:
# step
node_step = self.xml_tool.add_child_node(node_steps, "step")
# step_number
self.xml_tool.add_child_node(node_step, "step_number", content=step["step_number"])
# actions
self.xml_tool.add_child_node(node_step, "actions", content=step["actions"])
# expectedresults
self.xml_tool.add_child_node(node_step, "expectedresults",
content=step["expectedresults"])
# execution_type
self.xml_tool.add_child_node(node_step, "execution_type",
content=step["execution_type"])
# keywords
node_keywords = self.xml_tool.add_child_node(node_case, "keywords")
# keyword
node_keyword = self.xml_tool.add_child_node(node_keywords, "keyword",
{"name": testcase["keywords"]})
# notes
self.xml_tool.add_child_node(node_keyword, "notes", content="aaaa")
# if get_python_version() == 3:
xml_file = "%s_%s_%s.xml" % (self.version, sheet_obj.name, str(cases_splited_count + 1))
# else:
# xml_file = "%s_%s_%s.xml" % (
# self.version, sheet_obj.name.encode("utf-8"), str(cases_splited_count + 1))
self.xml_tool.write_xml(xml_file, root_node=node_version)
cases_splited_count += 1
file_list.append(xml_file)
tar_file = self.excel_file.replace(".xlsx", "").replace(".xls", "") + ".tar"
self.zip_file(tar_file, file_list)
return tar_file
#
# if __name__ == '__main__':
# version = '瑕疵1.1.0'
# sheets = '3'
# excel_file = "F:\\瑕疵\\01.瑕疵检测平台_V1.0.0_功能测试用例.xlsx"
# ConvertXml = ConvertTestCaseFromExcelToXml(excel_file=excel_file,version=version,sheets=sheets)
# ConvertXml.main()
| 53.642857 | 119 | 0.507704 |
acf522e990c9ab97941302ddbe8f6b180f26145d | 727 | py | Python | feature/simple_features/second_quart_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | feature/simple_features/second_quart_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | 2 | 2021-10-12T17:45:49.000Z | 2021-12-21T19:23:30.000Z | feature/simple_features/second_quart_feature.py | smogork/TAiO_ImageClassification | 14b2f6e707475b45e936a8ddd4345309aaef78f2 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
This module contains the class that counts the points whose values lie in (1/4, 2/4].
"""
import copy
from feature import feature
from bitmap import bitmap_grayscale
class SecondQuartFeature(feature.Feature):
"""
    Counts the points of a grayscale bitmap whose values fall within (0.25, 0.5].
    Feature 19.
"""
def __init__(self):
self.__points = None
def calculate(self) -> float:
if self.__points is None:
raise RuntimeError("Run prepare() before calculate()")
count = 0
for x in self.__points:
if 0.25 < x <= 0.5:
count += 1
return count
def prepare(self, bitmap: bitmap_grayscale) -> None:
self.__points = super()._map_bitmap_to_single_dimention(bitmap)
| 21.382353 | 71 | 0.618982 |
acf5246700140cb7ddb6fb73e14e166f143faafd | 45,961 | py | Python | electrum/transaction.py | Marcdnd/electrum | c2ff3e4a2bbfc0fa31b83bfd92e5ab9d6b7e5230 | [
"MIT"
] | 1 | 2019-03-31T17:37:08.000Z | 2019-03-31T17:37:08.000Z | electrum/transaction.py | juanasorta/electrum | a62bf2a53ac3c6a596b2570ba41636fb979df2c5 | [
"MIT"
] | 4 | 2018-05-30T07:20:08.000Z | 2018-05-31T05:13:16.000Z | electrum/transaction.py | rt121212121/electrum | df6cba5af80f6d5bc821573788f176b8668fb714 | [
"MIT"
] | 1 | 2020-06-25T21:04:52.000Z | 2020-06-25T21:04:52.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
import struct
import traceback
import sys
from typing import (Sequence, Union, NamedTuple, Tuple, Optional, Iterable,
Callable, List, Dict)
from . import ecc, bitcoin, constants, segwit_addr
from .util import print_error, profiler, to_bytes, bh2u, bfh
from .bitcoin import (TYPE_ADDRESS, TYPE_PUBKEY, TYPE_SCRIPT, hash_160,
hash160_to_p2sh, hash160_to_p2pkh, hash_to_segwit_addr,
hash_encode, var_int, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC, COIN,
                      push_script, int_to_hex, b58_address_to_hash160,
opcodes, add_number_to_script, base_decode)
from .crypto import sha256d
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
PARTIAL_TXN_HEADER_MAGIC = b'EPTF\xff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class UnknownTxinType(Exception):
pass
class NotRecognizedRedeemScript(Exception):
pass
class MalformedBitcoinScript(Exception):
pass
class TxOutput(NamedTuple):
type: int
address: str
value: Union[int, str] # str when the output is set to max: '!'
class TxOutputForUI(NamedTuple):
address: str
value: int
class TxOutputHwInfo(NamedTuple):
address_index: Tuple
sorted_xpubs: Iterable[str]
num_sig: Optional[int]
script_type: str
class BCDataStream(object):
"""Workalike python implementation of Bitcoin's CDataStream class."""
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer") from None
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
    def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
    def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError as e:
raise SerializationError("attempt to read past end of buffer") from e
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e) from e
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
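# Illustrative sketch of the CompactSize encoding used above (a hedged round-trip example,
# not part of the original module):
#   s = BCDataStream()
#   s.write_compact_size(5)      # writes b'\x05'
#   s.write_compact_size(253)    # writes b'\xfd' + struct.pack('<H', 253) == b'\xfd\xfd\x00'
#   s.read_cursor = 0
#   s.read_compact_size()        # -> 5
#   s.read_compact_size()        # -> 253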
def script_GetOp(_bytes : bytes):
i = 0
while i < len(_bytes):
vch = None
opcode = _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
try: nSize = _bytes[i]
except IndexError: raise MalformedBitcoinScript()
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
try: (nSize,) = struct.unpack_from('<H', _bytes, i)
except struct.error: raise MalformedBitcoinScript()
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
try: (nSize,) = struct.unpack_from('<I', _bytes, i)
except struct.error: raise MalformedBitcoinScript()
i += 4
vch = _bytes[i:i + nSize]
i += nSize
yield opcode, vch, i
class OPPushDataGeneric:
def __init__(self, pushlen: Callable=None):
if pushlen is not None:
self.check_data_len = pushlen
@classmethod
def check_data_len(cls, datalen: int) -> bool:
# Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
return opcodes.OP_PUSHDATA4 >= datalen >= 0
@classmethod
def is_instance(cls, item):
# accept objects that are instances of this class
# or other classes that are subclasses
return isinstance(item, cls) \
or (isinstance(item, type) and issubclass(item, cls))
OPPushDataPubkey = OPPushDataGeneric(lambda x: x in (33, 65))
# note that this does not include x_pubkeys !
def match_decoded(decoded, to_match):
if decoded is None:
return False
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
to_match_item = to_match[i]
decoded_item = decoded[i]
if OPPushDataGeneric.is_instance(to_match_item) and to_match_item.check_data_len(decoded_item[0]):
continue
if to_match_item != decoded_item[0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = [ x for x in script_GetOp(_bytes) ]
except Exception as e:
# coinbase transactions raise an exception
print_error("parse_scriptSig: cannot find address in input script (coinbase?)",
bh2u(_bytes))
return
match = [OPPushDataGeneric]
if match_decoded(decoded, match):
item = decoded[0][1]
if item[0] == 0:
# segwit embedded into p2sh
# witness version 0
d['address'] = bitcoin.hash160_to_p2sh(hash_160(item))
if len(item) == 22:
d['type'] = 'p2wpkh-p2sh'
elif len(item) == 34:
d['type'] = 'p2wsh-p2sh'
else:
print_error("unrecognized txin type", bh2u(item))
elif opcodes.OP_1 <= item[0] <= opcodes.OP_16:
# segwit embedded into p2sh
# witness version 1-16
pass
else:
# assert item[0] == 0x30
# pay-to-pubkey
d['type'] = 'p2pk'
d['address'] = "(pubkey)"
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# p2pkh TxIn transactions push a signature
# (71-73 bytes) and then their public key
# (33 or 65 bytes) onto the stack:
match = [OPPushDataGeneric, OPPushDataGeneric]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("parse_scriptSig: cannot find address in input script (p2pkh?)",
bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [opcodes.OP_0] + [OPPushDataGeneric] * (len(decoded) - 1)
if match_decoded(decoded, match):
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
redeem_script_unsanitized = decoded[-1][1] # for partial multisig txn, this has x_pubkeys
try:
m, n, x_pubkeys, pubkeys, redeem_script = parse_redeemScript_multisig(redeem_script_unsanitized)
except NotRecognizedRedeemScript:
print_error("parse_scriptSig: cannot find address in input script (p2sh?)",
bh2u(_bytes))
# we could still guess:
# d['address'] = hash160_to_p2sh(hash_160(decoded[-1][1]))
return
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeem_script'] = redeem_script
d['address'] = hash160_to_p2sh(hash_160(bfh(redeem_script)))
return
# custom partial format for imported addresses
match = [opcodes.OP_INVALIDOPCODE, opcodes.OP_0, OPPushDataGeneric]
if match_decoded(decoded, match):
x_pubkey = bh2u(decoded[2][1])
pubkey, address = xpubkey_to_address(x_pubkey)
d['type'] = 'address'
d['address'] = address
d['num_sig'] = 1
d['x_pubkeys'] = [x_pubkey]
d['pubkeys'] = None # get_sorted_pubkeys will populate this
d['signatures'] = [None]
return
print_error("parse_scriptSig: cannot find address in input script (unknown)",
bh2u(_bytes))
def parse_redeemScript_multisig(redeem_script: bytes):
try:
dec2 = [ x for x in script_GetOp(redeem_script) ]
except MalformedBitcoinScript:
raise NotRecognizedRedeemScript()
try:
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
except IndexError:
raise NotRecognizedRedeemScript()
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [op_m] + [OPPushDataGeneric] * n + [op_n, opcodes.OP_CHECKMULTISIG]
if not match_decoded(dec2, match_multisig):
raise NotRecognizedRedeemScript()
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeem_script2 = bfh(multisig_script(x_pubkeys, m))
if redeem_script2 != redeem_script:
raise NotRecognizedRedeemScript()
redeem_script_sanitized = multisig_script(pubkeys, m)
return m, n, x_pubkeys, pubkeys, redeem_script_sanitized
def get_address_from_output_script(_bytes: bytes, *, net=None) -> Tuple[int, str]:
try:
decoded = [x for x in script_GetOp(_bytes)]
except MalformedBitcoinScript:
decoded = None
# p2pk
match = [OPPushDataPubkey, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match) and ecc.ECPubkey.is_pubkey_bytes(decoded[0][1]):
return TYPE_PUBKEY, bh2u(decoded[0][1])
# p2pkh
match = [opcodes.OP_DUP, opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1], net=net)
# p2sh
match = [opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUAL]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1], net=net)
# segwit address (version 0)
match = [opcodes.OP_0, OPPushDataGeneric(lambda x: x in (20, 32))]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=0, net=net)
# segwit address (version 1-16)
future_witness_versions = list(range(opcodes.OP_1, opcodes.OP_16 + 1))
for witver, opcode in enumerate(future_witness_versions, start=1):
match = [opcode, OPPushDataGeneric(lambda x: 2 <= x <= 40)]
if match_decoded(decoded, match):
return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=witver, net=net)
return TYPE_SCRIPT, bh2u(_bytes)
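# Illustrative sketch (hedged, not from the original source): a standard P2PKH scriptPubKey,
#   OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG,
# matches the second template above and yields (TYPE_ADDRESS, <base58 P2PKH address>),
# while a script matching none of the templates falls through to (TYPE_SCRIPT, <hex of the raw script>).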
def parse_input(vds, full_parse: bool):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['scriptSig'] = bh2u(scriptSig)
d['sequence'] = sequence
d['type'] = 'unknown' if prevout_hash != '00'*32 else 'coinbase'
d['address'] = None
d['num_sig'] = 0
if not full_parse:
return d
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
if d['type'] != 'coinbase' and scriptSig:
try:
parse_scriptSig(d, scriptSig)
except BaseException:
traceback.print_exc(file=sys.stderr)
print_error('failed to parse scriptSig', bh2u(scriptSig))
return d
def construct_witness(items: Sequence[Union[str, int, bytes]]) -> str:
"""Constructs a witness from the given stack items."""
witness = var_int(len(items))
for item in items:
if type(item) is int:
item = bitcoin.script_num_to_hex(item)
elif type(item) is bytes:
item = bh2u(item)
witness += bitcoin.witness_push(item)
return witness
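# Illustrative sketch (hedged; the hex literals are truncated placeholders, not real values):
# for a single-sig segwit input the witness stack is [signature, pubkey], so
#   construct_witness(['3044...01', '02c0ffee...'])
# returns var_int(2) followed by a length-prefixed push of each item, as one hex string.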
def parse_witness(vds, txin, full_parse: bool):
n = vds.read_compact_size()
if n == 0:
txin['witness'] = '00'
return
if n == 0xffffffff:
txin['value'] = vds.read_uint64()
txin['witness_version'] = vds.read_uint16()
n = vds.read_compact_size()
# now 'n' is the number of items in the witness
w = list(bh2u(vds.read_bytes(vds.read_compact_size())) for i in range(n))
txin['witness'] = construct_witness(w)
if not full_parse:
return
try:
if txin.get('witness_version', 0) != 0:
raise UnknownTxinType()
if txin['type'] == 'coinbase':
pass
elif txin['type'] == 'address':
pass
elif txin['type'] == 'p2wsh-p2sh' or n > 2:
witness_script_unsanitized = w[-1] # for partial multisig txn, this has x_pubkeys
try:
m, n, x_pubkeys, pubkeys, witness_script = parse_redeemScript_multisig(bfh(witness_script_unsanitized))
except NotRecognizedRedeemScript:
raise UnknownTxinType()
txin['signatures'] = parse_sig(w[1:-1])
txin['num_sig'] = m
txin['x_pubkeys'] = x_pubkeys
txin['pubkeys'] = pubkeys
txin['witness_script'] = witness_script
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wsh'
txin['address'] = bitcoin.script_to_p2wsh(witness_script)
elif txin['type'] == 'p2wpkh-p2sh' or n == 2:
txin['num_sig'] = 1
txin['x_pubkeys'] = [w[1]]
txin['pubkeys'] = [safe_parse_pubkey(w[1])]
txin['signatures'] = parse_sig([w[0]])
if not txin.get('scriptSig'): # native segwit script
txin['type'] = 'p2wpkh'
txin['address'] = bitcoin.public_key_to_p2wpkh(bfh(txin['pubkeys'][0]))
else:
raise UnknownTxinType()
except UnknownTxinType:
txin['type'] = 'unknown'
except BaseException:
txin['type'] = 'unknown'
traceback.print_exc(file=sys.stderr)
print_error('failed to parse witness', txin.get('witness'))
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
if d['value'] > TOTAL_COIN_SUPPLY_LIMIT_IN_BTC * COIN:
raise SerializationError('invalid output amount (too large)')
if d['value'] < 0:
raise SerializationError('invalid output amount (negative)')
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw: str, force_full_parse=False) -> dict:
raw_bytes = bfh(raw)
d = {}
if raw_bytes[:5] == PARTIAL_TXN_HEADER_MAGIC:
d['partial'] = is_partial = True
partial_format_version = raw_bytes[5]
if partial_format_version != 0:
raise SerializationError('unknown tx partial serialization format version: {}'
.format(partial_format_version))
raw_bytes = raw_bytes[6:]
else:
d['partial'] = is_partial = False
full_parse = force_full_parse or is_partial
vds = BCDataStream()
vds.write(raw_bytes)
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
is_segwit = (n_vin == 0)
if is_segwit:
marker = vds.read_bytes(1)
if marker != b'\x01':
raise ValueError('invalid txn marker byte: {}'.format(marker))
n_vin = vds.read_compact_size()
d['segwit_ser'] = is_segwit
d['inputs'] = [parse_input(vds, full_parse=full_parse) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
if is_segwit:
for i in range(n_vin):
txin = d['inputs'][i]
parse_witness(vds, txin, full_parse=full_parse)
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
# pay & redeem scripts
def multisig_script(public_keys: Sequence[str], m: int) -> str:
n = len(public_keys)
assert 1 <= m <= n <= 15, f'm {m}, n {n}'
op_m = bh2u(add_number_to_script(m))
op_n = bh2u(add_number_to_script(n))
keylist = [push_script(k) for k in public_keys]
return op_m + ''.join(keylist) + op_n + opcodes.OP_CHECKMULTISIG.hex()
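# Illustrative sketch (hedged; pk1..pk3 are placeholder pubkey hex strings): a 2-of-3 redeem
# script produced by multisig_script([pk1, pk2, pk3], 2) has the layout
#   OP_2 <push pk1> <push pk2> <push pk3> OP_3 OP_CHECKMULTISIG
# returned as hex, which is the same template checked by parse_redeemScript_multisig() above.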
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise Exception("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None # type: List[TxOutput]
self.locktime = 0
self.version = 2
# by default we assume this is a partial txn;
# this value will get properly set when deserializing
self.is_partial_originally = True
self._segwit_ser = None # None means "don't know"
self.output_info = None # type: Optional[Dict[str, TxOutputHwInfo]]
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self) -> List[TxOutput]:
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
if txin['type'] == 'coinbase':
return [], []
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures: Sequence[str]):
"""Add new signatures to a transaction
`signatures` is expected to be a list of sigs with signatures[i]
intended for self._inputs[i].
        This is used by the Trezor, KeepKey and Safe-T plugins.
"""
if self.is_complete():
return
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if sig in txin.get('signatures'):
continue
pre_hash = sha256d(bfh(self.serialize_preimage(i)))
sig_string = ecc.sig_string_from_der_sig(bfh(sig[:-2]))
for recid in range(4):
try:
public_key = ecc.ECPubkey.from_sig_string(sig_string, recid, pre_hash)
except ecc.InvalidECPointException:
# the point might not be on the curve for some recid values
continue
pubkey_hex = public_key.get_public_key_hex(compressed=True)
if pubkey_hex in pubkeys:
try:
public_key.verify_message_hash(sig_string, pre_hash)
except Exception:
traceback.print_exc(file=sys.stderr)
continue
j = pubkeys.index(pubkey_hex)
print_error("adding sig", i, j, pubkey_hex, sig)
self.add_signature_to_txin(i, j, sig)
break
# redo raw
self.raw = self.serialize()
def add_signature_to_txin(self, i, signingPos, sig):
txin = self._inputs[i]
txin['signatures'][signingPos] = sig
txin['scriptSig'] = None # force re-serialization
txin['witness'] = None # force re-serialization
self.raw = None
def add_inputs_info(self, wallet):
if self.is_complete():
return
for txin in self.inputs():
wallet.add_input_info(txin)
def remove_signatures(self):
for txin in self.inputs():
txin['signatures'] = [None] * len(txin['signatures'])
assert not self.is_complete()
def deserialize(self, force_full_parse=False):
if self.raw is None:
return
#self.raw = self.serialize()
if self._inputs is not None:
return
d = deserialize(self.raw, force_full_parse)
self._inputs = d['inputs']
self._outputs = [TxOutput(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
self.version = d['version']
self.is_partial_originally = d['partial']
self._segwit_ser = d['segwit_ser']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, version=None):
self = klass(None)
self._inputs = inputs
self._outputs = outputs
self.locktime = locktime
if version is not None:
self.version = version
self.BIP69_sort()
return self
@classmethod
def pay_script(self, output_type, addr: str) -> str:
"""Returns scriptPubKey in hex form."""
if output_type == TYPE_SCRIPT:
return addr
elif output_type == TYPE_ADDRESS:
return bitcoin.address_to_script(addr)
elif output_type == TYPE_PUBKEY:
return bitcoin.public_key_to_p2pk_script(addr)
else:
raise TypeError('Unknown output type')
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
if txin['type'] == 'coinbase':
return [], []
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long
sig_list = [ "00" * 0x48 ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def serialize_witness(self, txin, estimate_size=False):
_type = txin['type']
if not self.is_segwit_input(txin) and not txin['type'] == 'address':
return '00'
if _type == 'coinbase':
return txin['witness']
witness = txin.get('witness', None)
if witness is None or estimate_size:
if _type == 'address' and estimate_size:
_type = self.guess_txintype_from_address(txin['address'])
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
if _type in ['p2wpkh', 'p2wpkh-p2sh']:
witness = construct_witness([sig_list[0], pubkeys[0]])
elif _type in ['p2wsh', 'p2wsh-p2sh']:
witness_script = multisig_script(pubkeys, txin['num_sig'])
witness = construct_witness([0] + sig_list + [witness_script])
else:
witness = txin.get('witness', '00')
if self.is_txin_complete(txin) or estimate_size:
partial_format_witness_prefix = ''
else:
input_value = int_to_hex(txin['value'], 8)
witness_version = int_to_hex(txin.get('witness_version', 0), 2)
partial_format_witness_prefix = var_int(0xffffffff) + input_value + witness_version
return partial_format_witness_prefix + witness
@classmethod
def is_segwit_input(cls, txin, guess_for_address=False):
_type = txin['type']
if _type == 'address' and guess_for_address:
_type = cls.guess_txintype_from_address(txin['address'])
has_nonzero_witness = txin.get('witness', '00') not in ('00', None)
return cls.is_segwit_inputtype(_type) or has_nonzero_witness
@classmethod
def is_segwit_inputtype(cls, txin_type):
return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
@classmethod
def guess_txintype_from_address(cls, addr):
# It's not possible to tell the script type in general
# just from an address.
# - "1" addresses are of course p2pkh
# - "3" addresses are p2sh but we don't know the redeem script..
# - "bc1" addresses (if they are 42-long) are p2wpkh
# - "bc1" addresses that are 62-long are p2wsh but we don't know the script..
# If we don't know the script, we _guess_ it is pubkeyhash.
# As this method is used e.g. for tx size estimation,
# the estimation will not be precise.
witver, witprog = segwit_addr.decode(constants.net.SEGWIT_HRP, addr)
if witprog is not None:
return 'p2wpkh'
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == constants.net.ADDRTYPE_P2PKH:
return 'p2pkh'
elif addrtype == constants.net.ADDRTYPE_P2SH:
return 'p2wpkh-p2sh'
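    # Illustrative sketch (hedged): an address starting with '1' is guessed as 'p2pkh',
    # any address that decodes as segwit (e.g. 'bc1...') is guessed as 'p2wpkh', and a '3...'
    # p2sh address is guessed as 'p2wpkh-p2sh' because the redeem script is unknown.
    # As the comments above explain, these are estimates only.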
@classmethod
def input_script(self, txin, estimate_size=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
# If there is already a saved scriptSig, just return that.
# This allows manual creation of txins of any custom type.
# However, if the txin is not complete, we might have some garbage
# saved from our partial txn ser format, so we re-serialize then.
script_sig = txin.get('scriptSig', None)
if script_sig is not None and self.is_txin_complete(txin):
return script_sig
pubkeys, sig_list = self.get_siglist(txin, estimate_size)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'address' and estimate_size:
_type = self.guess_txintype_from_address(txin['address'])
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type in ['p2wpkh', 'p2wsh']:
return ''
elif _type == 'p2wpkh-p2sh':
pubkey = safe_parse_pubkey(pubkeys[0])
scriptSig = bitcoin.p2wpkh_nested_script(pubkey)
return push_script(scriptSig)
elif _type == 'p2wsh-p2sh':
if estimate_size:
witness_script = ''
else:
witness_script = self.get_preimage_script(txin)
scriptSig = bitcoin.p2wsh_nested_script(witness_script)
return push_script(scriptSig)
elif _type == 'address':
return bytes([opcodes.OP_INVALIDOPCODE, opcodes.OP_0]).hex() + push_script(pubkeys[0])
elif _type == 'unknown':
return txin['scriptSig']
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
preimage_script = txin.get('preimage_script', None)
if preimage_script is not None:
return preimage_script
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
if txin['type'] == 'p2pkh':
return bitcoin.address_to_script(txin['address'])
elif txin['type'] in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
return multisig_script(pubkeys, txin['num_sig'])
elif txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
pubkey = pubkeys[0]
pkh = bh2u(hash_160(bfh(pubkey)))
return '76a9' + push_script(pkh) + '88ac'
elif txin['type'] == 'p2pk':
pubkey = pubkeys[0]
return bitcoin.public_key_to_p2pk_script(pubkey)
else:
raise TypeError('Unknown txin type', txin['type'])
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def get_outpoint_from_txin(cls, txin):
if txin['type'] == 'coinbase':
return None
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
return prevout_hash + ':%d' % prevout_n
@classmethod
def serialize_input(self, txin, script):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
return s
def set_rbf(self, rbf):
nSequence = 0xffffffff - (2 if rbf else 1)
for txin in self.inputs():
txin['sequence'] = nSequence
def BIP69_sort(self, inputs=True, outputs=True):
if inputs:
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
if outputs:
self._outputs.sort(key = lambda o: (o.value, self.pay_script(o.type, o.address)))
@classmethod
def serialize_output(cls, output: TxOutput) -> str:
s = int_to_hex(output.value, 8)
script = cls.pay_script(output.type, output.address)
s += var_int(len(script)//2)
s += script
return s
def serialize_preimage(self, i):
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(1, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
# TODO: py3 hex
if self.is_segwit_input(txin):
hashPrevouts = bh2u(sha256d(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
hashSequence = bh2u(sha256d(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
hashOutputs = bh2u(sha256d(bfh(''.join(self.serialize_output(o) for o in outputs))))
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
amount = int_to_hex(txin['value'], 8)
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
else:
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
preimage = nVersion + txins + txouts + nLocktime + nHashType
return preimage
def is_segwit(self, guess_for_address=False):
if not self.is_partial_originally:
return self._segwit_ser
return any(self.is_segwit_input(x, guess_for_address=guess_for_address) for x in self.inputs())
def serialize(self, estimate_size=False, witness=True):
network_ser = self.serialize_to_network(estimate_size, witness)
if estimate_size:
return network_ser
if self.is_partial_originally and not self.is_complete():
partial_format_version = '00'
return bh2u(PARTIAL_TXN_HEADER_MAGIC) + partial_format_version + network_ser
else:
return network_ser
def serialize_to_network(self, estimate_size=False, witness=True):
self.deserialize()
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
use_segwit_ser_for_estimate_size = estimate_size and self.is_segwit(guess_for_address=True)
use_segwit_ser_for_actual_use = not estimate_size and \
(self.is_segwit() or any(txin['type'] == 'address' for txin in inputs))
use_segwit_ser = use_segwit_ser_for_estimate_size or use_segwit_ser_for_actual_use
if witness and use_segwit_ser:
marker = '00'
flag = '01'
witness = ''.join(self.serialize_witness(x, estimate_size) for x in inputs)
return nVersion + marker + flag + txins + txouts + witness + nLocktime
else:
return nVersion + txins + txouts + nLocktime
def txid(self):
self.deserialize()
all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
if not all_segwit and not self.is_complete():
return None
ser = self.serialize_to_network(witness=False)
return bh2u(sha256d(bfh(ser))[::-1])
def wtxid(self):
self.deserialize()
if not self.is_complete():
return None
ser = self.serialize_to_network(witness=True)
return bh2u(sha256d(bfh(ser))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
self.BIP69_sort(outputs=False)
def add_outputs(self, outputs):
self._outputs.extend(outputs)
self.raw = None
self.BIP69_sort(inputs=False)
def input_value(self):
return sum(x['value'] for x in self.inputs())
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1 for x in self.inputs()])
def estimated_size(self):
"""Return an estimated virtual tx size in vbytes.
BIP-0141 defines 'Virtual transaction size' to be weight/4 rounded up.
This definition is only for humans, and has little meaning otherwise.
If we wanted sub-byte precision, fee calculation should use transaction
weights, but for simplicity we approximate that with (virtual_size)x4
"""
weight = self.estimated_weight()
return self.virtual_size_from_weight(weight)
@classmethod
def estimated_input_weight(cls, txin, is_segwit_tx):
'''Return an estimate of serialized input weight in weight units.'''
script = cls.input_script(txin, True)
input_size = len(cls.serialize_input(txin, script)) // 2
if cls.is_segwit_input(txin, guess_for_address=True):
witness_size = len(cls.serialize_witness(txin, True)) // 2
else:
witness_size = 1 if is_segwit_tx else 0
return 4 * input_size + witness_size
@classmethod
def estimated_output_size(cls, address):
"""Return an estimate of serialized output size in bytes."""
script = bitcoin.address_to_script(address)
# 8 byte value + 1 byte script len + script
return 9 + len(script) // 2
@classmethod
def virtual_size_from_weight(cls, weight):
return weight // 4 + (weight % 4 > 0)
def estimated_total_size(self):
"""Return an estimated total transaction size in bytes."""
return len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None else len(self.raw) // 2 # ASCII hex string
def estimated_witness_size(self):
"""Return an estimate of witness size in bytes."""
estimate = not self.is_complete()
if not self.is_segwit(guess_for_address=estimate):
return 0
inputs = self.inputs()
witness = ''.join(self.serialize_witness(x, estimate) for x in inputs)
witness_size = len(witness) // 2 + 2 # include marker and flag
return witness_size
def estimated_base_size(self):
"""Return an estimated base transaction size in bytes."""
return self.estimated_total_size() - self.estimated_witness_size()
def estimated_weight(self):
"""Return an estimate of transaction weight."""
total_tx_size = self.estimated_total_size()
base_tx_size = self.estimated_base_size()
return 3 * base_tx_size + total_tx_size
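    # Illustrative sketch (hedged; the numbers are hypothetical): with an estimated total size
    # of 250 bytes and a base size of 150 bytes, the estimate above gives
    #   weight = 3 * 150 + 250 = 700 weight units
    # and virtual_size_from_weight(700) = 700 // 4 + (700 % 4 > 0) = 175 vbytes.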
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def sign(self, keypairs) -> None:
# keypairs: (x_)pubkey -> secret_bytes
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
break
if pubkey in keypairs:
_pubkey = pubkey
elif x_pubkey in keypairs:
_pubkey = x_pubkey
else:
continue
print_error("adding signature for", _pubkey)
sec, compressed = keypairs.get(_pubkey)
sig = self.sign_txin(i, sec)
self.add_signature_to_txin(i, j, sig)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def sign_txin(self, txin_index, privkey_bytes) -> str:
pre_hash = sha256d(bfh(self.serialize_preimage(txin_index)))
privkey = ecc.ECPrivkey(privkey_bytes)
sig = privkey.sign_transaction(pre_hash)
sig = bh2u(sig) + '01'
return sig
def get_outputs_for_UI(self) -> Sequence[TxOutputForUI]:
outputs = []
for o in self.outputs():
if o.type == TYPE_ADDRESS:
addr = o.address
elif o.type == TYPE_PUBKEY:
addr = 'PUBKEY ' + o.address
else:
addr = 'SCRIPT ' + o.address
outputs.append(TxOutputForUI(addr, o.value)) # consider using yield
return outputs
def has_address(self, addr: str) -> bool:
return (addr in (o.address for o in self.outputs())) \
or (addr in (txin.get("address") for txin in self.inputs()))
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
def tx_from_str(txt: str) -> str:
"""Sanitizes tx-describing input (json or raw hex or base43) into
raw hex transaction."""
assert isinstance(txt, str), f"txt must be str, not {type(txt)}"
txt = txt.strip()
if not txt:
raise ValueError("empty string")
# try hex
try:
bfh(txt)
return txt
except:
pass
# try base43
try:
return base_decode(txt, length=None, base=43).hex()
except:
pass
# try json
import json
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
| 37.952931 | 159 | 0.608451 |
acf524d653405492de29fa4549d9452e48dde62d | 27,872 | py | Python | biothings/utils/common.py | newgene/biothings.api | e3278695ac15a55fe420aa49c464946f81ec019d | [
"Apache-2.0"
] | null | null | null | biothings/utils/common.py | newgene/biothings.api | e3278695ac15a55fe420aa49c464946f81ec019d | [
"Apache-2.0"
] | 2 | 2022-03-18T23:12:02.000Z | 2022-03-18T23:59:42.000Z | biothings/utils/common.py | newgene/biothings.api | e3278695ac15a55fe420aa49c464946f81ec019d | [
"Apache-2.0"
] | null | null | null | """
This module contains util functions may be shared by both BioThings data-hub and web components.
In general, do not include utils depending on any third-party modules.
"""
import asyncio
import base64
import glob
import gzip
import hashlib
import importlib
import inspect
import io
import json
import logging
import os
import os.path
import pickle
import random
import string
import sys
import time
import types
from collections import UserDict, UserList
from contextlib import contextmanager
from datetime import date, datetime, timezone
from functools import partial
from itertools import islice
from shlex import shlex
# from json serial, catching special type
# import _sre # TODO: unused import;remove it once confirmed
# ===============================================================================
# Misc. Utility functions
# ===============================================================================
def ask(prompt, options='YN'):
    '''Prompt Yes or No; return the upper case 'Y' or 'N'.'''
options = options.upper()
while 1:
s = input(prompt+'[%s]' % '|'.join(list(options))).strip().upper()
if s in options:
break
return s
def timesofar(t0, clock=0, t1=None):
    '''return a human-readable string (e.g. '3m3.42s') for the real/CPU time elapsed
    since the given t0 (t0=time.time() for real time, t0=time.clock() for CPU time).
    If t1 is given, it is used as the end time instead of the current time.'''
    t1 = t1 or (time.clock() if clock else time.time())
t = t1 - t0
h = int(t / 3600)
m = int((t % 3600) / 60)
s = round((t % 3600) % 60, 2)
t_str = ''
if h != 0:
t_str += '%sh' % h
if m != 0:
t_str += '%sm' % m
t_str += '%ss' % s
return t_str
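# Illustrative usage (hedged sketch, not part of the original module):
#   t0 = time.time()
#   ... do roughly 225 seconds of work ...
#   timesofar(t0)   # -> '3m45.0s'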
def is_int(s):
"""return True or False if input string is integer or not."""
try:
int(s)
return True
except (ValueError, TypeError):
return False
def is_str(s):
"""return True or False if input is a string or not.
python3 compatible.
"""
return isinstance(s, str)
def is_seq(li):
"""return True if input is either a list or a tuple.
"""
return isinstance(li, (list, tuple))
def is_float(f):
"""return True if input is a float.
"""
return isinstance(f, float)
def is_scalar(f):
# return type(f) in (int, str, bool, float, bytes) or f is None or is_int(f) or is_float(f) or is_str(f)
return isinstance(f, (int, str, bool, float, bytes)) or f is None
def iter_n(iterable, n, with_cnt=False):
'''
Iterate an iterator by chunks (of n)
if with_cnt is True, return (chunk, cnt) each time
ref http://stackoverflow.com/questions/8991506/iterate-an-iterator-by-chunks-of-n-in-python
'''
it = iter(iterable)
if with_cnt:
cnt = 0
while True:
chunk = tuple(islice(it, n))
if not chunk:
return
if with_cnt:
cnt += len(chunk)
yield (chunk, cnt)
else:
yield chunk
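# Illustrative usage (hedged sketch, not part of the original module):
#   list(iter_n(range(7), 3))                  # -> [(0, 1, 2), (3, 4, 5), (6,)]
#   list(iter_n(range(7), 3, with_cnt=True))   # -> [((0, 1, 2), 3), ((3, 4, 5), 6), ((6,), 7)]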
def addsuffix(filename, suffix, noext=False):
'''Add suffix in front of ".extension", so keeping the same extension.
if noext is True, remove extension from the filename.'''
if noext:
return os.path.splitext(filename)[0] + suffix
else:
return suffix.join(os.path.splitext(filename))
def safewfile(filename, prompt=True, default='C', mode='w'):
    '''Return a file handle in 'w' mode, using an alternative name if a file with the same name exists.
    If prompt == 1, ask whether to overwrite, append, or change the name;
    otherwise, switch to an available name automatically.'''
suffix = 1
while 1:
if not os.path.exists(filename):
break
print('Warning:"%s" exists.' % filename, end='')
if prompt:
option = ask('Overwrite,Append or Change name?', 'OAC')
else:
option = default
if option == 'O':
if not prompt or ask('You sure?') == 'Y':
print("Overwritten.")
break
elif option == 'A':
print("Append to original file.")
f = open(filename, 'a')
f.write('\n' + "=" * 20 + 'Appending on ' + time.ctime() + "=" * 20 + '\n')
return f, filename
print('Use "%s" instead.' % addsuffix(filename, '_' + str(suffix)))
filename = addsuffix(filename, '_' + str(suffix))
suffix += 1
return open(filename, mode), filename
def anyfile(infile, mode='r'):
'''
return a file handler with the support for gzip/zip comppressed files.
if infile is a two value tuple, then first one is the compressed file;
the second one is the actual filename in the compressed file.
e.g., ('a.zip', 'aa.txt')
'''
if isinstance(infile, tuple):
infile, rawfile = infile[:2]
else:
rawfile = os.path.splitext(infile)[0]
filetype = os.path.splitext(infile)[1].lower()
if filetype == '.gz':
# import gzip
in_f = io.TextIOWrapper(gzip.GzipFile(infile, mode))
elif filetype == '.zip':
import zipfile
in_f = io.TextIOWrapper(zipfile.ZipFile(infile, mode).open(rawfile, mode))
elif filetype == '.xz':
import lzma
in_f = io.TextIOWrapper(lzma.LZMAFile(infile, mode))
else:
in_f = open(infile, mode)
return in_f
def is_filehandle(fh):
'''return True/False if fh is a file-like object'''
return hasattr(fh, 'read') and hasattr(fh, 'close')
## This is another (older) implementation of open_anyfile
## Keep the code here for reference
#
# class open_anyfile(object):
# '''a context manager can be used in "with" stmt.
# accepts a filehandle or anything accepted by anyfile function.
# with open_anyfile('test.txt') as in_f:
# do_something()
# '''
# def __init__(self, infile, mode='r'):
# self.infile = infile
# self.mode = mode
# def __enter__(self):
# if is_filehandle(self.infile):
# self.in_f = self.infile
# else:
# self.in_f = anyfile(self.infile, mode=self.mode)
# return self.in_f
# def __exit__(self, type, value, traceback):
# self.in_f.close()
@contextmanager
def open_anyfile(infile, mode='r'):
'''a context manager can be used in "with" stmt.
accepts a filehandle or anything accepted by anyfile function.
with open_anyfile('test.txt') as in_f:
do_something()
'''
if is_filehandle(infile):
in_f = infile
else:
in_f = anyfile(infile, mode=mode)
try:
yield in_f
finally:
in_f.close()
class dotdict(dict):
def __getattr__(self, attr):
value = self.get(attr, None)
if isinstance(value, dict):
value = dotdict(value)
setattr(self, attr, value)
return value
else:
return value
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def get_dotfield_value(dotfield, d):
"""
Explore dictionary d using dotfield notation and return value.
Example::
d = {"a":{"b":1}}.
get_dotfield_value("a.b",d) => 1
"""
fields = dotfield.split(".")
if len(fields) == 1:
return d[fields[0]]
else:
first = fields[0]
return get_dotfield_value(".".join(fields[1:]), d[first])
def split_ids(q):
'''
split input query string into list of ids.
any of ``" \t\n\x0b\x0c\r|,+"`` as the separator,
    but preserving a phrase if quoted
(either single or double quoted)
more detailed rules see:
http://docs.python.org/2/library/shlex.html#parsing-rules
e.g.::
>>> split_ids('CDK2 CDK3')
['CDK2', 'CDK3']
>>> split_ids('"CDK2 CDK3"\n CDk4')
['CDK2 CDK3', 'CDK4']
'''
# Python3 strings are already unicode, .encode
# now returns a bytearray, which cannot be searched with
# shlex. For now, do this terrible thing until we discuss
if sys.version_info.major == 3:
lex = shlex(q, posix=True)
else:
lex = shlex(q.encode('utf8'), posix=True)
lex.whitespace = ' \t\n\x0b\x0c\r|,+'
lex.whitespace_split = True
lex.commenters = ''
if sys.version_info.major == 3:
ids = [x.strip() for x in list(lex)]
else:
ids = [x.decode('utf8').strip() for x in list(lex)]
ids = [x for x in ids if x]
return ids
def get_compressed_outfile(filename, compress='gzip'):
'''Get a output file handler with given compress method.
currently support gzip/bz2/lzma, lzma only available in py3
'''
if compress == "gzip":
# import gzip
out_f = gzip.GzipFile(filename, 'wb')
elif compress == 'bz2':
import bz2
out_f = bz2.BZ2File(filename, 'wb')
elif compress == 'lzma' or compress == 'xz':
import lzma
out_f = lzma.LZMAFile(filename, 'wb')
elif compress is None:
out_f = open(filename, 'wb')
else:
raise ValueError("Invalid compress parameter.")
return out_f
def open_compressed_file(filename):
'''Get a read-only file-handler for compressed file,
currently support gzip/bz2/lzma, lzma only available in py3
'''
in_f = open(filename, 'rb')
sig = in_f.read(5)
in_f.close()
if sig[:3] == b'\x1f\x8b\x08':
# this is a gzip file
# import gzip
fobj = gzip.GzipFile(filename, 'rb')
elif sig[:3] == b'BZh':
# this is a bz2 file
import bz2
fobj = bz2.BZ2File(filename, 'r')
elif sig[:5] == b'\xfd7zXZ':
# this is a lzma file
import lzma
fobj = lzma.LZMAFile(filename, 'r')
else:
# assuming uncompressed ?
fobj = open(filename, 'rb')
return fobj
def dump(obj, filename, protocol=2, compress='gzip'):
'''Saves a compressed object to disk
binary protocol 2 is compatible with py2, 3 and 4 are for py3
'''
# TODO: in py3, protocol=3 is the default, and also support protocol=4
# for py>=3.4. We should check which one works best for us.
out_f = get_compressed_outfile(filename, compress=compress)
pickle.dump(obj, out_f, protocol=protocol)
out_f.close()
def dump2gridfs(obj, filename, db, protocol=2):
'''Save a compressed (support gzip only) object to MongoDB gridfs.'''
import gridfs
# import gzip
fs = gridfs.GridFS(db)
if fs.exists(_id=filename):
fs.delete(filename)
fobj = fs.new_file(filename=filename, _id=filename)
try:
gzfobj = gzip.GzipFile(filename=filename, mode='wb', fileobj=fobj)
pickle.dump(obj, gzfobj, protocol=protocol)
finally:
gzfobj.close()
fobj.close()
def loadobj(filename, mode='file'):
'''
Loads a compressed object from disk file (or file-like handler) or
MongoDB gridfs file (mode='gridfs')
::
obj = loadobj('data.pyobj')
obj = loadobj(('data.pyobj', mongo_db), mode='gridfs')
'''
# import gzip
if mode == 'gridfs':
import gridfs
filename, db = filename # input is a tuple of (filename, mongo_db)
fs = gridfs.GridFS(db)
fobj = gzip.GzipFile(fileobj=fs.get(filename))
else:
if is_str(filename):
fobj = open_compressed_file(filename)
else:
fobj = filename # input is a file-like handler
try:
obj = pickle.load(fobj)
finally:
fobj.close()
return obj
def list2dict(a_list, keyitem, alwayslist=False):
'''
Return a dictionary with specified keyitem as key, others as values.
keyitem can be an index or a sequence of indexes.
For example::
li = [['A','a',1],
['B','a',2],
['A','b',3]]
list2dict(li, 0)---> {'A':[('a',1),('b',3)],
'B':('a',2)}
If alwayslist is True, values are always a list even there is only one item in it.
::
list2dict(li, 0, True)---> {'A':[('a',1),('b',3)],
'B':[('a',2),]}
'''
_dict = {}
for x in a_list:
if isinstance(keyitem, int): # single item as key
key = x[keyitem]
value = tuple(x[:keyitem] + x[keyitem + 1:])
else:
key = tuple([x[i] for i in keyitem])
            value = tuple([x[i] for i in range(len(x)) if i not in keyitem])
if len(value) == 1: # single value
value = value[0]
if key not in _dict:
if alwayslist:
_dict[key] = [value, ]
else:
_dict[key] = value
else:
current_value = _dict[key]
if not isinstance(current_value, list):
current_value = [current_value, ]
current_value.append(value)
_dict[key] = current_value
return _dict
def filter_dict(d, keys):
"""
Remove keys from dict "d". "keys" is a list
of string, dotfield notation can be used to
express nested keys. If key to remove doesn't
exist, silently ignore it
"""
if isinstance(keys, str):
keys = [keys]
for key in keys:
if "." in key:
innerkey = ".".join(key.split(".")[1:])
rkey = key.split(".")[0]
if rkey in d:
d[rkey] = filter_dict(d[rkey], innerkey)
else:
continue
else:
d.pop(key, None)
return d
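# Illustrative usage (hedged sketch, not part of the original module):
#   filter_dict({"a": {"b": 1, "c": 2}, "d": 3}, ["a.b", "x"])
#   # -> {"a": {"c": 2}, "d": 3}   ("x" does not exist and is silently ignored)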
def get_random_string():
strb = base64.b64encode(os.urandom(6), "".join(random.sample(string.ascii_letters, 2)).encode("ascii"))
return strb.decode("ascii")
def get_timestamp():
return time.strftime('%Y%m%d')
class LogPrint:
def __init__(self, log_f, log=1, timestamp=0):
        '''If this class is set to sys.stdout, it will write to both log_f and the original __stdout__.
log_f is a file handler.
'''
self.log_f = log_f
self.log = log
self.timestamp = timestamp
if self.timestamp:
self.log_f.write('*'*10 + 'Log starts at ' + time.ctime() + '*'*10 + '\n')
def write(self, text):
sys.__stdout__.write(text)
if self.log:
self.log_f.write(text)
self.flush()
def flush(self):
self.log_f.flush()
def start(self):
sys.stdout = self
def pause(self):
sys.stdout = sys.__stdout__
def resume(self):
sys.stdout = self
def close(self):
if self.timestamp:
self.log_f.write('*'*10 + 'Log ends at ' + time.ctime() + '*'*10 + '\n')
sys.stdout = sys.__stdout__
self.log_f.close()
def fileno(self):
return self.log_f.fileno()
def setup_logfile(logfile):
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(fh)
def find_doc(k, keys):
''' Used by jsonld insertion in www.api.es._insert_jsonld '''
n = len(keys)
for i in range(n):
# if k is a dictionary, then directly get its value
if isinstance(k, dict):
k = k[keys[i]]
# if k is a list, then loop through k
elif isinstance(k, list):
tmp = []
for item in k:
try:
if isinstance(item[keys[i]], dict):
tmp.append(item[keys[i]])
elif isinstance(item[keys[i]], list):
for _item in item[keys[i]]:
tmp.append(_item)
except Exception:
continue
k = tmp
return k
def SubStr(input_string, start_string='', end_string='', include=0):
'''Return the substring between start_string and end_string.
If start_string is '', cut string from the beginning of input_string.
If end_string is '', cut string to the end of input_string.
    If either start_string or end_string cannot be found in input_string, return ''.
    The end_pos is the first position of end_string after start_string;
    if there are multiple occurrences, cut at the first one.
    include=0 (default): do not include start_string/end_string;
    include=1: include start_string/end_string.'''
start_pos = input_string.find(start_string)
if start_pos == -1:
return ''
start_pos += len(start_string)
if end_string == '':
end_pos = len(input_string)
else:
end_pos = input_string[start_pos:].find(end_string) # get the end_pos relative with the start_pos
if end_pos == -1:
return ''
else:
end_pos += start_pos # get actual end_pos
if include == 1:
return input_string[start_pos - len(start_string): end_pos + len(end_string)]
else:
return input_string[start_pos:end_pos]
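# Illustrative usage (hedged sketch, not part of the original module):
#   SubStr('foo[bar]baz', '[', ']')              # -> 'bar'
#   SubStr('foo[bar]baz', '[', ']', include=1)   # -> '[bar]'
#   SubStr('foo[bar]baz', '{', '}')              # -> ''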
def safe_unicode(s, mask='#'):
    '''Replace any non-decodable character with the given mask character ("#" by default).'''
try:
_s = str(s)
except UnicodeDecodeError as e:
pos = e.args[2]
_s = s.replace(s[pos], mask)
print('Warning: invalid character "%s" is masked as "%s".' % (s[pos], mask))
return safe_unicode(_s, mask)
return _s
def file_newer(source, target):
'''return True if source file is newer than target file.'''
return os.stat(source)[-2] > os.stat(target)[-2]
def newer(t0, t1, fmt='%Y%m%d'):
'''t0 and t1 are string of timestamps matching "format" pattern.
Return True if t1 is newer than t0.
'''
return datetime.strptime(t0, fmt) < datetime.strptime(t1, fmt)
class BiothingsJSONEncoder(json.JSONEncoder):
'''A class to dump Python Datetime object.
    json.dumps(data, cls=BiothingsJSONEncoder, indent=indent)
'''
def default(self, o):
if isinstance(o, (datetime, date)):
return o.isoformat()
elif isinstance(o, UserDict):
return dict(o)
elif isinstance(o, UserList):
return list(o)
else:
return super().default(o)
# https://stackoverflow.com/questions/11875770/how-to-overcome-datetime-datetime-not-json-serializable
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
if obj.tzinfo is None:
# assuming UTC if no timezone info
obj = obj.replace(tzinfo=timezone.utc)
serial = obj.isoformat()
return serial
elif isinstance(obj, type):
return str(obj)
elif "SRE_Pattern" in type(obj).__name__: # can't find the class
return obj.pattern
elif isinstance(obj, types.FunctionType):
return "__function__"
raise TypeError("Type %s not serializable" % type(obj))
def json_encode(obj):
"""Tornado-aimed json encoder, it does the same job as tornado.escape.json_encode
but also deals with datetime encoding"""
return json.dumps(obj, default=json_serial).replace("</", r"<\/")
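# Illustrative usage (hedged sketch, not part of the original module):
#   json_encode({"ts": datetime(2020, 1, 1)})
#   # -> '{"ts": "2020-01-01T00:00:00+00:00"}'  (naive datetimes are assumed to be UTC)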
def rmdashfr(top):
'''Recursively delete dirs and files from "top" directory, then delete "top" dir'''
assert top # prevent rm -fr * ... (let's be explicit there)
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
try:
os.rmdir(top)
except FileNotFoundError:
# top did not exist, silently ignore
pass
def get_class_from_classpath(class_path):
str_mod, str_klass = ".".join(class_path.split(".")[:-1]), class_path.split(".")[-1]
mod = importlib.import_module(str_mod)
return getattr(mod, str_klass)
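# Hedged usage sketch (not part of the original module): resolves a dotted "module.Class"
# path to the class object itself.
def _classpath_example():
    OrderedDict = get_class_from_classpath("collections.OrderedDict")
    return OrderedDict(a=1)  # OrderedDict([('a', 1)])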
def find_classes_subclassing(mods, baseclass):
"""
Given a module or a list of modules, inspect and find all classes which are
a subclass of the given baseclass, inside those modules
"""
# collect all modules found in given modules
if not isinstance(mods, list):
mods = [mods]
    inner_mods = []
    for mod in mods:
        inner_mods.extend(obj for _, obj in inspect.getmembers(mod, lambda o: isinstance(o, types.ModuleType)))
    mods.extend(inner_mods)
classes = []
for m in mods:
name_klasses = inspect.getmembers(m, lambda obj: isinstance(obj, type) and issubclass(obj, baseclass))
if name_klasses:
for name, klass in name_klasses:
del name
classes.append(klass)
return classes
def sizeof_fmt(num, suffix='B'):
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def unzipall(folder, pattern="*.zip"):
'''
    unzip all zip files found in "folder", into "folder"
'''
import zipfile
for zfile in glob.glob(os.path.join(folder, pattern)):
zf = zipfile.ZipFile(zfile)
logging.info("unzipping '%s'", zf.filename)
zf.extractall(folder)
logging.info("done unzipping '%s'", zf.filename)
def untargzall(folder, pattern="*.tar.gz"):
'''
gunzip and untar all ``*.tar.gz`` files in "folder"
'''
import tarfile
for tgz in glob.glob(os.path.join(folder, pattern)):
gz = gzip.GzipFile(tgz)
tf = tarfile.TarFile(fileobj=gz)
logging.info("untargz '%s'", tf.name)
tf.extractall(folder)
logging.info("done untargz '%s'", tf.name)
def gunzipall(folder, pattern="*.gz"):
'''
gunzip all ``*.gz`` files in "folder"
'''
for f in glob.glob(os.path.join(folder, pattern)):
# build uncompress filename from gz file and pattern
# pattern is used to select/filter files, but it may not
# match the gzip file suffix (usually ".gz"), so assuming it's the last
# bit after "."
suffix = ".%s" % pattern.split(".")[1]
gunzip(f, suffix)
def unxzall(folder, pattern="*.xz"):
'''
    unxz all xz files found in "folder", into "folder"
'''
import tarfile
for xzfile in glob.glob(os.path.join(folder, pattern)):
logging.info("unxzing '%s'", xzfile)
with tarfile.open(xzfile, 'r:xz') as t:
t.extractall(folder)
logging.info("done unxzing '%s'", xzfile)
def gunzip(f, pattern="*.gz"):
    # build the uncompressed filename from the gz file name and the pattern
destf = f.replace(pattern.replace("*", ""), "")
fout = open(destf, "wb")
with gzip.GzipFile(f) as gz:
logging.info("gunzip '%s'", gz.name)
for line in gz:
fout.write(line)
logging.info("Done gunzip '%s'", gz.name)
fout.close()
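# Hedged usage sketch (not part of the original module); the folder and file names are
# hypothetical. gunzip("data/docs.json.gz") writes the uncompressed "data/docs.json"
# next to the archive, and gunzipall("data") applies the same logic to every "*.gz"
# file found in "data".
def _gunzip_example(folder="data"):
    gunzipall(folder, pattern="*.gz")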
async def aiogunzipall(folder, pattern, job_manager, pinfo):
"""
Gunzip all files in folder matching pattern. job_manager is used
for parallelisation, and pinfo is a pre-filled dict used by
job_manager to report jobs in the hub (see bt.utils.manager.JobManager)
"""
jobs = []
got_error = None
logging.info("Unzipping files in '%s'", folder)
for f in glob.glob(os.path.join(folder, pattern)):
pinfo["description"] = os.path.basename(f)
job = await job_manager.defer_to_process(
pinfo, partial(gunzip, f, pattern=pattern)
)
def gunzipped(fut, infile):
try:
# res = fut.result()
fut.result()
except Exception as e:
logging.error("Failed to gunzip file %s: %s", infile, e)
nonlocal got_error
got_error = e
job.add_done_callback(partial(gunzipped, infile=f))
jobs.append(job)
if got_error:
raise got_error
if jobs:
await asyncio.gather(*jobs)
if got_error:
raise got_error
def uncompressall(folder):
"""Try to uncompress any known archive files in folder"""
unzipall(folder)
untargzall(folder)
gunzipall(folder)
unxzall(folder)
def md5sum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
class splitstr(str):
"""Type representing strings with space in it"""
class nan(object):
"""Represents NaN type, but not as a float"""
class inf(object):
"""Represents Inf type, but not as a float"""
def traverse(obj, leaf_node=False):
"""
Output path-dictionary pairs. For example, input:
{
'exac_nontcga': {'af': 0.00001883},
'gnomad_exome': {'af': {'af': 0.0000119429, 'af_afr': 0.000123077}},
'snpeff': {'ann': [{'effect': 'intron_variant',
'feature_id': 'NM_014672.3'},
{'effect': 'intron_variant',
'feature_id': 'NM_001256678.1'}]}
}
will be translated to a generator:
(
("exac_nontcga", {"af": 0.00001883}),
("gnomad_exome.af", {"af": 0.0000119429, "af_afr": 0.000123077}),
("gnomad_exome", {"af": {"af": 0.0000119429, "af_afr": 0.000123077}}),
("snpeff.ann", {"effect": "intron_variant", "feature_id": "NM_014672.3"}),
("snpeff.ann", {"effect": "intron_variant", "feature_id": "NM_001256678.1"}),
("snpeff.ann", [{ ... },{ ... }]),
("snpeff", {"ann": [{ ... },{ ... }]}),
('', {'exac_nontcga': {...}, 'gnomad_exome': {...}, 'snpeff': {...}})
)
or when traversing leaf nodes:
(
('exac_nontcga.af', 0.00001883),
('gnomad_exome.af.af', 0.0000119429),
('gnomad_exome.af.af_afr', 0.000123077),
('snpeff.ann.effect', 'intron_variant'),
('snpeff.ann.feature_id', 'NM_014672.3'),
('snpeff.ann.effect', 'intron_variant'),
('snpeff.ann.feature_id', 'NM_001256678.1')
)
"""
if isinstance(obj, (dict, UserDict)): # path level increases
for key in obj:
for sub_path, val in traverse(obj[key], leaf_node):
yield '.'.join((str(key), str(sub_path))).strip('.'), val
if not leaf_node:
yield '', obj
elif isinstance(obj, list): # does not affect path
for item in obj:
for sub_path, val in traverse(item, leaf_node):
yield sub_path, val
if not leaf_node or not obj: # [] count as a leaf node
yield '', obj
elif leaf_node: # including str, int, float, and *None*.
yield '', obj
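# Hedged runnable sketch (not part of the original module) exercising the leaf_node mode
# documented in the traverse() docstring above.
def _traverse_example():
    doc = {"exac_nontcga": {"af": 0.00001883}}
    assert list(traverse(doc, leaf_node=True)) == [("exac_nontcga.af", 0.00001883)]
    return list(traverse(doc))  # includes intermediate levels and the root ('', doc)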
def run_once():
"""
should_run_task_1 = run_once()
print(should_run_task_1()) -> True
print(should_run_task_1()) -> False
print(should_run_task_1()) -> False
print(should_run_task_1()) -> False
should_run_task_2 = run_once()
print(should_run_task_2('2a')) -> True
print(should_run_task_2('2b')) -> True
print(should_run_task_2('2a')) -> False
print(should_run_task_2('2b')) -> False
...
"""
has_run = set()
def should_run(identifier=None):
if identifier in has_run:
return False
has_run.add(identifier)
return True
return should_run
def merge(x, dx):
"""
Merge dictionary dx (Δx) into dictionary x.
If __REPLACE__ key is present in any level z in dx,
z in x is replaced, instead of merged, with z in dx.
"""
assert isinstance(x, dict)
assert isinstance(dx, dict)
if dx.pop("__REPLACE__", None):
# merge dx with "nothing" just to
# make sure to remove any "__REPLACE__"
_y = {}
merge(_y, dx)
x.clear()
x.update(_y)
return x
for k, v in dx.items():
if isinstance(v, dict):
if v.get("__REMOVE__"):
x.pop(k, None)
continue
if not isinstance(x.get(k), dict):
x[k] = {}
merge(x[k], v)
else:
x[k] = v
return x
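# Hedged usage sketch (not part of the original module) illustrating the __REPLACE__ and
# __REMOVE__ markers described in the merge() docstring.
def _merge_example():
    x = {"a": {"b": 1, "c": 2}, "d": 3}
    merge(x, {"a": {"__REPLACE__": True, "b": 10}})  # "a" is replaced wholesale
    merge(x, {"d": {"__REMOVE__": True}})            # "d" is removed
    return x  # {'a': {'b': 10}}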
| 31.672727 | 110 | 0.582484 |
acf525510176838d54bb943599e93d9726c6b011 | 1,102 | py | Python | component_features_testscripts/fulldem_clip.py | derekfeger/visibility_analysis | 7f5e92688d1717d42d0f4b949e3431ae7f40056d | [
"Unlicense"
] | null | null | null | component_features_testscripts/fulldem_clip.py | derekfeger/visibility_analysis | 7f5e92688d1717d42d0f4b949e3431ae7f40056d | [
"Unlicense"
] | null | null | null | component_features_testscripts/fulldem_clip.py | derekfeger/visibility_analysis | 7f5e92688d1717d42d0f4b949e3431ae7f40056d | [
"Unlicense"
] | null | null | null | import arcpy
import os.path
# Set the workspace
arcpy.env.workspace = "F:\\independent_study\\visibility_analysis"
# User inputs (ideally these would be user-definable parameters rather than hard-coded values)
# Should include the input point features, the full DEM, and the buffer size
input_points = "F:\\independent_study\\Billboardsdata_pghcityplanning\\LamarSigns.shp"
full_dem = "F:\\independent_study\\visibility_analysis\\fullcity_outputmosaic.tif"
buffersize = "1000 Feet"
csv_output_file = "F:\\independent_study\\visibility_analysis\\visibility_analysis.csv"
# Set global variables, variables for produced files
recordnumber = 0
prepare_table = True
vispix = 0
nvispix = 0
subset_dem = "F:\\independent_study\\visibility_analysis\\dem_files\\clipped_dem.tif"
full_buffer = "F:\\independent_study\\visibility_analysis\\output_files\\va_rALL_buf.shp"
# Buffers input point features to specified buffer size
arcpy.Buffer_analysis(input_points, full_buffer, buffersize)
# Clips full DEM to extent of buffer output_file
arcpy.Clip_management(full_dem, "#", subset_dem, full_buffer, "0", "ClippingGeometry")
| 39.357143 | 93 | 0.8049 |
acf525a57b94c90fdc2e15d413882681778fc2eb | 23,051 | py | Python | asyncevents/events.py | nocturn9x/asyncevents | e08b47260f3d0d76fc5a28641b8f08738036a65b | [
"Apache-2.0"
] | 5 | 2021-12-24T18:18:00.000Z | 2022-03-12T12:11:29.000Z | asyncevents/events.py | nocturn9x/asyncevents | e08b47260f3d0d76fc5a28641b8f08738036a65b | [
"Apache-2.0"
] | null | null | null | asyncevents/events.py | nocturn9x/asyncevents | e08b47260f3d0d76fc5a28641b8f08738036a65b | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2021 nocturn9x
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import sys
import time
from collections import defaultdict
from heapq import heappush, heapify, heappop
from logging import Logger, getLogger, INFO, Formatter, StreamHandler
from typing import Dict, List, Tuple, Coroutine, Callable, Any, Awaitable, Optional
from typing import Union # Separate line because it's gonna be deprecated soon. Can't break Justin's userbot tho :>
from asyncevents.constants import ExceptionHandling, UnknownEventHandling, ExecutionMode
from asyncevents.errors import UnknownEvent
class AsyncEventEmitter:
"""
A simple priority-based asynchronous event emitter. In contrast to a scheduler, which
continuously runs in the background orchestrating execution of tasks, an emitter runs
only when an event is emitted and it only runs the tasks that are meant to catch said
event, if any.
:param on_error: Tells the emitter what to do when an exception occurs inside an event
handler. This value can either be an entry from the asyncevents.ExceptionHandling
enum or a coroutine function. If the passed object is a coroutine function, it is awaited
whenever an exception is caught with the AsyncEventEmitter instance, the exception
object and the event name as arguments (errors from the exception handler itself are
not caught). Defaults to ExceptionHandling.PROPAGATE, which lets exceptions fall trough
the execution chain (other enum values are LOG, which prints a log message on the
logging.ERROR level, and IGNORE which silences the exception entirely)
:type on_error: Union[ExceptionHandling, Callable[[AsyncEventEmitter, Exception, str], Coroutine[Any, Any, Any]]],
optional
:param on_unknown_event: Tells the emitter what to do when an unknown event is triggered. An
unknown event is an event for which no handler is registered (either because it has never
been registered or because all of its handlers have been removed). This value can either be
an entry from the asyncevents.UnknownEventHandling enum or a coroutine function. If the argument
is a coroutine function, it is awaited with the AsyncEventEmitter instance and the event name as arguments.
Defaults to UnknownEventHandling.IGNORE, which does nothing (other enum values are LOG, which
prints a log message on the logging.WARNING level, and ERROR which raises an UnknownEvent exception)
:type on_unknown_event: Union[UnknownEventHandling, Callable[[AsyncEventEmitter, str], Coroutine[Any, Any, Any]]],
optional
:param mode: Tells the emitter how event handlers should be spawned. It should be an entry of the
the asyncevents.ExecutionMode enum. If it is set to ExecutionMode.PAUSE, the default, the event
emitter spawns tasks by awaiting each matching handler: this causes it to pause on every handler.
    If ExecutionMode.NOWAIT is used, the emitter uses asyncio.create_task to spawn all the handlers
at the same time (note though that using this mode kind of breaks the priority queueing: the handlers
are started according to their priorities, but once they are started they are handled by asyncio's
event loop which is non-deterministic, so expect some disorder). Using ExecutionMode.NOWAIT allows
to call the emitter's wait() method, which pauses until all currently running event handlers have
completed executing (when ExecutionMode.PAUSE is used, wait() is a no-op)
:type mode: ExecutionMode
"""
async def _check_event(self, event: str):
"""
Performs checks about the given event
and raises/logs appropriately before
emitting/waiting on it
:param event: The event name
:type event: str
"""
if not self.handlers.get(event):
if self.on_unknown_event == UnknownEventHandling.IGNORE:
return
elif self.on_unknown_event == UnknownEventHandling.ERROR:
raise UnknownEvent(f"unknown event {event!r}")
elif self.on_unknown_event == UnknownEventHandling.LOG:
self.logger.warning(f"Attempted to emit or wait on an unknown event {event!r}")
else:
# It's a coroutine! Call it
await self.on_unknown_event(self, event)
async def _handle_errors_in_awaitable(self, event: str, obj: Awaitable) -> Any:
# Thanks to asyncio's *utterly amazing* (HUGE sarcasm there)
# exception handling, we have to make this wrapper so we can
# catch errors on a per-handler basis
try:
return await obj
except Exception as e:
if (event, obj) in self._tasks:
obj: asyncio.Task # Silences PyCharm's warnings
self._tasks.remove((event, obj))
if inspect.iscoroutinefunction(self.on_error):
await self.on_error(self, e, event)
elif self.on_error == ExceptionHandling.PROPAGATE:
raise
elif self.on_error == ExceptionHandling.LOG:
self.logger.error(f"An exception occurred while handling {event!r}: {type(e).__name__} -> {e}")
# Note how the IGNORE case is excluded: we just do nothing after all
# Implementations for emit()
async def _emit_nowait(self, event: str, *args, **kwargs):
# This implementation of emit() returns immediately
# and runs the handlers in the background
await self._check_event(event)
temp: List[Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]] = []
t: Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]
while self.handlers[event]:
# We use heappop because we want the first
# by priority and the heap queue only has
# the guarantee we need for heap[0]
temp.append(heappop(self.handlers[event]))
t = temp[-1]
if t[-1]:
# It won't be re-scheduled
temp.pop()
task = asyncio.create_task(t[-2](self, event, *args, **kwargs))
self._tasks.append((event, asyncio.create_task(self._handle_errors_in_awaitable(event, task))))
# We push back the elements
for t in temp:
heappush(self.handlers[event], t)
async def _emit_await(self, event: str, *args, **kwargs) -> List[Any]:
# This implementation of emit() returns only after
# all handlers have finished executing
await self._check_event(event)
result = []
temp: List[
Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]
] = self.handlers[event].copy()
t: Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]
while temp:
# We use heappop because we want the first
# by priority and the heap queue only has
# the guarantee we need for heap[0]
t = heappop(temp)
if t[-1]:
self.unregister_handler(event, t[-2])
result.append(await self._handle_errors_in_awaitable(event, t[-2](self, event, *args, **kwargs)))
return result
def __init__(
self,
# These type hints come from https://stackoverflow.com/a/59177557/12159081
# and should correctly hint a coroutine function
on_error: Union[
ExceptionHandling, Callable[["AsyncEventEmitter", Exception, str], Coroutine[Any, Any, Any]]
] = ExceptionHandling.PROPAGATE,
on_unknown_event: Union[
UnknownEventHandling, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
] = UnknownEventHandling.IGNORE,
mode: ExecutionMode = ExecutionMode.PAUSE,
):
"""
Public object constructor
"""
self.on_error = on_error
self.on_unknown_event = on_unknown_event
self.mode = mode
self.logger: Logger = getLogger("asyncevents")
self.logger.handlers = []
self.logger.setLevel(INFO)
handler: StreamHandler = StreamHandler(sys.stderr)
handler.setFormatter(Formatter(fmt="[%(levelname)s - %(asctime)s] %(message)s", datefmt="%d/%m/%Y %H:%M:%S %p"))
self.logger.addHandler(handler)
# Stores asyncio tasks so that wait() can call
# asyncio.gather() on them
self._tasks: List[Tuple[str, asyncio.Task]] = []
        # Stores events and their handlers. Each
        # entry in the dictionary maps an event name (a string)
        # to a list of tuples. Each tuple in the list contains
        # a priority (defaults to 0), the insertion time at
        # which the handler was registered (acting as a tie
        # breaker for handlers with identical priorities or
        # when priorities aren't used at all), a coroutine
        # function object, and a boolean that signals whether the
        # handler is to be deleted after it fires the first
        # time (aka 'oneshot')
self.handlers: Dict[
str, List[Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]]
] = defaultdict(list)
@property
def on_error(self) -> Union[ExceptionHandling, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]]:
"""
Property getter for on_error
"""
return self._on_error
@on_error.setter
def on_error(
self, on_error: Union[ExceptionHandling, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]]
):
"""
Property setter for on_error
:param on_error: Tells the emitter what to do when an exception occurs inside an event
handler. This value can either be an entry from the asyncevents.ExceptionHandling
enum or a coroutine function. If the passed object is a coroutine function, it is awaited
whenever an exception is caught with the AsyncEventEmitter instance, the exception
object and the event name as arguments (errors from the exception handler itself are
not caught). Defaults to ExceptionHandling.PROPAGATE, which lets exceptions fall trough
the execution chain (other enum values are LOG, which prints a log message on the
logging.ERROR level, and IGNORE which silences the exception entirely)
:type on_error: Union[ExceptionHandling, Callable[[AsyncEventEmitter, Exception, str], Coroutine[Any, Any, Any]]],
optional
:raises:
TypeError: If the provided handler is not valid
"""
if not inspect.iscoroutinefunction(on_error) and on_error not in ExceptionHandling:
if inspect.iscoroutine(on_error):
raise TypeError(
"on_error should be a coroutine *function*, not a coroutine! Pass the function"
" object without calling it!"
)
raise TypeError(
"expected on_error to be a coroutine function or an entry from the ExceptionHandling"
f" enum, found {type(on_error).__name__!r} instead"
)
self._on_error = on_error
@property
def on_unknown_event(
self,
) -> Union[UnknownEventHandling, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]]:
"""
Property getter for on_unknown_event
"""
return self._on_unknown_event
@on_unknown_event.setter
def on_unknown_event(
self,
on_unknown_event: Union[
UnknownEventHandling, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
] = UnknownEventHandling.IGNORE,
):
"""
Property setter for on_unknown_event
:param on_unknown_event: Tells the emitter what to do when an unknown event is triggered. An
unknown event is an event for which no handler is registered (either because it has never
been registered or because all of its handlers have been removed). This value can either be
an entry from the asyncevents.UnknownEventHandling enum or a coroutine function. If the argument
is a coroutine function, it is awaited with the AsyncEventEmitter instance and the event name as arguments.
Defaults to UnknownEventHandling.IGNORE, which does nothing (other enum values are LOG, which
prints a log message on the logging.WARNING level, and ERROR which raises an UnknownEvent exception)
Note: if the given callable is a coroutine, it is awaited, while it's called normally otherwise
and its return value is discarded
:type on_unknown_event: Union[UnknownEventHandling, Callable[[AsyncEventEmitter, str], Coroutine[Any, Any, Any]]],
optional
:raises:
TypeError: If the provided handler is not valid
"""
if not inspect.iscoroutinefunction(on_unknown_event) and on_unknown_event not in UnknownEventHandling:
if inspect.iscoroutine(on_unknown_event):
raise TypeError(
"on_unknown_event should be a coroutine *function*, not a coroutine! Pass the function"
" object without calling it!"
)
raise TypeError(
"expected on_unknown_event to be a coroutine function or an entry from the"
f" UnknownEventHandling enum, found {type(on_unknown_event).__name__!r} instead"
)
self._on_unknown_event = on_unknown_event
@property
def mode(self):
"""
Property getter for mode
"""
return self._mode
@mode.setter
def mode(self, mode: ExecutionMode):
"""
Property setter for mode
:param mode: Tells the emitter how event handlers should be spawned. It should be an entry of the
the asyncevents.ExecutionMode enum. If it is set to ExecutionMode.PAUSE, the default, the event
emitter spawns tasks by awaiting each matching handler: this causes it to pause on every handler.
        If ExecutionMode.NOWAIT is used, the emitter uses asyncio.create_task to spawn all the handlers
at the same time (note though that using this mode kind of breaks the priority queueing: the handlers
are started according to their priorities, but once they are started they are handled by asyncio's
event loop which is non-deterministic, so expect some disorder). Using ExecutionMode.NOWAIT allows
to call the emitter's wait() method, which pauses until all currently running event handlers have
completed executing (when ExecutionMode.PAUSE is used, wait() is a no-op), but note that return
values from event handlers are not returned
:type mode: ExecutionMode
:raises:
TypeError: If the given mode is invalid
"""
if mode not in ExecutionMode:
raise TypeError(
f"expected mode to be an entry from the ExecutionMode enum, found {type(mode).__name__!r}" " instead"
)
self._mode = mode
def exists(self, event: str) -> bool:
"""
Returns if the given event has at least
one handler registered
:param event: The event name
:type event: str
:return: True if the event has at least one handler,
False otherwise
"""
        return len(self.handlers.get(event, [])) > 0
def register_event(
self,
event: str,
handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]],
priority: int = 0,
oneshot: bool = False,
):
"""
Registers an event and its associated handler. If
the event is already registered, the given handler
is added to the already existing handler queue. Each
event will be called with the AsyncEventEmitter instance
that triggered the event as well as the event name itself
:param event: The event name
:type event: str
:param handler: A coroutine function to be called
when the event is generated
:type handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
:param priority: The handler's execution priority,
defaults to 0 (lower priority means earlier
execution!)
:type priority: int, optional
        :param oneshot: If True, the event is unregistered after it fires the first time,
defaults to False
:type oneshot: bool, optional
"""
heappush(self.handlers[event], (priority, time.monotonic(), handler, oneshot))
def unregister_event(self, event: str):
"""
Unregisters all handlers for the given
event in one go. Does nothing if the
given event is not registered already.
Note that this does not affect any
already started event handler for the
given event
:param event: The event name
:type event: str
"""
self.handlers.pop(event, None)
def _get(
self, event: str, handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
) -> Union[
None, bool, Tuple[int, float, Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]], bool]
]:
"""
Returns the tuple of (priority, date, corofunc, oneshot) representing the
        given handler. Only the first matching entry is returned. None is returned
        if the given event has no registered handlers, and False is returned if the
        given handler is not registered for the given event.
Note: This method is meant mostly for internal use
:param event: The event name
:type event: str
:param handler: A coroutine function to be called
when the event is generated
:type handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
:raises:
UnknownEvent: If self.on_unknown_error == UnknownEventHandling.ERROR
        :returns: The tuple of (priority, date, corofunc, oneshot) representing the
        given handler, or None/False as described above
"""
if not self.exists(event):
return None
for (priority, tie, corofunc, oneshot) in self.handlers[event]:
if corofunc == handler:
return priority, tie, corofunc, oneshot
return False
def unregister_handler(
self,
event: str,
handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]],
remove_all: bool = False,
):
"""
Unregisters a given handler for the given event. If
remove_all is True (defaults to False), all occurrences
of the given handler are removed, otherwise only the first
one is unregistered. Does nothing if the given event is not
registered already. This method does nothing if the given
event exists, but the given handler is not registered for it
:param event: The event name
:type event: str
:param handler: The coroutine function to be unregistered
:type handler: Callable[["AsyncEventEmitter", str, ...], Coroutine[Any, Any, Any]]
:param remove_all: If True, all occurrences of the
given handler are removed, otherwise only the first
one is unregistered
:type remove_all: bool, optional
:raises:
UnknownEvent: If self.on_unknown_error == UnknownEventHandling.ERROR
:return:
"""
if remove_all:
for (priority, tie, coro, oneshot) in self.handlers[event]:
if handler == coro:
self.handlers[event].remove((priority, tie, coro, oneshot))
else:
if t := self._get(event, handler):
self.handlers[event].remove(t)
# We maintain the heap queue invariant
heapify(self.handlers[event])
async def wait(self, event: Optional[str] = None) -> List[Any]:
"""
Waits until all the event handlers for the given
event have finished executing. When the given event
is None, the default, waits for all handlers of all
events to terminate. This method is a no-op when the
emitter is configured with anything other than
ExecutionMode.NOWAIT or if emit() hasn't been called
with block=False. Returns a list of all return values
from the event handlers
"""
if not event:
result = [e for e in await asyncio.gather(*[t[1] for t in self._tasks])]
self._tasks = []
return result
else:
await self._check_event(event)
result = [e for e in await asyncio.gather(*[t[1] for t in self._tasks if t[0] == event])]
for t in self._tasks:
if t[0] == event:
self._tasks.remove(t)
return result
async def emit(self, event: str, block: bool = True, *args, **kwargs) -> List[Any]:
"""
Emits an event. Any extra positional and keyword arguments besides
the event name and the "block" boolean are passed over to the
event handlers themselves. Returns the values of the event
handlers in a list when using blocking mode or an empty
list otherwise
:param event: The event to trigger. Note that,
depending on the configuration, unknown events
may raise errors or log to stderr
:type event: str
:param block: Temporarily overrides the emitter's global execution
mode. If block is True, the default, this call will pause until
execution of all event handlers has finished, otherwise it returns
as soon as they're scheduled
:type block: bool, optional
:raises:
UnknownEvent: If self.on_unknown_error == UnknownEventHandling.ERROR
and the given event is not registered
"""
mode = self._mode
if block:
self._mode = ExecutionMode.PAUSE
result = await self._emit_await(event, *args, **kwargs)
else:
self._mode = ExecutionMode.NOWAIT
await self._emit_nowait(event, *args, **kwargs)
result = []
self._mode = mode
return result
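# Hedged usage sketch (not part of the original module); the event name, handler and
# payload below are hypothetical. Run it with asyncio.run(_emitter_example()).
async def _emitter_example():
    emitter = AsyncEventEmitter(on_unknown_event=UnknownEventHandling.LOG)
    async def greet(sender, event, who):
        return f"hello {who}"
    emitter.register_event("greet", greet, priority=0, oneshot=True)
    # blocking emit: pauses until every matching handler has run and
    # returns their return values
    return await emitter.emit("greet", True, "world")  # ['hello world']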
| 47.139059 | 122 | 0.648215 |
acf525a7a567dd8926298d7a54eb3bea61db7275 | 630 | py | Python | backend/manage.py | crowdbotics-apps/test-33024 | 09ff5fe2740de7d38aced0309cba713511e7482e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/test-33024 | 09ff5fe2740de7d38aced0309cba713511e7482e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/test-33024 | 09ff5fe2740de7d38aced0309cba713511e7482e | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_33024.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 28.636364 | 74 | 0.684127 |
acf525baee151e5e0e6333711116406117176ae0 | 403 | py | Python | gram/migrations/0008_alter_image_likes.py | MutuaFranklin/Insta-clone | 012d16f771be5766129d2d10ad5ab2d3cfc76587 | [
"MIT"
] | null | null | null | gram/migrations/0008_alter_image_likes.py | MutuaFranklin/Insta-clone | 012d16f771be5766129d2d10ad5ab2d3cfc76587 | [
"MIT"
] | null | null | null | gram/migrations/0008_alter_image_likes.py | MutuaFranklin/Insta-clone | 012d16f771be5766129d2d10ad5ab2d3cfc76587 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-13 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gram', '0007_auto_20210913_1142'),
]
operations = [
migrations.AlterField(
model_name='image',
name='likes',
field=models.ManyToManyField(blank=True, to='gram.Profile'),
),
]
| 21.210526 | 72 | 0.600496 |
acf52658486fcc5cfef8d1bf841f77183f10b7a1 | 1,833 | py | Python | {{cookiecutter.project_slug}}/tests/api/test_api_login.py | huogerac/cookiecutter-flask-openapi | cbf80cb6321440d546cb6d5a461cb79087671771 | [
"BSD-3-Clause"
] | 4 | 2021-12-17T03:12:52.000Z | 2021-12-19T01:51:42.000Z | {{cookiecutter.project_slug}}/tests/api/test_api_login.py | huogerac/cookiecutter-flask-openapi | cbf80cb6321440d546cb6d5a461cb79087671771 | [
"BSD-3-Clause"
] | 8 | 2021-12-17T23:31:25.000Z | 2021-12-21T01:51:03.000Z | {{cookiecutter.project_slug}}/tests/api/test_api_login.py | huogerac/cookiecutter-flask-openapi | cbf80cb6321440d546cb6d5a461cb79087671771 | [
"BSD-3-Clause"
] | 1 | 2021-12-21T01:46:25.000Z | 2021-12-21T01:46:25.000Z | import mock
from {{cookiecutter.project_slug}}.exceptions import UnauthorizedException
def test_should_return_bad_request_when_there_is_no_body(client):
"""
    Test must return 400 when email/password is not sent in the body
"""
response = client.post(
"/api/auth/login",
headers={"content-type": "application/json"},
)
assert response.status_code == 400
assert response.json["detail"] == "None is not of type 'object'"
@mock.patch("{{cookiecutter.project_slug}}.services.auth.login")
def test_should_return_tokens(auth_mock, client):
"""
    Test must return the user's data,
    a token and a refresh_token
"""
auth_mock.return_value = {
"token": "abcd",
"refresh_token": "abcd",
}
response = client.post(
"/api/auth/login",
headers={"content-type": "application/json"},
json={
"username": "jd",
"password": "abacate",
},
)
auth_mock.assert_called_once_with("jd", "abacate")
assert response.status_code == 200
assert response.json["token"] is not None
assert response.json["refresh_token"] is not None
def test_should_return_invalid_login(client):
"""
    Test must return 401 when the service returns an error
"""
with mock.patch("{{cookiecutter.project_slug}}.services.auth.login") as auth_mock:
auth_mock.side_effect = UnauthorizedException("Username or password invalid")
response = client.post(
"/api/auth/login",
headers={"content-type": "application/json"},
json={
"username": "jd",
"password": "invalid-pwd",
},
)
assert response.status_code == 401
assert response.json["detail"] == "Username or password invalid"
| 28.2 | 86 | 0.628478 |
acf5276b11beb6b009e800bb25e1f14586b390b2 | 1,143 | py | Python | tnm/transitapis/models.py | ahlusar1989/TransitNearMe | 9df101a0b3b0499e4661c4b9a9a3b8450e697433 | [
"MIT"
] | 1 | 2016-02-26T04:09:06.000Z | 2016-02-26T04:09:06.000Z | tnm/transitapis/models.py | ahlusar1989/TransitNearMe | 9df101a0b3b0499e4661c4b9a9a3b8450e697433 | [
"MIT"
] | null | null | null | tnm/transitapis/models.py | ahlusar1989/TransitNearMe | 9df101a0b3b0499e4661c4b9a9a3b8450e697433 | [
"MIT"
] | 2 | 2015-03-19T15:39:29.000Z | 2017-05-09T17:58:51.000Z | from django.contrib.gis.db import models
from stringfield import StringField
class Stop(models.Model):
name = StringField(blank=True)
location = models.PointField()
api_name = StringField()
api_data = StringField(blank=True)
objects = models.GeoManager()
def __unicode__(self):
return self.name
def json_dict(self):
return {
'name': self.name,
'api_name': self.api_name,
'lat': self.location.y,
'lng': self.location.x,
'code': self.api_name.lower().replace(' ','-') + ':' + self.api_data, # normalize to lower case without spaces
}
class Prediction(models.Model):
retrieved = models.DateTimeField()
stop = models.ForeignKey(Stop)
route = StringField(null=True)
destination = StringField(null=True)
wait = StringField(null=True)
def __unicode__(self):
return '%s %s %s' % (self.route, self.destination, self.wait)
def json_dict(self):
return {
'route': self.route,
'destination': self.destination,
'wait': str(self.wait),
}
| 27.878049 | 122 | 0.600175 |
acf528107eac4dfb1fc6f8810a25f08b74dc36b5 | 1,473 | py | Python | shoppingapp/models.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | shoppingapp/models.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | shoppingapp/models.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class ItemModel(models.Model):
name = models.CharField(max_length = 128)
price = models.IntegerField()
cover = models.ImageField(upload_to = 'images/',blank = True,null = True)
def __str__(self):
        return self.name + " " + str(self.price)
class CategoryModel(models.Model):
name = models.CharField(max_length=128)
items = models.ManyToManyField(ItemModel)
def __str__(self):
return self.name
class CartModel(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
item = models.ForeignKey(ItemModel, on_delete=models.CASCADE)
# quantity = models.IntegerField(default=0)
def __str__(self):
return self.user.username
class OrderModel(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
items = models.JSONField()
discount = models.CharField(max_length=128)
delivery_date = models.DateField(max_length=128,null=True,blank=True)
def __str__(self):
return self.user.username
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.total_price = sum(item[1] for item in self.items)
        self.final_price = self.total_price - int(self.discount)
class PromoCodeModel(models.Model):
promo = models.CharField(max_length=128)
discount = models.CharField(max_length=128) | 38.763158 | 83 | 0.706042 |
acf529324e0ced15b9d305e954765c1ddd0ea7d1 | 166 | py | Python | src/oca_github_bot/router.py | fkantelberg/oca-github-bot | 633c1e3fbb63b05ebff1ff9b08d4949ca1e07491 | [
"MIT"
] | 1 | 2019-11-20T10:54:44.000Z | 2019-11-20T10:54:44.000Z | src/oca_github_bot/router.py | fkantelberg/oca-github-bot | 633c1e3fbb63b05ebff1ff9b08d4949ca1e07491 | [
"MIT"
] | 2 | 2020-05-28T14:39:54.000Z | 2021-01-29T16:27:20.000Z | src/oca_github_bot/router.py | fkantelberg/oca-github-bot | 633c1e3fbb63b05ebff1ff9b08d4949ca1e07491 | [
"MIT"
] | 1 | 2020-07-13T11:00:21.000Z | 2020-07-13T11:00:21.000Z | # Copyright (c) ACSONE SA/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from gidgethub import routing
router = routing.Router()
| 23.714286 | 73 | 0.759036 |
acf52a2e72f23b57a7304a5e4d25fa904f5c4a4d | 50,674 | py | Python | fairseq/models/transformer.py | DallesLee/fairseq | 3ef27eab5a4aa094ff82e918172b182e95abd0c6 | [
"MIT"
] | null | null | null | fairseq/models/transformer.py | DallesLee/fairseq | 3ef27eab5a4aa094ff82e918172b182e95abd0c6 | [
"MIT"
] | null | null | null | fairseq/models/transformer.py | DallesLee/fairseq | 3ef27eab5a4aa094ff82e918172b182e95abd0c6 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
TransformerDecoderLayer,
TransformerEncoderLayer,
GradMultiply,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
import numpy as np
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
EPSILON = torch.finfo(torch.double).tiny
# EPSILON = torch.finfo(torch.float32).tiny
def gumbel_soft_top_k(w, k, t):
# apply gumbel noise
u = torch.rand_like(w) * (1-EPSILON) + EPSILON
r = -torch.log(-torch.log(u)) + w
epsilon = torch.ones_like(r)
epsilon *= EPSILON
# soft top k
p = torch.zeros([k, w.size()[0]]).to(w.device).double()
# p = torch.zeros([k, w.size()[0]]).to(w.device)
p[0] = torch.exp(nn.functional.log_softmax(r / t, 0))
# p[0] = torch.softmax(r / t, 0)
for j in range(1,k):
r += torch.log(torch.max(1-p[j-1], epsilon))
p[j] = torch.exp(nn.functional.log_softmax(r / t, 0))
# p[j] = torch.softmax(r / t, 0)
return p.sum(0)
class STEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input, k):
threshold = input.sort(descending = True)[0][k]
return (input > threshold).float()
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
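# Hedged sketch (not part of the original model code) comparing the two head-mask
# relaxations defined above on a toy weight vector: gumbel_soft_top_k produces a soft
# mask whose entries lie in (0, 1) and sum to k, while STEFunction produces a hard 0/1
# mask over the k largest weights but lets gradients pass straight through.
def _topk_mask_example():
    w = torch.randn(8).double()
    soft_mask = gumbel_soft_top_k(w, 3, 0.1)  # shape [8], entries sum to 3
    hard_mask = STEFunction.apply(w, 3)       # 1.0 on the 3 largest entries (randn gives no ties)
    return soft_mask, hard_mask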
@register_model("transformer")
class TransformerModel(FairseqEncoderDecoderModel):
"""
Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
super().__init__(encoder, decoder)
self.args = args
self.supports_align_args = True
self.head_size = [self.args.encoder_layers+self.args.decoder_layers*2, self.args.encoder_attention_heads]
self.w = nn.Parameter(torch.empty(self.head_size).double())
# self.w = nn.Parameter(torch.empty(self.head_size))
nn.init.xavier_uniform_(self.w)
self.num_of_heads = None
self.temperature = None
self._apply_dropout = False
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension (extra linear layer '
'if different from decoder embed dim')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
parser.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activations at each layer, which saves GPU '
'memory usage at the cost of some additional compute')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, "no_cross_attention", False),
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
if self._apply_dropout:
# head_mask = gumbel_soft_top_k(self.w.view(-1), self.num_of_heads, self.temperature).view_as(self.w)
head_mask = STEFunction.apply(self.w.view(-1), self.num_of_heads).view_as(self.w)
self.apply_masks(head_mask.float())
# self.apply_masks(head_mask)
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
# Since get_normalized_probs is in the Fairseq Model which is not scriptable,
# I rewrite the get_normalized_probs from Base Class to call the
# helper function in the Base Class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
def convert_head_mask(self, head_mask):
new_head_mask = {}
encoder_layers = self.args.encoder_layers
decoder_layers = self.args.decoder_layers
new_head_mask['encoder'] = head_mask[:encoder_layers].unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
new_head_mask['decoder'] = []
encoder_decoders = head_mask[encoder_layers:encoder_layers+decoder_layers]
decoder_selfs = head_mask[encoder_layers+decoder_layers:]
for decoder_self, encoder_decoder in zip(decoder_selfs, encoder_decoders):
new_head_mask['decoder'].append({
"self": decoder_self.unsqueeze(0).unsqueeze(-1).unsqueeze(-1),
"encoder": encoder_decoder.unsqueeze(0).unsqueeze(-1).unsqueeze(-1),
})
return new_head_mask
def apply_masks(self, head_mask):
head_mask = self.convert_head_mask(head_mask)
self.encoder.apply_masks(head_mask['encoder'])
self.decoder.apply_masks(head_mask['decoder'])
def apply_gates(self, l0_penalty):
self.encoder.apply_gates(l0_penalty)
self.decoder.apply_gates(l0_penalty)
def get_penalty(self):
return self.encoder.get_penalty() + self.decoder.get_penalty()
def get_gate_values(self):
gate_values = []
gate_values.extend(self.encoder.get_gate_values())
gate_values.extend([gate['encoder'] for gate in self.decoder.get_gate_values()])
gate_values.extend([gate['self'] for gate in self.decoder.get_gate_values()])
return torch.stack(gate_values) if gate_values[0] is not None else gate_values
def remove_gates(self):
self.encoder.remove_gates()
self.decoder.remove_gates()
def apply_dropout(self, num_of_heads, temperature):
self.num_of_heads = num_of_heads
self.temperature = temperature
self._apply_dropout = True
def get_w(self):
return self.w
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def build_encoder_layer(self, args):
layer = TransformerEncoderLayer(args)
if getattr(args, "checkpoint_activations", False):
layer = checkpoint_wrapper(layer)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = []
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple
        # from `forward`, so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
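    # --- Additions for attention-head masking/gating (not part of upstream fairseq). ---
    # These assume each TransformerEncoderLayer provides matching apply_masks /
    # apply_gates / get_penalty / get_gate_values / remove_gates methods; the
    # encoder simply broadcasts each call to every layer.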
def apply_masks(self, head_mask):
for i, layer in enumerate(self.layers):
layer.apply_masks(head_mask[i])
def apply_gates(self, l0_penalty):
for layer in self.layers:
layer.apply_gates(l0_penalty)
def get_penalty(self):
return sum(layer.get_penalty() for layer in self.layers)
def get_gate_values(self):
gate_values = []
for layer in self.layers:
gate_values.append(layer.get_gate_values())
return gate_values
def remove_gates(self):
for layer in self.layers:
layer.remove_gates()
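# Minimal usage sketch for the gating API above (illustrative only; the penalty
# weight and training loop are assumptions, not part of this module):
#
#   encoder.apply_gates(l0_penalty=0.01)       # attach L0 gates to every layer
#   loss = task_loss + encoder.get_penalty()   # add the sparsity penalty
#   gates = encoder.get_gate_values()          # per-layer gate values, for pruning
#   encoder.remove_gates()                     # make the pruning permanent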
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = None
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = TransformerDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
layer = checkpoint_wrapper(layer)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
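    # --- Head masking/gating additions mirroring the encoder (not upstream fairseq). ---
    # Each call below is simply delegated to the individual decoder layers.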
def apply_masks(self, head_mask):
for i, layer in enumerate(self.layers):
layer.apply_masks(head_mask[i])
def apply_gates(self, l0_penalty):
for layer in self.layers:
layer.apply_gates(l0_penalty)
def get_penalty(self):
return sum(layer.get_penalty() for layer in self.layers)
def get_gate_values(self):
gate_values = []
for layer in self.layers:
gate_values.append(layer.get_gate_values())
return gate_values
def remove_gates(self):
for layer in self.layers:
layer.remove_gates()
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
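# These architecture functions (base_architecture above and the presets below) only
# fill in default hyperparameters via getattr; with the standard fairseq CLI an
# architecture is selected by name, e.g.
# `fairseq-train ... --arch transformer_vaswani_wmt_en_de_big` (illustrative).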
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
| 42.583193 | 159 | 0.631606 |
acf52be94f8bb08c1e4b6dcb97ee6f18dbe2bc1e | 1,446 | py | Python | library/lib_study/142_i18n_locale.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | library/lib_study/142_i18n_locale.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | library/lib_study/142_i18n_locale.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | import datetime
import locale
print(locale.getdefaultlocale())
# loc = locale.getlocale() # get current locale
# print(loc)
# locale.setlocale(locale.LC_ALL, 'de_DE')
# locale.strcoll('f\xe4n', 'foo') # compare a string containing an umlaut
# locale.setlocale(locale.LC_ALL, '') # use user's preferred locale
# locale.setlocale(locale.LC_ALL, 'C') # use default (C) locale
# locale.setlocale(locale.LC_ALL, loc) # restore saved locale
alllocale = locale.locale_alias
for k in alllocale.keys():
print('locale[%s] %s' % (k, alllocale[k]))
import time
print(time.strptime('Thu, 24 Nov 2016 07:01:59 GMT', '%a, %d %b %Y %H:%M:%S GMT'))
locale.setlocale(locale.LC_TIME)
print(time.strptime('Thu, 24 Nov 2016 07:01:59 GMT', '%a, %d %b %Y %H:%M:%S GMT'))
locale.setlocale(locale.LC_TIME, 'en_US')
print(time.strptime('Thu, 24 Nov 2016 07:01:59 GMT', '%a, %d %b %Y %H:%M:%S GMT'))
# print(locale.getlocale())
# print(locale.setlocale(locale.LC_ALL, ''))
# print(locale.getlocale())
# print(time.strptime('Thu, 24 Nov 2016 07:01:59 GMT', '%a, %d %b %Y %H:%M:%S GMT'))
# # Error: the %a / %b directives here are locale-dependent, so under the wrong locale the parsing/formatting above failed.
dt = datetime.datetime(2015, 11, 15, 16, 30)
locale.setlocale(locale.LC_ALL, "en_GB.utf8")
print(dt.strftime("%A, %d. %B %Y %I:%M%p"))
locale.setlocale(locale.LC_ALL, "zh_cn.utf8")
print(dt.strftime("%A, %d. %B %Y %I:%M%p"))
locale.setlocale(locale.LC_ALL, "nb_NO.utf8")
print(dt.strftime("%A, %d. %B %Y %I:%M%p"))
| 31.434783 | 84 | 0.665975 |
acf52c931dce99ca573ca2ee1ebc459e6f896bf4 | 2,219 | py | Python | psono/restapi/serializers/read_shard.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 48 | 2018-04-19T15:50:58.000Z | 2022-01-23T15:58:11.000Z | psono/restapi/serializers/read_shard.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 9 | 2018-09-13T14:56:18.000Z | 2020-01-17T16:44:33.000Z | psono/restapi/serializers/read_shard.py | dirigeant/psono-server | a18c5b3c4d8bbbe4ecf1615b210d99fb77752205 | [
"Apache-2.0",
"CC0-1.0"
] | 11 | 2019-09-20T11:53:47.000Z | 2021-07-18T22:41:31.000Z | from rest_framework import serializers
from django.utils import timezone
from django.conf import settings
from datetime import timedelta
from typing import List, Dict
import ipaddress
from ..models import (
Fileserver_Cluster_Member_Shard_Link,
)
from ..utils import get_ip, fileserver_access
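# Shape of the validated payload assembled below (sketch derived from this code):
# attrs['shards'] = [
#     {'id': ..., 'shard_title': ..., 'shard_description': ...,
#      'fileserver': [{'fileserver_public_key': ..., 'fileserver_url': ...,
#                      'read': bool, 'write': bool}, ...],
#      'read': bool, 'write': bool},
#     ...
# ]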
class ReadShardSerializer(serializers.Serializer):
def validate(self, attrs: dict) -> dict:
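        # Group every live fileserver<->shard link by shard and record, for the
        # requesting IP, whether each fileserver grants read and/or write access.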
ip_address = ipaddress.ip_address(get_ip(self.context['request']))
cluster_member_shard_link_objs = Fileserver_Cluster_Member_Shard_Link.objects.select_related('member', 'shard')\
.filter(member__valid_till__gt=timezone.now() - timedelta(seconds=settings.FILESERVER_ALIVE_TIMEOUT),
shard__active=True)\
.only('read', 'write', 'ip_read_blacklist', 'ip_read_whitelist', 'ip_write_blacklist', 'ip_write_whitelist',
                  'member__url', 'member__read', 'member__write', 'member__public_key', 'shard__id', 'shard__title', 'shard__description')
shards: List[Dict] = []
shard_dic: Dict[str, Dict] = {}
for cmsl in cluster_member_shard_link_objs:
read = fileserver_access(cmsl, ip_address, read=True)
write = fileserver_access(cmsl, ip_address, write=True)
if cmsl.shard.id not in shard_dic:
shard_dic[cmsl.shard.id] = {
'id': cmsl.shard.id,
'shard_title': cmsl.shard.title,
'shard_description': cmsl.shard.description,
'fileserver': [],
'read': False,
'write': False,
}
shards.append(shard_dic[cmsl.shard.id])
shard_dic[cmsl.shard.id]['fileserver'].append({
'fileserver_public_key': cmsl.member.public_key,
'fileserver_url': cmsl.member.url,
'read': read,
'write': write,
})
shard_dic[cmsl.shard.id]['read'] = shard_dic[cmsl.shard.id]['read'] or read
shard_dic[cmsl.shard.id]['write'] = shard_dic[cmsl.shard.id]['write'] or write
attrs['shards'] = shards
return attrs | 37.610169 | 153 | 0.616494 |
acf52ca6c60f65e9e89375b39fe553ed1950c335 | 563 | py | Python | llama/types/AbstractDjangoApi.py | atilante/llama-cli | 2aa2ebddca9dd76d0a62a36f75b31af75eedea84 | [
"MIT"
] | null | null | null | llama/types/AbstractDjangoApi.py | atilante/llama-cli | 2aa2ebddca9dd76d0a62a36f75b31af75eedea84 | [
"MIT"
] | null | null | null | llama/types/AbstractDjangoApi.py | atilante/llama-cli | 2aa2ebddca9dd76d0a62a36f75b31af75eedea84 | [
"MIT"
] | 1 | 2021-11-29T14:26:53.000Z | 2021-11-29T14:26:53.000Z | from .AbstractApi import AbstractApi
class AbstractDjangoApi(AbstractApi):
def __init__(self, source_id, token):
super().__init__(source_id)
self.token = token
  def fetch(self, url, headers=None):
    # Avoid a shared mutable default dict; copy so the caller's dict is not mutated.
    headers = dict(headers) if headers else {}
    headers['Authorization'] = f'Token {self.token}'
    return super().fetch(url, headers=headers)
def get_paged_json(self, url):
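    # Accumulate paginated results: read 'results' from each page and follow the
    # 'next' URL until the API returns no further page.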
results = []
next = url
while next:
response = self.fetch_json(next)
results.extend(response['results'])
next = response['next']
if next:
self.fetch_delay()
return results
| 24.478261 | 52 | 0.65897 |
acf52d15eef2d0883ccc8b4f1a5bd85a44e9ab97 | 2,631 | py | Python | python/2020/august/aug-11/bloomfilter.py | lineville/Daily-Coding-Problem | 9088616669c5f183457c9238128f0d47b85097d9 | [
"MIT"
] | null | null | null | python/2020/august/aug-11/bloomfilter.py | lineville/Daily-Coding-Problem | 9088616669c5f183457c9238128f0d47b85097d9 | [
"MIT"
] | 4 | 2021-08-09T00:30:32.000Z | 2021-10-04T21:38:12.000Z | python/2020/august/aug-11/bloomfilter.py | lineville/Daily-Coding-Problem | 9088616669c5f183457c9238128f0d47b85097d9 | [
"MIT"
] | null | null | null |
# * Daily Coding Problem August 11 2020
# * [Medium] -- TripleByte
# * Implement a data structure which carries out the following operations
# * without resizing the underlying array:
# * add(value): Add a value to the set of values.
# * check(value): Check whether a value is in the set.
# * The check method may return occasional false positives
# * (in other words, incorrectly identifying an element as
# * part of the set), but should always correctly identify a true element.
# * This article was used in writing this code:
# https://www.geeksforgeeks.org/bloom-filters-introduction-and-python-implementation/
# * A bloom filter will be used to store the data to allow infinite adds with
# * partially incorrect results about membership
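# Worked example of the sizing formulas used below (my own illustration): for
# n = 20 items at false-positive rate p = 0.05, the bit-array size is
# m = -n*ln(p)/ln(2)^2 ~= 125 bits and the hash count is k = (m/n)*ln(2) ~= 4.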
import math
import mmh3
from bitarray import bitarray
class BloomFilter(object):
def __init__(self, size, falsePositiveProbability):
        # False-positive probability, expressed as a decimal fraction
self.falsePositiveProbability = falsePositiveProbability
# Size of bit array to use
self.size = self.get_size(size, falsePositiveProbability)
# number of hash functions to use
self.hash_count = self.get_hash_count(self.size, size)
# Bit array of given size
self.bit_array = bitarray(self.size)
# initialize all bits as 0
self.bit_array.setall(0)
def add(self, item):
digests = []
for i in range(self.hash_count):
            # Create a digest for the given item; i works as the seed to
            # mmh3.hash(), so each seed produces a different digest.
digest = mmh3.hash(item, i) % self.size
digests.append(digest)
print(digests)
# set the bit True in bit_array
self.bit_array[digest] = True
def check(self, item):
for i in range(self.hash_count):
digest = mmh3.hash(item, i) % self.size
            if not self.bit_array[digest]:
                # If any bit is False, the item is definitely not in the filter;
                # otherwise it is only *probably* present.
                return False
return True
    @classmethod
    def get_size(cls, n, p):
        # Optimal bit-array size: m = -(n * ln p) / (ln 2)^2
        m = -(n * math.log(p))/(math.log(2)**2)
        return int(m)
    @classmethod
    def get_hash_count(cls, m, n):
        # Optimal number of hash functions: k = (m / n) * ln 2
        k = (m/n) * math.log(2)
        return int(k)
    def show(self):
        # self.items never exists on this class; show the underlying bit array instead.
        print(self.bit_array)
# Example usage (kept commented out, as in the original file):
# def main():
#     bloomFilter = BloomFilter(10, .05)
#     bloomFilter.add("Liam")
#     bloomFilter.show()
#     print(bloomFilter.check("Liam"))  # True
#     print(bloomFilter.check("Noah"))  # almost always False
# main()
| 27.989362 | 86 | 0.631699 |
acf52de981180d56916fa0ddd178e91f25aca2da | 1,126 | py | Python | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/texture_mirror_clamp.py | temelkirci/Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/texture_mirror_clamp.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/EXT/texture_mirror_clamp.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | '''OpenGL extension EXT.texture_mirror_clamp
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_mirror_clamp to provide a more
Python-friendly API
Overview (from the spec)
EXT_texture_mirror_clamp extends the set of texture wrap modes to
include three modes (GL_MIRROR_CLAMP_EXT, GL_MIRROR_CLAMP_TO_EDGE_EXT,
GL_MIRROR_CLAMP_TO_BORDER_EXT) that effectively use a texture map
twice as large as the original image in which the additional half
of the new image is a mirror image of the original image.
This new mode relaxes the need to generate images whose opposite
edges match by using the original image to generate a matching
"mirror image". This mode allows the texture to be mirrored only
once in the negative s, t, and r directions.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_mirror_clamp.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.texture_mirror_clamp import *
### END AUTOGENERATED SECTION | 40.214286 | 71 | 0.811723 |
acf52e69f98aff35a70661360c17b77c8ab057d8 | 973 | py | Python | 32_FaceMesh/01_float32/02_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 1,529 | 2019-12-11T13:36:23.000Z | 2022-03-31T18:38:27.000Z | 32_FaceMesh/01_float32/02_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 200 | 2020-01-06T09:24:42.000Z | 2022-03-31T17:29:08.000Z | 32_FaceMesh/01_float32/02_integer_quantization.py | khanfarhan10/PINTO_model_zoo | 4cad2e506d8c0fb604aa7b5f84115a840ab59ba1 | [
"MIT"
] | 288 | 2020-02-21T14:56:02.000Z | 2022-03-30T03:00:35.000Z | ### tensorflow==2.2.0
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from PIL import Image
import os
import glob
def representative_dataset_gen():
for data in raw_test_data.take(100):
image = data['image'].numpy()
image = tf.image.resize(image, (192, 192))
image = image[np.newaxis,:,:,:]
image = image - 127.5
image = image * 0.007843
yield [image]
raw_test_data, info = tfds.load(name="the300w_lp", with_info=True, split="train", data_dir="~/TFDS", download=True)
# Integer Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
tflite_quant_model = converter.convert()
with open('face_landmark_192_integer_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Integer Quantization complete! - face_landmark_192_integer_quant.tflite") | 34.75 | 115 | 0.758479 |
acf530a1694d939a050e08d82704ad34d1f48d67 | 166 | py | Python | hello_django_project/hello_django/apps/orders/models/__init__.py | KimSoungRyoul/PersistenceLayerInPythonApplication | 2431553a6cdd913babd546adc6c9376855eb3438 | [
"MIT"
] | 2 | 2021-11-01T08:08:13.000Z | 2021-11-01T08:11:51.000Z | hello_django_project/hello_django/apps/orders/models/__init__.py | KimSoungRyoul/PersistenceLayerInPythonApplication | 2431553a6cdd913babd546adc6c9376855eb3438 | [
"MIT"
] | null | null | null | hello_django_project/hello_django/apps/orders/models/__init__.py | KimSoungRyoul/PersistenceLayerInPythonApplication | 2431553a6cdd913babd546adc6c9376855eb3438 | [
"MIT"
] | null | null | null | from .order import *
from .orderedproduct import *
from .report import *
__all__ = [
"Order",
"OrderedProduct",
"OrderReport",
"DailyOrderReport",
]
| 15.090909 | 29 | 0.650602 |
acf531e842c486da367bedf18f03bdcd692bab70 | 1,923 | py | Python | apps/hosts/models.py | charanjit-singh/EvalAI | bb2a6c5375e53f2dec48bb862034009af3f2a0c7 | [
"BSD-3-Clause"
] | 1 | 2019-01-31T10:06:47.000Z | 2019-01-31T10:06:47.000Z | apps/hosts/models.py | rshrc/EvalAI | 72b7b52c500008e79558fb763d16f6a18529a504 | [
"BSD-3-Clause"
] | null | null | null | apps/hosts/models.py | rshrc/EvalAI | 72b7b52c500008e79558fb763d16f6a18529a504 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from base.models import (TimeStampedModel, )
# from challenges.models import (Challenge, )
class ChallengeHostTeam(TimeStampedModel):
"""
Model representing the Host Team for a partiuclar challenge
"""
team_name = models.CharField(max_length=100, unique=True)
created_by = models.ForeignKey(User, related_name='challenge_host_team_creator')
team_url = models.CharField(max_length=1000, default="", null=True)
def __str__(self):
return '{0}: {1}'.format(self.team_name, self.created_by)
def get_all_challenge_host_email(self):
email_ids = ChallengeHost.objects.filter(team_name=self).values_list('user__email', flat=True)
return list(email_ids)
class Meta:
app_label = 'hosts'
db_table = 'challenge_host_teams'
class ChallengeHost(TimeStampedModel):
# permission options
ADMIN = 'Admin'
READ = 'Read'
RESTRICTED = 'Restricted'
WRITE = 'Write'
# status options
ACCEPTED = 'Accepted'
DENIED = 'Denied'
PENDING = 'Pending'
SELF = 'Self'
UNKNOWN = 'Unknown'
PERMISSION_OPTIONS = (
(ADMIN, ADMIN),
(READ, READ),
(RESTRICTED, RESTRICTED),
(WRITE, WRITE),
)
STATUS_OPTIONS = (
(ACCEPTED, ACCEPTED),
(DENIED, DENIED),
(PENDING, PENDING),
(SELF, SELF),
(UNKNOWN, UNKNOWN),
)
user = models.ForeignKey(User)
team_name = models.ForeignKey('ChallengeHostTeam')
status = models.CharField(max_length=30, choices=STATUS_OPTIONS)
permissions = models.CharField(max_length=30, choices=PERMISSION_OPTIONS)
def __str__(self):
return '{0}:{1}:{2}'.format(self.team_name, self.user, self.permissions)
class Meta:
app_label = 'hosts'
db_table = 'challenge_host'
| 27.084507 | 102 | 0.663547 |
acf532ce55e3f911bcf97d5c33b004acd04b2bcc | 8,313 | py | Python | ztf_sim/Scheduler.py | ZwickyTransientFacility/ztf_sim | 78b184e5cfb0e16705b74c7bb40466b44d42cd71 | [
"BSD-3-Clause"
] | 11 | 2015-10-22T05:58:18.000Z | 2021-01-18T08:46:08.000Z | ztf_sim/Scheduler.py | ZwickyTransientFacility/ztf_sim | 78b184e5cfb0e16705b74c7bb40466b44d42cd71 | [
"BSD-3-Clause"
] | 2 | 2019-06-12T17:36:58.000Z | 2019-10-07T05:34:22.000Z | ztf_sim/Scheduler.py | ZwickyTransientFacility/ztf_sim | 78b184e5cfb0e16705b74c7bb40466b44d42cd71 | [
"BSD-3-Clause"
] | 5 | 2015-09-29T11:06:02.000Z | 2020-01-24T15:10:15.000Z | """Core scheduler classes."""
import configparser
from collections import defaultdict
import logging
import numpy as np
from astropy.time import Time
import astropy.units as u
from .QueueManager import ListQueueManager, GreedyQueueManager, GurobiQueueManager
from .ObsLogger import ObsLogger
from .configuration import SchedulerConfiguration
from .constants import BASE_DIR, PROGRAM_IDS, EXPOSURE_TIME, READOUT_TIME
from .utils import block_index, block_use_fraction
from .utils import next_12deg_evening_twilight, next_12deg_morning_twilight
class Scheduler(object):
def __init__(self, scheduler_config_file_fullpath,
run_config_file_fullpath, other_queue_configs = None,
output_path = BASE_DIR+'../sims/'):
self.logger = logging.getLogger(__name__)
self.scheduler_config = SchedulerConfiguration(
scheduler_config_file_fullpath)
self.queue_configs = self.scheduler_config.build_queue_configs()
self.queues = self.scheduler_config.build_queues(self.queue_configs)
self.timed_queues_tonight = []
self.set_queue('default')
self.run_config = configparser.ConfigParser()
self.run_config.read(run_config_file_fullpath)
if 'log_name' in self.run_config['scheduler']:
log_name = self.run_config['scheduler']['log_name']
else:
log_name = self.scheduler_config.config['run_name']
# initialize sqlite history
self.obs_log = ObsLogger(log_name,
output_path = output_path,
clobber=self.run_config['scheduler'].getboolean('clobber_db'),)
def set_queue(self, queue_name):
if queue_name not in self.queues:
raise ValueError(f'Requested queue {queue_name} not available!')
self.Q = self.queues[queue_name]
def add_queue(self, queue_name, queue, clobber=True):
if clobber or (queue_name not in self.queues):
self.queues[queue_name] = queue
else:
raise ValueError(f"Queue {queue_name} already exists!")
def delete_queue(self, queue_name):
if (queue_name in self.queues):
if self.Q.queue_name == queue_name:
self.set_queue('default')
del self.queues[queue_name]
else:
raise ValueError(f"Queue {queue_name} does not exist!")
def find_block_use_tonight(self, time_now):
# also sets up timed_queues_tonight
# start of the night
mjd_today = np.floor(time_now.mjd).astype(int)
# Look for timed queues that will be valid tonight,
# to exclude from the nightly solution
self.timed_queues_tonight = []
today = Time(mjd_today, format='mjd')
tomorrow = Time(mjd_today + 1, format='mjd')
block_start = block_index(today)[0]
block_stop = block_index(tomorrow)[0]
block_use = defaultdict(float)
# compute fraction of twilight blocks not available
evening_twilight = next_12deg_evening_twilight(today)
morning_twilight = next_12deg_morning_twilight(today)
evening_twilight_block = block_index(evening_twilight)[0]
frac_evening_twilight = block_use_fraction(
evening_twilight_block, today, evening_twilight)
block_use[evening_twilight_block] = frac_evening_twilight
self.logger.debug(f'{frac_evening_twilight} of block {evening_twilight_block} is before 12 degree twilight')
morning_twilight_block = block_index(morning_twilight)[0]
frac_morning_twilight = block_use_fraction(
morning_twilight_block, morning_twilight, tomorrow)
block_use[morning_twilight_block] = frac_morning_twilight
self.logger.debug(f'{frac_morning_twilight} of block {morning_twilight_block} is before 12 degree twilight')
for qq_name, qq in self.queues.items():
if qq.queue_name in ['default', 'fallback']:
continue
if qq.validity_window is not None:
qq_block_use = qq.compute_block_use()
is_tonight = False
# sum block use
for block, frac in qq_block_use.items():
if (block_start <= block <= block_stop):
if frac > 0:
is_tonight = True
self.logger.debug(f'{frac} of block {block} used by queue {qq.queue_name}')
block_use[block] += frac
if block_use[block] > 1:
self.logger.warn(f'Too many observations for block {block}: {block_use[block]}')
block_use[block] = 1.
if is_tonight:
self.timed_queues_tonight.append(qq_name)
return block_use
def count_timed_observations_tonight(self):
# determine how many equivalent obs are in timed queues
timed_obs = {prog:0 for prog in PROGRAM_IDS}
if len(self.timed_queues_tonight) == 0:
return timed_obs
for qq in self.timed_queues_tonight:
queue = self.queues[qq].queue.copy()
if 'n_repeats' not in queue.columns:
queue['n_repeats'] = 1.
queue['total_time'] = (queue['exposure_time'] +
READOUT_TIME.to(u.second).value)*queue['n_repeats']
net = queue[['program_id','total_time']].groupby('program_id').agg(np.sum)
count_equivalent = np.round(net['total_time']/(EXPOSURE_TIME + READOUT_TIME).to(u.second).value).astype(int).to_dict()
for k, v in count_equivalent.items():
timed_obs[k] += v
return timed_obs
def check_for_TOO_queue_and_switch(self, time_now):
# check if a TOO queue is now valid
for qq_name, qq in self.queues.items():
if qq.is_TOO:
if qq.is_valid(time_now):
# switch if the current queue is not a TOO
if (not self.Q.is_TOO) and len(qq.queue):
self.set_queue(qq_name)
# or if the current TOO queue is empty
if ((self.Q.is_TOO) and (len(self.Q.queue) == 0)
and len(qq.queue)):
self.set_queue(qq_name)
def check_for_timed_queue_and_switch(self, time_now):
# drop out of a timed queue if it's no longer valid
if self.Q.queue_name != 'default':
if not self.Q.is_valid(time_now):
self.set_queue('default')
# only switch from default or fallback queues
if self.Q.queue_name in ['default', 'fallback']:
# check if a timed queue is now valid
for qq_name, qq in self.queues.items():
if (qq.validity_window is not None) and (qq.is_valid(time_now)):
if (qq.queue_type == 'list'):
# list queues should have items in them
if len(qq.queue):
self.set_queue(qq_name)
else:
# don't have a good way to check length of non-list
# queues before nightly assignments
if qq.requests_in_window:
self.set_queue(qq_name)
def remove_empty_and_expired_queues(self, time_now):
queues_for_deletion = []
for qq_name, qq in self.queues.items():
if qq.queue_name in ['default', 'fallback']:
continue
if qq.validity_window is not None:
if qq.validity_window[1] < time_now:
self.logger.info(f'Deleting expired queue {qq_name}')
queues_for_deletion.append(qq_name)
continue
if (qq.queue_type == 'list') and (len(qq.queue) == 0):
self.logger.info(f'Deleting empty queue {qq_name}')
queues_for_deletion.append(qq_name)
# ensure we don't have duplicate values
queues_for_deletion = set(queues_for_deletion)
for qq_name in queues_for_deletion:
self.delete_queue(qq_name)
| 40.75 | 130 | 0.603031 |
acf53322794348c196f27d449150361d4396ef62 | 1,044 | py | Python | codigo/Live72/pyramid_cornice/pyramid_cornice/views/default.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 572 | 2018-04-03T03:17:08.000Z | 2022-03-31T19:05:32.000Z | codigo/Live72/pyramid_cornice/pyramid_cornice/views/default.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 176 | 2018-05-18T15:56:16.000Z | 2022-03-28T20:39:07.000Z | codigo/Live72/pyramid_cornice/pyramid_cornice/views/default.py | cassiasamp/live-de-python | 00b5e51793097544ba9b75c97a0d30e63970bf45 | [
"MIT"
] | 140 | 2018-04-18T13:59:11.000Z | 2022-03-29T00:43:49.000Z | from pyramid.view import view_config
from pyramid.response import Response
from sqlalchemy.exc import DBAPIError
from .. import models
@view_config(route_name='home', renderer='../templates/mytemplate.jinja2')
def my_view(request):
try:
query = request.dbsession.query(models.MyModel)
one = query.filter(models.MyModel.name == 'one').first()
except DBAPIError:
return Response(db_err_msg, content_type='text/plain', status=500)
return {'one': one, 'project': 'Pyramid Cornice'}
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to initialize your database tables with `alembic`.
Check your README.txt for descriptions and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| 31.636364 | 74 | 0.734674 |
acf533b6aef3278ce8b131dc6248b8abefdd6a8f | 2,785 | py | Python | tests/optimizers/test_binary.py | jole6826/pyswarms | d8bf200ea57cf013e158160d91423513c220e478 | [
"MIT"
] | 1 | 2019-03-07T06:41:43.000Z | 2019-03-07T06:41:43.000Z | tests/optimizers/test_binary.py | jole6826/pyswarms | d8bf200ea57cf013e158160d91423513c220e478 | [
"MIT"
] | null | null | null | tests/optimizers/test_binary.py | jole6826/pyswarms | d8bf200ea57cf013e158160d91423513c220e478 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.discrete import BinaryPSO
@pytest.mark.parametrize(
"options",
[
{"c2": 0.7, "w": 0.5, "k": 2, "p": 2},
{"c1": 0.5, "w": 0.5, "k": 2, "p": 2},
{"c1": 0.5, "c2": 0.7, "k": 2, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2},
],
)
def test_keyword_exception(options):
"""Tests if exceptions are thrown when keywords are missing"""
with pytest.raises(KeyError):
BinaryPSO(5, 2, options)
@pytest.mark.parametrize(
"options",
[
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": -1, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 6, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 5},
],
)
def test_invalid_k_or_p_values(options):
"""Tests if exception is thrown when passing
an invalid value for k or p"""
with pytest.raises(ValueError):
BinaryPSO(5, 2, options)
@pytest.mark.parametrize("velocity_clamp", [[1, 3], np.array([1, 3])])
def test_vclamp_type_exception(velocity_clamp, options):
"""Tests if exception is raised when velocity_clamp type is not a
tuple"""
with pytest.raises(TypeError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(1, 1, 1), (2, 3, 1)])
def test_vclamp_shape_exception(velocity_clamp, options):
"""Tests if exception is raised when velocity_clamp's size is not equal
to 2"""
with pytest.raises(IndexError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(3, 2), (10, 8)])
def test_vclamp_maxmin_exception(velocity_clamp, options):
"""Tests if the max velocity_clamp is less than min velocity_clamp and
vice-versa"""
with pytest.raises(ValueError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
def test_reset_default_values(binary_reset):
"""Tests if best cost and best pos are set properly when the reset()
method is called"""
assert binary_reset.swarm.best_cost == np.inf
assert set(binary_reset.swarm.best_pos) == set(np.array([]))
@pytest.mark.parametrize(
"history, expected_shape",
[
("cost_history", (1000,)),
("mean_pbest_history", (1000,)),
("mean_neighbor_history", (1000,)),
("pos_history", (1000, 10, 2)),
("velocity_history", (1000, 10, 2)),
],
)
def test_training_history_shape(binary_history, history, expected_shape):
"""Test if training histories are of expected shape"""
pso = vars(binary_history)
assert np.array(pso[history]).shape == expected_shape
| 31.647727 | 75 | 0.623339 |
acf533f516033c3d70e03fa1d96fdf38ed1cbe40 | 1,927 | py | Python | server/mahjong_portal/urls.py | MahjongRepository/mahjong-leaderboard | 77dfd26cb812c12fa7c2b11e862bb80a9135ccb0 | [
"MIT"
] | 2 | 2017-12-18T18:43:03.000Z | 2017-12-21T05:57:44.000Z | server/mahjong_portal/urls.py | MahjongRepository/mahjong-leaderboard | 77dfd26cb812c12fa7c2b11e862bb80a9135ccb0 | [
"MIT"
] | 11 | 2017-12-18T14:13:32.000Z | 2017-12-29T03:09:44.000Z | server/mahjong_portal/urls.py | MahjongRepository/mahjong-leaderboard | 77dfd26cb812c12fa7c2b11e862bb80a9135ccb0 | [
"MIT"
] | null | null | null | from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.decorators.cache import cache_page
from mahjong_portal.sitemap import (
ClubSitemap,
EMATournamentListSitemap,
PlayerSitemap,
RatingSitemap,
StaticSitemap,
TournamentAnnouncementSitemap,
TournamentListSitemap,
TournamentSitemap,
)
from online.views import finish_game_api
from website.views import players_api
sitemaps = {
"static": StaticSitemap,
"tournament_list": TournamentListSitemap,
"ema_tournaments_list": EMATournamentListSitemap,
"tournament_details": TournamentSitemap,
"tournament_announcement_details": TournamentAnnouncementSitemap,
"club_details": ClubSitemap,
"player_details": PlayerSitemap,
"rating_details": RatingSitemap,
}
urlpatterns = [
url(r"^admin/", include(admin.site.urls[:2])),
url(r"^i18n/", include("django.conf.urls.i18n")),
url(
r"^sitemap\.xml$",
cache_page(86400)(sitemap),
{"sitemaps": sitemaps},
name="django.contrib.sitemaps.views.sitemap",
),
url("^api/v0/players/$", players_api),
url("^api/v0/finish_game_api/$", finish_game_api),
url(r"^online/", include("online.urls")),
]
urlpatterns += i18n_patterns(
url(r"", include("website.urls")),
url(r"^rating/", include("rating.urls")),
url(r"^tournaments/", include("tournament.urls")),
url(r"^clubs/", include("club.urls")),
url(r"^players/", include("player.urls")),
url(r"^tenhou/", include("player.tenhou.urls")),
url(r"^ms/", include("player.mahjong_soul.urls")),
url(r"^system/", include("system.urls")),
url(r"^ema/", include("ema.urls")),
url(r"^account/", include("account.urls")),
url(r"^wiki/", include("wiki.urls")),
url(r"^league/", include("league.urls")),
)
| 32.661017 | 69 | 0.68604 |
acf534da8ee7d001394d394e7a301bee1aecd32c | 23,240 | py | Python | src/Model.py | Yuliang-Zou/tf_fcn | 88d8aad43987002e24d8c14c7678e052acfd9954 | [
"MIT"
] | 40 | 2017-03-15T02:37:24.000Z | 2021-06-03T08:02:20.000Z | src/Model.py | Yuliang-Zou/tf_fcn | 88d8aad43987002e24d8c14c7678e052acfd9954 | [
"MIT"
] | 6 | 2017-04-05T09:48:07.000Z | 2018-01-19T07:50:45.000Z | src/Model.py | Yuliang-Zou/tf_fcn | 88d8aad43987002e24d8c14c7678e052acfd9954 | [
"MIT"
] | 20 | 2017-04-08T00:15:23.000Z | 2019-07-11T08:34:44.000Z | # Define the vgg16 style model
# Author: Yuliang Zou
# ylzou@umich.edu
# Date: 2017-02-19
import tensorflow as tf
import numpy as np
from util import bilinear_upsample_weights
import ipdb
"""Define a base class, containing some useful layer functions"""
class Network(object):
def __init__(self, inputs):
self.inputs = []
self.layers = {}
self.outputs = {}
"""Extract parameters from ckpt file to npy file"""
def extract(self, data_path, session, saver):
raise NotImplementedError('Must be subclassed.')
"""Load pre-trained model from numpy data_dict"""
def load(self, data_dict, session, ignore_missing=True):
fc_shapes = {'fc6':(7,7,512,4096), 'fc7':(1,1,4096,4096)}
fc_scopes = {'fc6':'conv6', 'fc7':'conv7'}
for key in data_dict:
# Special cases: fc6 and fc7
if key == 'fc6' or key == 'fc7':
w = np.reshape(data_dict[key]['weights'], fc_shapes[key])
b = data_dict[key]['biases']
with tf.variable_scope(fc_scopes[key], reuse=True):
var1 = tf.get_variable('weights')
session.run(var1.assign(w))
print "Assign pretrain model weights to " + fc_scopes[key]
var2 = tf.get_variable('biases')
session.run(var2.assign(b))
print "Assign pretrain model biases to " + fc_scopes[key]
continue
with tf.variable_scope(key, reuse=True):
for subkey in data_dict[key]:
try:
var = tf.get_variable(subkey)
session.run(var.assign(data_dict[key][subkey]))
print "Assign pretrain model " + subkey + " to " + key
except ValueError:
print "Ignore " + key
if not ignore_missing:
raise
"""Get outputs given key names"""
def get_output(self, key):
if key not in self.outputs:
raise KeyError
return self.outputs[key]
"""Get parameters given key names"""
def get_param(self, key):
if key not in self.layers:
raise KeyError
return self.layers[key]['weights'], self.layers[key]['biases']
"""Add conv part of vgg16"""
def add_conv(self, inputs, num_classes, stage='TRAIN'):
# Dropout is different for training and testing
if stage == 'TRAIN':
keep_prob = 0.5
elif stage == 'TEST':
keep_prob = 1
else:
raise ValueError
# Conv1
with tf.variable_scope('conv1_1') as scope:
w_conv1_1 = tf.get_variable('weights', [3, 3, 3, 64],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv1_1 = tf.get_variable('biases', [64],
initializer=tf.constant_initializer(0))
z_conv1_1 = tf.nn.conv2d(inputs, w_conv1_1, strides=[1, 1, 1, 1],
padding='SAME') + b_conv1_1
a_conv1_1 = tf.nn.relu(z_conv1_1)
with tf.variable_scope('conv1_2') as scope:
w_conv1_2 = tf.get_variable('weights', [3, 3, 64, 64],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv1_2 = tf.get_variable('biases', [64],
initializer=tf.constant_initializer(0))
z_conv1_2 = tf.nn.conv2d(a_conv1_1, w_conv1_2, strides=[1, 1, 1, 1],
padding='SAME') + b_conv1_2
a_conv1_2 = tf.nn.relu(z_conv1_2)
pool1 = tf.nn.max_pool(a_conv1_2, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME', name='pool1')
# Conv2
with tf.variable_scope('conv2_1') as scope:
w_conv2_1 = tf.get_variable('weights', [3, 3, 64, 128],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv2_1 = tf.get_variable('biases', [128],
initializer=tf.constant_initializer(0))
z_conv2_1 = tf.nn.conv2d(pool1, w_conv2_1, strides=[1, 1, 1, 1],
padding='SAME') + b_conv2_1
a_conv2_1 = tf.nn.relu(z_conv2_1)
with tf.variable_scope('conv2_2') as scope:
w_conv2_2 = tf.get_variable('weights', [3, 3, 128, 128],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv2_2 = tf.get_variable('biases', [128],
initializer=tf.constant_initializer(0))
z_conv2_2 = tf.nn.conv2d(a_conv2_1, w_conv2_2, strides=[1, 1, 1, 1],
padding='SAME') + b_conv2_2
a_conv2_2 = tf.nn.relu(z_conv2_2)
pool2 = tf.nn.max_pool(a_conv2_2, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME', name='pool2')
# Conv3
with tf.variable_scope('conv3_1') as scope:
w_conv3_1 = tf.get_variable('weights', [3, 3, 128, 256],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv3_1 = tf.get_variable('biases', [256],
initializer=tf.constant_initializer(0))
z_conv3_1 = tf.nn.conv2d(pool2, w_conv3_1, strides= [1, 1, 1, 1],
padding='SAME') + b_conv3_1
a_conv3_1 = tf.nn.relu(z_conv3_1)
with tf.variable_scope('conv3_2') as scope:
w_conv3_2 = tf.get_variable('weights', [3, 3, 256, 256],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv3_2 = tf.get_variable('biases', [256],
initializer=tf.constant_initializer(0))
z_conv3_2 = tf.nn.conv2d(a_conv3_1, w_conv3_2, strides= [1, 1, 1, 1],
padding='SAME') + b_conv3_2
a_conv3_2 = tf.nn.relu(z_conv3_2)
with tf.variable_scope('conv3_3') as scope:
w_conv3_3 = tf.get_variable('weights', [3, 3, 256, 256],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv3_3 = tf.get_variable('biases', [256],
initializer=tf.constant_initializer(0))
z_conv3_3 = tf.nn.conv2d(a_conv3_2, w_conv3_3, strides= [1, 1, 1, 1],
padding='SAME') + b_conv3_3
a_conv3_3 = tf.nn.relu(z_conv3_3)
pool3 = tf.nn.max_pool(a_conv3_3, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME', name='pool3')
# Conv4
with tf.variable_scope('conv4_1') as scope:
w_conv4_1 = tf.get_variable('weights', [3, 3, 256, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv4_1 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv4_1 = tf.nn.conv2d(pool3, w_conv4_1, strides= [1, 1, 1, 1],
padding='SAME') + b_conv4_1
a_conv4_1 = tf.nn.relu(z_conv4_1)
with tf.variable_scope('conv4_2') as scope:
w_conv4_2 = tf.get_variable('weights', [3, 3, 512, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv4_2 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv4_2 = tf.nn.conv2d(a_conv4_1, w_conv4_2, strides= [1, 1, 1, 1],
padding='SAME') + b_conv4_2
a_conv4_2 = tf.nn.relu(z_conv4_2)
with tf.variable_scope('conv4_3') as scope:
w_conv4_3 = tf.get_variable('weights', [3, 3, 512, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv4_3 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv4_3 = tf.nn.conv2d(a_conv4_2, w_conv4_3, strides= [1, 1, 1, 1],
padding='SAME') + b_conv4_3
a_conv4_3 = tf.nn.relu(z_conv4_3)
pool4 = tf.nn.max_pool(a_conv4_3, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME', name='pool4')
# Conv5
with tf.variable_scope('conv5_1') as scope:
w_conv5_1 = tf.get_variable('weights', [3, 3, 512, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv5_1 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv5_1 = tf.nn.conv2d(pool4, w_conv5_1, strides= [1, 1, 1, 1],
padding='SAME') + b_conv5_1
a_conv5_1 = tf.nn.relu(z_conv5_1)
with tf.variable_scope('conv5_2') as scope:
w_conv5_2 = tf.get_variable('weights', [3, 3, 512, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv5_2 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv5_2 = tf.nn.conv2d(a_conv5_1, w_conv5_2, strides= [1, 1, 1, 1],
padding='SAME') + b_conv5_2
a_conv5_2 = tf.nn.relu(z_conv5_2)
with tf.variable_scope('conv5_3') as scope:
w_conv5_3 = tf.get_variable('weights', [3, 3, 512, 512],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv5_3 = tf.get_variable('biases', [512],
initializer=tf.constant_initializer(0))
z_conv5_3 = tf.nn.conv2d(a_conv5_2, w_conv5_3, strides= [1, 1, 1, 1],
padding='SAME') + b_conv5_3
a_conv5_3 = tf.nn.relu(z_conv5_3)
pool5 = tf.nn.max_pool(a_conv5_3, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME', name='pool5')
# Transform fully-connected layers to convolutional layers
with tf.variable_scope('conv6') as scope:
w_conv6 = tf.get_variable('weights', [7, 7, 512, 4096],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv6 = tf.get_variable('biases', [4096],
initializer=tf.constant_initializer(0))
z_conv6 = tf.nn.conv2d(pool5, w_conv6, strides= [1, 1, 1, 1],
padding='SAME') + b_conv6
a_conv6 = tf.nn.relu(z_conv6)
d_conv6 = tf.nn.dropout(a_conv6, keep_prob)
with tf.variable_scope('conv7') as scope:
w_conv7 = tf.get_variable('weights', [1, 1, 4096, 4096],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv7 = tf.get_variable('biases', [4096],
initializer=tf.constant_initializer(0))
z_conv7 = tf.nn.conv2d(d_conv6, w_conv7, strides= [1, 1, 1, 1],
padding='SAME') + b_conv7
a_conv7 = tf.nn.relu(z_conv7)
d_conv7 = tf.nn.dropout(a_conv7, keep_prob)
# Replace the original classifier layer
with tf.variable_scope('conv8') as scope:
w_conv8 = tf.get_variable('weights', [1, 1, 4096, num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_conv8 = tf.get_variable('biases', [num_classes],
initializer=tf.constant_initializer(0))
z_conv8 = tf.nn.conv2d(d_conv7, w_conv8, strides= [1, 1, 1, 1],
padding='SAME') + b_conv8
# Add to store dicts
self.outputs['conv1_1'] = a_conv1_1
self.outputs['conv1_2'] = a_conv1_2
self.outputs['pool1'] = pool1
self.outputs['conv2_1'] = a_conv2_1
self.outputs['conv2_2'] = a_conv2_2
self.outputs['pool2'] = pool2
self.outputs['conv3_1'] = a_conv3_1
self.outputs['conv3_2'] = a_conv3_2
self.outputs['conv3_3'] = a_conv3_3
self.outputs['pool3'] = pool3
self.outputs['conv4_1'] = a_conv4_1
self.outputs['conv4_2'] = a_conv4_2
self.outputs['conv4_3'] = a_conv4_3
self.outputs['pool4'] = pool4
self.outputs['conv5_1'] = a_conv5_1
self.outputs['conv5_2'] = a_conv5_2
self.outputs['conv5_3'] = a_conv5_3
self.outputs['pool5'] = pool5
self.outputs['conv6'] = d_conv6
self.outputs['conv7'] = d_conv7
self.outputs['conv8'] = z_conv8
self.layers['conv1_1'] = {'weights':w_conv1_1, 'biases':b_conv1_1}
self.layers['conv1_2'] = {'weights':w_conv1_2, 'biases':b_conv1_2}
self.layers['conv2_1'] = {'weights':w_conv2_1, 'biases':b_conv2_1}
self.layers['conv2_2'] = {'weights':w_conv2_2, 'biases':b_conv2_2}
self.layers['conv3_1'] = {'weights':w_conv3_1, 'biases':b_conv3_1}
self.layers['conv3_2'] = {'weights':w_conv3_2, 'biases':b_conv3_2}
self.layers['conv3_3'] = {'weights':w_conv3_3, 'biases':b_conv3_3}
self.layers['conv4_1'] = {'weights':w_conv4_1, 'biases':b_conv4_1}
self.layers['conv4_2'] = {'weights':w_conv4_2, 'biases':b_conv4_2}
self.layers['conv4_3'] = {'weights':w_conv4_3, 'biases':b_conv4_3}
self.layers['conv5_1'] = {'weights':w_conv5_1, 'biases':b_conv5_1}
self.layers['conv5_2'] = {'weights':w_conv5_2, 'biases':b_conv5_2}
self.layers['conv5_3'] = {'weights':w_conv5_3, 'biases':b_conv5_3}
self.layers['conv6'] = {'weights':w_conv6, 'biases':b_conv6}
self.layers['conv7'] = {'weights':w_conv7, 'biases':b_conv7}
self.layers['conv8'] = {'weights':w_conv8, 'biases':b_conv8}
"""Baseline model"""
class FCN32(Network):
def __init__(self, config):
self.num_classes = config['num_classes']
self.batch_num = config['batch_num']
self.max_size = config['max_size']
self.weight_decay = config['weight_decay']
self.base_lr = config['base_lr']
self.momentum = config['momentum']
self.img = tf.placeholder(tf.float32,
[self.batch_num, self.max_size[0], self.max_size[1], 3])
self.seg = tf.placeholder(tf.int32,
[self.batch_num, self.max_size[0], self.max_size[1], 1])
self.mask = tf.placeholder(tf.float32,
[self.batch_num, self.max_size[0], self.max_size[1], 1])
self.layers = {}
self.outputs = {}
self.set_up()
def set_up(self):
self.add_conv(self.img, self.num_classes)
self.add_deconv(bilinear=False)
self.add_loss_op()
self.add_weight_decay()
self.add_train_op()
"""Extract parameters from ckpt file to npy file"""
def extract(self, data_path, session, saver):
saver.restore(session, data_path)
scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1',
'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1',
'conv5_2', 'conv5_3', 'conv6', 'conv7', 'conv8']
data_dict = {}
for scope in scopes:
[w, b] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
data_dict[scope] = {'weights':w.eval(), 'biases':b.eval()}
file_name = data_path[0:-5]
np.save(file_name, data_dict)
return file_name + '.npy'
"""Add the deconv(upsampling) layer to get dense prediction"""
def add_deconv(self, bilinear=False):
conv8 = self.get_output('conv8')
with tf.variable_scope('deconv') as scope:
# Learn from scratch
if not bilinear:
w_deconv = tf.get_variable('weights', [64, 64, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter
else:
w_deconv = tf.get_variable('weights', trainable=True,
initializer=bilinear_upsample_weights(32, self.num_classes))
b_deconv = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv = tf.nn.conv2d_transpose(conv8, w_deconv,
[self.batch_num, self.max_size[0], self.max_size[1], self.num_classes],
strides=[1,32,32,1], padding='SAME', name='z') + b_deconv
# Add to store dicts
self.outputs['deconv'] = z_deconv
self.layers['deconv'] = {'weights':w_deconv, 'biases':b_deconv}
"""Add pixelwise softmax loss"""
def add_loss_op(self):
pred = self.get_output('deconv')
pred_reshape = tf.reshape(pred, [-1, self.num_classes])
gt_reshape = tf.reshape(self.seg, [-1])
        loss_reshape = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred_reshape, labels=gt_reshape)
loss = tf.reshape(loss_reshape, [self.batch_num, self.max_size[0], self.max_size[1], 1])
loss_valid = tf.reduce_sum(loss * self.mask, (1,2,3))
valid_pixels = tf.reduce_sum(self.mask, (1,2,3))
loss_avg = tf.reduce_mean(loss_valid / valid_pixels)
self.loss = loss_avg
"""Add weight decay"""
def add_weight_decay(self):
for key in self.layers:
w = self.layers[key]['weights']
self.loss += self.weight_decay * tf.nn.l2_loss(w)
"""Set up training optimization"""
def add_train_op(self):
# self.train_op = tf.train.MomentumOptimizer(self.base_lr,
# self.momentum).minimize(self.loss)
self.train_op = tf.train.AdamOptimizer(self.base_lr).minimize(self.loss)
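    # Minimal training sketch (illustrative only; `images`, `labels` and
    # `masks` are assumed numpy batches matching the placeholders above, and
    # any dropout placeholder used by add_conv must be fed as well):
    #
    #   model = FCN32(config)
    #   with tf.Session() as sess:
    #       sess.run(tf.global_variables_initializer())
    #       _, loss = sess.run([model.train_op, model.loss],
    #                          feed_dict={model.img: images,
    #                                     model.seg: labels,
    #                                     model.mask: masks})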
"""A better model"""
class FCN16(FCN32):
def __init__(self, config):
FCN32.__init__(self, config)
def set_up(self):
self.add_conv(self.img, self.num_classes)
self.add_shortcut(bilinear=True)
self.add_deconv(bilinear=False)
self.add_loss_op()
self.add_weight_decay()
self.add_train_op()
"""Extract parameters from ckpt file to npy file"""
def extract(self, data_path, session, saver):
saver.restore(session, data_path)
scopes = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1',
'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1',
'conv5_2', 'conv5_3', 'conv6', 'conv7', 'conv8', '2x_conv8',
'pool4_1x1']
data_dict = {}
for scope in scopes:
[w, b] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
data_dict[scope] = {'weights':w.eval(), 'biases':b.eval()}
file_name = data_path[0:-5]
np.save(file_name, data_dict)
return file_name + '.npy'
def add_shortcut(self, bilinear=True):
conv8 = self.get_output('conv8')
pool4 = self.get_output('pool4')
target_size = int(pool4.get_shape()[1])
with tf.variable_scope('2x_conv8') as scope:
# Learn from scratch
if not bilinear:
w_deconv = tf.get_variable('weights', [4, 4, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter
else:
w_deconv = tf.get_variable('weights', trainable=True,
initializer=bilinear_upsample_weights(2, self.num_classes))
b_deconv = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv = tf.nn.conv2d_transpose(conv8, w_deconv,
[self.batch_num, target_size, target_size, self.num_classes],
strides=[1,2,2,1], padding='SAME', name='z') + b_deconv
with tf.variable_scope('pool4_1x1') as scope:
w_pool4 = tf.get_variable('weights', [1, 1, 512, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_pool4 = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_pool4 = tf.nn.conv2d(pool4, w_pool4, strides= [1, 1, 1, 1],
padding='SAME') + b_pool4
# Element-wise sum
fusion = z_deconv + z_pool4
# Add to store dicts
self.outputs['2x_conv8'] = z_deconv
self.outputs['pool4_1x1'] = z_pool4
self.outputs['fusion'] = fusion
self.layers['2x_conv8'] = {'weights':w_deconv, 'biases':b_deconv}
self.layers['pool4_1x1'] = {'weights':w_pool4, 'biases':b_pool4}
"""Add the deconv(upsampling) layer to get dense prediction"""
def add_deconv(self, bilinear=False):
fusion = self.get_output('fusion')
with tf.variable_scope('deconv') as scope:
# Learn from scratch
if not bilinear:
w_deconv = tf.get_variable('weights', [32, 32, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter
else:
w_deconv = tf.get_variable('weights', trainable=True,
initializer=bilinear_upsample_weights(16, self.num_classes))
b_deconv = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv = tf.nn.conv2d_transpose(fusion, w_deconv,
[self.batch_num, self.max_size[0], self.max_size[1], self.num_classes],
strides=[1,16,16,1], padding='SAME', name='z') + b_deconv
# Add to store dicts
self.outputs['deconv'] = z_deconv
self.layers['deconv'] = {'weights':w_deconv, 'biases':b_deconv}
"""The best model"""
class FCN8(FCN16):
def __init__(self, config):
FCN16.__init__(self, config)
def add_shortcut(self, bilinear=True):
conv8 = self.get_output('conv8')
pool4 = self.get_output('pool4')
target_size = int(pool4.get_shape()[1])
with tf.variable_scope('2x_conv8') as scope:
# Learn from scratch
if not bilinear:
w_deconv = tf.get_variable('weights', [4, 4, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter
else:
w_deconv = tf.get_variable('weights', trainable=True,
initializer=bilinear_upsample_weights(2, self.num_classes))
b_deconv = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv = tf.nn.conv2d_transpose(conv8, w_deconv,
[self.batch_num, target_size, target_size, self.num_classes],
strides=[1,2,2,1], padding='SAME', name='z') + b_deconv
with tf.variable_scope('pool4_1x1') as scope:
w_pool4 = tf.get_variable('weights', [1, 1, 512, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_pool4 = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_pool4 = tf.nn.conv2d(pool4, w_pool4, strides= [1, 1, 1, 1],
padding='SAME') + b_pool4
# Element-wise sum
fusion1 = z_deconv + z_pool4
## Second fusion stage
pool3 = self.get_output('pool3')
with tf.variable_scope('pool3_1x1') as scope:
w_pool3 = tf.get_variable('weights', [1, 1, 256, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
b_pool3 = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_pool3 = tf.nn.conv2d(pool3, w_pool3, strides= [1, 1, 1, 1],
padding='SAME') + b_pool3
target_size = int(pool3.get_shape()[1])
with tf.variable_scope('2x_fusion') as scope:
# Learn from scratch
if not bilinear:
w_deconv2 = tf.get_variable('weights', [4, 4, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter
else:
w_deconv2 = tf.get_variable('weights', trainable=True,
initializer=bilinear_upsample_weights(2, self.num_classes))
b_deconv2 = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv2 = tf.nn.conv2d_transpose(fusion1, w_deconv2,
[self.batch_num, target_size, target_size, self.num_classes],
strides=[1,2,2,1], padding='SAME', name='z') + b_deconv2
fusion2 = z_pool3 + z_deconv2
# Add to store dicts
self.outputs['2x_conv8'] = z_deconv
self.outputs['pool4_1x1'] = z_pool4
self.outputs['pool3_1x1'] = z_pool3
self.outputs['2x_fusion'] = z_deconv2
self.outputs['fusion'] = fusion2
self.layers['2x_conv8'] = {'weights':w_deconv, 'biases':b_deconv}
self.layers['pool4_1x1'] = {'weights':w_pool4, 'biases':b_pool4}
self.layers['pool3_1x1'] = {'weights':w_pool3, 'biases':b_pool3}
self.layers['2x_fusion'] = {'weights':w_deconv2, 'biases':b_deconv2}
"""Add the deconv(upsampling) layer to get dense prediction"""
def add_deconv(self, bilinear=False):
fusion = self.get_output('fusion')
with tf.variable_scope('deconv') as scope:
# Learn from scratch
if not bilinear:
w_deconv = tf.get_variable('weights', [16, 16, self.num_classes, self.num_classes],
initializer=tf.truncated_normal_initializer(0.0, stddev=0.01))
            # Use a fixed bilinear upsampling filter (factor 8 to match the
            # 8x stride below)
            else:
                w_deconv = tf.get_variable('weights', trainable=True,
                    initializer=bilinear_upsample_weights(8, self.num_classes))
b_deconv = tf.get_variable('biases', [self.num_classes],
initializer=tf.constant_initializer(0))
z_deconv = tf.nn.conv2d_transpose(fusion, w_deconv,
[self.batch_num, self.max_size[0], self.max_size[1], self.num_classes],
strides=[1,8,8,1], padding='SAME', name='z') + b_deconv
# Add to store dicts
self.outputs['deconv'] = z_deconv
self.layers['deconv'] = {'weights':w_deconv, 'biases':b_deconv}
class FCN32_test(FCN32):
def __init__(self, config):
FCN32.__init__(self, config)
def set_up(self):
self.add_conv(self.img, self.num_classes, 'TEST')
self.add_deconv(bilinear=False)
class FCN16_test(FCN16):
def __init__(self, config):
FCN16.__init__(self, config)
def set_up(self):
self.add_conv(self.img, self.num_classes, 'TEST')
self.add_shortcut(bilinear=True)
self.add_deconv(bilinear=False)
class FCN8_test(FCN8):
def __init__(self, config):
FCN8.__init__(self, config)
def set_up(self):
self.add_conv(self.img, self.num_classes, 'TEST')
self.add_shortcut(bilinear=True)
self.add_deconv(bilinear=False)
if __name__ == '__main__':
config = {
'batch_num':5,
'iter':100000,
'num_classes':21,
'max_size':(500,500),
'weight_decay': 0.0005,
'base_lr': 0.0001,
'momentum': 0.9
}
model = FCN32(config)
#model = FCN16(config)
# model = FCN8(config)
| 37.303371 | 90 | 0.695267 |
acf534f940c7b22a4a61a653febe96f8d9b7ce9b | 6,755 | py | Python | tensorforce/core/optimizers/natural_gradient.py | marload/tensorforce | 7101282b2c4a0524361aeeab22d3a2c5a3dd03bc | [
"Apache-2.0"
] | 1 | 2020-04-02T11:51:48.000Z | 2020-04-02T11:51:48.000Z | tensorforce/core/optimizers/natural_gradient.py | marload/tensorforce | 7101282b2c4a0524361aeeab22d3a2c5a3dd03bc | [
"Apache-2.0"
] | null | null | null | tensorforce/core/optimizers/natural_gradient.py | marload/tensorforce | 7101282b2c4a0524361aeeab22d3a2c5a3dd03bc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import util
from tensorforce.core import parameter_modules
from tensorforce.core.optimizers import Optimizer
from tensorforce.core.optimizers.solvers import solver_modules
class NaturalGradient(Optimizer):
"""
Natural gradient optimizer (specification key: `natural_gradient`).
Args:
name (string): Module name
(<span style="color:#0000C0"><b>internal use</b></span>).
learning_rate (parameter, float >= 0.0): Learning rate as KL-divergence of distributions
between optimization steps (<span style="color:#C00000"><b>required</b></span>).
cg_max_iterations (int >= 0): Maximum number of conjugate gradient iterations.
(<span style="color:#00C000"><b>default</b></span>: 10).
cg_damping (0.0 <= float <= 1.0): Conjugate gradient damping factor.
(<span style="color:#00C000"><b>default</b></span>: 1e-3).
cg_unroll_loop (bool): Whether to unroll the conjugate gradient loop
(<span style="color:#00C000"><b>default</b></span>: false).
summary_labels ('all' | iter[string]): Labels of summaries to record
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
"""
def __init__(
self, name, learning_rate, cg_max_iterations=10, cg_damping=1e-3, cg_unroll_loop=False,
summary_labels=None
):
super().__init__(name=name, summary_labels=summary_labels)
self.learning_rate = self.add_module(
name='learning-rate', module=learning_rate, modules=parameter_modules, dtype='float',
min_value=0.0
)
self.solver = self.add_module(
name='conjugate-gradient', module='conjugate_gradient', modules=solver_modules,
max_iterations=cg_max_iterations, damping=cg_damping, unroll_loop=cg_unroll_loop
)
def tf_step(
self, variables, arguments, fn_loss, fn_kl_divergence, return_estimated_improvement=False,
**kwargs
):
# Optimize: argmin(w) loss(w + delta) such that kldiv(P(w) || P(w + delta)) = learning_rate
# For more details, see our blogpost:
# https://reinforce.io/blog/end-to-end-computation-graphs-for-reinforcement-learning/
# Calculates the product x * F of a given vector x with the fisher matrix F.
# Incorporating the product prevents having to calculate the entire matrix explicitly.
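        # Concretely, the code below uses the double-backprop identity
        #   F * delta = grad_w( delta^T * grad_w KL(P(w) || P(w + delta)) )
        # so two gradient passes suffice and the |w| x |w| Fisher matrix F
        # is never materialised.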
def fisher_matrix_product(deltas):
# Gradient is not propagated through solver.
deltas = [tf.stop_gradient(input=delta) for delta in deltas]
# kldiv
kldiv = fn_kl_divergence(**arguments)
# grad(kldiv)
kldiv_gradients = [
tf.convert_to_tensor(value=grad) for grad in tf.gradients(ys=kldiv, xs=variables)
]
# delta' * grad(kldiv)
delta_kldiv_gradients = tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(delta * grad))
for delta, grad in zip(deltas, kldiv_gradients)
])
# [delta' * F] = grad(delta' * grad(kldiv))
return [
tf.convert_to_tensor(value=grad)
for grad in tf.gradients(ys=delta_kldiv_gradients, xs=variables)
]
# loss
arguments = util.fmap(function=tf.stop_gradient, xs=arguments)
loss = fn_loss(**arguments)
# grad(loss)
loss_gradients = tf.gradients(ys=loss, xs=variables)
# Solve the following system for delta' via the conjugate gradient solver.
# [delta' * F] * delta' = -grad(loss)
# --> delta' (= lambda * delta)
deltas = self.solver.solve(
fn_x=fisher_matrix_product, x_init=None, b=[-grad for grad in loss_gradients]
)
# delta' * F
delta_fisher_matrix_product = fisher_matrix_product(deltas=deltas)
# c' = 0.5 * delta' * F * delta' (= lambda * c)
# TODO: Why constant and hence KL-divergence sometimes negative?
half = tf.constant(value=0.5, dtype=util.tf_dtype(dtype='float'))
constant = half * tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(delta_F * delta))
for delta_F, delta in zip(delta_fisher_matrix_product, deltas)
])
learning_rate = self.learning_rate.value()
# Zero step if constant <= 0
def no_step():
zero_deltas = [tf.zeros_like(input=delta) for delta in deltas]
if return_estimated_improvement:
return zero_deltas, tf.constant(value=0.0, dtype=util.tf_dtype(dtype='float'))
else:
return zero_deltas
# Natural gradient step if constant > 0
def apply_step():
# lambda = sqrt(c' / c)
lagrange_multiplier = tf.sqrt(x=(constant / learning_rate))
# delta = delta' / lambda
estimated_deltas = [delta / lagrange_multiplier for delta in deltas]
# improvement = grad(loss) * delta (= loss_new - loss_old)
estimated_improvement = tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(grad * delta))
for grad, delta in zip(loss_gradients, estimated_deltas)
])
# Apply natural gradient improvement.
applied = self.apply_step(variables=variables, deltas=estimated_deltas)
with tf.control_dependencies(control_inputs=(applied,)):
# Trivial operation to enforce control dependency
estimated_delta = util.fmap(function=util.identity_operation, xs=estimated_deltas)
if return_estimated_improvement:
return estimated_delta, estimated_improvement
else:
return estimated_delta
        # Natural gradient step only works if constant > 0
        apply_cond = constant > tf.constant(value=0.0, dtype=util.tf_dtype(dtype='float'))
        return self.cond(pred=apply_cond, true_fn=apply_step, false_fn=no_step)
| 43.301282 | 99 | 0.629312 |
acf5358f739ac59d3aca13160fa692f652c3cf2b | 6,231 | py | Python | pyAPES_utilities/timeseries_tools.py | LukeEcomod/pyAPES_VESBO | fdb4f44907e3055eb42db4a1260e0d7b9c55b415 | [
"MIT"
] | 3 | 2020-12-21T16:33:21.000Z | 2021-12-02T05:13:14.000Z | pyAPES_utilities/timeseries_tools.py | LukeEcomod/pyAPES_VESBO | fdb4f44907e3055eb42db4a1260e0d7b9c55b415 | [
"MIT"
] | null | null | null | pyAPES_utilities/timeseries_tools.py | LukeEcomod/pyAPES_VESBO | fdb4f44907e3055eb42db4a1260e0d7b9c55b415 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 10:19:19 2018
Note:
migrated to python3
- print()
@author: Kersti Haahti
"""
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def yearly_cumulative(results, variables):
if type(results) is pd.DataFrame:
years = results.index.year
dt = (results.index[1] - results.index[0]).total_seconds()
else: # xarray
years = results.date.dt.year.values
dt=(results.date[1]-results.date[0]).values.astype('timedelta64[s]').astype('float')
yearly_cum = np.empty([len(variables), len(years)])
for k in range (0, len(variables)):
yearly_cum[k,:] = np.cumsum(results[variables[k]].values)
for t in range (years[0], years[-1]):
ix = np.where(years > t)
for i in range(0, len(variables)):
yearly_cum[i,ix] = yearly_cum[i,ix] - yearly_cum[i,ix[0][0]]
for i in range(len(years)-1):
if years[i] != years[i+1]:
yearly_cum[:,i] = np.nan
return yearly_cum * dt
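# Usage sketch (hypothetical column name, for illustration only):
#   cum = yearly_cumulative(results, ['ET'])
# cum[0] then holds the cumulative sum of results['ET'] within each year,
# reset at year boundaries (NaN at the break) and scaled by the timestep
# in seconds.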
def diurnal_cycle(data, ap='hour'):
"""
computes ensemble diurnal cycle of flux or environmental data
Args:
data - pd.DataFrame or pd.Series. index = pd.datetime, i.e. '1996-01-01 00:30:00'
ap - averaging period. ap='hour' or 'minute'
Returns:
res - dict: keys == data.columns
values == pd.dataframe where
columns = ['hour', 'minu', 'N', 'mean', 'std', 'se',
'median', '5th', '25th', '75th', '95th'
]
NOTE:
seeks for unique hours and minutes in data, ensembles them and returns statistics.
        Nodata == np.NaN are omitted when statistics are computed.
Samuli Launiainen, Luke Jan 7th, 2018
"""
if isinstance(data, pd.Series):
data = data.to_frame()
if isinstance(data, pd.DataFrame):
r, c = np.shape(data) # rows, cols
hr = data.index.hour
mn = data.index.minute
hour = np.unique(hr)
minu = np.unique(mn)
cols = data.columns
else:
        raise TypeError('diurnal_cycle: data must be pd.DataFrame or pd.Series')
print('********** computing diurnal cycles *********')
res = {}
for k in range(c):
if ap.lower() == 'hour':
N = len(hour)
x = np.ones((N, 11))*np.NaN
x[:, 0] = hour
x[:, 1] = 0.0
n = 0
for t in hour:
y = data.iloc[:, k] # column k
f = np.where((hr == t) & (np.isfinite(y)))[0]
x[n, 2] = len(f) # no of observations
x[n, 3] = np.mean(y[f])
x[n, 4] = np.std(y[f])
                x[n, 5] = x[n, 4] / np.sqrt(x[n, 2]) # s.e. of the mean
x[n, 6:] = np.percentile(y[f], [50.0, 5.0, 25.0, 75.0, 95.0])
n += 1
res[cols[k]] = pd.DataFrame(x, columns=['hour', 'minu', 'N', 'mean', 'std', 'se',
'median', '5th', '25th', '75th', '95th'])
if ap.lower() == 'minute':
N = len(hour) * len(minu)
x = np.ones((N, 11))*np.NaN
n = 0
for t in hour:
for p in minu:
# print(k, t, p)
y = data.iloc[:, k] # column k
f = np.where((hr == t) & (mn == p) & (np.isfinite(y)))[0]
# print f
x[n, 0] = t
x[n, 1] = p
x[n, 2] = len(f) # no of observations
x[n, 3] = np.mean(y[f])
x[n, 4] = np.std(y[f])
                    x[n, 5] = x[n, 4] / np.sqrt(x[n, 2]) # s.e. of the mean
x[n, 6:] = np.percentile(y[f], [50.0, 5.0, 25.0, 75.0, 95.0])
n += 1
res[cols[k]] = pd.DataFrame(x, columns=['hour', 'minu', 'N', 'mean', 'std', 'se',
'median', '5th', '25th', '75th', '95th'])
return res
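# Usage sketch (synthetic data, for illustration only):
#   idx = pd.date_range('2020-06-01', periods=48 * 30, freq='30min')
#   df = pd.DataFrame({'NEE': np.random.randn(len(idx))}, index=idx)
#   cycles = diurnal_cycle(df, ap='hour')
#   cycles['NEE'][['hour', 'mean', 'se']]  # ensemble mean diurnal course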
def fill_gaps(df, res_col_name, description, fill_nan=None, plot=False):
"""
Fill gaps with other available data
Args:
df (DataFrame): columns in priority order to be used
res_col_name (string): name of column in dataframe returned by function
Returns:
df (Dataframe): resulting dataframe with
'res_col_name': data collected from input
'res_col_name_flag': flags defining data source
        info (string): flag definitions and their frequency
"""
col_names = list(df.columns.values)
info = "\n" + res_col_name + ": " + description
flag = res_col_name + "_flag"
NN = float(len(df))
for i in range(len(col_names)):
col_name = col_names[i]
if i == 0:
df[res_col_name] = df[col_name].values
df[flag] = np.where(df[res_col_name].notnull(), i, len(col_names))
info += "\n flag %s (%.2f" % (i, df[res_col_name].notnull().sum()/NN * 100) + "%): " + col_name
else:
            df.loc[df[res_col_name].isnull() & df[col_name].notnull(), flag] = i
df[res_col_name] = df[res_col_name].fillna(df[col_name])
info += "\n flag %s (%.2f" % (i, sum(df[flag]==i)/NN * 100) + "%): " + col_name
if fill_nan == 'linear':
df = df.interpolate()
message = "linearly interpolated"
# fill nans in beginning and end
if df[res_col_name].isnull().sum() > 0:
            df.loc[df[res_col_name].isnull(), flag] = len(col_names) + 1
df = df.fillna(method='bfill')
df = df.fillna(method='ffill')
            message += "\n    flag %s (%.2f" % (len(col_names) + 1, sum(df[flag]==len(col_names) + 1)/NN * 100) + "%): filled with nearest"
elif type(fill_nan) == float:
df = df.fillna(fill_nan)
message = "filled with " + res_col_name + " = "+ str(fill_nan)
else:
message = "still missing!!!"
info += "\n flag %s (%.2f" % (len(col_names), sum(df[flag]==len(col_names))/NN * 100) + "%): " + message
if plot:
df[[res_col_name, flag]].plot(subplots=True)
plt.title(info, y=-0.6, fontsize=9)
return df[[res_col_name, flag]], info
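# Usage sketch (hypothetical column names, for illustration only):
#   filled, info = fill_gaps(df[['Tair_meas', 'Tair_nearby', 'Tair_model']],
#                            'Tair', 'air temperature', fill_nan='linear')
# 'Tair' is filled from the columns in priority order and 'Tair_flag'
# records which source (or interpolation) produced each value.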
| 37.536145 | 120 | 0.497031 |
acf53647d829d2ca9ba8aea0f6d6f7cf9d7ed100 | 430 | py | Python | signal_processing/Filters/highpass_filter.py | jiruifu-jerry0219/EMG_Process_Toolkit | 2eb74cd853fb194562b9f67ac794286697100943 | [
"MIT"
] | null | null | null | signal_processing/Filters/highpass_filter.py | jiruifu-jerry0219/EMG_Process_Toolkit | 2eb74cd853fb194562b9f67ac794286697100943 | [
"MIT"
] | null | null | null | signal_processing/Filters/highpass_filter.py | jiruifu-jerry0219/EMG_Process_Toolkit | 2eb74cd853fb194562b9f67ac794286697100943 | [
"MIT"
] | null | null | null | from butterworth_filter import butter_highpass_filter as bhpf
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import math
def hpf(data, fs, fc, order):
"""
    Apply a Butterworth high-pass filter.
    data: raw signal to be filtered
    fs: sampling rate (Hz)
    fc: cutoff frequency (Hz)
    order: filter order
"""
b, a = bhpf(fs, fc, order)
y = signal.filtfilt(b, a, data)
return y
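# Usage sketch (assumed 1 kHz sampling, 20 Hz cutoff, 4th-order filter):
#   filtered = hpf(raw_emg, fs=1000, fc=20, order=4)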
| 23.888889 | 61 | 0.695349 |
acf5371f643b30a2a6ceec95299c4d398d586b03 | 4,033 | py | Python | guild/plugins/gpu.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | null | null | null | guild/plugins/gpu.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | null | null | null | guild/plugins/gpu.py | dwolfschlaeger/guildai | f82102ad950d7c89c8f2c2eafe596b2d7109dc57 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import csv
import io
import subprocess
import sys
from guild import util
from guild.plugins.summary_util import SummaryPlugin
STATS = [
"index",
"fan.speed",
"pstate",
"memory.total",
"memory.used",
"utilization.gpu",
"utilization.memory",
"temperature.gpu",
"power.draw",
]
class GPUPlugin(SummaryPlugin):
_stats_cmd = None
def __init__(self, ep):
super(GPUPlugin, self).__init__(ep)
self._stats_cmd = _stats_cmd()
def enabled_for_op(self, _op):
if not self._stats_cmd:
return False, "nvidia-smi not available"
return True, ""
def read_summary_values(self, _step):
return self._gpu_stats(self._stats_cmd) if self._stats_cmd else {}
def _gpu_stats(self, stats_cmd):
stats = {}
for raw in self._read_raw_gpu_stats(stats_cmd):
stats.update(self._calc_gpu_stats(raw))
return stats
def _read_raw_gpu_stats(self, stats_cmd):
p = subprocess.Popen(stats_cmd, stdout=subprocess.PIPE)
raw_lines = _read_csv_lines(p.stdout)
result = p.wait()
if result == 0:
return raw_lines
else:
self.log.debug("reading GPU stats (smi output: '%s')", raw_lines)
return []
@staticmethod
def _calc_gpu_stats(raw):
# See STATS for list of stat names/indexes
index = raw[0]
mem_total = _parse_raw(raw[3], _parse_bytes)
mem_used = _parse_raw(raw[4], _parse_bytes)
vals = [
("fanspeed", _parse_raw(raw[1], _parse_percent)),
("pstate", _parse_raw(raw[2], _parse_pstate)),
("mem_total", mem_total),
("mem_used", mem_used),
("mem_free", mem_total - mem_used),
("mem_util", _parse_raw(raw[6], _parse_percent)),
("util", _parse_raw(raw[5], _parse_percent)),
("temp", _parse_raw(raw[7], _parse_int)),
("powerdraw", _parse_raw(raw[8], _parse_watts)),
]
return dict([(_gpu_val_key(index, name), val) for name, val in vals])
def _stats_cmd():
nvidia_smi = util.which("nvidia-smi")
if not nvidia_smi:
return None
else:
return [
nvidia_smi,
"--format=csv,noheader",
"--query-gpu=%s" % ",".join(STATS),
]
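# For reference, the list above builds the equivalent of running:
#   nvidia-smi --format=csv,noheader --query-gpu=index,fan.speed,pstate,\
#     memory.total,memory.used,utilization.gpu,utilization.memory,\
#     temperature.gpu,power.draw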
def _read_csv_lines(raw_in):
csv_in = raw_in if sys.version_info[0] == 2 else io.TextIOWrapper(raw_in)
return list(csv.reader(csv_in))
def _parse_raw(raw, parser):
stripped = raw.strip()
if stripped == "[Not Supported]":
return None
else:
return parser(stripped)
def _parse_pstate(val):
assert val.startswith("P"), val
return int(val[1:])
def _parse_int(val):
return int(val)
def _parse_percent(val):
if val.endswith(" %"):
return float(val[0:-2]) / 100
elif "N/A" in val:
return None
else:
assert False, repr(val)
def _parse_bytes(val):
if val.endswith(" MiB"):
return int(val[0:-4]) * 1024 * 1024
elif "N/A" in val:
return None
else:
assert False, repr(val)
def _parse_watts(val):
if val.endswith(" W"):
return float(val[0:-2])
elif "N/A" in val:
return None
else:
assert False, repr(val)
def _gpu_val_key(index, name):
return "sys/gpu%s/%s" % (index, name)
| 26.019355 | 77 | 0.620878 |
acf5372c35e168a95ff032fec68e04937bb380d8 | 783 | py | Python | Part_2/Flask_Blog/flaskblog.py | andchrlambda/flask_blog | 878d568f083e60e49fede2ac4507e62704d44fb8 | [
"MIT"
] | null | null | null | Part_2/Flask_Blog/flaskblog.py | andchrlambda/flask_blog | 878d568f083e60e49fede2ac4507e62704d44fb8 | [
"MIT"
] | 2 | 2021-06-08T20:44:44.000Z | 2022-01-13T01:57:14.000Z | Part_2/Flask_Blog/flaskblog.py | andchrlambda/flask_blog | 878d568f083e60e49fede2ac4507e62704d44fb8 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, url_for
app = Flask(__name__)
posts = [
{
'author': 'Andrea Christelle',
'title': 'Philosophy for breakfast',
'content': 'First post content',
'date_posted': 'April 20, 2018'
},
{
'author': 'Andrea Christelle',
'title': 'Philosophy for lunch',
'content': 'Second post content',
'date_posted': 'April 21, 2018'
}
]
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html', posts=posts)
# the left-hand posts is the variable name exposed to the template
# the right-hand posts is the Python list defined above
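# In home.html the list can then be rendered with Jinja, e.g.:
#   {% for post in posts %} {{ post.title }} by {{ post.author }} {% endfor %}
# (the template under templates/ is assumed here, not shown in this file)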
@app.route("/about")
def about():
return render_template('about.html', title='About')
if __name__=='__main__':
app.run(debug=True) | 24.46875 | 56 | 0.582375 |
acf53784678b07f35751ff88f65585d331ed043b | 738 | py | Python | Remove-pdf-page/rmpdfpg.py | AShuuu-Technoid/Python-Project | c3795fe59481ff0442f55874367ee1fb7370a229 | [
"MIT"
] | null | null | null | Remove-pdf-page/rmpdfpg.py | AShuuu-Technoid/Python-Project | c3795fe59481ff0442f55874367ee1fb7370a229 | [
"MIT"
] | null | null | null | Remove-pdf-page/rmpdfpg.py | AShuuu-Technoid/Python-Project | c3795fe59481ff0442f55874367ee1fb7370a229 | [
"MIT"
] | null | null | null | import os
from PyPDF2 import PdfFileWriter, PdfFileReader
def rmpdfpg():
FileList = os.listdir("/Users/ashwin.m/Documents/scripts/python/removepdfpage")
for fn in FileList:
if fn.endswith(".pdf"):
fnm=os.path.join("/Users/ashwin.m/Documents/scripts/python/removepdfpage",fn)
pages_to_delete = [0] # page numbering starts from 0
            infile = PdfFileReader(fnm)
output = PdfFileWriter()
for i in range(infile.getNumPages()):
if i not in pages_to_delete:
p=infile.getPage(i)
output.addPage(p)
with open(fnm, 'wb') as f:
output.write(f)
if __name__ == "__main__":
rmpdfpg()
| 35.142857 | 89 | 0.585366 |
acf537e2e70a0b82ec9f9143f6a590e621ef2026 | 4,153 | py | Python | video_transfer.py | sonnguyentruong129/Real-time-Style-Transfer | 64803c40f50e2737917fb8506e12733d480bd2dd | [
"MIT"
] | 1 | 2021-06-06T23:14:06.000Z | 2021-06-06T23:14:06.000Z | video_transfer.py | sonnguyentruong129/Real-time-Style-Transfer | 64803c40f50e2737917fb8506e12733d480bd2dd | [
"MIT"
] | null | null | null | video_transfer.py | sonnguyentruong129/Real-time-Style-Transfer | 64803c40f50e2737917fb8506e12733d480bd2dd | [
"MIT"
] | null | null | null | import argparse
from model.VGG import *
from model.Decoder import *
from model.Transform import *
import os
from PIL import Image
from os.path import basename
from os.path import splitext
from torchvision import transforms
from torchvision.utils import save_image
import cv2 as cv
import numpy as np
import time
def test_transform():
transform_list = []
transform_list.append(transforms.ToTensor())
transform = transforms.Compose(transform_list)
return transform
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument('--content', type=str, default = 'input/190625_04_CityMisc_HD_08.mp4',
help='File path to the content image')
parser.add_argument('--style', type=str, default = 'style/style11.jpg',
help='File path to the style image, or multiple style \
images separated by commas if you want to do style \
interpolation or spatial control')
parser.add_argument('--steps', type=str, default = 1)
parser.add_argument('--vgg', type=str, default = 'weight/vgg_normalised.pth')
parser.add_argument('--decoder', type=str, default = 'experiments/decoder_iter_600000.pth')
parser.add_argument('--transform', type=str, default = 'experiments/transformer_iter_600000.pth')
# Additional options
parser.add_argument('--save_ext', default = 'output.avi',
                    help='The file name of the output video')
parser.add_argument('--output', type=str, default = 'output',
help='Directory to save the output image(s)')
# Advanced options
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(args.output):
os.mkdir(args.output)
decoder = Decoder('Decoder')
transform = Transform(in_planes=512)
vgg = VGG('VGG19')
decoder.eval()
transform.eval()
vgg.eval()
# decoder.features.load_state_dict(torch.load(args.decoder))
decoder.load_state_dict(torch.load(args.decoder))
transform.load_state_dict(torch.load(args.transform))
vgg.features.load_state_dict(torch.load(args.vgg))
enc_1 = nn.Sequential(*list(vgg.features.children())[:4]) # input -> relu1_1
enc_2 = nn.Sequential(*list(vgg.features.children())[4:11]) # relu1_1 -> relu2_1
enc_3 = nn.Sequential(*list(vgg.features.children())[11:18]) # relu2_1 -> relu3_1
enc_4 = nn.Sequential(*list(vgg.features.children())[18:31]) # relu3_1 -> relu4_1
enc_5 = nn.Sequential(*list(vgg.features.children())[31:44]) # relu4_1 -> relu5_1
enc_1.to(device)
enc_2.to(device)
enc_3.to(device)
enc_4.to(device)
enc_5.to(device)
transform.to(device)
decoder.to(device)
content_tf = test_transform()
style_tf = test_transform()
cap = cv.VideoCapture(args.content)
style = style_tf(Image.open(args.style))
style = style.to(device).unsqueeze(0)
fourcc = cv.VideoWriter_fourcc(*'XVID')
fps = cap.get(cv.CAP_PROP_FPS)
out = cv.VideoWriter(args.save_ext, fourcc, fps, (512, 512))
fps = 0
while True:
fps += 1
ret, frame = cap.read()
    if not ret:
break
frame = cv.resize(frame, (512, 512))
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    frame = frame.astype(np.float64) / 255
content = torch.from_numpy(frame)
content = content.transpose(0, 2)
content = content.transpose(1, 2)
content = content.type(torch.FloatTensor)
content = content.to(device).unsqueeze(0)
with torch.no_grad():
start = time.time()
Style4_1 = enc_4(enc_3(enc_2(enc_1(style))))
Style5_1 = enc_5(Style4_1)
Content4_1 = enc_4(enc_3(enc_2(enc_1(content))))
Content5_1 = enc_5(Content4_1)
content = decoder(transform(Content4_1, Style4_1, Content5_1, Style5_1))
end = time.time()
    content = content.clamp(0, 255)
content = content.cpu()
content = content[0]
content = content.transpose(1, 2)
content = content.transpose(0, 2)
content = content.numpy()*255
output_value = np.clip(content, 0, 255).astype(np.uint8)
output_value = cv.cvtColor(output_value, cv.COLOR_RGB2BGR)
out.write(output_value)
print('frame(%s) transfer! fps:%.2f' % (fps,1/(end-start))) | 33.764228 | 97 | 0.691067 |
acf5387299829b2ea77ddefa6d67f42abadb5596 | 979 | py | Python | examples/tutorial/run_experiment.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 1 | 2020-03-30T18:56:02.000Z | 2020-03-30T18:56:02.000Z | examples/tutorial/run_experiment.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 9 | 2018-11-19T00:04:52.000Z | 2022-03-11T23:51:18.000Z | examples/tutorial/run_experiment.py | Koozco/pmp | 3ad96b01afb2ce6644eb1917ae88ee6d72adc8cf | [
"MIT"
] | 2 | 2018-12-02T10:43:34.000Z | 2020-03-30T10:15:50.000Z | from pmp.experiments import ExperimentConfig, generate_uniform, Experiment, generate_gauss
from pmp.rules import Bloc, Borda
experiments_num = 100
n = 200
m = 200
k = 20
uniform_config = ExperimentConfig('uniform')
uniform_config.add_candidates(lambda: generate_uniform(-3, -3, 3, 3, m, 'None'))
uniform_config.add_voters(lambda: generate_uniform(-3, -3, 3, 3, n, 'None'))
gaussian_config = ExperimentConfig('gaussian')
gaussian_config.add_candidates(lambda: generate_gauss(0.0, 0.0, 1.0, m, 'None'))
gaussian_config.add_voters(lambda: generate_gauss(0.0, 0.0, 1.0, n, 'None'))
configs = [uniform_config, gaussian_config]
rules = [Bloc, Borda]
for config in configs:
experiment = Experiment(config)
experiment.set_generated_dir_path('paper_generated')
for rule in rules:
election_name = "{}-{}".format(config.id, rule.__name__)
experiment.add_election(rule, k, election_name)
experiment.run(n=experiments_num, save_win=True, log_on=False)
| 32.633333 | 90 | 0.743616 |
acf5387d8962b3d1629ac9fa92d8b8ba37972c95 | 4,233 | py | Python | tests/integration/widgets/test_checkbox_group.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/widgets/test_checkbox_group.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | tests/integration/widgets/test_checkbox_group.py | ArchaeotheriumSapienter/bokeh | 08bfae97a91db5bdc989c6ab33ec6a5125ed0d01 | [
"BSD-3-Clause"
] | null | null | null | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from flaky import flaky
# Bokeh imports
from bokeh._testing.plugins.project import BokehModelPage, BokehServerPage
from bokeh._testing.util.selenium import RECORD, find_element_for, find_elements_for
from bokeh.layouts import column
from bokeh.models import (
CheckboxGroup,
Circle,
ColumnDataSource,
CustomJS,
Plot,
Range1d,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
LABELS = ["Option 1", "Option 2", "Option 3"]
@pytest.mark.selenium
class Test_CheckboxGroup:
@pytest.mark.parametrize('inline', [True, False])
def test_displays_options_list_of_string_labels_setting_inline(self, inline, bokeh_model_page: BokehModelPage) -> None:
group = CheckboxGroup(labels=LABELS, inline=inline)
page = bokeh_model_page(group)
labels = find_elements_for(page.driver, group, "label")
assert len(labels) == 3
for i, label in enumerate(labels):
assert label.text == LABELS[i]
input = label.find_element_by_tag_name('input')
assert input.get_attribute('value') == str(i)
assert input.get_attribute('type') == 'checkbox'
@flaky(max_runs=10)
def test_server_on_change_round_trip(self, bokeh_server_page: BokehServerPage) -> None:
group = CheckboxGroup(labels=LABELS)
def modify_doc(doc):
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1], val=["a", "b"]))
plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Circle(x='x', y='y', size=20))
plot.tags.append(CustomJS(name="custom-action", args=dict(s=source), code=RECORD("data", "s.data")))
def cb(active):
source.data['val'] = (active + [0, 0])[:2] # keep col length at 2, padded with zero
group.on_click(cb)
doc.add_root(column(group, plot))
page = bokeh_server_page(modify_doc)
el = find_element_for(page.driver, group, 'input[value="2"]')
el.click()
page.eval_custom_action()
results = page.results
assert results['data']['val'] == [2, 0]
el = find_element_for(page.driver, group, 'input[value="0"]')
el.click()
page.eval_custom_action()
results = page.results
assert results['data']['val'] == [0, 2]
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
def test_js_on_change_executes(self, bokeh_model_page: BokehModelPage) -> None:
group = CheckboxGroup(labels=LABELS)
group.js_on_click(CustomJS(code=RECORD("active", "cb_obj.active")))
page = bokeh_model_page(group)
el = find_element_for(page.driver, group, 'input[value="2"]')
el.click()
results = page.results
assert results['active'] == [2]
el = find_element_for(page.driver, group, 'input[value="0"]')
el.click()
results = page.results
assert results['active'] == [0, 2]
el = find_element_for(page.driver, group, 'input[value="2"]')
el.click()
results = page.results
assert results['active'] == [0]
assert page.has_no_console_errors()
| 34.414634 | 123 | 0.547838 |
acf53899551d80a0133fcbf5e6ddab9e62313557 | 2,187 | py | Python | recommender/time_periods.py | google/article-recommender | ab5d8472702388550926505a3b99439bb9228cd6 | [
"Apache-2.0"
] | 8 | 2020-09-16T16:31:21.000Z | 2021-06-03T17:19:55.000Z | recommender/time_periods.py | google/article-recommender | ab5d8472702388550926505a3b99439bb9228cd6 | [
"Apache-2.0"
] | null | null | null | recommender/time_periods.py | google/article-recommender | ab5d8472702388550926505a3b99439bb9228cd6 | [
"Apache-2.0"
] | 3 | 2020-10-13T16:30:13.000Z | 2020-12-08T10:56:38.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines time periods that are used in the app."""
from datetime import timedelta
LAST_VISIT_RESTRICTED = 'LAST_VISIT_RESTRICTED'
LAST_VISIT = 'LAST_VISIT'
RECENT = 'RECENT'
HOUR = 'HOUR'
DAY = 'DAY'
WEEK = 'WEEK'
MONTH = 'MONTH'
YEAR = 'YEAR'
ALL = 'ALL'
ALL_NUMERIC = 0
YEAR_NUMERIC = 1
MONTH_NUMERIC = 2
WEEK_NUMERIC = 3
DAY_NUMERIC = 4
HOUR_NUMERIC = 5
RECENT_NUMERIC = 6
LAST_VISIT_NUMERIC = 7
LAST_VISIT_RESTRICTED_NUMERIC = 8
TIME_PERIODS = [
{
'timedelta': timedelta.max,
'name': ALL,
'numeric': ALL_NUMERIC
},
{
'timedelta': timedelta(days=365),
'name': YEAR,
'numeric': YEAR_NUMERIC
},
{
'timedelta': timedelta(days=31),
'name': MONTH,
'numeric': MONTH_NUMERIC
},
{
'timedelta': timedelta(weeks=1),
'name': WEEK,
'numeric': WEEK_NUMERIC
},
{
'timedelta': timedelta(days=1),
'name': DAY,
'numeric': DAY_NUMERIC
},
{
'timedelta': timedelta(hours=1),
'name': HOUR,
'numeric': HOUR_NUMERIC
},
{
'timedelta': timedelta(hours=0),
'name': RECENT,
'numeric': RECENT_NUMERIC
},
{
'timedelta': timedelta(hours=0),
'name': LAST_VISIT,
'numeric': LAST_VISIT_NUMERIC
},
{
'timedelta': timedelta(hours=0),
'name': LAST_VISIT_RESTRICTED,
'numeric': LAST_VISIT_RESTRICTED_NUMERIC
},
]
def Get(name):
for time_period in TIME_PERIODS:
if time_period['name'] == name:
return time_period
return TIME_PERIODS[-1]
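# Usage sketch:
#   Get(WEEK)['timedelta']  # timedelta(weeks=1)
#   Get('unknown')          # falls through to the last entry in TIME_PERIODS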
| 23.771739 | 74 | 0.628258 |
acf538cabeaba58ba4742632a9a2034e849785af | 827 | py | Python | src/streamlink/utils/encoding.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 5 | 2019-07-26T17:03:26.000Z | 2020-10-17T23:23:43.000Z | src/streamlink/utils/encoding.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 9 | 2018-01-14T15:20:23.000Z | 2021-03-08T20:29:51.000Z | src/streamlink/utils/encoding.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 4 | 2018-01-14T13:27:25.000Z | 2021-11-15T22:28:30.000Z | from sys import getfilesystemencoding
from streamlink.compat import is_win32, is_py2
def get_filesystem_encoding():
file_system_encoding = getfilesystemencoding()
if file_system_encoding is None: # `None` not possible after python 3.2
if is_win32:
file_system_encoding = 'mbcs'
else:
file_system_encoding = 'utf-8'
return file_system_encoding
def maybe_encode(text, encoding="utf8"):
if is_py2:
if isinstance(text, unicode):
return text.encode(encoding)
else:
return text
else:
return text
def maybe_decode(text, encoding="utf8"):
if is_py2 and isinstance(text, str):
return text.decode(encoding)
else:
return text
__all__ = ["get_filesystem_encoding", "maybe_decode", "maybe_encode"]
| 24.323529 | 76 | 0.665054 |
acf53946f0c0694d25a569bc54c2add442422ac9 | 1,460 | py | Python | aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/DescribeClusterConnectionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/DescribeClusterConnectionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/DescribeClusterConnectionRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class DescribeClusterConnectionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'DescribeClusterConnection','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId) | 38.421053 | 88 | 0.771233 |
acf539a8822a503e84cd616266e48a8ec48ee4ef | 6,274 | py | Python | src/strings.py | sachithdickwella/python-tute | 42583d04db0ab907288b9bef0b022730eec0aa7d | [
"Apache-2.0"
] | null | null | null | src/strings.py | sachithdickwella/python-tute | 42583d04db0ab907288b9bef0b022730eec0aa7d | [
"Apache-2.0"
] | null | null | null | src/strings.py | sachithdickwella/python-tute | 42583d04db0ab907288b9bef0b022730eec0aa7d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
name = "Python 'strings' program"
print(type(name)) # Type of the reference.
print(len(name)) # Length of the string value.
# Get the character from the index out of string.
hello = "Hello, World"
print(hello[0]) # First letter of the string(H).
print(hello[10]) # 11th letter of the string(l).
print(hello[-2]) # 2nd letter from the end(l)
print(hello[-1]) # 1st letter from the end [last letter](d).
# String slicing (substring).
chars = "abcdefghijk"
print(chars[2:]) # 3rd letter to end of the string.
print(chars[:3]) # Beginning to 4th letter (4th letter not included).
print(chars[3:6]) # From 4th(index 3) up to 7th(index 6) letter(7th letter not included).
print(chars[1:3]) # From 2nd(index 1) up to 4th(index 3) letter(4th letter not included).
# String steps.
print(chars[:]) # All the way to the end.
print(chars[::]) # All the way to the end with steps.
print(chars[::1]) # All the way to the end with steps 1 (default value is 1).
print(chars[::2]) # All the way to the end with steps 2 (skip 1 by 1 from start).
print(chars[::3]) # All the way to the end with steps 3 (skip 2 by 2 from start).
print(chars[2:7:2]) # From 3rd letter(index 2) to 8th letter(index 7) by two steps.
print(chars[::-1]) # Reverse the string with -1 steps (Interview question).
print(chars[::-2]) # Reverse the string with -2 steps (skip 1 by 1 backward).
print(chars[-2:-7:-2]) # Reverse the string from -2nd index to -7th index by 2 steps.
# (With a negative step, start must come after end, as above; mixing a
# forward range like chars[2:7] with a negative step yields an empty string.)
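print(chars[7:2:-1]) # Backward slice from index 7 down to index 3 ('hgfed').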
# String properties and methods.
name = "Sam"
# name[0] = 'P' ; String in python is immutable. Cannot modify unless creating a new string.
print(f'P{name[1:]}') # String concatenation
print((name + " ") * 10) # String multiplication to append a string specific number of times.
# print(2 + "5") ; This is prohibited in python unlike java unless parse one of each value to other
# value's type.
print(str(2) + "5") # Parse 'int' to 'string'.
print(2 + int("5")) # Parse 'string' to 'int'.
prop = "Hi, This is a String in Python"
print(prop.upper()) # To upper case.
print(prop.lower()) # To lower case.
print(prop) # Doesn't change 'prop', since strings are immutable.
print(prop.upper) # This is valid. Instead of executing the function, just return the function
# instance.
# Split string.
print(prop.split()) # 'split()' return a list base on the parameter letter.
# (Default whitespace if no letter passed.)
print(prop.split("i")) # 'split()' by 'i' character/letter.
# Print formatting with "placeholders".
print("I'm going to inject %s here" % "something")
print("I'm going to inject %s text here, and %s text here" % ("some", "more"))
x, y = "some", "more"
print("I'm going to inject %s text here, and %s text here" % (x, y))
# %s and %r represent 'str()' and 'repr()' functions respectively.
print("He said, his name was %s" % "Fred")
print("He said, his name was %r" % "Fred") # %r takes the string representation with quotes.
print("I once caught a fish %s" % "this \tbig")
print("I once caught a fish %r" % "this \tbig") # %r does not render escape characters.
print("I wrote %s programs today" % 3.75)
print("I wrote %d programs today" % 3.75) # %d converts numbers to integer without rounding.
# Padding and precision of floating point numbers.
print("Floating point number : %5.2f" % 13.144)
print("Floating point number : %1.0f" % 13.144)
print("Floating point number : %1.5f" % 13.144)
print("Floating point number : %0.5f" % 13.144)
print("Floating point number : %10.5f" % 13.144)
print("First : %s, Second : %5.2f, Third : %r, Fourth : %d" % ("Hi", 3.1415, "Bye!", 3.1415))
# Print formatting with strings "format()" function.
print("This is a string {}".format("INSERTED"))
print("The {} {} {}".format("fox", "brown", "quick"))
print("The {2} {1} {0}".format("fox", "brown", "quick"))
print("The {0} {1} {2}".format("fox", "brown", "quick")) # Repeat by refering same index.
print("The {q} {b} {f}".format(f="fox", b="brown", q="quick")) # Assigning variable to index.
print("The {q} {q} {q}".format(f="fox", b="brown", q="quick")) # Repeat by refering same index variable.
# Float formatting "{value:width.precision f}" with "format()" function.
result = 100/777
print(result)
print("The result was {r}".format(r=result))
print("The result was {r:1.3f}".format(r=result)) # Float formatting while roundup.
print("The result was {r:10.3f}".format(r=result)) # width=10 add preceding 10 spaces.
result = 100 / 0.245
print(result)
print("The result was {r:1.4f}".format(r=result)) # Value keep the length even width=1.
# Alignment, padding and precision with 'format()'.
print("{0:8} | {1:9}".format("Fruit", "Quantity"))
print("{0:8} | {1:9}".format("Apples", 3.))
print("{0:8} | {1:9}".format("Oranges", 10))
print("{0:<8} | {1:^8} | {2:>8}".format("Left", "Centre", "Right"))
print("{1:<8} | {2:^8} | {0:>8}".format(11, 22, 43))
# Precede the alignment operator with padding character.
print("{0:-<8} | {1:=^8} | {2:.>8}".format("Left", "Centre", "Right"))
print("{0:-<8} | {1:=^8} | {2:.>8}".format(11, 22, 43))
# Print formatting with string "f-strings".
name = "Jose"
print(f"Hello, his name is {name}")
name = "Sam"
age = 30
print(f"{name} is {age} years old")
print(f"{name!r} is {age} years old") # !r function to 'repr()' string.
# Float formatting "{value:{width}.{precision}}"
num = 24.45678
print(f"My 10 character, four decimal number is:{num:{10}.{6}}") # Precision refers to total number
# of digits.
print(f"My 10 character, four decimal number is:{num:10.4f}") # Equivalent to 'format()' function.
import string
print(string.ascii_uppercase)
print('A'.isupper()) # Supplementary functions.
print('a'.islower())
# Find string in a larger string content.
hello = 'Hello, World!'
idx = hello.find('Wor') # Return the index or -1 if not found.
print(idx)
# OR
idx = hello.index('lo') # Return the index or throws an error -> ValueError: substring not found.
print(idx)
| 42.972603 | 105 | 0.636436 |
acf53b55e1ee2fe6110b4bc8e2029484b04cfb32 | 460 | py | Python | boot.py | mc-b/iotkitmp | a526617c3f5347d1ae607063ae8c759a46b4715d | [
"MIT"
] | null | null | null | boot.py | mc-b/iotkitmp | a526617c3f5347d1ae607063ae8c759a46b4715d | [
"MIT"
] | null | null | null | boot.py | mc-b/iotkitmp | a526617c3f5347d1ae607063ae8c759a46b4715d | [
"MIT"
] | 1 | 2022-03-04T09:38:26.000Z | 2022-03-04T09:38:26.000Z | #####
# Boot - runs on every start/reboot of the device.
#
# Used, for example, to connect the board to the WLAN.
#
import network
import esp
esp.osdebug(None)
import gc
gc.collect()
# WLAN
ssid = 'LERNKUBE'
password = 'l3rnk4b3'
station = network.WLAN(network.STA_IF)
station.active(True)
station.connect(ssid, password)
while not station.isconnected():
pass
print('Connection successful')
print(station.ifconfig())
| 17.037037 | 57 | 0.68913 |
acf53c1ff274f6807fe52e400e10f3dc55586d79 | 9,324 | py | Python | yt_dlp/extractor/naver.py | kevinoconnor7/yt-dlp | 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | [
"Unlicense"
] | 3,001 | 2020-10-24T05:24:18.000Z | 2022-03-31T06:45:32.000Z | yt_dlp/extractor/naver.py | kevinoconnor7/yt-dlp | 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | [
"Unlicense"
] | 274 | 2020-10-24T04:57:21.000Z | 2022-03-22T01:34:56.000Z | yt_dlp/extractor/naver.py | kevinoconnor7/yt-dlp | 73d829c144601c105f7ee1a3d8f2aed6d8e1b76d | [
"Unlicense"
] | 479 | 2020-10-24T07:38:48.000Z | 2022-03-29T15:41:03.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
dict_get,
ExtractorError,
int_or_none,
parse_duration,
try_get,
update_url_query,
)
class NaverBaseIE(InfoExtractor):
_CAPTION_EXT_RE = r'\.(?:ttml|vtt)'
def _extract_video_info(self, video_id, vid, key):
video_data = self._download_json(
'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
video_id, query={
'key': key,
})
meta = video_data['meta']
title = meta['subject']
formats = []
get_list = lambda x: try_get(video_data, lambda y: y[x + 's']['list'], list) or []
def extract_formats(streams, stream_type, query={}):
for stream in streams:
stream_url = stream.get('source')
if not stream_url:
continue
stream_url = update_url_query(stream_url, query)
encoding_option = stream.get('encodingOption', {})
bitrate = stream.get('bitrate', {})
formats.append({
'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
'url': stream_url,
'width': int_or_none(encoding_option.get('width')),
'height': int_or_none(encoding_option.get('height')),
'vbr': int_or_none(bitrate.get('video')),
'abr': int_or_none(bitrate.get('audio')),
'filesize': int_or_none(stream.get('size')),
'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
})
extract_formats(get_list('video'), 'H264')
for stream_set in video_data.get('streams', []):
query = {}
for param in stream_set.get('keys', []):
query[param['name']] = param['value']
stream_type = stream_set.get('type')
videos = stream_set.get('videos')
if videos:
extract_formats(videos, stream_type, query)
elif stream_type == 'HLS':
stream_url = stream_set.get('source')
if not stream_url:
continue
formats.extend(self._extract_m3u8_formats(
update_url_query(stream_url, query), video_id,
'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
self._sort_formats(formats)
replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)
def get_subs(caption_url):
if re.search(self._CAPTION_EXT_RE, caption_url):
return [{
'url': replace_ext(caption_url, 'ttml'),
}, {
'url': replace_ext(caption_url, 'vtt'),
}]
else:
return [{'url': caption_url}]
automatic_captions = {}
subtitles = {}
for caption in get_list('caption'):
caption_url = caption.get('source')
if not caption_url:
continue
sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))
user = meta.get('user', {})
return {
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'automatic_captions': automatic_captions,
'thumbnail': try_get(meta, lambda x: x['cover']['source']),
'view_count': int_or_none(meta.get('count')),
'uploader_id': user.get('id'),
'uploader': user.get('name'),
'uploader_url': user.get('url'),
}
class NaverIE(NaverBaseIE):
_VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)'
_GEO_BYPASS = False
_TESTS = [{
'url': 'http://tv.naver.com/v/81652',
'info_dict': {
'id': '81652',
'ext': 'mp4',
'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
'timestamp': 1378200754,
'upload_date': '20130903',
'uploader': '메가스터디, 합격불변의 법칙',
'uploader_id': 'megastudy',
},
}, {
'url': 'http://tv.naver.com/v/395837',
'md5': '8a38e35354d26a17f73f4e90094febd3',
'info_dict': {
'id': '395837',
'ext': 'mp4',
'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
'description': 'md5:eb6aca9d457b922e43860a2a2b1984d3',
'timestamp': 1432030253,
'upload_date': '20150519',
'uploader': '4가지쇼 시즌2',
'uploader_id': 'wrappinguser29',
},
'skip': 'Georestricted',
}, {
'url': 'http://tvcast.naver.com/v/81652',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
content = self._download_json(
'https://tv.naver.com/api/json/v/' + video_id,
video_id, headers=self.geo_verification_headers())
player_info_json = content.get('playerInfoJson') or {}
current_clip = player_info_json.get('currentClip') or {}
vid = current_clip.get('videoId')
in_key = current_clip.get('inKey')
if not vid or not in_key:
player_auth = try_get(player_info_json, lambda x: x['playerOption']['auth'])
if player_auth == 'notCountry':
self.raise_geo_restricted(countries=['KR'])
elif player_auth == 'notLogin':
self.raise_login_required()
raise ExtractorError('couldn\'t extract vid and key')
info = self._extract_video_info(video_id, vid, in_key)
info.update({
'description': clean_html(current_clip.get('description')),
'timestamp': int_or_none(current_clip.get('firstExposureTime'), 1000),
'duration': parse_duration(current_clip.get('displayPlayTime')),
'like_count': int_or_none(current_clip.get('recommendPoint')),
'age_limit': 19 if current_clip.get('adult') else None,
})
return info
class NaverLiveIE(InfoExtractor):
IE_NAME = 'Naver:live'
_VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)'
_GEO_BYPASS = False
_TESTS = [{
'url': 'https://tv.naver.com/l/52010',
'info_dict': {
'id': '52010',
'ext': 'm3u8',
'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
'channel_id': 'NTV-ytnnews24-0',
'start_time': 1597026780000,
},
}, {
'url': 'https://tv.naver.com/l/51549',
'info_dict': {
'id': '51549',
'ext': 'm3u8',
'title': '연합뉴스TV - 코로나19 뉴스특보',
'description': 'md5:c655e82091bc21e413f549c0eaccc481',
'channel_id': 'NTV-yonhapnewstv-0',
'start_time': 1596406380000,
},
}, {
'url': 'https://tv.naver.com/l/54887',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
page = self._download_webpage(url, video_id, 'Downloading Page', 'Unable to download Page')
secure_url = self._search_regex(r'sApiF:\s+(?:"|\')([^"\']+)', page, 'secureurl')
info = self._extract_video_info(video_id, secure_url)
info.update({
'description': self._og_search_description(page)
})
return info
def _extract_video_info(self, video_id, url):
video_data = self._download_json(url, video_id, headers=self.geo_verification_headers())
meta = video_data.get('meta')
status = meta.get('status')
if status == 'CLOSED':
raise ExtractorError('Stream is offline.', expected=True)
elif status != 'OPENED':
raise ExtractorError('Unknown status %s' % status)
title = meta.get('title')
stream_list = video_data.get('streams')
if stream_list is None:
raise ExtractorError('Could not get stream data.', expected=True)
formats = []
for quality in stream_list:
if not quality.get('url'):
continue
prop = quality.get('property')
if prop.get('abr'): # This abr doesn't mean Average audio bitrate.
continue
formats.extend(self._extract_m3u8_formats(
quality.get('url'), video_id, 'm3u8',
m3u8_id=quality.get('qualityId'), live=True
))
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'channel_id': meta.get('channelId'),
'channel_url': meta.get('channelUrl'),
'thumbnail': meta.get('imgUrl'),
'start_time': meta.get('startTime'),
'categories': [meta.get('categoryId')],
'is_live': True
}
| 37 | 122 | 0.539575 |
acf53c4dddd0f250ef6a63f74322d1f3742fa4b5 | 4,524 | py | Python | converters/sol6_converter_cisco.py | NSO-developer/nfvo-converter-tosca-sol6 | 2c43900b0e96927e23c668a4c67e09d0b13f55d7 | [
"Apache-2.0"
] | 9 | 2019-06-03T07:50:37.000Z | 2021-03-26T00:25:19.000Z | converters/sol6_converter_cisco.py | NSO-developer/nfvo-converter-tosca-sol6 | 2c43900b0e96927e23c668a4c67e09d0b13f55d7 | [
"Apache-2.0"
] | null | null | null | converters/sol6_converter_cisco.py | NSO-developer/nfvo-converter-tosca-sol6 | 2c43900b0e96927e23c668a4c67e09d0b13f55d7 | [
"Apache-2.0"
] | 4 | 2020-03-06T16:16:28.000Z | 2020-09-30T06:13:56.000Z | from keys.sol6_keys_cisco import *
from converters.sol6_converter import Sol6Converter
from keys.sol6_keys import *
from utils.dict_utils import *
class SOL6ConverterCisco(Sol6Converter):
def __init__(self, tosca_vnf, parsed_dict, variables=None):
super().__init__(tosca_vnf, parsed_dict, variables)
# Initialize the flag variables you use here, even though they'll always be defined
# by set_flags_false, it's good practice
self.req_delta_valid = False
self.format_as_ip = False
self.is_variable = False
self.default_root = False
def convert(self, provider=None):
"""
Convert the tosca_vnf to sol6 VNFD
Currently only handles converting a single VNF to VNFD
"""
log.info("Starting Cisco TOSCA -> SOL6 converter.")
# The very first thing we want to do is set up the path variables
log.debug("Setting path variables: {}".format(self.variables))
formatted_vars = PathMapping.format_paths(self.variables)
TOSCA.set_variables(self.variables["tosca"], TOSCA, variables=formatted_vars,
dict_tosca=self.tosca_vnf, cur_provider=provider)
self.vnfd = {}
keys = V2Map(self.tosca_vnf, self.vnfd, variables=self.variables)
self.run_mapping(keys)
return self.vnfd
def run_mapping_islist(self, tosca_path, map_sol6):
mapping_list = map_sol6[1] # List of MapElems
sol6_path = map_sol6[0]
        for i, elem in enumerate(mapping_list):
            # Skip this mapping element if it is None, but allow a None name to pass
if not elem:
continue
tosca_use_value = self.tosca_use_value
f_tosca_path = MapElem.format_path(elem, tosca_path, use_value=tosca_use_value)
f_sol6_path = MapElem.format_path(elem, sol6_path, use_value=True)
log.debug("Formatted paths:\n\ttosca: {} --> sol6: {}"
.format(f_tosca_path, f_sol6_path))
# Skip this element if it requires deltas to be valid
# This has to be outside the flag method
if self.req_delta_valid:
if not self.run_deltas:
continue
# Handle flags for mapped values
value = self.handle_flags(f_sol6_path, f_tosca_path, i)
# If the value doesn't exist, don't write it
# Do write it if the value is 0, though
write = True
if not value:
                write = value == 0
if write:
set_path_to(f_sol6_path, self.vnfd, value, create_missing=True)
def set_flags_false(self):
"""
Set all the flags defined in this class to false.
This resets them every loop so they aren't applied when they shouldn't be.
"""
super().set_flags_false()
self.is_variable = False
self.default_root = False
self.req_delta_valid = False
self.format_as_ip = False
def set_flags_loop(self, flags, keys):
super().set_flags_loop(flags, keys)
# Ensure flags is iterable
if not isinstance(flags, tuple):
flags = [flags]
for flag in flags:
if flag == keys.FLAG_VAR:
self.is_variable = True
if flag == keys.FLAG_TYPE_ROOT_DEF:
self.default_root = True
if flag == keys.FLAG_REQ_DELTA:
self.req_delta_valid = True
def handle_flags(self, f_sol6_path, f_tosca_path, run):
value = super().handle_flags(f_sol6_path, f_tosca_path, run)
value = self._handle_input(self.is_variable, f_sol6_path, value)
value = self._handle_default_root(self.default_root, f_sol6_path, value)
return value
# Flag option formatting methods
def _handle_default_root(self, option, path, value):
if not option:
return value
if not value:
return self.variables["sol6"]["VIRT_STORAGE_DEFAULT_VAL"]
return value
@staticmethod
def _handle_input(option, path, value):
if not option:
return value
# If this isn't actually an input, then don't assign it
if not V2MapBase.is_tosca_input(value):
return value
return V2MapBase.tosca_get_input_key(value)
# ----------------------------------------------------------------------------------------------
| 35.622047 | 100 | 0.606764 |
acf53cacae39ebf0b9fbf3a97e8f7f9f0073cdae | 120 | py | Python | keras_contrib/constraints/__init__.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 11 | 2019-03-23T13:23:49.000Z | 2022-01-20T07:57:56.000Z | keras_contrib/constraints/__init__.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 1 | 2017-12-26T02:59:59.000Z | 2017-12-26T02:59:59.000Z | keras_contrib/constraints/__init__.py | rgreenblatt/keras-contrib | 46fcdb9384b3bc9399c651b2b43640aa54098e64 | [
"MIT"
] | 11 | 2017-07-06T14:11:51.000Z | 2021-08-21T23:18:20.000Z | from __future__ import absolute_import
from keras.constraints import *
from .clip import Clip
# Aliases.
clip = Clip
| 13.333333 | 38 | 0.783333 |
acf53d15b28ef50d381b65cd2bb269c5e937db10 | 841 | py | Python | trilateration/model/test_circle.py | robinroyer/trilateration | 9a8d1388f6ba03f72537defbddb3e984826a640e | [
"Apache-2.0"
] | 3 | 2018-12-18T00:15:12.000Z | 2020-11-02T02:44:22.000Z | trilateration/model/test_circle.py | robinroyer/trilateration | 9a8d1388f6ba03f72537defbddb3e984826a640e | [
"Apache-2.0"
] | 2 | 2020-11-02T02:44:17.000Z | 2020-11-23T05:05:45.000Z | trilateration/model/test_circle.py | robinroyer/trilateration | 9a8d1388f6ba03f72537defbddb3e984826a640e | [
"Apache-2.0"
] | 3 | 2019-05-06T04:49:16.000Z | 2021-09-12T11:59:39.000Z | import unittest
import time
import datetime
from circle import circle
from point import point
# Do not forget to use nose2 at the repo root to run the tests.
class Test_circle(unittest.TestCase):
# =============================================== OBJECT UNIT TEST
def test_circle_creation(self):
c = circle(point(48.84, 2.26), 300)
self.assertEqual(c.center.lat, 48.84)
self.assertEqual(c.center.lon, 2.26)
self.assertEqual(c.radius, 300)
# =============================================== ERROR CHECKING
def test_negative_radius(self):
self.assertRaises(ValueError, lambda: circle(point(0,0), -1))
def test_bad_circle_parameter(self):
self.assertRaises(ValueError, lambda: circle(point(0,0), -1).distance_from_circle_center(42))
if __name__ == '__main__':
unittest.main() | 29 | 101 | 0.613555 |
acf53de6a92db66acfc1bed5ef449bfc6f1d6e7d | 6,460 | py | Python | federatedscope/core/trainers/trainer_pFedMe.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 9 | 2022-03-24T07:59:37.000Z | 2022-03-31T06:47:52.000Z | federatedscope/core/trainers/trainer_pFedMe.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 1 | 2022-03-28T13:52:17.000Z | 2022-03-28T13:52:17.000Z | federatedscope/core/trainers/trainer_pFedMe.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | null | null | null | import copy
from federatedscope.core.trainers.trainer import GeneralTorchTrainer
from federatedscope.core.optimizer import wrap_regularized_optimizer
from typing import Type
def wrap_pFedMeTrainer(
base_trainer: Type[GeneralTorchTrainer]) -> Type[GeneralTorchTrainer]:
"""
    Build a `pFedMeTrainer` in a plug-in manner by registering new functions onto a specific `BaseTrainer`.
    The pFedMe implementation, "Personalized Federated Learning with Moreau Envelopes (NeurIPS 2020)",
    is based on Algorithm 1 of the paper and the official code: https://github.com/CharlieDinh/pFedMe
"""
# ---------------- attribute-level plug-in -----------------------
init_pFedMe_ctx(base_trainer)
# ---------------- action-level plug-in -----------------------
base_trainer.register_hook_in_train(
new_hook=hook_on_fit_start_set_local_para_tmp,
trigger="on_fit_start",
insert_pos=-1)
base_trainer.register_hook_in_train(
new_hook=hook_on_epoch_end_update_local,
trigger="on_epoch_end",
insert_pos=-1)
base_trainer.register_hook_in_train(new_hook=hook_on_fit_end_update_local,
trigger="on_fit_end",
insert_pos=-1)
base_trainer.replace_hook_in_train(
new_hook=_hook_on_batch_forward_flop_count,
target_trigger="on_batch_forward",
target_hook_name="_hook_on_batch_forward_flop_count")
base_trainer.register_hook_in_train(new_hook=_hook_on_epoch_end_flop_count,
trigger="on_epoch_end",
insert_pos=-1)
# for "on_batch_start" trigger: replace the original hooks into new ones of pFedMe
# 1) cache the original hooks for "on_batch_start"
base_trainer.ctx.original_hook_on_batch_start_train = base_trainer.hooks_in_train[
"on_batch_start"]
base_trainer.ctx.original_hook_on_batch_start_eval = base_trainer.hooks_in_eval[
"on_batch_start"]
# 2) replace the original hooks for "on_batch_start"
base_trainer.replace_hook_in_train(
new_hook=hook_on_batch_start_init_pfedme,
target_trigger="on_batch_start",
target_hook_name=None)
base_trainer.replace_hook_in_eval(new_hook=hook_on_batch_start_init_pfedme,
target_trigger="on_batch_start",
target_hook_name=None)
return base_trainer
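# Minimal usage sketch (the `GeneralTorchTrainer` constructor arguments below are
# assumptions; the exact signature depends on the surrounding FederatedScope setup):
#     trainer = wrap_pFedMeTrainer(GeneralTorchTrainer(model, data, device, config))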
def init_pFedMe_ctx(base_trainer):
"""
    Initialize the necessary attributes used in pFedMe;
    new attributes are prefixed with `pFedMe` to avoid namespace pollution
"""
ctx = base_trainer.ctx
cfg = base_trainer.cfg
    # pFedMe finds the approximate model with K steps over the same data batch,
    # so the complexity of each pFedMe client is K times that of FedAvg
ctx.pFedMe_K = cfg.personalization.K
ctx.num_train_epoch *= ctx.pFedMe_K
ctx.pFedMe_approx_fit_counter = 0
    # local_model_tmp is used as the reference parameter when finding the approximate \theta from the paper;
    # it will be copied from the model every run_routine
ctx.pFedMe_local_model_tmp = None
# the optimizer used in pFedMe is based on Moreau Envelopes regularization
# besides, there are two distinct lr for the approximate model and base model
ctx.optimizer = wrap_regularized_optimizer(
ctx.optimizer, cfg.personalization.regular_weight)
for g in ctx.optimizer.param_groups:
g['lr'] = cfg.personalization.lr
ctx.pFedMe_outer_lr = cfg.optimizer.lr
def hook_on_fit_start_set_local_para_tmp(ctx):
ctx.pFedMe_local_model_tmp = copy.deepcopy(ctx.model)
# set the compared model data, then the optimizer will find approximate model using trainer.cfg.personalization.lr
compared_global_model_para = [{
"params": list(ctx.pFedMe_local_model_tmp.parameters())
}]
ctx.optimizer.set_compared_para_group(compared_global_model_para)
def hook_on_batch_start_init_pfedme(ctx):
    # refresh the data every K steps
if ctx.pFedMe_approx_fit_counter == 0:
if ctx.cur_mode == "train":
for hook in ctx.original_hook_on_batch_start_train:
hook(ctx)
else:
for hook in ctx.original_hook_on_batch_start_eval:
hook(ctx)
ctx.data_batch_cache = copy.deepcopy(ctx.data_batch)
else:
        # reuse `data_batch_cache` since the original hook `_hook_on_batch_end` will clear `data_batch`
ctx.data_batch = copy.deepcopy(ctx.data_batch_cache)
ctx.pFedMe_approx_fit_counter = (ctx.pFedMe_approx_fit_counter +
1) % ctx.pFedMe_K
def _hook_on_batch_forward_flop_count(ctx):
if ctx.monitor.flops_per_sample == 0:
# calculate the flops_per_sample
x, _ = [_.to(ctx.device) for _ in ctx.data_batch]
from fvcore.nn import FlopCountAnalysis
flops_one_batch = FlopCountAnalysis(ctx.model, x).total()
# besides the normal forward flops, pFedMe introduces
        # 1) the regularization adds a cost proportional to the number of model parameters
flops_one_batch += ctx.monitor.total_model_size / 2
ctx.monitor.track_avg_flops(flops_one_batch, ctx.batch_size)
ctx.monitor.total_flops += ctx.monitor.flops_per_sample * ctx.batch_size
def _hook_on_epoch_end_flop_count(ctx):
# due to the local weight updating
ctx.monitor.total_flops += ctx.monitor.total_model_size / 2
def hook_on_epoch_end_update_local(ctx):
# update local weight after finding approximate theta
for client_param, local_para_tmp in zip(
ctx.model.parameters(), ctx.pFedMe_local_model_tmp.parameters()):
local_para_tmp.data = local_para_tmp.data - ctx.optimizer.regular_weight * \
ctx.pFedMe_outer_lr * (local_para_tmp.data - client_param.data)
# set the compared model data, then the optimizer will find approximate model using trainer.cfg.personalization.lr
compared_global_model_para = [{
"params": list(ctx.pFedMe_local_model_tmp.parameters())
}]
ctx.optimizer.set_compared_para_group(compared_global_model_para)
def hook_on_fit_end_update_local(ctx):
for param, local_para_tmp in zip(ctx.model.parameters(),
ctx.pFedMe_local_model_tmp.parameters()):
param.data = local_para_tmp.data
del ctx.pFedMe_local_model_tmp
| 43.066667 | 118 | 0.699536 |
acf53ee28eae6d839a4c54bf2b6c165e0a725daa | 52,473 | py | Python | scvi/model/_totalvi.py | YosefLab/scVI | 73952af10292852b8e5b7319409678cc66d85a47 | [
"MIT"
] | 398 | 2017-10-11T06:19:23.000Z | 2020-09-14T02:46:25.000Z | scvi/model/_totalvi.py | YosefLab/scVI | 73952af10292852b8e5b7319409678cc66d85a47 | [
"MIT"
] | 708 | 2017-11-13T14:51:21.000Z | 2020-09-16T21:09:19.000Z | scvi/model/_totalvi.py | YosefLab/scVI | 73952af10292852b8e5b7319409678cc66d85a47 | [
"MIT"
] | 154 | 2017-10-16T06:53:59.000Z | 2020-09-11T23:06:30.000Z | import logging
import warnings
from collections.abc import Iterable as IterableClass
from functools import partial
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from mudata import MuData
import scvi.data.fields as fields
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi._types import Number
from scvi._utils import _doc_params
from scvi.data import AnnDataManager
from scvi.data._utils import _check_nonnegative_integers
from scvi.dataloaders import DataSplitter
from scvi.model._utils import (
_get_batch_code_from_category,
_get_var_names_from_manager,
_init_library_size,
cite_seq_raw_counts_properties,
)
from scvi.model.base._utils import _de_core
from scvi.module import TOTALVAE
from scvi.train import AdversarialTrainingPlan, TrainRunner
from scvi.utils._docstrings import doc_differential_expression, setup_anndata_dsp
from .base import ArchesMixin, BaseModelClass, RNASeqMixin, VAEMixin
logger = logging.getLogger(__name__)
class TOTALVI(RNASeqMixin, VAEMixin, ArchesMixin, BaseModelClass):
"""
total Variational Inference [GayosoSteier21]_.
Parameters
----------
adata
AnnData object that has been registered via :meth:`~scvi.model.TOTALVI.setup_anndata`.
n_latent
Dimensionality of the latent space.
gene_dispersion
One of the following:
* ``'gene'`` - genes_dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - genes_dispersion can differ between different batches
* ``'gene-label'`` - genes_dispersion can differ between different labels
protein_dispersion
One of the following:
* ``'protein'`` - protein_dispersion parameter is constant per protein across cells
* ``'protein-batch'`` - protein_dispersion can differ between different batches NOT TESTED
* ``'protein-label'`` - protein_dispersion can differ between different labels NOT TESTED
gene_likelihood
One of:
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
latent_distribution
One of:
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
empirical_protein_background_prior
Set the initialization of protein background prior empirically. This option fits a GMM for each of
100 cells per batch and averages the distributions. Note that even with this option set to `True`,
this only initializes a parameter that is learned during inference. If `False`, randomly initializes.
The default (`None`), sets this to `True` if greater than 10 proteins are used.
override_missing_proteins
If `True`, will not treat proteins with all 0 expression in a particular batch as missing.
**model_kwargs
Keyword args for :class:`~scvi.module.TOTALVAE`
Examples
--------
>>> adata = anndata.read_h5ad(path_to_anndata)
>>> scvi.model.TOTALVI.setup_anndata(adata, batch_key="batch", protein_expression_obsm_key="protein_expression")
>>> vae = scvi.model.TOTALVI(adata)
>>> vae.train()
>>> adata.obsm["X_totalVI"] = vae.get_latent_representation()
Notes
-----
See further usage examples in the following tutorials:
1. :doc:`/tutorials/notebooks/totalVI`
2. :doc:`/tutorials/notebooks/cite_scrna_integration_w_totalVI`
3. :doc:`/tutorials/notebooks/scarches_scvi_tools`
"""
def __init__(
self,
adata: AnnData,
n_latent: int = 20,
gene_dispersion: Literal[
"gene", "gene-batch", "gene-label", "gene-cell"
] = "gene",
protein_dispersion: Literal[
"protein", "protein-batch", "protein-label"
] = "protein",
gene_likelihood: Literal["zinb", "nb"] = "nb",
latent_distribution: Literal["normal", "ln"] = "normal",
empirical_protein_background_prior: Optional[bool] = None,
override_missing_proteins: bool = False,
**model_kwargs,
):
super(TOTALVI, self).__init__(adata)
self.protein_state_registry = self.adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
)
if (
fields.ProteinObsmField.PROTEIN_BATCH_MASK in self.protein_state_registry
and not override_missing_proteins
):
batch_mask = self.protein_state_registry.protein_batch_mask
msg = (
"Some proteins have all 0 counts in some batches. "
+ "These proteins will be treated as missing measurements; however, "
+ "this can occur due to experimental design/biology. "
+ "Reinitialize the model with `override_missing_proteins=True`,"
+ "to override this behavior."
)
warnings.warn(msg, UserWarning)
self._use_adversarial_classifier = True
else:
batch_mask = None
self._use_adversarial_classifier = False
emp_prior = (
empirical_protein_background_prior
if empirical_protein_background_prior is not None
else (self.summary_stats.n_proteins > 10)
)
if emp_prior:
prior_mean, prior_scale = self._get_totalvi_protein_priors(adata)
else:
prior_mean, prior_scale = None, None
n_cats_per_cov = (
self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
fields.CategoricalJointObsField.N_CATS_PER_KEY
]
if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry
else None
)
n_batch = self.summary_stats.n_batch
use_size_factor_key = (
REGISTRY_KEYS.SIZE_FACTOR_KEY in self.adata_manager.data_registry
)
library_log_means, library_log_vars = None, None
if not use_size_factor_key:
library_log_means, library_log_vars = _init_library_size(
self.adata_manager, n_batch
)
self.module = TOTALVAE(
n_input_genes=self.summary_stats.n_vars,
n_input_proteins=self.summary_stats.n_proteins,
n_batch=n_batch,
n_latent=n_latent,
n_continuous_cov=self.summary_stats.get("n_extra_continuous_covs", 0),
n_cats_per_cov=n_cats_per_cov,
gene_dispersion=gene_dispersion,
protein_dispersion=protein_dispersion,
gene_likelihood=gene_likelihood,
latent_distribution=latent_distribution,
protein_batch_mask=batch_mask,
protein_background_prior_mean=prior_mean,
protein_background_prior_scale=prior_scale,
use_size_factor_key=use_size_factor_key,
library_log_means=library_log_means,
library_log_vars=library_log_vars,
**model_kwargs,
)
self._model_summary_string = (
"TotalVI Model with the following params: \nn_latent: {}, "
"gene_dispersion: {}, protein_dispersion: {}, gene_likelihood: {}, latent_distribution: {}"
).format(
n_latent,
gene_dispersion,
protein_dispersion,
gene_likelihood,
latent_distribution,
)
self.init_params_ = self._get_init_params(locals())
def train(
self,
max_epochs: Optional[int] = 400,
lr: float = 4e-3,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 0.9,
validation_size: Optional[float] = None,
batch_size: int = 256,
early_stopping: bool = True,
check_val_every_n_epoch: Optional[int] = None,
reduce_lr_on_plateau: bool = True,
n_steps_kl_warmup: Union[int, None] = None,
n_epochs_kl_warmup: Union[int, None] = None,
adversarial_classifier: Optional[bool] = None,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using amortized variational inference.
Parameters
----------
max_epochs
Number of passes through the dataset.
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
early_stopping
Whether to perform early stopping with respect to the validation set.
check_val_every_n_epoch
Check val every n train epochs. By default, val is not checked, unless `early_stopping` is `True`
or `reduce_lr_on_plateau` is `True`. If either of the latter conditions are met, val is checked
every epoch.
reduce_lr_on_plateau
Reduce learning rate on plateau of validation metric (default is ELBO).
n_steps_kl_warmup
Number of training steps (minibatches) to scale weight on KL divergences from 0 to 1.
Only activated when `n_epochs_kl_warmup` is set to None. If `None`, defaults
to `floor(0.75 * adata.n_obs)`.
n_epochs_kl_warmup
Number of epochs to scale weight on KL divergences from 0 to 1.
Overrides `n_steps_kl_warmup` when both are not `None`.
adversarial_classifier
Whether to use adversarial classifier in the latent space. This helps mixing when
there are missing proteins in any of the batches. Defaults to `True` is missing proteins
are detected.
plan_kwargs
Keyword args for :class:`~scvi.train.AdversarialTrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
if adversarial_classifier is None:
adversarial_classifier = self._use_adversarial_classifier
n_steps_kl_warmup = (
n_steps_kl_warmup
if n_steps_kl_warmup is not None
else int(0.75 * self.adata.n_obs)
)
if reduce_lr_on_plateau:
check_val_every_n_epoch = 1
update_dict = {
"lr": lr,
"adversarial_classifier": adversarial_classifier,
"reduce_lr_on_plateau": reduce_lr_on_plateau,
"n_epochs_kl_warmup": n_epochs_kl_warmup,
"n_steps_kl_warmup": n_steps_kl_warmup,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
if max_epochs is None:
n_cells = self.adata.n_obs
max_epochs = np.min([round((20000 / n_cells) * 400), 400])
plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()
data_splitter = DataSplitter(
self.adata_manager,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
use_gpu=use_gpu,
)
training_plan = AdversarialTrainingPlan(self.module, **plan_kwargs)
runner = TrainRunner(
self,
training_plan=training_plan,
data_splitter=data_splitter,
max_epochs=max_epochs,
use_gpu=use_gpu,
early_stopping=early_stopping,
check_val_every_n_epoch=check_val_every_n_epoch,
**kwargs,
)
return runner()
@torch.no_grad()
def get_latent_library_size(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
give_mean: bool = True,
batch_size: Optional[int] = None,
) -> np.ndarray:
r"""
Returns the latent library size for each cell.
This is denoted as :math:`\ell_n` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
give_mean
Return the mean or a sample from the posterior distribution.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
self._check_if_trained(warn=False)
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
libraries = []
for tensors in post:
inference_inputs = self.module._get_inference_input(tensors)
outputs = self.module.inference(**inference_inputs)
if give_mean:
ql = outputs["ql"]
library = torch.exp(ql.loc + 0.5 * (ql.scale**2))
else:
library = outputs["library_gene"]
libraries += [library.cpu()]
return torch.cat(libraries).numpy()
@torch.no_grad()
def get_normalized_expression(
self,
adata=None,
indices=None,
n_samples_overall: Optional[int] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
library_size: Optional[Union[float, Literal["latent"]]] = 1,
n_samples: int = 1,
sample_protein_mixing: bool = False,
scale_protein: bool = False,
include_protein_background: bool = False,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Tuple[Union[np.ndarray, pd.DataFrame], Union[np.ndarray, pd.DataFrame]]:
r"""
Returns the normalized gene expression and protein expression.
This is denoted as :math:`\rho_n` in the totalVI paper for genes, and TODO
for proteins, :math:`(1-\pi_{nt})\alpha_{nt}\beta_{nt}`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples_overall
Number of samples to use in total
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
protein_list
Return protein expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude.
n_samples
Get sample scale from multiple samples.
sample_protein_mixing
Sample mixing bernoulli, setting background to zero
scale_protein
Make protein expression sum to 1
include_protein_background
Include background component for protein expression
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a `np.ndarray` instead of a `pd.DataFrame`. Includes gene
names as columns. If either n_samples=1 or return_mean=True, defaults to False.
Otherwise, it defaults to True.
Returns
-------
- **gene_normalized_expression** - normalized expression for RNA
- **protein_normalized_expression** - normalized expression for proteins
If ``n_samples`` > 1 and ``return_mean`` is False, then the shape is ``(samples, cells, genes)``.
Otherwise, shape is ``(cells, genes)``. Return type is ``pd.DataFrame`` unless ``return_numpy`` is True.
"""
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples_overall is not None:
indices = np.random.choice(indices, n_samples_overall)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_manager(adata_manager)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(adata_manager, transform_batch)
scale_list_gene = []
scale_list_pro = []
for tensors in post:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
px_scale = torch.zeros_like(x)[..., gene_mask]
py_scale = torch.zeros_like(y)[..., protein_mask]
if n_samples > 1:
px_scale = torch.stack(n_samples * [px_scale])
py_scale = torch.stack(n_samples * [py_scale])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
if library_size == "latent":
px_scale += generative_outputs["px_"]["rate"].cpu()[..., gene_mask]
else:
px_scale += generative_outputs["px_"]["scale"].cpu()[..., gene_mask]
py_ = generative_outputs["py_"]
# probability of background
protein_mixing = 1 / (1 + torch.exp(-py_["mixing"].cpu()))
if sample_protein_mixing is True:
protein_mixing = torch.distributions.Bernoulli(
protein_mixing
).sample()
protein_val = py_["rate_fore"].cpu() * (1 - protein_mixing)
if include_protein_background is True:
protein_val += py_["rate_back"].cpu() * protein_mixing
if scale_protein is True:
protein_val = torch.nn.functional.normalize(
protein_val, p=1, dim=-1
)
protein_val = protein_val[..., protein_mask]
py_scale += protein_val
px_scale /= len(transform_batch)
py_scale /= len(transform_batch)
scale_list_gene.append(px_scale)
scale_list_pro.append(py_scale)
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
scale_list_gene = torch.cat(scale_list_gene, dim=1)
scale_list_pro = torch.cat(scale_list_pro, dim=1)
# (cells, features, samples)
scale_list_gene = scale_list_gene.permute(1, 2, 0)
scale_list_pro = scale_list_pro.permute(1, 2, 0)
else:
scale_list_gene = torch.cat(scale_list_gene, dim=0)
scale_list_pro = torch.cat(scale_list_pro, dim=0)
if return_mean is True and n_samples > 1:
scale_list_gene = torch.mean(scale_list_gene, dim=-1)
scale_list_pro = torch.mean(scale_list_pro, dim=-1)
scale_list_gene = scale_list_gene.cpu().numpy()
scale_list_pro = scale_list_pro.cpu().numpy()
if return_numpy is None or return_numpy is False:
gene_df = pd.DataFrame(
scale_list_gene,
columns=_get_var_names_from_manager(adata_manager)[gene_mask],
index=adata.obs_names[indices],
)
protein_names = self.protein_state_registry.column_names
pro_df = pd.DataFrame(
scale_list_pro,
columns=protein_names[protein_mask],
index=adata.obs_names[indices],
)
return gene_df, pro_df
else:
return scale_list_gene, scale_list_pro
@torch.no_grad()
def get_protein_foreground_probability(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
protein_list: Optional[Sequence[str]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
):
r"""
Returns the foreground probability for proteins.
This is denoted as :math:`(1 - \pi_{nt})` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
protein_list
Return protein expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
- **foreground_probability** - probability foreground for each protein
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, genes)`.
Otherwise, shape is `(cells, genes)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if indices is None:
indices = np.arange(adata.n_obs)
py_mixings = []
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.adata_manager, transform_batch
)
for tensors in post:
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
py_mixing = torch.zeros_like(y[..., protein_mask])
if n_samples > 1:
py_mixing = torch.stack(n_samples * [py_mixing])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
py_mixing += torch.sigmoid(generative_outputs["py_"]["mixing"])[
..., protein_mask
].cpu()
py_mixing /= len(transform_batch)
py_mixings += [py_mixing]
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
py_mixings = torch.cat(py_mixings, dim=1)
# (cells, features, samples)
py_mixings = py_mixings.permute(1, 2, 0)
else:
py_mixings = torch.cat(py_mixings, dim=0)
if return_mean is True and n_samples > 1:
py_mixings = torch.mean(py_mixings, dim=-1)
py_mixings = py_mixings.cpu().numpy()
if return_numpy is True:
return 1 - py_mixings
else:
pro_names = self.protein_state_registry.column_names
foreground_prob = pd.DataFrame(
1 - py_mixings,
columns=pro_names[protein_mask],
index=adata.obs_names[indices],
)
return foreground_prob
def _expression_for_de(
self,
adata=None,
indices=None,
n_samples_overall=None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
scale_protein=False,
batch_size: Optional[int] = None,
sample_protein_mixing=False,
include_protein_background=False,
protein_prior_count=0.5,
):
rna, protein = self.get_normalized_expression(
adata=adata,
indices=indices,
n_samples_overall=n_samples_overall,
transform_batch=transform_batch,
return_numpy=True,
n_samples=1,
batch_size=batch_size,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
)
protein += protein_prior_count
joint = np.concatenate([rna, protein], axis=1)
return joint
@_doc_params(
doc_differential_expression=doc_differential_expression,
)
def differential_expression(
self,
adata: Optional[AnnData] = None,
groupby: Optional[str] = None,
group1: Optional[Iterable[str]] = None,
group2: Optional[str] = None,
idx1: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
idx2: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
mode: Literal["vanilla", "change"] = "change",
delta: float = 0.25,
batch_size: Optional[int] = None,
all_stats: bool = True,
batch_correction: bool = False,
batchid1: Optional[Iterable[str]] = None,
batchid2: Optional[Iterable[str]] = None,
fdr_target: float = 0.05,
silent: bool = False,
protein_prior_count: float = 0.1,
scale_protein: bool = False,
sample_protein_mixing: bool = False,
include_protein_background: bool = False,
**kwargs,
) -> pd.DataFrame:
r"""
A unified method for differential expression analysis.
Implements `"vanilla"` DE [Lopez18]_ and `"change"` mode DE [Boyeau19]_.
Parameters
----------
{doc_differential_expression}
protein_prior_count
Prior count added to protein expression before LFC computation
scale_protein
Force protein values to sum to one in every single cell (post-hoc normalization)
sample_protein_mixing
Sample the protein mixture component, i.e., use the parameter to sample a Bernoulli
that determines if expression is from foreground/background.
include_protein_background
Include the protein background component as part of the protein expression
**kwargs
Keyword args for :meth:`scvi.model.base.DifferentialComputation.get_bayes_factors`
Returns
-------
Differential expression DataFrame.
"""
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata, required=True)
model_fn = partial(
self._expression_for_de,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
protein_prior_count=protein_prior_count,
batch_size=batch_size,
)
col_names = np.concatenate(
[
np.asarray(_get_var_names_from_manager(adata_manager)),
self.protein_state_registry.column_names,
]
)
result = _de_core(
adata_manager,
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
cite_seq_raw_counts_properties,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr_target,
silent,
**kwargs,
)
return result
@torch.no_grad()
def posterior_predictive_sample(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
) -> np.ndarray:
r"""
Generate observation samples from the posterior predictive distribution.
The posterior predictive distribution is written as :math:`p(\hat{x}, \hat{y} \mid x, y)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of required samples for each cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
gene_list
Names of genes of interest
protein_list
Names of proteins of interest
Returns
-------
x_new : :class:`~numpy.ndarray`
tensor with shape (n_cells, n_genes, n_samples)
"""
if self.module.gene_likelihood not in ["nb"]:
raise ValueError("Invalid gene_likelihood")
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata, required=True)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = _get_var_names_from_manager(adata_manager)
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
rna_sample, protein_sample = self.module.sample(
tensors, n_samples=n_samples
)
rna_sample = rna_sample[..., gene_mask]
protein_sample = protein_sample[..., protein_mask]
data = torch.cat([rna_sample, protein_sample], dim=-1).numpy()
scdl_list += [data]
if n_samples > 1:
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
scdl_list = np.concatenate(scdl_list, axis=0)
return scdl_list
@torch.no_grad()
def _get_denoised_samples(
self,
adata=None,
indices=None,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[int] = None,
) -> np.ndarray:
"""
Return samples from an adjusted posterior predictive.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
indices of `adata` to use
n_samples
            How many samples per cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
int of which batch to condition on for all cells
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
generative_kwargs = dict(transform_batch=transform_batch)
inference_kwargs = dict(n_samples=n_samples)
with torch.no_grad():
inference_outputs, generative_outputs, = self.module.forward(
tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
px_ = generative_outputs["px_"]
py_ = generative_outputs["py_"]
device = px_["r"].device
pi = 1 / (1 + torch.exp(-py_["mixing"]))
mixing_sample = torch.distributions.Bernoulli(pi).sample()
protein_rate = py_["rate_fore"]
rate = torch.cat((rna_size_factor * px_["scale"], protein_rate), dim=-1)
if len(px_["r"].size()) == 2:
px_dispersion = px_["r"]
else:
px_dispersion = torch.ones_like(x).to(device) * px_["r"]
if len(py_["r"].size()) == 2:
py_dispersion = py_["r"]
else:
py_dispersion = torch.ones_like(y).to(device) * py_["r"]
dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + dispersion)
r = dispersion
l_train = torch.distributions.Gamma(r, (1 - p) / p).sample()
data = l_train.cpu().numpy()
# make background 0
data[:, :, x.shape[1] :] = (
data[:, :, x.shape[1] :] * (1 - mixing_sample).cpu().numpy()
)
scdl_list += [data]
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
return np.concatenate(scdl_list, axis=0)
@torch.no_grad()
def get_feature_correlation_matrix(
self,
adata=None,
indices=None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
log_transform: bool = False,
) -> pd.DataFrame:
"""
Generate gene-gene correlation matrix using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
log_transform
Whether to log transform denoised values prior to correlation calculation.
Returns
-------
Gene-protein-gene-protein correlation matrix
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata, required=True)
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.get_anndata_manager(adata, required=True), transform_batch
)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if log_transform is True:
flattened[:, : self.n_genes] = np.log(
flattened[:, : self.n_genes] + 1e-8
)
flattened[:, self.n_genes :] = np.log1p(flattened[:, self.n_genes :])
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
else:
corr_matrix, _ = spearmanr(flattened, axis=0)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = _get_var_names_from_manager(adata_manager)
names = np.concatenate(
[
np.asarray(var_names),
self.protein_state_registry.column_names,
]
)
return pd.DataFrame(corr_matrix, index=names, columns=names)
@torch.no_grad()
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x, y \mid z)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
Return expected value of parameters or a samples
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
raise NotImplementedError
def _validate_anndata(
self, adata: Optional[AnnData] = None, copy_if_view: bool = True
):
adata = super()._validate_anndata(adata=adata, copy_if_view=copy_if_view)
error_msg = "Number of {} in anndata different from when setup_anndata was run. Please rerun setup_anndata."
if REGISTRY_KEYS.PROTEIN_EXP_KEY in self.adata_manager.data_registry.keys():
pro_exp = self.get_from_registry(adata, REGISTRY_KEYS.PROTEIN_EXP_KEY)
if self.summary_stats.n_proteins != pro_exp.shape[1]:
raise ValueError(error_msg.format("proteins"))
is_nonneg_int = _check_nonnegative_integers(pro_exp)
if not is_nonneg_int:
warnings.warn(
"Make sure the registered protein expression in anndata contains unnormalized count data."
)
else:
raise ValueError("No protein data found, please setup or transfer anndata")
return adata
def _get_totalvi_protein_priors(self, adata, n_cells=100):
"""Compute an empirical prior for protein background."""
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import GaussianMixture
with warnings.catch_warnings():
warnings.filterwarnings("error")
logger.info(
"Computing empirical prior initialization for protein background."
)
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
pro_exp = adata_manager.get_from_registry(REGISTRY_KEYS.PROTEIN_EXP_KEY)
pro_exp = (
pro_exp.to_numpy() if isinstance(pro_exp, pd.DataFrame) else pro_exp
)
batch_mask = adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
).get(fields.ProteinObsmField.PROTEIN_BATCH_MASK)
batch = adata_manager.get_from_registry(REGISTRY_KEYS.BATCH_KEY).ravel()
cats = adata_manager.get_state_registry(REGISTRY_KEYS.BATCH_KEY)[
fields.CategoricalObsField.CATEGORICAL_MAPPING_KEY
]
codes = np.arange(len(cats))
batch_avg_mus, batch_avg_scales = [], []
for b in np.unique(codes):
# can happen during online updates
# the values of these batches will not be used
num_in_batch = np.sum(batch == b)
if num_in_batch == 0:
batch_avg_mus.append(0)
batch_avg_scales.append(1)
continue
batch_pro_exp = pro_exp[batch == b]
# non missing
if batch_mask is not None:
batch_pro_exp = batch_pro_exp[:, batch_mask[b]]
if batch_pro_exp.shape[1] < 5:
logger.debug(
f"Batch {b} has too few proteins to set prior, setting randomly."
)
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
# a batch is missing because it's in the reference but not query data
# for scarches case, these values will be replaced by original state dict
if batch_pro_exp.shape[0] == 0:
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
cells = np.random.choice(
np.arange(batch_pro_exp.shape[0]), size=n_cells
)
batch_pro_exp = batch_pro_exp[cells]
gmm = GaussianMixture(n_components=2)
mus, scales = [], []
# fit per cell GMM
for c in batch_pro_exp:
try:
gmm.fit(np.log1p(c.reshape(-1, 1)))
# when cell is all 0
except ConvergenceWarning:
mus.append(0)
scales.append(0.05)
continue
means = gmm.means_.ravel()
sorted_fg_bg = np.argsort(means)
mu = means[sorted_fg_bg].ravel()[0]
covariances = gmm.covariances_[sorted_fg_bg].ravel()[0]
scale = np.sqrt(covariances)
mus.append(mu)
scales.append(scale)
# average distribution over cells
batch_avg_mu = np.mean(mus)
batch_avg_scale = np.sqrt(np.sum(np.square(scales)) / (n_cells**2))
batch_avg_mus.append(batch_avg_mu)
batch_avg_scales.append(batch_avg_scale)
# repeat prior for each protein
batch_avg_mus = np.array(batch_avg_mus, dtype=np.float32).reshape(1, -1)
batch_avg_scales = np.array(batch_avg_scales, dtype=np.float32).reshape(
1, -1
)
batch_avg_mus = np.tile(batch_avg_mus, (pro_exp.shape[1], 1))
batch_avg_scales = np.tile(batch_avg_scales, (pro_exp.shape[1], 1))
return batch_avg_mus, batch_avg_scales
@torch.no_grad()
def get_protein_background_mean(self, adata, indices, batch_size):
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
background_mean = []
for tensors in scdl:
_, inference_outputs, _ = self.module.forward(tensors)
b_mean = inference_outputs["py_"]["rate_back"]
background_mean += [b_mean.cpu().numpy()]
return np.concatenate(background_mean)
@classmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
protein_expression_obsm_key: str,
protein_names_uns_key: Optional[str] = None,
batch_key: Optional[str] = None,
layer: Optional[str] = None,
size_factor_key: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
**kwargs,
) -> Optional[AnnData]:
"""
%(summary)s.
Parameters
----------
%(param_adata)s
protein_expression_obsm_key
key in `adata.obsm` for protein expression data.
protein_names_uns_key
key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`
if it is a DataFrame, else will assign sequential names to proteins.
%(param_batch_key)s
%(param_layer)s
%(param_size_factor_key)s
%(param_cat_cov_keys)s
%(param_cont_cov_keys)s
%(param_copy)s
Returns
-------
%(returns)s
"""
setup_method_args = cls._get_setup_method_args(**locals())
batch_field = fields.CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key)
anndata_fields = [
fields.LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
fields.CategoricalObsField(
REGISTRY_KEYS.LABELS_KEY, None
), # Default labels field for compatibility with TOTALVAE
batch_field,
fields.NumericalObsField(
REGISTRY_KEYS.SIZE_FACTOR_KEY, size_factor_key, required=False
),
fields.CategoricalJointObsField(
REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
),
fields.NumericalJointObsField(
REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
),
fields.ProteinObsmField(
REGISTRY_KEYS.PROTEIN_EXP_KEY,
protein_expression_obsm_key,
use_batch_mask=True,
batch_field=batch_field,
colnames_uns_key=protein_names_uns_key,
is_count_data=True,
),
]
adata_manager = AnnDataManager(
fields=anndata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager)
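    # Hedged usage note (illustrative addition, not original scvi-tools code):
    # assuming the enclosing model class is named `TOTALVI` and protein counts
    # live in `adata.obsm["protein_expression"]`, a typical call is:
    #     TOTALVI.setup_anndata(
    #         adata,
    #         protein_expression_obsm_key="protein_expression",
    #         batch_key="batch",
    #     )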
@classmethod
def setup_mudata(
cls,
mdata: MuData,
rna_layer: Optional[str] = None,
protein_layer: Optional[str] = None,
batch_key: Optional[str] = None,
size_factor_key: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
modalities: Optional[Dict[str, str]] = None,
**kwargs,
) -> Optional[AnnData]:
setup_method_args = cls._get_setup_method_args(**locals())
if modalities is None:
raise ValueError("Modalities cannot be None.")
modalities = cls._create_modalities_attr_dict(modalities, setup_method_args)
batch_field = fields.MuDataCategoricalObsField(
REGISTRY_KEYS.BATCH_KEY,
batch_key,
mod_key=modalities.batch_key,
)
mudata_fields = [
fields.MuDataLayerField(
REGISTRY_KEYS.X_KEY,
rna_layer,
mod_key=modalities.rna_layer,
is_count_data=True,
mod_required=True,
),
fields.MuDataCategoricalObsField(
REGISTRY_KEYS.LABELS_KEY,
None,
mod_key=None,
), # Default labels field for compatibility with TOTALVAE
batch_field,
fields.MuDataNumericalObsField(
REGISTRY_KEYS.SIZE_FACTOR_KEY,
size_factor_key,
mod_key=modalities.size_factor_key,
required=False,
),
fields.MuDataCategoricalJointObsField(
REGISTRY_KEYS.CAT_COVS_KEY,
categorical_covariate_keys,
mod_key=modalities.categorical_covariate_keys,
),
fields.MuDataNumericalJointObsField(
REGISTRY_KEYS.CONT_COVS_KEY,
continuous_covariate_keys,
mod_key=modalities.continuous_covariate_keys,
),
fields.MuDataProteinLayerField(
REGISTRY_KEYS.PROTEIN_EXP_KEY,
protein_layer,
mod_key=modalities.protein_layer,
use_batch_mask=True,
batch_field=batch_field,
is_count_data=True,
mod_required=True,
),
]
adata_manager = AnnDataManager(
fields=mudata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(mdata, **kwargs)
cls.register_manager(adata_manager)
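# --- Hedged illustration (appended sketch, not part of the original file) ---
# A standalone version of the per-cell step inside
# `_get_totalvi_protein_priors`: fit a two-component GaussianMixture on log1p
# protein counts and take the lower mode as the background prior. `counts` is
# a hypothetical 1-D array of one cell's protein counts.
def _demo_protein_background_fit(counts):
    from sklearn.mixture import GaussianMixture
    gmm = GaussianMixture(n_components=2).fit(np.log1p(counts.reshape(-1, 1)))
    order = np.argsort(gmm.means_.ravel())
    mu = gmm.means_.ravel()[order][0]  # lower mean is treated as background
    scale = np.sqrt(gmm.covariances_.ravel()[order][0])
    return mu, scale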
| 39.752273 | 132 | 0.599413 |
acf53f49ece9ec74ee964f19e50f8e442cd1733f | 22,925 | py | Python | brax/experimental/composer/training/mappo.py | bharadwaj1098/brax | 3108a0535b9b59725c97ef35732ed0378c0fd5cc | [
"Apache-2.0"
] | 1,162 | 2021-06-03T20:15:05.000Z | 2022-03-31T19:53:06.000Z | brax/experimental/composer/training/mappo.py | bharadwaj1098/brax | 3108a0535b9b59725c97ef35732ed0378c0fd5cc | [
"Apache-2.0"
] | 160 | 2021-06-05T02:32:39.000Z | 2022-03-31T11:39:58.000Z | brax/experimental/composer/training/mappo.py | bharadwaj1098/brax | 3108a0535b9b59725c97ef35732ed0378c0fd5cc | [
"Apache-2.0"
] | 117 | 2021-06-04T17:18:21.000Z | 2022-03-30T18:04:48.000Z | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-agent proximal policy optimization training.
*This is branched from braxlines/training/ppo.py, and will be folded back.*
"""
from collections import OrderedDict as odict
import functools
import time
from typing import Any, Callable, Dict, Optional, Tuple
from absl import logging
from brax import envs
from brax.experimental.braxlines.training import env
from brax.experimental.composer import data_utils
from brax.training import distribution
from brax.training import networks
from brax.training import normalization
from brax.training import pmap
from brax.training import ppo
from brax.training.types import Params
from brax.training.types import PRNGKey
import flax
import jax
import jax.numpy as jnp
import optax
@flax.struct.dataclass
class TrainingState:
"""Contains training state for the learner."""
optimizer_state: optax.OptState
params: Params
key: PRNGKey
normalizer_params: Params
@flax.struct.dataclass
class Agent:
parametric_action_distribution: distribution.ParametricDistribution
policy_model: Any
optimizer_state: Any
init_params: Any
grad_loss: Any
def compute_ppo_loss(
models: Dict[str, Params],
data: ppo.StepData,
udata: ppo.StepData,
rng: PRNGKey,
parametric_action_distribution: distribution.ParametricDistribution,
policy_apply: Any,
value_apply: Any,
entropy_cost: float = 1e-4,
discounting: float = 0.9,
reward_scaling: float = 1.0,
lambda_: float = 0.95,
ppo_epsilon: float = 0.3,
extra_loss_update_ratios: Optional[Dict[str, float]] = None,
extra_loss_fns: Optional[Dict[str, Callable[[ppo.StepData],
jnp.ndarray]]] = None,
action_shapes: Dict[str, Dict[str, Any]] = None,
agent_name: str = None,
):
"""Computes PPO loss."""
policy_params, value_params = models['policy'], models['value']
extra_params = models.get('extra', {})
policy_logits = policy_apply(policy_params, data.obs[:-1])
baseline = value_apply(value_params, data.obs)
baseline = jnp.squeeze(baseline, axis=-1)
# Use last baseline value (from the value function) to bootstrap.
bootstrap_value = baseline[-1]
baseline = baseline[:-1]
# At this point, we have unroll length + 1 steps. The last step is only used
# as bootstrap value, so it's removed.
# already removed at data generation time
# actions = actions[:-1]
# logits = logits[:-1]
agent_index = list(action_shapes.keys()).index(agent_name)
rewards = data.rewards[1:, ..., agent_index] * reward_scaling
truncation = data.truncation[1:]
termination = data.dones[1:] * (1 - truncation)
actions = data.actions[agent_index]
logits = data.logits[agent_index]
target_action_log_probs = parametric_action_distribution.log_prob(
policy_logits, actions)
behaviour_action_log_probs = parametric_action_distribution.log_prob(
logits, actions)
vs, advantages = ppo.compute_gae(
truncation=truncation,
termination=termination,
rewards=rewards,
values=baseline,
bootstrap_value=bootstrap_value,
lambda_=lambda_,
discount=discounting)
rho_s = jnp.exp(target_action_log_probs - behaviour_action_log_probs)
surrogate_loss1 = rho_s * advantages
surrogate_loss2 = jnp.clip(rho_s, 1 - ppo_epsilon,
1 + ppo_epsilon) * advantages
policy_loss = -jnp.mean(jnp.minimum(surrogate_loss1, surrogate_loss2))
# Value function loss
v_error = vs - baseline
value_loss = jnp.mean(v_error * v_error) * 0.5 * 0.5
# Entropy reward
entropy = jnp.mean(parametric_action_distribution.entropy(policy_logits, rng))
entropy_loss = entropy_cost * -entropy
total_loss = policy_loss + value_loss + entropy_loss
# Additional losses
extra_losses = {}
if extra_loss_fns:
for key, loss_fn in extra_loss_fns.items():
loss, rng = loss_fn(data=data, udata=udata, rng=rng, params=extra_params)
if extra_loss_update_ratios and key in extra_loss_update_ratios:
# enable loss gradient p*100 percent of the time
rng, key_update = jax.random.split(rng)
p = extra_loss_update_ratios[key]
b = jax.random.bernoulli(key_update, p=jnp.array(p))
loss = jnp.where(b, loss, jax.lax.stop_gradient(loss))
total_loss += loss
extra_losses[key] = loss
return total_loss, dict(
extra_losses, **{
'total_loss': total_loss,
'policy_loss': policy_loss,
'value_loss': value_loss,
'entropy_loss': entropy_loss,
})
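# --- Hedged illustration (added sketch, not original brax code) ---
# The clipped surrogate above on toy numbers: with probability ratio
# rho = exp(new_logp - old_logp) and advantage A, PPO optimizes
# min(rho * A, clip(rho, 1 - eps, 1 + eps) * A), so large ratios cannot
# inflate positive advantages. `_demo_clipped_surrogate` is a hypothetical
# helper, not part of the brax API.
def _demo_clipped_surrogate(ppo_epsilon: float = 0.3):
  rho = jnp.array([0.5, 1.0, 2.0])  # probability ratios per sample
  advantages = jnp.array([1.0, -1.0, 1.0])
  surrogate1 = rho * advantages
  surrogate2 = jnp.clip(rho, 1 - ppo_epsilon, 1 + ppo_epsilon) * advantages
  return -jnp.mean(jnp.minimum(surrogate1, surrogate2))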
def train(environment_fn: Callable[..., envs.Env],
num_timesteps,
episode_length: int,
action_repeat: int = 1,
num_envs: int = 1,
max_devices_per_host: Optional[int] = None,
num_eval_envs: int = 128,
learning_rate=1e-4,
entropy_cost=1e-4,
discounting=0.9,
seed=0,
unroll_length=10,
batch_size=32,
num_minibatches=16,
num_update_epochs=2,
log_frequency=10,
normalize_observations=False,
reward_scaling=1.,
progress_fn: Optional[Callable[[int, Dict[str, Any]], None]] = None,
parametric_action_distribution_fn: Optional[Callable[[
int,
], distribution.ParametricDistribution]] = distribution
.NormalTanhDistribution,
make_models_fn: Optional[Callable[
[int, int],
Tuple[networks.FeedForwardModel]]] = networks.make_models,
policy_params: Optional[Dict[str, jnp.ndarray]] = None,
value_params: Optional[Dict[str, jnp.ndarray]] = None,
extra_params: Optional[Dict[str, Dict[str, jnp.ndarray]]] = None,
extra_step_kwargs: bool = True,
extra_loss_update_ratios: Optional[Dict[str, float]] = None,
extra_loss_fns: Optional[Dict[str, Callable[[ppo.StepData],
jnp.ndarray]]] = None):
"""PPO training."""
assert batch_size * num_minibatches % num_envs == 0
xt = time.time()
process_count = jax.process_count()
process_id = jax.process_index()
local_device_count = jax.local_device_count()
local_devices_to_use = local_device_count
if max_devices_per_host:
local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
logging.info(
'Device count: %d, process count: %d (id %d), local device count: %d, '
'devices to be used count: %d', jax.device_count(), process_count,
process_id, local_device_count, local_devices_to_use)
# TODO: check key randomness
key = jax.random.PRNGKey(seed)
key, key_models, key_env, key_eval = jax.random.split(key, 4)
# Make sure every process gets a different random key, otherwise they will be
# doing identical work.
key_env = jax.random.split(key_env, process_count)[process_id]
key = jax.random.split(key, process_count)[process_id]
# key_models should be the same, so that models are initialized the same way
# for different processes
key_envs = jax.random.split(key_env, local_devices_to_use)
core_env = environment_fn(
action_repeat=action_repeat,
batch_size=num_envs // local_devices_to_use // process_count,
episode_length=episode_length)
component_env = core_env.unwrapped
action_shapes = component_env.group_action_shapes
action_size = component_env.action_size
core_eval_env = environment_fn(
action_repeat=action_repeat,
batch_size=num_eval_envs,
episode_length=episode_length)
eval_first_state, eval_step_fn = env.wrap(
core_eval_env, key_eval, extra_step_kwargs=extra_step_kwargs)
tmp_env_states = []
for key in key_envs:
first_state, step_fn = env.wrap(
core_env, key, extra_step_kwargs=extra_step_kwargs)
tmp_env_states.append(first_state)
first_state = jax.tree_multimap(lambda *args: jnp.stack(args),
*tmp_env_states)
normalizer_params, obs_normalizer_update_fn, obs_normalizer_apply_fn = (
normalization.create_observation_normalizer(
core_env.observation_size,
normalize_observations,
num_leading_batch_dims=2,
pmap_to_devices=local_devices_to_use))
agents = odict()
policy_params = policy_params or [None] * len(action_shapes)
value_params = value_params or [None] * len(action_shapes)
for i, (k, action_shape) in enumerate(action_shapes.items()):
parametric_action_distribution = parametric_action_distribution_fn(
event_size=action_shape['size'])
policy_model, value_model = make_models_fn(
parametric_action_distribution.param_size, core_env.observation_size)
key_policy, key_value, key_models = jax.random.split(key_models, 3)
optimizer = optax.adam(learning_rate=learning_rate)
init_params = {
'policy': policy_params[i] or policy_model.init(key_policy),
'value': value_params[i] or value_model.init(key_value),
'extra': extra_params
}
optimizer_state = optimizer.init(init_params)
optimizer_state, init_params = pmap.bcast_local_devices(
(optimizer_state, init_params), local_devices_to_use)
loss_fn = functools.partial(
compute_ppo_loss,
parametric_action_distribution=parametric_action_distribution,
policy_apply=policy_model.apply,
value_apply=value_model.apply,
entropy_cost=entropy_cost,
discounting=discounting,
reward_scaling=reward_scaling,
extra_loss_update_ratios=extra_loss_update_ratios,
extra_loss_fns=extra_loss_fns,
action_shapes=action_shapes,
agent_name=k)
grad_loss = jax.grad(loss_fn, has_aux=True)
agents[k] = Agent(parametric_action_distribution, policy_model,
optimizer_state, init_params, grad_loss)
key_debug = jax.random.PRNGKey(seed + 666)
def do_one_step_eval(carry, unused_target_t):
state, policy_params, normalizer_params, extra_params, key = carry
key, key_sample = jax.random.split(key)
obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
actions = odict()
for i, (k, agent) in enumerate(agents.items()):
logits = agent.policy_model.apply(policy_params[i], obs)
actions[k] = agent.parametric_action_distribution.sample(
logits, key_sample)
actions_arr = jnp.zeros(obs.shape[:-1] + (action_size,))
actions = data_utils.fill_array(actions, actions_arr, action_shapes)
nstate = eval_step_fn(state, actions, normalizer_params, extra_params)
return (nstate, policy_params, normalizer_params, extra_params, key), ()
@jax.jit
def run_eval(state, key, policy_params, normalizer_params, extra_params):
policy_params = jax.tree_map(lambda x: x[0], policy_params)
normalizer_params = jax.tree_map(lambda x: x[0], normalizer_params)
extra_params = jax.tree_map(lambda x: x[0], extra_params)
(state, _, _, _, key), _ = jax.lax.scan(
do_one_step_eval,
(state, policy_params, normalizer_params, extra_params, key), (),
length=episode_length // action_repeat)
return state, key
def do_one_step(carry, unused_target_t):
state, normalizer_params, policy_params, extra_params, key = carry
key, key_sample = jax.random.split(key)
normalized_obs = obs_normalizer_apply_fn(normalizer_params, state.core.obs)
logits, actions, postprocessed_actions = [], [], odict()
for i, (k, agent) in enumerate(agents.items()):
logits += [agent.policy_model.apply(policy_params[i], normalized_obs)]
actions += [
agent.parametric_action_distribution.sample_no_postprocessing(
logits[-1], key_sample)
]
postprocessed_actions[
k] = agent.parametric_action_distribution.postprocess(actions[-1])
postprocessed_actions_arr = jnp.zeros(normalized_obs.shape[:-1] +
(action_size,))
postprocessed_actions = data_utils.fill_array(postprocessed_actions,
postprocessed_actions_arr,
action_shapes)
nstate = step_fn(state, postprocessed_actions, normalizer_params,
extra_params)
return (nstate, normalizer_params, policy_params, extra_params,
key), ppo.StepData(
obs=state.core.obs,
rewards=state.core.reward,
dones=state.core.done,
truncation=state.core.info['truncation'],
actions=actions,
logits=logits)
def generate_unroll(carry, unused_target_t):
state, normalizer_params, policy_params, extra_params, key = carry
(state, _, _, _, key), data = jax.lax.scan(
do_one_step,
(state, normalizer_params, policy_params, extra_params, key), (),
length=unroll_length)
data = data.replace(
obs=jnp.concatenate([data.obs,
jnp.expand_dims(state.core.obs, axis=0)]),
rewards=jnp.concatenate(
[data.rewards,
jnp.expand_dims(state.core.reward, axis=0)]),
dones=jnp.concatenate(
[data.dones, jnp.expand_dims(state.core.done, axis=0)]),
truncation=jnp.concatenate([
data.truncation,
jnp.expand_dims(state.core.info['truncation'], axis=0)
]))
return (state, normalizer_params, policy_params, extra_params, key), data
def update_model(carry, data_tuple):
optimizer_state, params, key = carry
data, udata = data_tuple
key, key_loss = jax.random.split(key)
metrics = []
for i, agent in enumerate(agents.values()):
loss_grad, agent_metrics = agent.grad_loss(params[i], data, udata,
key_loss)
metrics.append(agent_metrics)
loss_grad = jax.lax.pmean(loss_grad, axis_name='i')
params_update, optimizer_state[i] = optimizer.update(
loss_grad, optimizer_state[i])
params[i] = optax.apply_updates(params[i], params_update)
return (optimizer_state, params, key), metrics
def minimize_epoch(carry, unused_t):
optimizer_state, params, data, udata, key = carry
key, key_perm, key_grad = jax.random.split(key, 3)
permutation = jax.random.permutation(key_perm, data.obs.shape[1])
def convert_data(data, permutation):
data = jnp.take(data, permutation, axis=1, mode='clip')
data = jnp.reshape(data, [data.shape[0], num_minibatches, -1] +
list(data.shape[2:]))
data = jnp.swapaxes(data, 0, 1)
return data
ndata = jax.tree_map(lambda x: convert_data(x, permutation), data)
u_ndata = jax.tree_map(lambda x: convert_data(x, permutation), udata)
(optimizer_state, params, _), metrics = jax.lax.scan(
update_model, (optimizer_state, params, key_grad), (ndata, u_ndata),
length=num_minibatches)
return (optimizer_state, params, data, udata, key), metrics
def get_params(state, key, value=None):
if value is not None:
return [params.get(key, value) for params in state.params]
return [params[key] for params in state.params]
def run_epoch(carry, unused_t):
training_state, state = carry
key_minimize, key_generate_unroll, new_key = jax.random.split(
training_state.key, 3)
(state, _, _, _, _), data = jax.lax.scan(
generate_unroll,
(state, training_state.normalizer_params,
get_params(training_state, 'policy'),
get_params(training_state, 'extra', {}), key_generate_unroll), (),
length=batch_size * num_minibatches // num_envs)
# make unroll first
data = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), data)
data = jax.tree_map(
lambda x: jnp.reshape(x, [x.shape[0], -1] + list(x.shape[3:])), data)
# Update normalization params and normalize observations.
normalizer_params = obs_normalizer_update_fn(
training_state.normalizer_params, data.obs[:-1])
udata = data
data = data.replace(
obs=obs_normalizer_apply_fn(normalizer_params, data.obs))
(optimizer_state, params, _, _, _), metrics = jax.lax.scan(
minimize_epoch, (training_state.optimizer_state, training_state.params,
data, udata, key_minimize), (),
length=num_update_epochs)
new_training_state = TrainingState(
optimizer_state=optimizer_state,
params=params,
normalizer_params=normalizer_params,
key=new_key)
return (new_training_state, state), metrics
num_epochs = num_timesteps // (
batch_size * unroll_length * num_minibatches * action_repeat)
def _minimize_loop(training_state, state):
(training_state, state), losses = jax.lax.scan(
run_epoch, (training_state, state), (),
length=num_epochs // log_frequency)
losses = jax.tree_map(jnp.mean, losses)
return (training_state, state), losses
minimize_loop = jax.pmap(_minimize_loop, axis_name='i')
inference = make_inference_fn(
core_env.observation_size, action_shapes, normalize_observations,
parametric_action_distribution_fn, make_models_fn)
training_state = TrainingState(
optimizer_state=[agent.optimizer_state for agent in agents.values()],
params=[agent.init_params for agent in agents.values()],
key=jnp.stack(jax.random.split(key, local_devices_to_use)),
normalizer_params=normalizer_params)
training_walltime = 0
eval_walltime = 0
sps = 0
eval_sps = 0
losses = []
state = first_state
metrics = {}
for it in range(log_frequency + 1):
logging.info('starting iteration %s %s', it, time.time() - xt)
t = time.time()
if process_id == 0:
eval_state, key_debug = (
run_eval(eval_first_state, key_debug,
get_params(training_state, 'policy'),
training_state.normalizer_params,
get_params(training_state, 'extra', {})))
eval_state.total_episodes.block_until_ready()
eval_walltime += time.time() - t
eval_sps = (
episode_length * eval_first_state.core.reward.shape[0] /
(time.time() - t))
metrics = dict(
**dict({
f'eval/episode_{name}': value / eval_state.total_episodes
for name, value in eval_state.total_metrics.items()
}),
**dict({
f'{index}/losses/{k}': jnp.mean(v)
for index, loss in enumerate(losses) for k, v in loss.items()
}),
**dict({
'eval/total_episodes': eval_state.total_episodes,
'speed/sps': sps,
'speed/eval_sps': eval_sps,
'speed/training_walltime': training_walltime,
'speed/eval_walltime': eval_walltime,
'speed/timestamp': training_walltime
}))
logging.info(metrics)
if progress_fn:
params = dict(
normalizer=jax.tree_map(lambda x: x[0],
training_state.normalizer_params),
policy=jax.tree_map(lambda x: x[0],
get_params(training_state, 'policy')),
extra=jax.tree_map(lambda x: x[0],
get_params(training_state, 'extra')))
progress_fn(
int(training_state.normalizer_params[0][0]) * action_repeat,
metrics, params)
if it == log_frequency:
break
t = time.time()
previous_step = training_state.normalizer_params[0][0]
# optimization
(training_state, state), losses = minimize_loop(training_state, state)
jax.tree_map(lambda x: x.block_until_ready(), losses)
sps = ((training_state.normalizer_params[0][0] - previous_step) /
(time.time() - t)) * action_repeat
training_walltime += time.time() - t
# To undo the pmap.
normalizer_params = jax.tree_map(lambda x: x[0],
training_state.normalizer_params)
policy_params = jax.tree_map(lambda x: x[0],
get_params(training_state, 'policy'))
extra_params = jax.tree_map(lambda x: x[0],
get_params(training_state, 'extra'))
logging.info('total steps: %s', normalizer_params[0] * action_repeat)
params = dict(
normalizer=normalizer_params, policy=policy_params, extra=extra_params)
if process_count > 1:
# Make sure all processes stay up until the end of main.
x = jnp.ones([jax.local_device_count()])
x = jax.device_get(jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x))
assert x[0] == jax.device_count()
return (inference, params, metrics)
def make_inference_fn(
observation_size: int,
action_shapes: Dict[str, Any],
normalize_observations: bool = False,
parametric_action_distribution_fn: Optional[Callable[[
int,
], distribution.ParametricDistribution]] = distribution
.NormalTanhDistribution,
make_models_fn: Optional[Callable[
[int, int], Tuple[networks.FeedForwardModel]]] = networks.make_models):
"""Creates params and inference function for the multi-agent PPO agent."""
action_size = sum([s['size'] for s in action_shapes.values()])
_, obs_normalizer_apply_fn = normalization.make_data_and_apply_fn(
observation_size, normalize_observations=normalize_observations)
agents = odict()
for k, action_shape in action_shapes.items():
parametric_action_distribution = parametric_action_distribution_fn(
event_size=action_shape['size'])
policy_model, _ = make_models_fn(parametric_action_distribution.param_size,
observation_size)
agents[k] = (parametric_action_distribution, policy_model)
def inference_fn(params, obs, key):
normalizer_params, policy_params = params['normalizer'], params['policy']
obs = obs_normalizer_apply_fn(normalizer_params, obs)
actions = odict()
for i, (k, (parametric_action_distribution,
policy_model)) in enumerate(agents.items()):
actions[k] = parametric_action_distribution.sample(
policy_model.apply(policy_params[i], obs), key)
actions_arr = jnp.zeros(obs.shape[:-1] + (action_size,))
actions = data_utils.fill_array(actions, actions_arr, action_shapes)
return actions
return inference_fn
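# --- Hedged usage sketch (appended, not original brax code) ---
# Minimal wiring of `train`; `make_my_env` is a hypothetical composer
# environment factory, and the sizes satisfy the constraint
# batch_size * num_minibatches % num_envs == 0 asserted above.
def _demo_train(make_my_env):
  inference_fn, params, metrics = train(
      environment_fn=make_my_env,
      num_timesteps=1_000_000,
      episode_length=128,
      num_envs=16,
      batch_size=16,
      num_minibatches=4,
  )
  return inference_fn, params, metrics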
| 39.457831 | 80 | 0.674198 |
acf53fae519363e95a7bc336a80b139f55069951 | 19,828 | py | Python | PythonAPI/pycocotools/coco.py | vivint-smarthome/cocoapi | 3ecd9d3606e1dd93fc8956f61ef4f1ff9a99d5e1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | PythonAPI/pycocotools/coco.py | vivint-smarthome/cocoapi | 3ecd9d3606e1dd93fc8956f61ef4f1ff9a99d5e1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | PythonAPI/pycocotools/coco.py | vivint-smarthome/cocoapi | 3ecd9d3606e1dd93fc8956f61ef4f1ff9a99d5e1 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
def download_blob(bucket_name, source_blob_name, destination_file_name):
"""Downloads a blob from the bucket."""
from google.cloud import storage
storage_client = storage.Client(project='video-analytics-193323')
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
def urlretrieve(url, path):
p = url.split('/')
download_blob(p[2], '/'.join(p[3:]), path)
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs, urls = {}, {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'gs_url' in img:
urls[img['gs_url']] = img['id']
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
self.urls = urls
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
        :param ids (int array) : integer ids specifying imgs
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns, draw_bbox=False):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
if draw_bbox:
[bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']
poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]
np_poly = np.array(poly).reshape((4,2))
polygons.append(Polygon(np_poly))
color.append(c)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile, threshold=0.0):
"""
Load result file and return a result api object.
        :param resFile (str) : file name of result file
        :param threshold (float) : discard results whose score is below this value
        :return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
        # remove annotations whose score falls below the threshold
anns = [ann for ann in anns if ann['score'] >= threshold]
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
if 'gs_url' not in img:
print('No url provided for ' + img['file_name'] + ', continuing')
continue
urlretrieve(img['gs_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
        Convert annotation which can be polygons or uncompressed RLE to RLE.
        :return: rle (dict) : run-length encoded segmentation mask
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
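    # Hedged note (illustrative addition): an uncompressed RLE such as
    #     {"counts": [3, 2, 5], "size": [5, 2]}
    # lists alternating run lengths of 0s then 1s in column-major order:
    # three 0s, two 1s, then five 0s over a 5x2 mask (3 + 2 + 5 == 10).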
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
def urlToId(self, url):
"""
        Given a Google Storage URL, return the image id in the dataset if it exists.
:return: image id
"""
return self.urls.get(url)
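# --- Hedged usage sketch (appended, not part of the original pycocotools) ---
# Typical query flow through the API above; the annotation file path is a
# hypothetical example.
def _demo_coco_queries(ann_file='annotations/instances_val2017.json'):
    coco = COCO(ann_file)
    cat_ids = coco.getCatIds(catNms=['person'])
    img_ids = coco.getImgIds(catIds=cat_ids)
    ann_ids = coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=None)
    return coco.loadAnns(ann_ids)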
| 42.277186 | 128 | 0.557041 |
acf54093e4c82cd1c6a66451304887f68bd8eb27 | 673 | py | Python | project_partA_1.py | mpiplani/Profiling-Restaurants-via-Recommendation | 8f39794b58cb6fb7ccd728463a48babebd615bc6 | [
"Apache-2.0"
] | 1 | 2021-10-13T11:23:01.000Z | 2021-10-13T11:23:01.000Z | project_partA_1.py | mpiplani/Profiling-Restaurants-via-Recommendation | 8f39794b58cb6fb7ccd728463a48babebd615bc6 | [
"Apache-2.0"
] | null | null | null | project_partA_1.py | mpiplani/Profiling-Restaurants-via-Recommendation | 8f39794b58cb6fb7ccd728463a48babebd615bc6 | [
"Apache-2.0"
] | 1 | 2021-06-06T03:05:37.000Z | 2021-06-06T03:05:37.000Z | from bs4 import BeautifulSoup
import requests
import sys
# Usage (hypothetical): python project_partA_1.py los-angeles
city = sys.argv[1]  # fixed: `sys.args` does not exist; the CLI arg list is sys.argv
url = 'https://postmates.com/' + city
webpage = requests.get(url)
html = BeautifulSoup(webpage.text, 'html.parser')
# Restaurant cards on the city listing page (class names are site-specific).
restaurants = html.find_all("div", class_="css-pca8m e12wrbia0")
result = set()
j = 0
for restaurant in restaurants:
    href = restaurant.find("a")["href"]
    print(href)
    # Follow each restaurant link and collect its menu item names.
    url = 'https://postmates.com/' + str(href)
    print(url)
    webpage = requests.get(url)
    html = BeautifulSoup(webpage.text, 'html.parser')
    items = html.find_all('h3', class_="product-name css-1yjxguc e1tw3vxs4")
    if len(items) > 0:
        j += 1
    for item in items:
        result.add(item.text)
    if j > 9:  # stop once ten restaurants with non-empty menus are scraped
        break
print(result)
| 23.206897 | 71 | 0.656761 |