hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0f556816e062ea990fbefa9e842ae385ba4964
| 3,247
|
py
|
Python
|
modellibs/faster_rcnn/third_party/model/rpn/generate_anchors.py
|
fortoon21/detecthangul
|
d59cfc64fe658022040f949b836c9e7fa00d3ecd
|
[
"MIT"
] | 5
|
2020-01-03T10:19:05.000Z
|
2021-07-14T01:47:01.000Z
|
modellibs/faster_rcnn/third_party/model/rpn/generate_anchors.py
|
fortoon21/detecthangul
|
d59cfc64fe658022040f949b836c9e7fa00d3ecd
|
[
"MIT"
] | null | null | null |
modellibs/faster_rcnn/third_party/model/rpn/generate_anchors.py
|
fortoon21/detecthangul
|
d59cfc64fe658022040f949b836c9e7fa00d3ecd
|
[
"MIT"
] | 3
|
2019-08-07T08:49:44.000Z
|
2022-03-31T05:27:43.000Z
|
from __future__ import print_function
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
import pdb
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
#array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
# Python 2/3 compatibility shim: ensure the name `xrange` is bound.
try:
    xrange  # Python 2: builtin already exists
except NameError:
    xrange = range  # Python 3: xrange was removed; range is lazy
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2**np.arange(3, 6)):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
    Returns one row per (ratio, scale) pair as [x1, y1, x2, y2].
    """
    reference = np.array([1, 1, base_size, base_size]) - 1
    per_ratio = _ratio_enum(reference, ratios)
    # One block of scale-expanded anchors per aspect-ratio row.
    blocks = [_scale_enum(per_ratio[row, :], scales)
              for row in range(per_ratio.shape[0])]
    return np.vstack(blocks)
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """
    Enumerate a set of anchors for each aspect ratio wrt an anchor.
    Each output anchor keeps (approximately) the input's area.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # Solve ws * hs = area with hs / ws = ratio, rounding to whole pixels.
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """
    Enumerate a set of anchors for each scale wrt an anchor,
    keeping the center fixed while multiplying width and height.
    """
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
if __name__ == '__main__':
    # Ad-hoc harness: time default anchor generation, print the anchors,
    # then drop into an interactive IPython shell for inspection.
    import time
    t = time.time()
    a = generate_anchors()
    print(time.time() - t)
    print(a)
    from IPython import embed; embed()
| 28.734513
| 78
| 0.522328
|
4a0f55c4d30c4b09c1a52d558351d54b68da3089
| 1,600
|
py
|
Python
|
setup.py
|
aptivate/datacleaner
|
46fbbe593d46f7161f0c7c9ab2a492a87c03c55e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
aptivate/datacleaner
|
46fbbe593d46f7161f0c7c9ab2a492a87c03c55e
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
aptivate/datacleaner
|
46fbbe593d46f7161f0c7c9ab2a492a87c03c55e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for the `datacleaner` package.
try:
    from setuptools import setup
except ImportError:
    # Fall back to the stdlib installer when setuptools is unavailable.
    from distutils.core import setup
# Long description is the README followed by the changelog (with the
# reST `:changelog:` marker stripped).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]
setup(
    name='datacleaner',
    version='0.1.0',
    description="Python Data Cleaner is a small package of utils for cleaning up data entered as text without validation",
    long_description=readme + '\n\n' + history,
    author="Hamish Downer",
    author_email='hamish@aptivate.org',
    url='https://github.com/foobacca/datacleaner',
    packages=[
        'datacleaner',
    ],
    package_dir={'datacleaner':
                 'datacleaner'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='datacleaner',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| 27.586207
| 122
| 0.63625
|
4a0f56aacb22ea488f37babd1842db92574ccf20
| 1,507
|
py
|
Python
|
tests/testclient/__init__.py
|
androbwebb/JenniferVirtualAssistant
|
1c810b3d53d9fcb0c691cf11c0f83dd4a1e1b061
|
[
"MIT"
] | 15
|
2016-04-22T22:56:30.000Z
|
2020-08-29T07:14:12.000Z
|
tests/testclient/__init__.py
|
androbwebb/JenniferVirtualAssistant
|
1c810b3d53d9fcb0c691cf11c0f83dd4a1e1b061
|
[
"MIT"
] | 2
|
2020-02-03T09:10:11.000Z
|
2020-04-14T05:31:52.000Z
|
tests/testclient/__init__.py
|
androbwebb/JenniferVirtualAssistant
|
1c810b3d53d9fcb0c691cf11c0f83dd4a1e1b061
|
[
"MIT"
] | 7
|
2016-04-22T22:56:31.000Z
|
2022-02-02T15:17:26.000Z
|
from ioclients import JenniferClientSupportsResponders
from lessons.base.responses import JenniferTextResponseSegment, JenniferImageReponseSegment, \
JenniferLinkResponseSegement
class JenniferTestClient(JenniferClientSupportsResponders):
    """Scripted client used in tests: consumes input from a list and
    records every response instead of talking to a real channel."""

    ALLOWED_RESPONSE_TYPES = [JenniferTextResponseSegment, JenniferImageReponseSegment, JenniferLinkResponseSegement]

    def __init__(self, brain, input_list, debug=False):
        """
        All input will be taken from `input_list`
        All output will be saved to output_list
        """
        assert isinstance(input_list, list)
        self.input_list = input_list
        self.output_list = []
        self.debug = debug
        JenniferClientSupportsResponders.__init__(self, brain)

    # Overriding some required methods (test client is a special case
    def collect_input(self):
        """Pop and return the next scripted line; raise when the script ran dry."""
        try:
            line = self.input_list.pop(0)
        except IndexError:
            raise Exception(f'Prompted for input: \"{self.output_list[-1].to_text()}\", but no input found')
        if self.debug:
            print(f'INPUT: {line}')
        return line

    def give_output(self, response_obj):
        """Record a response object; echo its text when debugging."""
        if self.debug:
            print(f'OUTPUT: {response_obj.to_text()}')
        self.output_list.append(response_obj)

    def run(self):
        """Feed every scripted line through the brain, recording responses."""
        while self.input_list:
            reply = self.call_brain(self.collect_input())
            if reply:
                self.give_output(reply)
| 35.880952
| 117
| 0.664897
|
4a0f579eb05f59e3ad655cbd6c05192bad398ffe
| 1,703
|
py
|
Python
|
magnum-8.0.0/magnum/conf/cinder.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
magnum-8.0.0/magnum/conf/cinder.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
magnum-8.0.0/magnum/conf/cinder.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 3
|
2020-02-05T13:17:26.000Z
|
2020-08-24T05:32:32.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from magnum.i18n import _
# Option group holding Magnum's Cinder integration settings.
cinder_group = cfg.OptGroup(
    name='cinder',
    title='Options for the Cinder configuration')
# Option group holding settings for the Cinder client connection.
cinder_client_group = cfg.OptGroup(
    name='cinder_client',
    title='Options for the Cinder client')
cinder_opts = [
    cfg.StrOpt('default_docker_volume_type',
               default='',
               help=_('The default docker volume_type to use for volumes '
                      'used for docker storage. To use the cinder volumes '
                      'for docker storage, you need to select a default '
                      'value.'))]
cinder_client_opts = [
    cfg.StrOpt('region_name',
               help=_('Region in Identity service catalog to use for '
                      'communication with the OpenStack service.'))]
def register_opts(conf):
    """Register the cinder and cinder_client option groups (and their
    options) on the given oslo.config ConfigOpts object."""
    for group in (cinder_group, cinder_client_group):
        conf.register_group(group)
    conf.register_opts(cinder_opts, group=cinder_group)
    conf.register_opts(cinder_client_opts, group=cinder_client_group)
def list_opts():
    """Return the group -> options mapping consumed by oslo-config-generator."""
    return dict(((cinder_group, cinder_opts),
                 (cinder_client_group, cinder_client_opts)))
| 33.392157
| 77
| 0.692308
|
4a0f57bd68558d7df471198ea7d0b2929a34ed95
| 3,478
|
py
|
Python
|
python/cuML/test/test_kalman_filter.py
|
akshaysubr/cuml
|
7fceac26242f0155a5fa5cf1951af29230302e31
|
[
"Apache-2.0"
] | 1
|
2019-10-01T15:20:32.000Z
|
2019-10-01T15:20:32.000Z
|
python/cuML/test/test_kalman_filter.py
|
akshaysubr/cuml
|
7fceac26242f0155a5fa5cf1951af29230302e31
|
[
"Apache-2.0"
] | null | null | null |
python/cuML/test/test_kalman_filter.py
|
akshaysubr/cuml
|
7fceac26242f0155a5fa5cf1951af29230302e31
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml import KalmanFilter
import cudf
import numpy as np
from numba import cuda
from numpy.random import randn
from math import sqrt
def np_to_dataframe(df):
    """Copy a 2-D numpy array into a cudf DataFrame, one column per
    input column (columns are keyed by their integer index)."""
    frame = cudf.DataFrame()
    for col in range(df.shape[1]):
        frame[col] = df[:, col]
    return frame
@pytest.mark.parametrize('precision', ['single', 'double'])
def test_linear_kalman_filter_base(precision):
    # Smoke test for a 2-state / 1-measurement filter fed z = i for 100
    # steps; both tracked components must converge (RMSE below 0.1).
    f = KalmanFilter(dim_x=2, dim_z=1, precision=precision)
    if precision == 'single':
        dt = np.float32
    else:
        dt = np.float64
    # Filter matrices are uploaded to the GPU as device arrays.
    f.x = cuda.to_device(np.array([[0], [1]], dtype=dt))
    f.F = cuda.to_device(np.array([[1., 0],
                                   [1, 1.]], dtype=dt))
    f.H = cuda.to_device(np.array([[1., 0.]], dtype=dt))
    f.P = cuda.to_device(np.array([[1000, 0],
                                   [0., 1000]], dtype=dt))
    f.R = cuda.to_device(np.array([5.0], dtype=dt))
    var = 0.001
    f.Q = cuda.to_device(np.array([[.25*var, .5*var],
                                   [0.5*var, 1.1*var]], dtype=dt))
    rmse_x = 0
    rmse_v = 0
    n = 100
    for i in range(100):
        f.predict()
        z = i
        f.update(cuda.to_device(np.array([z], dtype=dt)))
        x = f.x.copy_to_host()
        # Accumulate squared errors against position target i, second
        # component target 1.
        rmse_x = rmse_x + ((x[0] - i)**2)
        rmse_v = rmse_v + ((x[1] - 1)**2)
    assert sqrt(rmse_x/n) < 0.1
    assert sqrt(rmse_v/n) < 0.1
@pytest.mark.parametrize('dim_x', [2, 10, 100])
@pytest.mark.parametrize('dim_z', [1, 2, 10, 100])
@pytest.mark.parametrize('precision', ['single'])
@pytest.mark.parametrize('input_type', ['numpy', 'cudf'])
def test_linear_kalman_filter(precision, dim_x, dim_z, input_type):
    # Parameterized over state/measurement dimensions and host input type
    # (plain numpy arrays vs cudf DataFrames). Feeds z = i for 100 steps.
    f = KalmanFilter(dim_x=dim_x, dim_z=dim_z, precision=precision)
    if precision == 'single':
        dt = np.float32
    else:
        dt = np.float64
    if input_type == 'numpy':
        f.x = np.zeros((dim_x, 1), dtype=dt)
        f.F = np.eye(dim_x, dtype=dt)
        # Only the first row of the measurement matrix is non-zero.
        h = np.zeros((dim_x, dim_z), dtype=dt)
        h[0] = 1
        f.H = h
        f.P = np.eye(dim_x, dtype=dt)*1000
        f.R = np.eye(dim_z, dtype=dt)*5.0
    else:
        # cudf path: arrays are built Fortran-ordered before conversion.
        f.x = np_to_dataframe(np.zeros((dim_x, 1), dtype=dt))
        tmp = np.eye(dim_x, dtype=dt, order='F')
        f.F = np_to_dataframe(tmp)
        h = np.zeros((dim_x, dim_z), dtype=dt, order='F')
        h[0] = 1
        f.H = np_to_dataframe(h)
        f.P = np_to_dataframe(np.eye(dim_x, dtype=dt, order='F')*1000)
        f.R = np_to_dataframe(np.eye(dim_z, dtype=dt, order='F')*5.0)
    rmse_x = 0
    rmse_v = 0
    n = 100
    for i in range(100):
        f.predict()
        z = i*np.ones(dim_z, dtype=dt)
        f.update(cuda.to_device(np.array(z, dtype=dt)))
        x = f.x.copy_to_host()
        rmse_x = rmse_x + ((x[0] - i)**2)
        rmse_v = rmse_v + ((x[1] - 1)**2)
    assert sqrt(rmse_x/n) < 0.1
    # NOTE(review): unlike the base test, F is the identity and only the
    # first state component is measured, so x[1] presumably stays at its
    # initial 0 and its squared error to 1 is exactly 1 each step --
    # which would make this RMSE exactly 1.0. Confirm against cuml's
    # KalmanFilter defaults before tightening this to an inequality.
    assert sqrt(rmse_v/n) == 1.0
| 26.549618
| 74
| 0.582519
|
4a0f57e69ad3ca50751cbfcbf0127a402dbbd2a8
| 25,844
|
py
|
Python
|
networkbrowserpli/src/NetworkBrowser.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | 2
|
2020-09-02T18:25:39.000Z
|
2020-09-02T18:39:07.000Z
|
networkbrowserpli/src/NetworkBrowser.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | null | null | null |
networkbrowserpli/src/NetworkBrowser.py
|
builder07/enigma2-plugins_3
|
2fc0d26891fba28ebea1550a39f5e8d7973db10c
|
[
"OLDAP-2.3"
] | 11
|
2015-02-26T20:59:14.000Z
|
2021-09-20T08:23:03.000Z
|
# -*- coding: utf-8 -*-
# for localized messages
from __init__ import _
from enigma import eTimer, getDesktop
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.Label import Label
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Network import iNetwork
from Components.Input import Input
from Components.config import getConfigListEntry, NoSave, config, ConfigIP
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.Console import Console
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE, SCOPE_ACTIVE_SKIN, fileExists
from Tools.LoadPixmap import LoadPixmap
from cPickle import dump, load
from os import path as os_path, stat, mkdir, remove
from time import time
from stat import ST_MTIME
import netscan
from MountManager import AutoMountManager
from AutoMount import iAutoMount
from MountEdit import AutoMountEdit
from UserDialog import UserDialog
def write_cache(cache_file, cache_data):
    # Does a cPickle dump of cache_data to cache_file, creating the
    # parent directory on demand (binary pickle protocol, -1 = highest).
    if not os_path.isdir( os_path.dirname(cache_file) ):
        try:
            mkdir( os_path.dirname(cache_file) )
        except OSError:
            # mkdir failed -- most likely the path exists as a plain file.
            print os_path.dirname(cache_file), '[Networkbrowser] is a file'
    fd = open(cache_file, 'w')
    dump(cache_data, fd, -1)
    fd.close()
def valid_cache(cache_file, cache_ttl):
    """Return 1 when cache_file exists and is at most cache_ttl seconds
    old, else 0 (a missing or unreadable file counts as expired)."""
    try:
        cached_at = stat(cache_file)[ST_MTIME]
    except:
        return 0
    age = time() - cached_at
    return 0 if age > cache_ttl else 1
def load_cache(cache_file):
    """Unpickle and return the data previously stored by write_cache()."""
    fd = open(cache_file)
    try:
        return load(fd)
    finally:
        fd.close()
class NetworkDescriptor:
    """Lightweight record describing a discovered network host or share.

    `name` is the display name; `description` typically carries the host
    address in this plugin (see the updateHostsList callers).
    """
    def __init__(self, name = "NetworkServer", description = ""):
        self.name = name
        self.description = description

    def __repr__(self):
        # Added for debuggability: the default object repr hid both fields.
        return "NetworkDescriptor(name=%r, description=%r)" % (self.name, self.description)
class NetworkBrowser(Screen):
skin = """
<screen name="NetworkBrowser" position="center,center" size="560,450" title="Network Neighbourhood">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="540,350" zPosition="10" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryPixmapAlphaTest(pos = (0, 0), size = (48, 48), png = 1), # index 1 is the expandable/expanded/verticalline icon
MultiContentEntryText(pos = (50, 4), size = (420, 26), font=2, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the Hostname
MultiContentEntryText(pos = (140, 5), size = (320, 25), font=0, flags = RT_HALIGN_LEFT, text = 3), # index 3 is the sharename
MultiContentEntryText(pos = (140, 26), size = (320, 17), font=1, flags = RT_HALIGN_LEFT, text = 4), # index 4 is the sharedescription
MultiContentEntryPixmapAlphaTest(pos = (45, 0), size = (48, 48), png = 5), # index 5 is the nfs/cifs icon
MultiContentEntryPixmapAlphaTest(pos = (90, 0), size = (48, 48), png = 6), # index 6 is the isMounted icon
],
"fonts": [gFont("Regular", 20),gFont("Regular", 14),gFont("Regular", 24)],
"itemHeight": 50
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,410" zPosition="1" size="560,2" />
<widget source="infotext" render="Label" position="0,420" size="560,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, iface,plugin_path):
    # Screen setup: remembers the interface to scan (auto-detected when
    # None), wires the color-key actions, and defers the actual network
    # scan until the layout is finished (startRun).
    Screen.__init__(self, session)
    self.skin_path = plugin_path
    self.session = session
    self.iface = iface
    if self.iface is None:
        self.iface = self.GetNetworkInterfaces()
    print "[Networkbrowser] Using Network Interface: %s" % self.iface
    self.networklist = None
    self.device = None
    self.mounts = None
    self.expanded = []  # host addresses currently shown with their shares
    self.cache_ttl = 604800 #Seconds cache is considered valid, 7 Days should be ok
    self.cache_file = '/etc/enigma2/networkbrowser.cache' #Path to cache directory
    self.Console = Console()
    self["key_red"] = StaticText(_("Close"))
    self["key_green"] = StaticText(_("Mounts management"))
    self["key_yellow"] = StaticText(_("Rescan"))
    self["key_blue"] = StaticText(_("Expert"))
    self["infotext"] = StaticText(_("Press OK to mount!"))
    self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
    {
        "ok": self.go,
        "back": self.close,
        "red": self.close,
        "green": self.keyGreen,
        "yellow": self.keyYellow,
        "blue": self.keyBlue,
    })
    self.list = []
    self.statuslist = []
    self.listindex = 0
    self["list"] = List(self.list)
    self["list"].onSelectionChanged.append(self.selectionChanged)
    self.onLayoutFinish.append(self.startRun)
    self.onShown.append(self.setWindowTitle)
    self.onClose.append(self.cleanup)
    # Timer delays the first uncached scan (see startRun/TimerFire).
    self.Timer = eTimer()
    self.Timer.callback.append(self.TimerFire)
def GetNetworkInterfaces(self):
    """Return the first adapter reported as up, falling back to 'eth0'.

    Candidate adapters are taken from iNetwork's full list first, then
    the configured list, then the installed list.
    """
    candidates = [(iNetwork.getFriendlyAdapterName(a), a) for a in iNetwork.getAdapterList()]
    if not candidates:
        candidates = [(iNetwork.getFriendlyAdapterName(a), a) for a in iNetwork.getConfiguredAdapters()]
    if not candidates:
        candidates = [(iNetwork.getFriendlyAdapterName(a), a) for a in iNetwork.getInstalledAdapters()]
    for friendly_name, adapter in candidates:
        if iNetwork.getAdapterAttribute(adapter, 'up') is True:
            return adapter
    return 'eth0'
def cleanup(self):
    # Screen close handler: drop the timer and stop background consoles.
    del self.Timer
    iAutoMount.stopMountConsole()
    iNetwork.stopRestartConsole()
    iNetwork.stopGetInterfacesConsole()
def startRun(self):
    # Kick off the listing: use the cache immediately when valid,
    # otherwise delay the fresh scan by 3 seconds via the timer.
    self.expanded = []
    self.setStatus('update')
    self.mounts = iAutoMount.getMountsList()
    self["infotext"].setText("")
    self.vc = valid_cache(self.cache_file, self.cache_ttl)
    if self.cache_ttl > 0 and self.vc != 0:
        self.process_NetworkIPs()
    else:
        self.Timer.start(3000)
def TimerFire(self):
    # One-shot: run the (possibly slow) scan after the initial delay.
    self.Timer.stop()
    self.process_NetworkIPs()
def setWindowTitle(self):
    self.setTitle(_("Browse network neighbourhood"))
def keyGreen(self):
    # Green: open the mount management screen.
    self.session.open(AutoMountManager, None, self.skin_path)
def keyYellow(self):
    # Yellow: invalidate the cache and rescan from scratch.
    if (os_path.exists(self.cache_file) == True):
        remove(self.cache_file)
    self.startRun()
def keyBlue(self):
    # Blue: manual IP entry ("Expert") dialog; result lands in scanIPclosed.
    self.session.openWithCallback(self.scanIPclosed,ScanIP)
def scanIPclosed(self,result):
    # Callback from the ScanIP dialog. result[0] is the entered address
    # (falsy on cancel); result[1] selects the mode ("address" scans the
    # /24 subnet, "nfs" adds a single host entry directly).
    if result[0]:
        if result[1] == "address":
            # NOTE(review): this prints result[1] (the mode string), not
            # the address in result[0] -- looks unintended, verify.
            print "[Networkbrowser] got IP:",result[1]
            nwlist = []
            nwlist.append(netscan.netzInfo(result[0] + "/24"))
            self.networklist += nwlist[0]
        elif result[1] == "nfs":
            self.networklist.append(['host', result[0], result[0] , '00:00:00:00:00:00', result[0], 'Master Browser'])
        if len(self.networklist) > 0:
            write_cache(self.cache_file, self.networklist)
            self.updateHostsList()
def setStatus(self,status = None):
    # Replace the list contents with a single status row: 'update' while
    # scanning, 'error' when no devices were found. Icons come from the
    # active skin when present, else from the plugin's bundled icons.
    if status:
        self.statuslist = []
        if status == 'update':
            if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/update.png")):
                statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/update.png"))
            else:
                statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/update.png"))
            self.statuslist.append(( ['info'], statuspng, _("Searching your network. Please wait..."), None, None, None, None ))
            self['list'].setList(self.statuslist)
        elif status == 'error':
            if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/error.png")):
                statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/error.png"))
            else:
                statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/error.png"))
            self.statuslist.append(( ['info'], statuspng, _("No network devices found!"), None, None, None, None ))
            self['list'].setList(self.statuslist)
def process_NetworkIPs(self):
    # Load the host list from the on-disk cache when it is still valid;
    # fall back to a fresh scan when the cache is disabled, expired, or
    # unreadable.
    self.inv_cache = 0
    self.vc = valid_cache(self.cache_file, self.cache_ttl)
    if self.cache_ttl > 0 and self.vc != 0:
        print '[Networkbrowser] Loading network cache from ',self.cache_file
        try:
            self.networklist = load_cache(self.cache_file)
        except:
            # Corrupt/unreadable cache: force a rescan below.
            self.inv_cache = 1
    if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
        print '[Networkbrowser] Getting fresh network list'
        self.getNetworkIPs()
    else:
        if len(self.networklist) > 0:
            self.updateHostsList()
        else:
            self.setStatus('error')
def getNetworkIPs(self):
    # Fresh scan of the interface's /24 subnet: a synchronous netscan
    # pass first, then an asynchronous nmap ping scan whose XML output is
    # handled by Stage1SettingsComplete.
    nwlist = []
    sharelist = []
    self.IP = iNetwork.getAdapterAttribute(self.iface, "ip")
    if len(self.IP):
        strIP = str(self.IP[0]) + "." + str(self.IP[1]) + "." + str(self.IP[2]) + ".0/24"
        nwlist.append(netscan.netzInfo(strIP))
        self.networklist = nwlist[0]
    if len(self.IP) and (self.IP[0] != 0 or self.IP[1] != 0 or self.IP[2] != 0):
        strIP = str(self.IP[0]) + "." + str(self.IP[1]) + "." + str(self.IP[2]) + ".0/24"
        # -sP: ping scan only; -oX -: XML to stdout for the callback.
        self.Console.ePopen("nmap -oX - " + strIP + ' -sP', self.Stage1SettingsComplete)
    else:
        # No usable address (e.g. 0.0.0.x): show whatever netscan found.
        write_cache(self.cache_file, self.networklist)
        if len(self.networklist) > 0:
            self.updateHostsList()
        else:
            self.setStatus('error')
def Stage1SettingsComplete(self, result, retval, extra_args):
    # Console callback: parse nmap's XML output and append each found
    # host (short hostname uppercased, address, zeroed MAC placeholder)
    # to the network list, then cache and display it.
    import xml.dom.minidom
    dom = xml.dom.minidom.parseString(result)
    scan_result = []
    for dhost in dom.getElementsByTagName('host'):
        # host ip
        host = ''
        hostname = ''
        host = dhost.getElementsByTagName('address')[0].getAttributeNode('addr').value
        for dhostname in dhost.getElementsByTagName('hostname'):
            # Keep only the leading label of the (last) DNS name.
            hostname = dhostname.getAttributeNode('name').value
            hostname = hostname.split('.')
            hostname = hostname[0]
            host = dhost.getElementsByTagName('address')[0].getAttributeNode('addr').value
        scan_result.append(['host',str(hostname).upper(),str(host),'00:00:00:00:00:00'])
    self.networklist += scan_result
    write_cache(self.cache_file, self.networklist)
    if len(self.networklist) > 0:
        self.updateHostsList()
    else:
        self.setStatus('error')
def getNetworkShares(self,hostip,hostname,devicetype):
    # Query one host for its SMB and NFS shares. Credentials come from a
    # per-host cache file when present, else placeholder values are used.
    # IPC$ administrative shares are filtered out; only 6-field records
    # from netscan are accepted.
    sharelist = []
    self.sharecache_file = None
    self.sharecache_file = '/etc/enigma2/' + hostname.strip() + '.cache' #Path to cache directory
    if os_path.exists(self.sharecache_file):
        print '[Networkbrowser] Loading userinfo from ',self.sharecache_file
        try:
            self.hostdata = load_cache(self.sharecache_file)
            username = self.hostdata['username']
            password = self.hostdata['password']
        except:
            username = "username"
            password = "password"
    else:
        username = "username"
        password = "password"
    # NOTE(review): both branches below run the same SMB+NFS queries and
    # differ only in log prefixes; devicetype does not change the probing.
    if devicetype == 'unix':
        smblist=netscan.smbShare(hostip,hostname,username,password)
        print '[Networkbrowser] unix smblist ',smblist
        for x in smblist:
            if len(x) == 6:
                if x[3] != 'IPC$':
                    sharelist.append(x)
        print '[Networkbrowser] unix sharelist ',sharelist
        nfslist=netscan.nfsShare(hostip,hostname)
        print '[Networkbrowser] unix nfslist ',nfslist
        for x in nfslist:
            if len(x) == 6:
                sharelist.append(x)
        print '[Networkbrowser] unix sharelist ',sharelist
    else:
        smblist=netscan.smbShare(hostip,hostname,username,password)
        print '[Networkbrowser] smblist ',smblist
        for x in smblist:
            if len(x) == 6:
                if x[3] != 'IPC$':
                    sharelist.append(x)
        print '[Networkbrowser] sharelist ',sharelist
        nfslist=netscan.nfsShare(hostip,hostname)
        print '[Networkbrowser] nfslist ',nfslist
        for x in nfslist:
            if len(x) == 6:
                sharelist.append(x)
        print '[Networkbrowser] sharelist ',sharelist
    print '[Networkbrowser] sharelist final ',sharelist
    return sharelist
def updateHostsList(self):
    # Rebuild the list view with one collapsed row per discovered host,
    # sorted by IP address.
    self.list = []
    self.network = {}
    # Group scan entries by their address field (x[2]).
    for x in self.networklist:
        if not self.network.has_key(x[2]):
            self.network[x[2]] = []
        self.network[x[2]].append((NetworkDescriptor(name = x[1], description = x[2]), x))
    for x in self.network.keys():
        hostentry = self.network[x][0][1]
        name = hostentry[2] + " ( " +hostentry[1].strip() + " )"
        if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png")):
            expandableIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png"))
        else:
            expandableIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/host.png"))
        self.list.append(( hostentry, expandableIcon, name, None, None, None, None ))
    if len(self.list):
        # Pad each octet to width 3 so a plain string sort orders the
        # rows numerically by IP, then strip the padding again.
        for entry in self.list:
            entry[0][2]= "%3s.%3s.%3s.%3s" % tuple(entry[0][2].split("."))
        self.list.sort(key=lambda x: x[0][2])
        for entry in self.list:
            entry[0][2]= entry[0][2].replace(" ", "")
    self["list"].setList(self.list)
    self["list"].setIndex(self.listindex)
def updateNetworkList(self):
    # Rebuild the list view like updateHostsList, but for every host in
    # self.expanded also append one row per share reported by
    # getNetworkShares(). The mount list is refreshed first so the
    # mounted/unmounted icons are current.
    self.list = []
    self.network = {}
    self.mounts = iAutoMount.getMountsList() # reloading mount list
    # Group scan entries by their address field (x[2]).
    for x in self.networklist:
        if not self.network.has_key(x[2]):
            self.network[x[2]] = []
        self.network[x[2]].append((NetworkDescriptor(name = x[1], description = x[2]), x))
    # Removed a `self.network.keys().sort()` statement here: in Python 2
    # it sorted a temporary list and discarded it (a no-op). The actual
    # display ordering is applied to self.list further below.
    for x in self.network.keys():
        # Entries carrying a zeroed MAC field are treated as unix hosts,
        # everything else as windows.
        if self.network[x][0][1][3] == '00:00:00:00:00:00':
            self.device = 'unix'
        else:
            self.device = 'windows'
        if x in self.expanded:
            networkshares = self.getNetworkShares(x,self.network[x][0][1][1].strip(),self.device)
            hostentry = self.network[x][0][1]
            name = hostentry[2] + " ( " +hostentry[1].strip() + " )"
            if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png")):
                expandedIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png"))
            else:
                expandedIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/host.png"))
            self.list.append(( hostentry, expandedIcon, name, None, None, None, None ))
            for share in networkshares:
                self.list.append(self.BuildNetworkShareEntry(share))
        else: # HOSTLIST - VIEW
            hostentry = self.network[x][0][1]
            name = hostentry[2] + " ( " +hostentry[1].strip() + " )"
            if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png")):
                expandableIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/host.png"))
            else:
                expandableIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/host.png"))
            self.list.append(( hostentry, expandableIcon, name, None, None, None, None ))
    if len(self.list):
        # Pad each octet to width 3 so a plain string sort orders by IP,
        # then strip the padding again.
        for entry in self.list:
            entry[0][2]= "%3s.%3s.%3s.%3s" % tuple(entry[0][2].split("."))
        self.list.sort(key=lambda x: x[0][2])
        for entry in self.list:
            entry[0][2]= entry[0][2].replace(" ", "")
    self["list"].setList(self.list)
    self["list"].setIndex(self.listindex)
def BuildNetworkShareEntry(self,share):
    # Build one list row for a share record from getNetworkShares().
    # Field layout differs by type: smbShare stores the share dir at
    # index 3 and the description at 5; nfsShare the reverse (4 and 3).
    if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/verticalLine.png")):
        verticallineIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/verticalLine.png"))
    else:
        verticallineIcon = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/verticalLine.png"))
    sharetype = share[0]
    localsharename = share[1]
    sharehost = share[2]
    if sharetype == 'smbShare':
        sharedir = share[3]
        sharedescription = share[5]
    else:
        sharedir = share[4]
        sharedescription = share[3]
    if sharetype == 'nfsShare':
        if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/i-nfs.png")):
            newpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/i-nfs.png"))
        else:
            newpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/i-nfs.png"))
    else:
        if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/i-smb.png")):
            newpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/i-smb.png"))
        else:
            newpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/i-smb.png"))
    # Check the automount list to decide which mounted/unmounted icon to
    # show (NFS paths are compared with slashes stripped).
    self.isMounted = False
    for sharename, sharedata in self.mounts.items():
        if sharedata['ip'] == sharehost:
            if sharetype == 'nfsShare' and sharedata['mounttype'] == 'nfs':
                sharedir = sharedir.replace('/', '')
                if sharedir == sharedata['sharedir']:
                    if sharedata["isMounted"] is True:
                        self.isMounted = True
            if sharetype == 'smbShare' and sharedata['mounttype'] == 'cifs':
                if sharedir == sharedata['sharedir']:
                    if sharedata["isMounted"] is True:
                        self.isMounted = True
    if self.isMounted is True:
        if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/ok.png")):
            isMountedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/ok.png"))
        else:
            isMountedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/ok.png"))
    else:
        if os_path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/cancel.png")):
            isMountedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "networkbrowser/cancel.png"))
        else:
            isMountedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkBrowser/icons/cancel.png"))
    return((share, verticallineIcon, None, sharedir, sharedescription, newpng, isMountedpng))
def selectionChanged(self):
    # Refresh the hint line whenever the highlighted list entry changes,
    # and remember the list index so it can be restored after a rebuild.
    entry = self["list"].getCurrent()
    self.listindex = self["list"].getIndex()
    if not entry:
        return
    if len(entry[0]) < 2:
        return
    if entry[0][0] in ("nfsShare", "smbShare"):
        hint = _("Press OK to mount this share!")
    elif entry[0][2] in self.expanded:
        hint = _("Press OK to collapse this host")
    else:
        hint = _("Press OK to expand this host")
    self["infotext"].setText(hint)
def go(self):
    # OK pressed on the network list: expand/collapse a host entry, or
    # start the mount process for the selected NFS/CIFS share.
    sel = self["list"].getCurrent()
    if sel is None:
        return
    if len(sel[0]) <= 1:
        return
    selectedhost = sel[0][2]
    selectedhostname = sel[0][1]
    self.hostcache_file = None
    if sel[0][0] == 'host': # host entry selected
        print '[Networkbrowser] sel host'
        if selectedhost in self.expanded:
            # Already expanded: collapse it again.
            self.expanded.remove(selectedhost)
            self.updateNetworkList()
        else:
            self.hostcache_file = None
            self.hostcache_file = '/etc/enigma2/' + selectedhostname.strip() + '.cache' #Path to cache directory
            if os_path.exists(self.hostcache_file):
                print '[Networkbrowser] Loading userinfo cache from ',self.hostcache_file
                try:
                    # Cached credentials exist: skip the question dialog.
                    self.hostdata = load_cache(self.hostcache_file)
                    self.passwordQuestion(False)
                except:
                    # Cache unreadable: fall back to asking the user.
                    self.session.openWithCallback(self.passwordQuestion, MessageBox, (_("Do you want to enter a username and password for this host?\n") ) )
            else:
                self.session.openWithCallback(self.passwordQuestion, MessageBox, (_("Do you want to enter a username and password for this host?\n") ) )
    if sel[0][0] == 'nfsShare': # share entry selected
        print '[Networkbrowser] sel nfsShare'
        self.openMountEdit(sel[0])
    if sel[0][0] == 'smbShare': # share entry selected
        print '[Networkbrowser] sel cifsShare'
        self.hostcache_file = None
        self.hostcache_file = '/etc/enigma2/' + selectedhostname.strip() + '.cache' #Path to cache directory
        if os_path.exists(self.hostcache_file):
            # NOTE(review): prints self.sharecache_file although the path
            # just built is self.hostcache_file — looks like a typo; verify.
            print '[Networkbrowser] userinfo found from ',self.sharecache_file
            self.openMountEdit(sel[0])
        else:
            self.session.openWithCallback(self.passwordQuestion, MessageBox, (_("Do you want to enter a username and password for this host?\n") ) )
def passwordQuestion(self, ret=False):
    # Callback of the credentials MessageBox. On a positive answer, open
    # the username/password dialog for the selected host; otherwise act on
    # the selection directly (toggle host expansion or open the mount editor).
    sel = self["list"].getCurrent()
    selectedhost = sel[0][2]
    selectedhostname = sel[0][1]
    if ret == True:
        self.session.openWithCallback(self.UserDialogClosed, UserDialog, self.skin_path, selectedhostname.strip())
        return
    entrytype = sel[0][0]
    if entrytype == 'host':
        # Toggle the expanded/collapsed state of this host.
        if selectedhost in self.expanded:
            self.expanded.remove(selectedhost)
        else:
            self.expanded.append(selectedhost)
        self.updateNetworkList()
    elif entrytype in ('nfsShare', 'smbShare'):
        self.openMountEdit(sel[0])
def UserDialogClosed(self, *ret):
    # The user dialog hands its result back as positional arguments; only
    # re-run the selection handler when it actually returned something.
    # (*ret is always a tuple, so a plain truthiness test is equivalent to
    # the 'is not None and len(ret)' check.)
    if ret:
        self.go()
def openMountEdit(self, selection):
    # Build an automount configuration dict for the selected share and open
    # the AutoMountEdit screen pre-filled with it. If an automount entry
    # already exists for the same host/share, it replaces the fresh defaults.
    if selection is not None and len(selection):
        mounts = iAutoMount.getMountsList()
        if selection[0] == 'nfsShare': # share entry selected
            #Initialize blank mount enty
            data = { 'isMounted': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype' : False, 'options' : False }
            # add data
            data['mounttype'] = 'nfs'
            data['active'] = True
            data['ip'] = selection[2]
            data['sharename'] = selection[1]
            data['sharedir'] = selection[4]
            data['options'] = "rw,nolock,tcp"
            # Prefer an existing mount entry for the same host + export.
            for sharename, sharedata in mounts.items():
                if sharedata['ip'] == selection[2] and sharedata['sharedir'] == selection[4]:
                    data = sharedata
            self.session.openWithCallback(self.MountEditClosed,AutoMountEdit, self.skin_path, data)
        if selection[0] == 'smbShare': # share entry selected
            #Initialize blank mount enty
            data = { 'isMounted': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype' : False, 'options' : False }
            # add data
            data['mounttype'] = 'cifs'
            data['active'] = True
            data['ip'] = selection[2]
            data['sharename'] = selection[3] + "@" + selection[1]
            data['sharedir'] = selection[3]
            data['options'] = "rw"
            self.sharecache_file = None
            self.sharecache_file = '/etc/enigma2/' + selection[1].strip() + '.cache' #Path to cache directory
            if os_path.exists(self.sharecache_file):
                print '[Networkbrowser] Loading userinfo from ',self.sharecache_file
                try:
                    # Pre-fill credentials from the per-host cache file.
                    self.hostdata = load_cache(self.sharecache_file)
                    data['username'] = self.hostdata['username']
                    data['password'] = self.hostdata['password']
                except:
                    # Unreadable cache: fall back to placeholder credentials.
                    data['username'] = "username"
                    data['password'] = "password"
            else:
                data['username'] = "username"
                data['password'] = "password"
            # Prefer an existing mount entry for the same host + share.
            for sharename, sharedata in mounts.items():
                if sharedata['ip'] == selection[2].strip() and sharedata['sharedir'] == selection[3].strip():
                    data = sharedata
            self.session.openWithCallback(self.MountEditClosed,AutoMountEdit, self.skin_path, data)
def MountEditClosed(self, returnValue=None):
    # Callback from AutoMountEdit: refresh the share list after the editor
    # closed without a return value (i.e. a plain close).
    # Fixed '== None' to the correct identity check 'is None', which is
    # also safe against objects overriding __eq__.
    if returnValue is None:
        self.updateNetworkList()
class ScanIP(Screen, ConfigListScreen):
    """Small dialog asking for an IP address to scan for network shares.

    Closes with a 2-tuple: (ip_string, "nfs") for an NFS share scan,
    (ip_string, "address") for an address-range scan, or (None, None)
    on cancel / when the address was left at 0.0.0.0.
    """
    skin = """
	<screen name="ScanIP" position="center,center" size="560,80" title="Scan IP" >
		<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
		<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
		<widget name="config" position="5,50" size="540,25" scrollbarMode="showOnDemand" />
	</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session

        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Scan NFS share"))
        self["key_yellow"] = StaticText(_("Scan range"))

        self["actions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "back": self.exit,
            "red": self.exit,
            "cancel": self.exit,
            "green": self.goNfs,
            "yellow": self.goAddress,
        }, -1)

        self.ipAddress = ConfigIP(default=[0, 0, 0, 0])

        ConfigListScreen.__init__(self, [
            getConfigListEntry(_("IP Address"), self.ipAddress),
        ], self.session)
        self.onLayoutFinish.append(self.layoutFinished)

    def exit(self):
        # Report "nothing selected" to the caller.
        self.close((None, None))

    def layoutFinished(self):
        self.setWindowTitle()

    def setWindowTitle(self):
        self.setTitle(_("Enter IP to scan..."))

    def goAddress(self):
        # Scan a whole address range starting at the entered IP.
        if self.ipAddress.getText() != "0.0.0.0":
            self.close((self.ipAddress.getText(), "address"))
        else:
            # BUG FIX: was a bare 'self.exit' attribute reference (a no-op),
            # so pressing the button with an unset address did nothing.
            self.exit()

    def goNfs(self):
        # Scan the entered IP for exported NFS shares.
        if self.ipAddress.getText() != "0.0.0.0":
            self.close((self.ipAddress.getText(), "nfs"))
        else:
            # BUG FIX: same missing call parentheses as in goAddress().
            self.exit()
| 40.763407
| 186
| 0.698653
|
4a0f582785fdaf2d30b3e6980cfb54b3b07dffc5
| 2,603
|
py
|
Python
|
catkin_ws/src/99-attic/adafruit_imu/script/adafruit_imu.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-06-25T02:51:25.000Z
|
2018-06-25T02:51:27.000Z
|
catkin_ws/src/99-attic/adafruit_imu/script/adafruit_imu.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | null | null | null |
catkin_ws/src/99-attic/adafruit_imu/script/adafruit_imu.py
|
yxiao1996/dev
|
e2181233aaa3d16c472b792b58fc4863983825bd
|
[
"CC-BY-2.0"
] | 2
|
2018-09-04T06:44:21.000Z
|
2018-10-15T02:30:50.000Z
|
#!/usr/bin/env python
import rospy
import time
import numpy as np
from Adafruit_LSM303 import Adafruit_LSM303
from Gyro_L3GD20 import Gyro_L3GD20
from sensor_msgs.msg import Imu
from sensor_msgs.msg import MagneticField
class AdafruitIMU(object):
    """ROS node that periodically reads an Adafruit LSM303 (accelerometer +
    compass) and an L3GD20 gyroscope and republishes the readings as
    sensor_msgs/Imu and sensor_msgs/MagneticField messages."""

    # Physical constants
    G = 9.80665  # Standard gravity at sea level (should be g, but
                 # capitalization rules due to coding practices)
    DEG_TO_RAD = 0.0174533  # degrees to radians

    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing " % (self.node_name))
        # Setup compass and accelerometer
        self.compass_accel = Adafruit_LSM303()
        # Setup gyroscope
        self.gyro = Gyro_L3GD20()
        # Setup Parameters
        self.pub_timestep = self.setupParam("~pub_timestep", 0.02)
        # Publications
        self.pub_imu = rospy.Publisher("~adafruit_imu", Imu, queue_size=10)
        self.pub_mag = rospy.Publisher("~adafruit_mag", MagneticField, queue_size=10)
        # Timer driving publish() at the configured rate.
        self.pub_timer = rospy.Timer(rospy.Duration.from_sec(self.pub_timestep), self.publish)

    def setupParam(self, param_name, default_value):
        """Read a parameter (with fallback default) and write the resolved
        value back to the parameter server for transparency."""
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def publish(self, event):
        """Timer callback: read both sensors and publish Imu + MagneticField."""
        compass_accel = self.compass_accel.read()
        compass = compass_accel[0:3]
        accel = compass_accel[3:6]
        gyro = self.gyro.read()
        # Put together an IMU message
        imu_msg = Imu()
        imu_msg.header.stamp = rospy.Time.now()
        imu_msg.orientation_covariance[0] = -1  # orientation not estimated
        # BUG FIX: the original assigned a scalar to imu_msg.angular_velocity
        # three times (each overwrite losing the previous axis) and referenced
        # DEG_TO_RAD / G without 'self.', which raises NameError at runtime.
        # Assign each vector component and qualify the class constants.
        imu_msg.angular_velocity.x = gyro[0] * self.DEG_TO_RAD
        imu_msg.angular_velocity.y = gyro[1] * self.DEG_TO_RAD
        imu_msg.angular_velocity.z = gyro[2] * self.DEG_TO_RAD
        imu_msg.linear_acceleration.x = accel[0] * self.G
        imu_msg.linear_acceleration.y = accel[1] * self.G
        imu_msg.linear_acceleration.z = accel[2] * self.G
        self.pub_imu.publish(imu_msg)
        # Put together a magnetometer message
        mag_msg = MagneticField()
        mag_msg.header.stamp = rospy.Time.now()
        mag_msg.magnetic_field.x = compass[0]
        mag_msg.magnetic_field.y = compass[1]
        mag_msg.magnetic_field.z = compass[2]
        self.pub_mag.publish(mag_msg)
if __name__ == "__main__":
    # Register the node with the ROS master, construct the publisher object
    # (all work happens in its timer callback) and block until shutdown.
    rospy.init_node("Adafruit_IMU", anonymous=False)
    node = AdafruitIMU()
    rospy.spin()
| 33.371795
| 93
| 0.657318
|
4a0f58fd0780fd731cbd9b6428bd57da3aba6f0c
| 1,125
|
py
|
Python
|
vim/.vim-python/to-multiline-method-definition.py
|
borisbabic/dotfiles
|
5605001482356d2736afa526b1f53e80f0d79a4f
|
[
"MIT"
] | 4
|
2018-09-02T00:35:38.000Z
|
2020-12-08T02:01:52.000Z
|
vim/.vim-python/to-multiline-method-definition.py
|
borisbabic/dotfiles
|
5605001482356d2736afa526b1f53e80f0d79a4f
|
[
"MIT"
] | null | null | null |
vim/.vim-python/to-multiline-method-definition.py
|
borisbabic/dotfiles
|
5605001482356d2736afa526b1f53e80f0d79a4f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import fileinput
def prependTabs(string, numTabs=1, tabLength=4):
    """Return *string* prefixed with numTabs "tabs", each tabLength spaces wide."""
    pad = ' ' * (numTabs * tabLength)
    return pad + string
def handleMethodCallLine(methodCallLine):
    """Split a one-line call "name(a, b, c)" into a list of lines: the
    opening "name(" followed by one argument per line, each indented two
    tab-stops, with a trailing comma on every argument but the last.
    """
    tempLines = methodCallLine.split('(')
    # Opening line: everything before the '(' plus the '(' itself.
    newLines = [tempLines[0] + '(']
    # Arguments are whatever followed the '(' with the trailing ')' removed.
    argumentsString = tempLines[1].replace(')', '')
    argumentsList = argumentsString.split(', ')
    # FIX: the original used map() and then sliced/indexed the result,
    # which works on Python 2 (map returns a list) but raises TypeError on
    # Python 3 (map returns an iterator). List comprehensions behave
    # identically on both.
    strippedArguments = [a.strip() for a in argumentsList]
    tabbedArguments = [prependTabs(a, 2) for a in strippedArguments]
    # All arguments except the last get a trailing comma.
    newLines.extend(a + ',' for a in tabbedArguments[:-1])
    newLines.append(tabbedArguments[-1])
    return newLines
def main():
    # Read input lines (stdin or the files named on the command line).
    # NOTE(review): indexing fileinput.input() with [0] relies on the
    # Python 2 FileInput sequence protocol; it raises on Python 3.
    lines = fileinput.input();
    # Only the first line is transformed into a multi-line form.
    newLines = handleMethodCallLine(lines[0])
    # Close the multi-line definition with ") {" at one tab-stop.
    newLines.append(prependTabs(') {'))
    #print newLines
    for line in newLines:
        print line

if __name__ == "__main__":
    main()
| 25.568182
| 80
| 0.672889
|
4a0f590f18f4a8feae830f7f1b9b9f59c6901954
| 1,233
|
py
|
Python
|
saas/backend/apps/role/migrations/0003_auto_20200615_2000.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 7
|
2021-08-13T03:48:16.000Z
|
2021-12-20T15:31:38.000Z
|
saas/backend/apps/role/migrations/0003_auto_20200615_2000.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 456
|
2021-08-16T02:13:57.000Z
|
2022-03-30T10:02:49.000Z
|
saas/backend/apps/role/migrations/0003_auto_20200615_2000.py
|
nannan00/bk-iam-saas
|
217600fa6e5fd466fff9c33c20c4dbd7c69f77d9
|
[
"MIT"
] | 17
|
2021-08-10T04:08:46.000Z
|
2022-03-14T14:24:36.000Z
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 2.2.10 on 2020-06-15 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 2.2.10, makemigrations):
    # replaces Role.type's choices with the four role types. Generated
    # operations are left untouched; only this comment was added.

    dependencies = [
        ('role', '0002_auto_20200615_1655'),
    ]

    operations = [
        migrations.AlterField(
            model_name='role',
            name='type',
            field=models.CharField(choices=[('staff', '个人用户'), ('super_manager', '超级管理员'), ('system_manager', '系统管理员'), ('rating_manager', '分级管理员')], max_length=32, verbose_name='申请单类型'),
        ),
    ]
| 42.517241
| 187
| 0.711273
|
4a0f5920e56174172fc4baea3eab5372ecd3a462
| 1,850
|
py
|
Python
|
ERFNet-CULane-PyTorch/dataset/voc_aug.py
|
yg13/Codes-for-Lane-Detection
|
3d44e0f62122c1d7757d3e5335b54d66aaa0aa52
|
[
"MIT"
] | 3
|
2020-09-14T07:55:02.000Z
|
2022-03-10T12:31:42.000Z
|
ERFNet-CULane-PyTorch/dataset/voc_aug.py
|
yg13/Codes-for-Lane-Detection
|
3d44e0f62122c1d7757d3e5335b54d66aaa0aa52
|
[
"MIT"
] | null | null | null |
ERFNet-CULane-PyTorch/dataset/voc_aug.py
|
yg13/Codes-for-Lane-Detection
|
3d44e0f62122c1d7757d3e5335b54d66aaa0aa52
|
[
"MIT"
] | 1
|
2021-01-13T09:24:12.000Z
|
2021-01-13T09:24:12.000Z
|
import os
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset
class VOCAugDataSet(Dataset):
    """CULane lane-detection dataset.

    Each line of <dataset_path>/<data_list>.txt has the form
        <image path> <label path> <e1> <e2> <e3> <e4>
    where e1..e4 are 0/1 flags for the existence of each of the 4 lanes.
    Image/label paths in the list file are rooted at the dataset directory,
    i.e. dataset_path with its trailing '/list' component removed.
    """

    def __init__(self, dataset_path='/home/yuliangguo/Datasets/CULane/list', data_list='train', transform=None):
        # Dataset root = the list directory with the '/list' suffix removed.
        data_root = dataset_path.replace('/list', '')
        self.img_list = []     # absolute image paths
        self.img = []          # image paths as written in the list file
        self.label_list = []   # absolute label (segmentation map) paths
        self.exist_list = []   # per-sample np.array of 4 lane-existence flags
        with open(os.path.join(dataset_path, data_list + '.txt')) as f:
            for line in f:
                # Split each line once instead of five times (original code
                # re-ran strip().split(" ") for every field).
                fields = line.strip().split(" ")
                self.img.append(fields[0])
                self.img_list.append(data_root + fields[0])
                self.label_list.append(data_root + fields[1])
                self.exist_list.append(np.array([int(fields[2]), int(fields[3]), int(fields[4]), int(fields[5])]))
        self.img_path = dataset_path
        self.gt_path = dataset_path
        self.transform = transform
        self.is_testing = data_list == 'test_img'  # 'val'

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, idx):
        image = cv2.imread(os.path.join(self.img_path, self.img_list[idx])).astype(np.float32)
        label = cv2.imread(os.path.join(self.gt_path, self.label_list[idx]), cv2.IMREAD_UNCHANGED)
        exist = self.exist_list[idx]
        # Crop away the top 240 rows (region above the road).
        image = image[240:, :, :]
        label = label[240:, :]
        label = label.squeeze()
        if self.transform:
            image, label = self.transform((image, label))
        # HWC float image -> CHW float tensor; label -> long tensor.
        image = torch.from_numpy(image).permute(2, 0, 1).contiguous().float()
        label = torch.from_numpy(label).contiguous().long()
        if self.is_testing:
            return image, label, self.img[idx]
        else:
            return image, label, exist
| 41.111111
| 182
| 0.590811
|
4a0f59e6b4ad352222da58b1330ec9760e7b74de
| 25,106
|
py
|
Python
|
webroot/cgi-bin/fixturegen.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
webroot/cgi-bin/fixturegen.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
webroot/cgi-bin/fixturegen.py
|
elocemearg/atropine
|
894010bcc89d4e6962cf3fc15ef526068c38898d
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/python3

import sys;
import cgi;
import cgitb;
import os;
import cgicommon;
import urllib.request, urllib.parse, urllib.error;
import importlib;
import json;

# Show tracebacks in the browser instead of a bare 500 error.
cgitb.enable();

# Emit the CGI response header before any page content.
cgicommon.writeln("Content-Type: text/html; charset=utf-8");
cgicommon.writeln("");

baseurl = "/cgi-bin/fixturegen.py";

# Parse the request (GET or POST form fields).
form = cgi.FieldStorage();
tourney_name = form.getfirst("tourney");
tourney = None;
request_method = os.environ.get("REQUEST_METHOD", "");

# Generators suggested for round 1 vs. for later rounds.
fixgens_r1 = [ "fixgen_manual", "fixgen_random", "fixgen_random_seeded", "fixgen_round_robin" ]
fixgens_not_r1 = [ "fixgen_swiss", "fixgen_random", "fixgen_final" ]

# Make the tourney modules importable, then import them.
cgicommon.set_module_path();
import generators;
import countdowntourney;
import htmlform;
def int_or_none(s):
    """Return s converted to an int, or None if it isn't a valid integer."""
    try:
        return int(s)
    except (TypeError, ValueError):
        # TypeError covers None/unsupported types, ValueError covers
        # non-numeric strings; the original bare 'except' would also have
        # swallowed unrelated errors such as KeyboardInterrupt.
        return None
class FixtureGeneratorSettings(object):
    """Dict-like container for the settings passed to a fixture generator.

    Holds the current settings plus, read-only, the settings the generator
    used the last time it produced fixtures for this tourney. Individual
    previous values can be copied into the live settings with
    load_from_previous(). Keys beginning with '_' are never retained as
    previous settings.
    """

    def __init__(self, default_settings=None):
        # Keep only the non-private previous settings.
        if default_settings:
            self.default_settings = {k: default_settings[k] for k in default_settings if k[0] != '_'}
        else:
            self.default_settings = dict()
        self.settings = dict()

    def __len__(self):
        return len(self.settings)

    def __getitem__(self, key):
        return self.settings[key]

    def __setitem__(self, key, value):
        self.settings[key] = value

    def __delitem__(self, key):
        del self.settings[key]

    def __iter__(self):
        return iter(self.settings)

    def __contains__(self, key):
        return key in self.settings

    def get(self, key, default_value=None):
        return self.settings.get(key, default_value)

    def load_from_previous(self, key):
        # Copy one previous value into the live settings, if it exists.
        if key in self.default_settings:
            self.settings[key] = self.default_settings[key]

    def get_previous(self, key, default_value=None):
        return self.default_settings.get(key, default_value)

    def get_previous_settings(self):
        return self.default_settings
def show_fixtures_to_accept(tourney, generator_name, fixtures, fixgen_settings):
    # Render the generated fixtures as a preview page with Accept/Discard
    # controls. Nothing is written to the database here: the whole plan is
    # serialised to JSON in a hidden form field and only stored when the
    # user submits "Accept Fixtures".
    # NOTE(review): iterates the module-level global 'generated_groups'
    # rather than a parameter — this function only works when called right
    # after fixture generation; verify before reusing it elsewhere.
    tourney_name = tourney.get_name()
    cgicommon.writeln("<form method=\"POST\" action=\"/cgi-bin/fixturegen.py\">");
    cgicommon.writeln("<div class=\"fixtureacceptbox\">")
    cgicommon.writeln("<p>I've generated the following fixtures. They won't be saved until you click the <em>Accept Fixtures</em> button.</p>");
    cgicommon.writeln("<input type=\"submit\" name=\"accept\" class=\"bigbutton\" value=\"Accept Fixtures\" />");
    cgicommon.writeln("<a href=\"/cgi-bin/fixturegen.py?tourney=%s&generator=%s\" class=\"fixturecancellink\">Discard and return to fixture generator</a>" % (
        urllib.parse.quote_plus(tourney_name),
        urllib.parse.quote_plus(generator_name)
    ))
    cgicommon.writeln("</div>")
    num_divisions = tourney.get_num_divisions()
    # One section per generated round, one table per division within it.
    for r in generated_groups.get_rounds():
        round_no = r.get_round_no()
        cgicommon.writeln("<h2>%s</h2>" % cgicommon.escape(r.get_round_name()))
        for div_index in range(num_divisions):
            round_fixtures = [x for x in fixtures if x.round_no == round_no and x.division == div_index];
            if len(round_fixtures) == 0:
                continue
            # Index the division standings by player name for the summaries.
            standings = tourney.get_standings(division=div_index)
            standings_dict = dict()
            for s in standings:
                standings_dict[s.name] = s
            if num_divisions > 1:
                cgicommon.writeln("<h3>%s</h3>" % (cgicommon.escape(tourney.get_division_name(div_index))))
            cgicommon.writeln("<table class=\"fixturetable\">");
            cgicommon.writeln("<tr><th>Table</th><th>Type</th><th></th><th></th><th></th><th></th></tr>");
            fixnum = 0;
            last_table_no = None;
            for f in round_fixtures:
                # The table-number cell spans all games on the same table.
                if last_table_no is None or last_table_no != f.table_no:
                    num_games_on_table = len([x for x in round_fixtures if x.table_no == f.table_no]);
                    first_game_on_table = True;
                    cgicommon.writeln("<tr class=\"firstgameintable\">");
                else:
                    first_game_on_table = False;
                    cgicommon.writeln("<tr>");
                if first_game_on_table:
                    cgicommon.writeln("<td class=\"tableno\" rowspan=\"%d\">%d</td>" % (num_games_on_table, f.table_no));
                cgicommon.writeln("<td class=\"gametype\">%s</td>" % cgicommon.escape(f.game_type));
                # Each player cell: link plus "(position, N wins[, N draws])",
                # or "?" if the player isn't in the standings.
                player_td_html = []
                for player in [f.p1, f.p2]:
                    name = player.name
                    standings_row = standings_dict.get(name, None)
                    if standings_row is None:
                        player_td_html.append(cgicommon.player_to_link(player, tourney_name, emboldenise=True, disable_tab_order=False, open_in_new_window=True) + " ?")
                    else:
                        player_td_html.append(cgicommon.player_to_link(player, tourney_name, emboldenise=True, disable_tab_order=False, open_in_new_window=True) +
                                " (%s, %d win%s%s)" % (
                                    cgicommon.ordinal_number(standings_row.position),
                                    standings_row.wins,
                                    "" if standings_row.wins == 1 else "s",
                                    "" if standings_row.draws == 0 else ", %d draw%s" % (standings_row.draws, "" if standings_row.draws == 1 else "s")))
                cgicommon.writeln("<td class=\"gameplayer1\">%s</td><td class=\"gamescore\">v</td><td class=\"gameplayer2\">%s</td>" % tuple(player_td_html));
                # Flag repeat pairings (these players have already met).
                num_repeats = tourney.count_games_between(f.p1, f.p2)
                if num_repeats:
                    cgicommon.writeln("<td class=\"gamerepeats\">%s repeat</td>" % (cgicommon.ordinal_number(num_repeats)))
                else:
                    cgicommon.writeln("<td class=\"gameremarks\"></td>")
                cgicommon.writeln("</tr>");
                fixnum += 1;
                last_table_no = f.table_no;
            cgicommon.writeln("</table>");
    cgicommon.writeln("<input type=\"hidden\" name=\"tourney\" value=\"%s\" />" % cgicommon.escape(tourney_name, True));
    cgicommon.writeln("<input type=\"hidden\" name=\"generator\" value=\"%s\" />" % cgicommon.escape(generator_name, True));
    # Remember all the _div* settings, or check_ready might
    # object when we do try to submit the fixtures
    for name in fixgen_settings:
        if name[0:4] == "_div":
            cgicommon.writeln("<input type=\"hidden\" name=\"%s\" value=\"%s\" />" % (cgicommon.escape(name, True), cgicommon.escape(fixgen_settings[name], True)))
    # Serialise the complete plan (games + round names) so the accept step
    # can reconstruct exactly these fixtures.
    fixture_plan = {
            "fixtures" : [
                x.make_dict() for x in fixtures
            ],
            "rounds" : [
                {
                    "round" : x.get_round_no(),
                    "name" : x.get_round_name()
                } for x in generated_groups.get_rounds()
            ]
    }
    json_fixture_plan = json.dumps(fixture_plan);
    cgicommon.writeln("<input type=\"hidden\" name=\"jsonfixtureplan\" value=\"%s\" />" % cgicommon.escape(json_fixture_plan, True));
    cgicommon.writeln("<div class=\"fixtureacceptbox\">")
    cgicommon.writeln("<input type=\"submit\" name=\"accept\" value=\"Accept Fixtures\" class=\"bigbutton\" />");
    cgicommon.writeln("<a href=\"/cgi-bin/fixturegen.py?tourney=%s&generator=%s\" class=\"fixturecancellink\">Discard and return to fixture generator</a>" % (
        urllib.parse.quote_plus(tourney_name),
        urllib.parse.quote_plus(generator_name)
    ))
    cgicommon.writeln("</div>")
    cgicommon.writeln("</form>");
def show_fixgen_table(tourney_name, module_list, title, description):
    """Render a heading, an optional description paragraph, and a table
    with one row per fixture generator module: an icon link, a name link
    and the generator's description."""
    cgicommon.writeln("<h2>%s</h2>" % (cgicommon.escape(title)))
    if description:
        cgicommon.writeln("<p>")
        cgicommon.writeln(description)
        cgicommon.writeln("</p>")
    cgicommon.writeln("<table class=\"fixgentable\">")
    quoted_tourney = urllib.parse.quote_plus(tourney_name)
    for mod_name in module_list:
        # Import the generator module to read its display name/description.
        mod = importlib.import_module(mod_name)
        link = "/cgi-bin/fixturegen.py?generator=%s&tourney=%s" % (urllib.parse.quote_plus(mod_name), quoted_tourney)
        cgicommon.writeln("<tr>")
        # Icon cell.
        cgicommon.writeln("<td class=\"fixgentable fixgen\">")
        cgicommon.writeln("<a href=\"%s\">" % (link))
        cgicommon.writeln("<img src=\"/images/fixgen/%s.png\" alt=\"%s\" />" % (cgicommon.escape(mod_name), cgicommon.escape(mod.name)))
        cgicommon.writeln("</a>")
        cgicommon.writeln("</td>")
        # Name cell.
        cgicommon.writeln("<td class=\"fixgentable fixgen\">")
        cgicommon.writeln("<a href=\"%s\">%s</a>" % (link, cgicommon.escape(mod.name)))
        cgicommon.writeln("</td>")
        # Description cell.
        cgicommon.writeln("<td class=\"fixgentable fixgendescription\">%s</td>" % (cgicommon.escape(mod.description)))
        cgicommon.writeln("</tr>")
    cgicommon.writeln("</table>")
cgicommon.print_html_head("Generate Fixtures: " + str(tourney_name));

cgicommon.writeln("<body>");

# Only accept requests originating from this machine.
cgicommon.assert_client_from_localhost()

# A tourney name is mandatory; bail out with a minimal error page if absent.
if tourney_name is None:
    cgicommon.show_error_text("No tourney specified.");
    cgicommon.writeln("<p><a href=\"/cgi-bin/home.py\">Home</a></p>");
    cgicommon.writeln("</body>");
    cgicommon.writeln("</html>");
    sys.exit(0);

# State accumulated while processing the request; rendered further down.
exception_content = None
exceptions_to_show = []
warning_content = None
show_fixgen_list = False
fixgen_ask_divisions = False
show_fixgen_settings_form = None
new_fixtures_to_accept = None
success_content = None
show_link_to_round = None
tourney = None
module_list = []
fixgen_settings = None
check_ready_failed = False
no_players = False
try:
    tourney = countdowntourney.tourney_open(tourney_name, cgicommon.dbdir);
    generator_name = form.getfirst("generator");
    # Load the settings this generator used last time, if a generator has
    # been chosen yet.
    if generator_name:
        fixgen_settings = FixtureGeneratorSettings(tourney.get_fixgen_settings(generator_name));
    else:
        fixgen_settings = None
    module_list = generators.get_fixture_generator_list();
    num_divisions = tourney.get_num_divisions()
    if len(tourney.get_active_players()) == 0:
        exception_content = "You can't generate fixtures because the tournament doesn't have any active players."
        no_players = True
    elif generator_name is None:
        # No generator picked yet: show the generator list, with a warning
        # if the accessible-table requirements can't be satisfied.
        num_players_requiring_accessible_table = tourney.get_num_active_players_requiring_accessible_table()
        num_accessible_tables = tourney.get_num_accessible_tables()
        if num_accessible_tables is not None and num_players_requiring_accessible_table > num_accessible_tables:
            warning_content = "You have %d active player%s who %s, but %s. This means the fixture generator cannot ensure %s. You can define accessible tables in <a href=\"/cgi-bin/tourneysetup.py?tourney=%s\">General Setup</a>." % (
                    num_players_requiring_accessible_table,
                    "s" if num_players_requiring_accessible_table != 1 else "",
                    "requires an accessible table" if num_players_requiring_accessible_table == 1 else "require accessible tables",
                    "you haven't defined any accessible tables" if num_accessible_tables == 0 else ("you have only defined %d accessible table%s" % (num_accessible_tables, "" if num_accessible_tables == 1 else "s")),
                    "this player is given an accessible table" if num_players_requiring_accessible_table == 1 else "these players are given accessible tables",
                    urllib.parse.quote_plus(tourney.get_name())
            )
        show_fixgen_list = True
    elif generator_name not in module_list:
        exception_content = "No such generator %s." % (cgicommon.escape(generator_name))
    elif num_divisions > 1 and not form.getfirst("_divsubmit") and "accept" not in form:
        # Multi-division tourney and the user hasn't said which divisions
        # to generate for yet: ask first.
        fixgen_ask_divisions = True
    else:
        fixturegen = importlib.import_module(generator_name);
        # On first display, start from the previously-stored settings;
        # on submit, take all settings from the form.
        if "submit" not in form:
            fixgen_settings = FixtureGeneratorSettings(tourney.get_fixgen_settings(generator_name));
        else:
            fixgen_settings = FixtureGeneratorSettings()
            for key in form:
                fixgen_settings[key] = form.getfirst(key);
        if fixgen_settings.get("_divsubmit", None) is None:
            # The division-selection step was skipped; default to every
            # division, starting at its next free round number.
            fixgen_settings["_divsubmit"] = "Next"
            for div in range(num_divisions):
                next_free_round_number = tourney.get_next_free_round_number_for_division(div)
                fixgen_settings["_div%d" % (div)] = "1"
                fixgen_settings["_div%dround" % (div)] = str(next_free_round_number)
        # Map division number -> starting round for each selected division.
        div_rounds = dict()
        for div in range(num_divisions):
            if int_or_none(fixgen_settings.get("_div%d" % (div), "0")):
                start_round = int_or_none(fixgen_settings.get("_div%dround" % (div), None))
                if start_round is not None and start_round > 0:
                    div_rounds[div] = start_round
        if len(div_rounds) == 0:
            raise countdowntourney.FixtureGeneratorException("No divisions selected, so can't generate fixtures.")
        (ready, excuse) = fixturegen.check_ready(tourney, div_rounds);
        if ready:
            settings_form = fixturegen.get_user_form(tourney, fixgen_settings, div_rounds);
            if settings_form is None and "accept" not in form:
                # We don't require any more information from the user, so
                # generate the fixtures.
                generated_groups = fixturegen.generate(tourney, fixgen_settings, div_rounds);
                # Persist the settings used to generate these fixtures,
                # in case the fixture generator wants to refer to them
                # when we call it later on
                tourney.store_fixgen_settings(generator_name, fixgen_settings)
                new_fixtures_to_accept = tourney.make_fixtures_from_groups(generated_groups)
            elif "accept" in form:
                # Fixtures have been accepted - write them to the db
                json_fixture_plan = form.getfirst("jsonfixtureplan");
                if not json_fixture_plan:
                    raise countdowntourney.TourneyException("Accept fixtures form doesn't include the jsonfixtureplan field. This is probably a bug unless you built the HTTP request yourself rather than using the form. If you did that then you're being a smartarse.");
                fixture_plan = json.loads(json_fixture_plan);
                dict_fixtures = fixture_plan.get("fixtures", []);
                dict_rounds = fixture_plan.get("rounds", None);
                fixtures = [];
                earliest_round_no = None;
                try:
                    # Rebuild Game objects from the JSON fixture plan;
                    # each slot holds either a named player or a "pending"
                    # placeholder.
                    for f in dict_fixtures:
                        round_no = int(f["round_no"])
                        table_no = int(f["table_no"]);
                        round_seq = int(f["round_seq"]);
                        division = int(f["division"])
                        game_type = f["game_type"];
                        name1 = f.get("p1");
                        if name1:
                            p1 = tourney.get_player_from_name(name1);
                        else:
                            p1 = countdowntourney.PlayerPending.from_dict(f["p1pending"]);
                        name2 = f.get("p2");
                        if name2:
                            p2 = tourney.get_player_from_name(name2);
                        else:
                            p2 = countdowntourney.PlayerPending.from_dict(f["p2pending"]);
                        if earliest_round_no is None or earliest_round_no > round_no:
                            earliest_round_no = round_no;
                        f = countdowntourney.Game(round_no, round_seq, table_no,
                                division, game_type, p1, p2);
                        fixtures.append(f);
                except countdowntourney.TourneyException as e:
                    raise e
                except ValueError:
                    raise countdowntourney.TourneyException("Fixtures contained garbage. Not much else I can do now other than sit down and refuse to work.")
                if fixtures:
                    tourney.merge_games(fixtures);
                    success_content = "%d fixtures added successfully." % (len(fixtures))
                    show_link_to_round = earliest_round_no
                    # Apply any round names carried in the plan.
                    if dict_rounds:
                        for r in dict_rounds:
                            try:
                                round_no = int(r["round"]);
                                round_name = r.get("name", "");
                                tourney.name_round(round_no, round_name);
                            except countdowntourney.TourneyException as e:
                                exceptions_to_show.append(e)
            else:
                # The generator needs more information: show its form,
                # carrying the context and current settings through as
                # hidden inputs.
                settings_form.add_element(htmlform.HTMLFormHiddenInput("tourney", tourney_name));
                settings_form.add_element(htmlform.HTMLFormHiddenInput("generator", generator_name));
                for name in fixgen_settings:
                    if name[0:6] != "submit" and settings_form.get_value(name) is None:
                        settings_form.add_element(htmlform.HTMLFormHiddenInput(name, fixgen_settings.get(name, "")));
                if fixgen_settings.get("submit", None) and fixturegen.save_form_on_submit():
                    tourney.store_fixgen_settings(generator_name, fixgen_settings)
                show_fixgen_settings_form = settings_form
        else:
            # Can't use this fixture generator at the moment, and it's not
            # because the user needs to provide us information - it's
            # that there aren't the right number of players, or the
            # previous round hasn't finished, or something like that.
            check_ready_failed = True
            exception_content = "Couldn't generate fixtures: %s" % (excuse)
except countdowntourney.TourneyException as e:
    exceptions_to_show.append(e)
# We haven't written any body HTML yet, because if the user has just accepted
# a list of fixtures, we want to write those to the database before we display
# the sidebar, so that the sidebar contains a link to the new round.
if tourney:
    cgicommon.show_sidebar(tourney);

cgicommon.writeln("<div class=\"mainpane\">");

# First, write a heading, which is the fixture generator name if we know it,
# or the words "Fixture Generator" if that hasn't been selected yet.
if generator_name:
    fixturegen = importlib.import_module(generator_name);
else:
    fixturegen = None

if fixturegen:
    cgicommon.writeln("<h1>%s</h1>" % (fixturegen.name))
else:
    cgicommon.writeln("<h1>Generate Fixtures</h1>")

# If exception_content is set, show the exception box.
if exception_content:
    cgicommon.show_error_text(exception_content)

# Also show an exception box for each exception in the list exceptions_to_show.
if exceptions_to_show:
    for e in exceptions_to_show:
        cgicommon.show_tourney_exception(e);

# After an error, offer the most useful "go back" link for the situation.
if exception_content or exceptions_to_show:
    cgicommon.writeln("<p>")
    if generator_name and not check_ready_failed:
        cgicommon.writeln("<a href=\"/cgi-bin/fixturegen.py?tourney=%s&generator=%s\">Sigh...</a>" % (urllib.parse.quote_plus(tourney_name), urllib.parse.quote_plus(generator_name)))
    elif no_players:
        cgicommon.writeln("<a href=\"/cgi-bin/tourneysetup.py?tourney=%s\">Set the player list at the tourney setup page</a>" % (urllib.parse.quote_plus(tourney_name)))
    else:
        cgicommon.writeln("<a href=\"/cgi-bin/fixturegen.py?tourney=%s\">Sigh...</a>" % (urllib.parse.quote_plus(tourney_name)))
    cgicommon.writeln("</p>")

# Show any warning...
if warning_content:
    cgicommon.show_warning_box(warning_content)

# And a success box, if we've just saved the new fixtures to the db.
if success_content:
    cgicommon.show_success_box(success_content)

# show_fixgen_list is set when the user hasn't yet picked a fixture generator.
if show_fixgen_list:
    num_divisions = tourney.get_num_divisions()
    cgicommon.writeln("<p>")
    cgicommon.writeln("When you want to generate the next round's fixtures, choose a fixture generator from the list below.")
    if num_divisions > 1:
        cgicommon.writeln("If you want to generate fixtures for only one division or a subset of divisions, you'll be asked which divisions to generate fixtures for on the next screen.")
    cgicommon.writeln("</p>");
    # Suggest the generators appropriate to the current stage: round 1 if
    # no rounds exist yet, otherwise the later-round generators.
    rounds = tourney.get_rounds()
    if rounds:
        suggested_fixgens = fixgens_not_r1
        suggested_title = "Suggested fixture generators"
        suggested_description = "Fixtures for the second round onwards are usually generated by one of these fixture generators."
    else:
        suggested_fixgens = fixgens_r1
        suggested_title = "Suggested fixture generators"
        suggested_description = "Fixtures for the first round are usually generated by one of these fixture generators."
    # Everything not suggested goes into the "other" table below.
    remaining_fixgens = []
    for fixgen_name in module_list:
        if fixgen_name not in suggested_fixgens:
            remaining_fixgens.append(fixgen_name)
    show_fixgen_table(tourney_name, suggested_fixgens, suggested_title, suggested_description)
    show_fixgen_table(tourney_name, remaining_fixgens, "Other fixture generators", "")
# After picking a fixture generator, the user is asked to select which
# divisions they want to generate fixtures for, if there's more than one
# division.
if fixgen_ask_divisions:
elements = []
elements.append(htmlform.HTMLFragment("<p>Which divisions do you want to generate fixtures for, starting from which rounds? By default, a division's fixtures will go in the round after the latest round which has games for that division.</p>"))
num_divisions = tourney.get_num_divisions()
elements.append(htmlform.HTMLFragment("<table class=\"fixdivselector\">"))
elements.append(htmlform.HTMLFragment("<tr><th>Division</th><th>Round number</th></tr>"))
for div in range(num_divisions):
elements.append(htmlform.HTMLFragment("<tr><td>"))
elements.append(htmlform.HTMLFormCheckBox("_div%d" % (div), tourney.get_division_name(div), True))
next_free_round_number = tourney.get_next_free_round_number_for_division(div)
elements.append(htmlform.HTMLFragment("</td><td>"))
elements.append(htmlform.HTMLFormTextInput("", "_div%dround" % (div), str(next_free_round_number), other_attrs={"class": "fixdivroundsel"}))
elements.append(htmlform.HTMLFragment("</td></tr>"))
elements.append(htmlform.HTMLFragment("</table>"))
elements.append(htmlform.HTMLFormSubmitButton("_divsubmit", "Next", other_attrs={"class" : "bigbutton"}))
settings_form = htmlform.HTMLForm("POST", "/cgi-bin/fixturegen.py?tourney=%s&generator=%s" % (urllib.parse.quote_plus(tourney.get_name()), urllib.parse.quote_plus(generator_name)), elements)
cgicommon.writeln(settings_form.html());
# If the user has selected which divisions they want to generate fixtures for,
# or if there is only one division, we now show the settings form for that
# fixture generator. What it actually shows depends on which fixture generator
# it is, and how any previous questions served up in this step were answered.
elif show_fixgen_settings_form:
cgicommon.writeln(show_fixgen_settings_form.html());
# If the user has generated a set of fixtures, they will be in
# new_fixtures_to_accept. Display them as a table with a button inviting the
# user to accept them.
elif new_fixtures_to_accept:
show_fixtures_to_accept(tourney, generator_name, new_fixtures_to_accept, fixgen_settings)
# If the user has just accepted the table of fixtures, we will have displayed
# a "success" info box above, and we also want to show a link to the round
# we just generated, or the earliest such round if we generated for more than
# one round.
if show_link_to_round is not None:
cgicommon.writeln("<p><a href=\"/cgi-bin/games.py?tourney=%s&round=%d\">Go to result entry page</a></p>" % (urllib.parse.quote_plus(tourney_name), show_link_to_round));
# end mainpane div
cgicommon.writeln("</div>");
cgicommon.writeln("</body>");
cgicommon.writeln("</html>");
| 48.560928
| 268
| 0.64849
|
4a0f5a09aa696313efae63513d8f0f4cb67c3064
| 256
|
py
|
Python
|
newsfeed/users/serializers.py
|
mccarrion/newsfeed-django
|
a69a02052c120132eb50c8ecb93ca15c6b2fc081
|
[
"MIT"
] | 2
|
2018-12-20T01:12:38.000Z
|
2021-04-10T00:31:08.000Z
|
newsfeed/users/serializers.py
|
mccarrion/newsfeed-django
|
a69a02052c120132eb50c8ecb93ca15c6b2fc081
|
[
"MIT"
] | null | null | null |
newsfeed/users/serializers.py
|
mccarrion/newsfeed-django
|
a69a02052c120132eb50c8ecb93ca15c6b2fc081
|
[
"MIT"
] | 1
|
2020-11-25T19:38:20.000Z
|
2020-11-25T19:38:20.000Z
|
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """
    Serializer for the User model.

    Maps User instances to and from their 'username' and 'email'
    fields; no other model fields are exposed through this serializer.
    """
    class Meta:
        # Serialize the project's User model (imported from .models).
        model = User
        fields = ('username', 'email')
| 19.692308
| 50
| 0.664063
|
4a0f5a3f1cc3461dab30b420a12f30ae20d45ada
| 1,259
|
py
|
Python
|
cloudbutton/engine/storage/backends/aws_s3/config.py
|
Dahk/cloudbutton
|
61d77123d15d9c2da99e8989220c6271ca737245
|
[
"Apache-2.0"
] | null | null | null |
cloudbutton/engine/storage/backends/aws_s3/config.py
|
Dahk/cloudbutton
|
61d77123d15d9c2da99e8989220c6271ca737245
|
[
"Apache-2.0"
] | null | null | null |
cloudbutton/engine/storage/backends/aws_s3/config.py
|
Dahk/cloudbutton
|
61d77123d15d9c2da99e8989220c6271ca737245
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def load_config(config_data=None):
    """Validate and normalize the AWS S3 storage backend configuration.

    Checks that the 'aws' and 'aws_s3' sections exist, that the AWS
    credentials are present under 'aws', copies those credentials into
    the 'aws_s3' section (so the backend reads a single section), and
    verifies that an S3 endpoint is configured.

    :param config_data: dict with the full configuration; mutated in place.
    :raises Exception: if a mandatory section or key is missing.
    """
    # Both sections are mandatory, so fail if *either* one is absent.
    # (The original used 'and', which let a single missing section slip
    # through and surface later as an unhelpful KeyError.)
    if 'aws' not in config_data or 'aws_s3' not in config_data:
        raise Exception("'aws' and 'aws_s3' sections are mandatory in the configuration")

    required_parameters_0 = ('access_key_id', 'secret_access_key')
    if not set(required_parameters_0) <= set(config_data['aws']):
        raise Exception("'access_key_id' and 'secret_access_key' are mandatory under 'aws' section")

    # Put credential keys to 'aws_s3' dict entry
    config_data['aws_s3'] = {**config_data['aws_s3'], **config_data['aws']}

    # Message fixed to name the actual section key ('aws_s3', not 's3').
    if 'endpoint' not in config_data['aws_s3']:
        raise Exception("'endpoint' is mandatory under 'aws_s3' section")
| 41.966667
| 100
| 0.72359
|
4a0f5a61665076855b383e0e8149d3312a282b99
| 4,317
|
py
|
Python
|
enamlx/widgets/abstract_item.py
|
frmdstryr/enamlx
|
798eefe146aac15e559315fe5ff42dd813656cea
|
[
"MIT"
] | 27
|
2015-08-25T14:37:36.000Z
|
2022-03-14T20:33:41.000Z
|
enamlx/widgets/abstract_item.py
|
frmdstryr/enamlx
|
798eefe146aac15e559315fe5ff42dd813656cea
|
[
"MIT"
] | 27
|
2015-08-28T16:57:31.000Z
|
2021-11-10T07:43:15.000Z
|
enamlx/widgets/abstract_item.py
|
frmdstryr/enamlx
|
798eefe146aac15e559315fe5ff42dd813656cea
|
[
"MIT"
] | 10
|
2016-10-03T16:52:41.000Z
|
2021-07-29T22:25:35.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file COPYING.txt, distributed with this software.
Created on Aug 24, 2015
"""
from atom.api import (
Int,
Enum,
Bool,
Str,
Typed,
Coerced,
Event,
Property,
ForwardInstance,
observe,
)
from enaml.icon import Icon
from enaml.core.declarative import d_
from enaml.widgets.control import Control, ProxyControl
from enaml.layout.geometry import Size
class ProxyAbstractWidgetItemGroup(ProxyControl):
    """Abstract toolkit proxy for an :class:`AbstractWidgetItemGroup`.

    The setter below is a no-op stub, intended to be implemented by a
    concrete toolkit backend to push declaration state to the widget.
    """
    #: Reference to the declaration
    declaration = ForwardInstance(lambda: AbstractWidgetItemGroup)

    def set_selectable(self, selectable):
        """Update the selectable state on the toolkit widget (stub)."""
        pass
class ProxyAbstractWidgetItem(ProxyControl):
    """Abstract toolkit proxy for an :class:`AbstractWidgetItem`.

    Each ``set_*`` method below is a no-op stub, intended to be
    implemented by a concrete toolkit backend to push the corresponding
    declaration member to the native widget.
    """
    #: Reference to the declaration
    declaration = ForwardInstance(lambda: AbstractWidgetItem)

    def set_row(self, row):
        pass

    def set_column(self, column):
        pass

    def set_text(self, text):
        pass

    def set_text_alignment(self, text_alignment):
        pass

    def set_icon(self, icon):
        pass

    def set_icon_size(self, size):
        pass

    def set_editable(self, editable):
        pass

    def set_checkable(self, checkable):
        pass
class AbstractWidgetItemGroup(Control):
    """Declarative base for a group of widget items.

    Exposes the user-interaction events shared by rows, columns and
    items, and maintains a cached list (``_items``) of the group's
    ``AbstractWidgetItem`` children, reset whenever a child is added
    or removed.
    """

    #: Triggered when clicked
    clicked = d_(Event(), writable=False)

    #: Triggered when double clicked
    double_clicked = d_(Event(), writable=False)

    #: Triggered when the row, column, or item is entered
    entered = d_(Event(), writable=False)

    #: Triggered when the row, column, or item is pressed
    pressed = d_(Event(), writable=False)

    #: Triggered when the row, column, or item's selection changes
    selection_changed = d_(Event(bool), writable=False)

    def _get_items(self):
        """Return the AbstractWidgetItem children of this group."""
        return [c for c in self.children if isinstance(c, AbstractWidgetItem)]

    #: Internal item reference (cached; see child_added/child_removed)
    _items = Property(lambda self: self._get_items(), cached=True)

    def child_added(self, child):
        """Reset the item cache when a child is added"""
        super(AbstractWidgetItemGroup, self).child_added(child)
        self.get_member("_items").reset(self)

    def child_removed(self, child):
        """Reset the item cache when a child is removed"""
        super(AbstractWidgetItemGroup, self).child_removed(child)
        self.get_member("_items").reset(self)
class AbstractWidgetItem(AbstractWidgetItemGroup):
    """Item to be shared between table views and tree views.

    Declares the position (row/column), display (text, alignment, icon)
    and state (selected, checked, editable) members of a single cell,
    and forwards changes of those members to the toolkit proxy.
    """

    #: Model index or row within the view
    row = d_(Int(), writable=False)

    #: Column within the view
    column = d_(Int(), writable=False)

    #: Text to display within the cell
    text = d_(Str())

    #: Text alignment within the cell: all (horizontal, vertical) pairs
    text_alignment = d_(
        Enum(
            *[
                (h, v)
                for h in ("left", "right", "center", "justify")
                for v in ("center", "top", "bottom")
            ]
        )
    )

    #: Icon to display in the cell
    icon = d_(Typed(Icon))

    #: The size to use for the icon. The default is an invalid size
    #: and indicates that an appropriate default should be used.
    icon_size = d_(Coerced(Size, (-1, -1)))

    #: Whether the item or group can be selected
    selectable = d_(Bool(True))

    #: Selection state of the item or group
    selected = d_(Bool())

    #: Whether the item or group can be checked
    checkable = d_(Bool())

    #: Checked state of the item or group
    checked = d_(Bool())

    #: Whether the item or group can be edited
    editable = d_(Bool())

    #: Triggered when the item's contents change
    changed = d_(Event(), writable=False)

    #: Triggered when the checkbox state changes
    toggled = d_(Event(bool), writable=False)

    @observe(
        "row",
        "column",
        "text",
        "text_alignment",
        "icon",
        "icon_size",
        "selectable",
        "selected",
        "checkable",
        "checked",
        "editable",
    )
    def _update_proxy(self, change):
        """An observer which sends state change to the proxy.

        Position members ("row"/"column") use the default proxy update
        mechanism; every other observed member is forwarded to the
        proxy's ``data_changed`` handler.
        """
        if change["name"] in ["row", "column"]:
            super(AbstractWidgetItem, self)._update_proxy(change)
        else:
            self.proxy.data_changed(change)
| 25.850299
| 78
| 0.634932
|
4a0f5bb37b7c4a45fe2cf7ecf25cba84e00bef43
| 4,206
|
py
|
Python
|
data/RumEval19/read_RumEval2019.py
|
wmkouw/seq-rumver
|
2b46a141584a0a26c2e4328d42e3dee548bc04cc
|
[
"MIT"
] | 3
|
2020-05-27T21:26:12.000Z
|
2020-12-23T17:05:04.000Z
|
data/RumEval19/read_RumEval2019.py
|
wmkouw/seq-rumver
|
2b46a141584a0a26c2e4328d42e3dee548bc04cc
|
[
"MIT"
] | null | null | null |
data/RumEval19/read_RumEval2019.py
|
wmkouw/seq-rumver
|
2b46a141584a0a26c2e4328d42e3dee548bc04cc
|
[
"MIT"
] | 1
|
2020-10-09T08:43:15.000Z
|
2020-10-09T08:43:15.000Z
|
"""
Read training data from RumourEval 2019.
RumourEval is a shared task in rumour stance classification. More info at:
https://competitions.codalab.org/competitions/19938
Author: W.M. Kouw
Date: 22-10-2018
"""
import os
import numpy as np
import pandas as pd
import pickle as pc
import dateutil.parser
from glob import glob
import json
import codecs
from nltk.tokenize.api import StringTokenizer
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
# Whether to compute an averaged word2vec embedding for each tweet
embed = True

# Set font size
fS = 20

# Change to twitter data dir
os.chdir('/home/wmkouw/Dropbox/Projects/ucopenhagen/seq-rumour/data/RumEval2019')

# Load stance labels for the train and dev splits and merge them into a
# single tweet-id -> label mapping.
with open('train-key.json') as f:
    train_key = json.load(f)
with open('dev-key.json') as f:
    dev_key = json.load(f)
label_keys = {**train_key['subtaskaenglish'], **dev_key['subtaskaenglish']}

# Get folder paths
twitter_path = 'twitter-english/'
rumours = os.listdir(twitter_path)

# Per-tweet accumulator lists (source tweets and replies alike)
rumour_id = []
tweet_id = []
thread_ix = []
reply_ix = []
texts = []
created_date = []
created_datetime = []
labels = []

# Loop over rumours. (The original enumerated this loop into 'r', which
# was never used and was shadowed by the reply loop below.)
for rumour in rumours:

    # Check threads for current rumour
    threads = os.listdir(twitter_path + rumour)

    # Loop over threads
    for t, thread in enumerate(threads):

        # Source tweet of the thread (recorded with reply index 0)
        with open(twitter_path + rumour + '/' + thread + '/source-tweet/' + thread + '.json') as f:
            tweet = json.load(f)

        rumour_id.append(rumour)
        tweet_id.append(thread)
        thread_ix.append(t)
        reply_ix.append(0)
        texts.append(tweet['text'])
        created_date.append(dateutil.parser.parse(tweet['created_at']).date())
        created_datetime.append(dateutil.parser.parse(tweet['created_at']))
        labels.append(label_keys[thread])

        # Replies to the source tweet (reply indices 1..n). Loop variable
        # renamed from 'r' to avoid shadowing the outer scope.
        replies = os.listdir(twitter_path + rumour + '/' + thread + '/replies/')
        for k, reply in enumerate(replies):

            with open(twitter_path + rumour + '/' + thread + '/replies/' + reply) as f:
                tweet = json.load(f)

            rumour_id.append(rumour)
            tweet_id.append(reply[:-5])  # strip the '.json' extension
            thread_ix.append(t)
            reply_ix.append(k + 1)
            texts.append(tweet['text'])
            created_date.append(dateutil.parser.parse(tweet['created_at']).date())
            created_datetime.append(dateutil.parser.parse(tweet['created_at']))
            labels.append(label_keys[reply[:-5]])

# Convert to dataframe
data = pd.DataFrame({'id': tweet_id,
                     'rumour': rumour_id,
                     'thread_ix': thread_ix,
                     'reply_ix': reply_ix,
                     'text': texts,
                     'date': created_date,
                     'datetime': created_datetime,
                     'label': labels})

# Write frame to csv (backtick separator kept for compatibility with
# whatever consumes this file downstream).
data.to_csv('./RumEval19.csv', sep='`', encoding='utf-8')

if embed:

    # Change directory to word2vec model
    os.chdir('/home/wmkouw/Dropbox/Projects/ucopenhagen/seq-rumour/data/word2vec-twitter')

    #!! change 'xrange' in word2vecReader to 'range'
    # repl.py is expected to define 'model' (the word2vec model) here.
    exec(open("repl.py").read())

    # Start tokenizer
    tt = TweetTokenizer()

    # Check number of tweets
    num_tweets = len(data)

    # Average the 400-dim word vectors of each tweet's known words
    wemb = np.zeros((num_tweets, 400))
    for n in range(num_tweets):

        # Tokenize tweet
        aa = tt.tokenize(data['text'][n])

        # Loop over words
        ct = 0
        for a in aa:
            try:
                # Extract embedding of word and add
                wemb[n, :] += model.__getitem__(a)
                ct += 1
            except Exception:
                # Out-of-vocabulary word (the original used a bare
                # 'except:', which also hid genuine errors); print a
                # progress dot and skip it.
                print('.', end='')

        # Average embeddings; guard against tweets with no known words
        # (the original divided by zero here, yielding NaN rows and a
        # runtime warning; such rows are now left as zeros).
        if ct > 0:
            wemb[n, :] /= ct

    # Switch back to data dir
    os.chdir('/home/wmkouw/Dropbox/Projects/ucopenhagen/seq-rumour/data/RumEval2019')

    # Write embbeding array separately
    np.save('rumeval19.npy', wemb)

    # Add word embeddings to dataframe
    data = data.assign(embedding=wemb.tolist())

    # write frame to csv
    data.to_csv('./RumEval19_emb.csv', sep='\t', encoding='utf-8', index=False)
| 27.671053
| 99
| 0.614598
|
4a0f5bc8ee0274d70aca20380197b89505c1317d
| 1,047
|
py
|
Python
|
src/Ch08/P3_regexSearch.py
|
JoseALermaIII/automatepracticeprojects
|
0e8ae410a7347953d1686d9464f18cc5a6de65e6
|
[
"MIT"
] | 2
|
2017-04-20T02:57:19.000Z
|
2018-10-12T20:15:47.000Z
|
src/Ch08/P3_regexSearch.py
|
JoseALermaIII/automatepracticeprojects
|
0e8ae410a7347953d1686d9464f18cc5a6de65e6
|
[
"MIT"
] | 8
|
2021-03-18T21:50:16.000Z
|
2022-03-11T23:38:01.000Z
|
src/Ch08/P3_regexSearch.py
|
JoseALermaIII/automatepracticeprojects
|
0e8ae410a7347953d1686d9464f18cc5a6de65e6
|
[
"MIT"
] | 3
|
2018-08-30T20:30:50.000Z
|
2022-01-18T13:40:51.000Z
|
"""Regex search
Write a program that opens all .txt files in a folder and searches for any line
that matches a user-supplied regular expression.
The results should be printed to the screen.
"""
def find_matches(lines, search_regex):
    """Return every match of *search_regex* found in *lines*, in order.

    :param lines: iterable of strings to scan.
    :param search_regex: a compiled regular expression object.
    :return: list of matches (strings, or tuples when the pattern has
        multiple groups, as returned by ``findall``).
    """
    matches = []
    for line in lines:
        # findall() returns a (possibly empty) list — never None — so
        # the original 'is not None' check was always true and is gone.
        matches.extend(search_regex.findall(line))
    return matches


def main():
    """Search every .txt file in the current directory for a
    user-supplied regular expression and print all matches."""
    import os, re

    # Get list of all .txt files in the current working directory
    text_files = [file for file in os.listdir("./") if file.endswith(".txt")]

    # Get regular expression
    regex = input("Enter regular expression to search for: ")
    search_regex = re.compile(regex)

    # Search each file; 'with' guarantees the handle is closed even if
    # reading fails (the original left files open on error).
    for file in text_files:
        with open(file) as input_file:
            input_content = input_file.readlines()

        # Print result
        for match in find_matches(input_content, search_regex):
            print(match)


if __name__ == '__main__':
    main()
| 24.928571
| 79
| 0.619866
|
4a0f5c071317e301cd27f3e3b28328221aa933dd
| 2,061
|
py
|
Python
|
src/scripts/data_processing/process_raw_data.py
|
arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium
|
13db3cb9d0b2f25181ccf4b1316e12425abfc276
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/data_processing/process_raw_data.py
|
arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium
|
13db3cb9d0b2f25181ccf4b1316e12425abfc276
|
[
"Apache-2.0"
] | null | null | null |
src/scripts/data_processing/process_raw_data.py
|
arnabbiswas1/k_tab_aug_muticlass_rmse_logloss_weightedf1_stratified_tsfresh_cesium
|
13db3cb9d0b2f25181ccf4b1316e12425abfc276
|
[
"Apache-2.0"
] | null | null | null |
"""This script changes the data types, creates parquet files.
Final output is written to the specified directory
Sample Usage:
<PROJECT_HOME>$ python -m src.scripts.process_raw_data
"""
import numpy as np
import pandas as pd
import src.munging.process_data_util as process_data
from src.common import com_util as util
from src.config import constants as constants
if __name__ == "__main__":
    # Stream-only logger for this processing run.
    logger = util.get_logger("process_raw_data")
    logger.info("Starting to process raw data")

    # Read train, test and sample submission, all indexed by 'id'.
    train_df, test_df, sample_submission_df = process_data.read_raw_data(
        logger,
        constants.RAW_DATA_DIR,
        index_col_name="id",
        train=True,
        test=True,
        sample_submission=True,
    )

    TARGET = "loss"
    target = train_df[TARGET]
    n_train = len(train_df)

    # Stack train (minus target) on top of test so both frames receive
    # identical dtype conversions.
    combined_df = pd.concat([train_df.drop([TARGET], axis=1), test_df])

    logger.info("Changing data type of combined data ..")
    combined_df = process_data.change_dtype(logger, combined_df, np.int64, np.int32)
    combined_df = process_data.change_dtype(logger, combined_df, np.float64, np.float32)

    logger.info("Changing data type of target data ..")
    target = target.astype(np.int32)

    # Split the combined frame back apart and re-attach the target column
    # (named 'loss' to match TARGET) to the training part.
    train_df = combined_df.iloc[:n_train, :].assign(loss=target)
    test_df = combined_df.iloc[n_train:, :]

    logger.info("Changing data type of submission data ..")
    sample_submission_df = process_data.change_dtype(
        logger, sample_submission_df, np.int64, np.int32
    )

    logger.info(f"Writing processed feather files to {constants.PROCESSED_DATA_DIR}")
    train_df.to_parquet(
        f"{constants.PROCESSED_DATA_DIR}/train_processed.parquet", index=True
    )
    test_df.to_parquet(
        f"{constants.PROCESSED_DATA_DIR}/test_processed.parquet", index=True
    )
    sample_submission_df.to_parquet(
        f"{constants.PROCESSED_DATA_DIR}/sub_processed.parquet", index=True
    )

    logger.info("Raw data processing completed")
| 33.241935
| 88
| 0.714216
|
4a0f5d66a197f6ecc039d6c67a21d7b0ebc11280
| 678
|
py
|
Python
|
ex042.py
|
paulo-caixeta/Exercicios_Curso_Python
|
3b77925499c174ea9ff81dec65d6319125219b9a
|
[
"MIT"
] | null | null | null |
ex042.py
|
paulo-caixeta/Exercicios_Curso_Python
|
3b77925499c174ea9ff81dec65d6319125219b9a
|
[
"MIT"
] | null | null | null |
ex042.py
|
paulo-caixeta/Exercicios_Curso_Python
|
3b77925499c174ea9ff81dec65d6319125219b9a
|
[
"MIT"
] | null | null | null |
def eh_triangulo(a, b, c):
    """Return True when segments of lengths a, b and c can form a triangle.

    Uses the triangle inequality in the compact form |b - c| < a < b + c,
    which is equivalent to checking all three pairwise inequalities.
    """
    return abs(b - c) < a < b + c


def tipo_triangulo(a, b, c):
    """Classify a triangle by its side lengths.

    Returns 'EQUILÁTERO' (all sides equal), 'ISÓCELES' (exactly two
    equal) or 'ESCALENO' (all sides different).
    """
    if a == b == c:
        return 'EQUILÁTERO'
    # BUG FIX: the original tested only a != b and a != c, so inputs like
    # (5, 3, 3) — where b == c — were misclassified as scalene. All three
    # pairs must differ for ESCALENO.
    if a != b and b != c and a != c:
        return 'ESCALENO'
    return 'ISÓCELES'


if __name__ == '__main__':
    print('Digite a seguir 3 comprimentos de retas:')
    a = int(input('Reta a: '))
    b = int(input('Reta b: '))
    c = int(input('Reta c: '))

    if eh_triangulo(a, b, c):
        print('Estas retas podem formar um triângulo.')
        print('O triângulo formado é \033[1m{}\033[m'.format(tipo_triangulo(a, b, c)))
    else:
        print('Estas retas NÃO podem formar um triângulo.')

# Equilátero: todos os lados iguais
# Isósceles: dois lados iguais
# Escaleno: todos os lados diferentes
| 30.818182
| 62
| 0.634218
|
4a0f5dedbb80d4dae8e80c1c8d7060a769aad3ac
| 2,667
|
py
|
Python
|
examples/plot_ioneq.py
|
dstansby/fiasco
|
7d46ed92e692709cd90af805c4f6f57014e754ed
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_ioneq.py
|
dstansby/fiasco
|
7d46ed92e692709cd90af805c4f6f57014e754ed
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plot_ioneq.py
|
dstansby/fiasco
|
7d46ed92e692709cd90af805c4f6f57014e754ed
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Ionization fractions in equilibrium
===============================================
This example shows how to compute the ionization fraction as a function of
temperature, assuming equilibrium, for both a single ion as well as a whole
element.
"""
import matplotlib.pyplot as plt
import numpy as np
import astropy.units as u
from astropy.visualization import quantity_support
quantity_support()
from fiasco import Element
################################################
# First, create the `~fiasco.Element` object for carbon.
temperature = 10**np.arange(3.9, 6.5, 0.01) * u.K
el = Element('C', temperature)
################################################
# The ionization fractions in equilibrium can be determined by calculating the
# ionization and recombination rates as a function of temperature for every
# ion of the element and then solving the associated system of equations.
# This can be done by creating a `~fiasco.Element` object and then calling
# the `~fiasco.Element.equilibrium_ionization` method.
ioneq = el.equilibrium_ionization()
################################################
# Plot the population fraction of each ion as a function of temperature.
for ion in el:
_ioneq = ioneq[:, ion.charge_state]
imax = np.argmax(_ioneq)
plt.plot(el.temperature, _ioneq)
plt.text(el.temperature[imax], _ioneq[imax], ion.roman_numeral,
horizontalalignment='center')
plt.xscale('log')
plt.title(f'{el.atomic_symbol} Equilibrium Ionization')
plt.show()
################################################
# The CHIANTI database also includes tabulated ionization equilibria for
# all ions in the database. The `ioneq` attribute on each
# `~fiasco.Ion` object returns the tabulated population
# fractions interpolated onto the `temperature` array.
# Note that these population fractions returned by `~fiasco.Ion.ioneq` are
# stored in the CHIANTI database and therefore are set to NaN
# for temperatures outside of the tabulated temperature data given in CHIANTI.
plt.plot(el.temperature, el[3].ioneq)
plt.xscale('log')
plt.title(f'{el[3].roman_name} Equilibrium Ionization')
plt.show()
################################################
# We can then compare tabulated and calculated results for a single ion.
# Note that the two may not be equal due to differences in the rates when
# the tabulated results were calculated or due to artifacts from the
# interpolation.
plt.plot(el.temperature, ioneq[:, el[3].charge_state], label='calculated')
plt.plot(el.temperature, el[3].ioneq, label='interpolated')
plt.xlim(4e4, 3e5)
plt.xscale('log')
plt.legend()
plt.title(f'{el[3].roman_name} Equilibrium Ionization')
plt.show()
| 39.80597
| 78
| 0.683165
|
4a0f5e09caae63890bd5415683e144423f7f7b29
| 55,142
|
py
|
Python
|
src/sage/quadratic_forms/quadratic_form.py
|
bollu/sage
|
1da6df404d3ea7ff3019e16ea50d65923c1f4ece
|
[
"BSL-1.0"
] | null | null | null |
src/sage/quadratic_forms/quadratic_form.py
|
bollu/sage
|
1da6df404d3ea7ff3019e16ea50d65923c1f4ece
|
[
"BSL-1.0"
] | 1
|
2020-04-18T16:30:43.000Z
|
2020-04-18T16:30:43.000Z
|
src/sage/quadratic_forms/quadratic_form.py
|
dimpase/sage
|
468f23815ade42a2192b0a9cd378de8fdc594dcd
|
[
"BSL-1.0"
] | null | null | null |
"""
Quadratic Forms Overview
AUTHORS:
- Jon Hanke (2007-06-19)
- Anna Haensch (2010-07-01): Formatting and ReSTification
- Simon Brandhorst (2019-10-15): :meth:`quadratic_form_from_invariants`
"""
# ****************************************************************************
# Copyright (C) 2007 William Stein and Jonathan Hanke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from warnings import warn
from copy import deepcopy
from sage.matrix.constructor import matrix
from sage.matrix.matrix_space import MatrixSpace
from sage.structure.element import is_Matrix
from sage.rings.integer_ring import IntegerRing, ZZ
from sage.rings.ring import Ring
from sage.misc.functional import denominator, is_even, is_field
from sage.arith.all import GCD, LCM
from sage.rings.all import Ideal, QQ
from sage.rings.ring import is_Ring, PrincipalIdealDomain
from sage.structure.sage_object import SageObject
from sage.structure.element import is_Vector
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.modules.free_module_element import vector
from sage.quadratic_forms.genera.genus import genera
from sage.quadratic_forms.quadratic_form__evaluate import QFEvaluateVector, QFEvaluateMatrix
def QuadraticForm__constructor(R, n=None, entries=None):
    """
    Wrapper for the QuadraticForm class constructor. This is meant
    for internal use within the QuadraticForm class code only. You
    should instead directly call QuadraticForm().

    INPUT:

    - ``R`` -- the base ring of the quadratic form
    - ``n`` -- integer or ``None``; the number of variables
    - ``entries`` -- list of coefficients or ``None``

    EXAMPLES::

        sage: from sage.quadratic_forms.quadratic_form import QuadraticForm__constructor
        sage: QuadraticForm__constructor(ZZ, 3)   # Makes a generic quadratic form over the integers
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 0 0 0 ]
        [ * 0 0 ]
        [ * * 0 ]
    """
    # Simply delegate to the public constructor.
    return QuadraticForm(R, n, entries)
def is_QuadraticForm(Q):
    """
    Determine if the object Q is an element of the QuadraticForm class.

    INPUT:

    - ``Q`` -- any object

    OUTPUT: boolean; whether ``Q`` is an instance of :class:`QuadraticForm`

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: from sage.quadratic_forms.quadratic_form import is_QuadraticForm
        sage: is_QuadraticForm(Q) ##random
        True
        sage: is_QuadraticForm(2) ##random
        False
    """
    return isinstance(Q, QuadraticForm)
def quadratic_form_from_invariants(F, rk, det, P, sminus):
    r"""
    Return a rational quadratic form with given invariants.

    INPUT:

    - ``F`` -- the base field; currently only ``QQ`` is allowed

    - ``rk`` -- integer; the rank

    - ``det`` -- rational; the determinant

    - ``P`` -- a list of primes where Cassel's Hasse invariant
      is negative

    - ``sminus`` -- integer; the number of negative eigenvalues
      of any Gram matrix

    OUTPUT:

    - a quadratic form with the specified invariants

    Let `(a_1, \ldots, a_n)` be the gram marix of a regular quadratic space.
    Then Cassel's Hasse invariant is defined as

    .. MATH::

        \prod_{i<j} (a_i,a_j),

    where `(a_i,a_j)` denotes the Hilbert symbol.

    ALGORITHM:

    We follow [Kir2016]_.

    EXAMPLES::

        sage: P = [3,5]
        sage: q = quadratic_form_from_invariants(QQ,2,-15,P,1)
        sage: q
        Quadratic form in 2 variables over Rational Field with coefficients:
        [ 5 0 ]
        [ * -3 ]
        sage: all(q.hasse_invariant(p)==-1 for p in P)
        True

    TESTS:

    This shows that :trac:`28955` is fixed::

        sage: quadratic_form_from_invariants(QQ,3,2,[2],2)
        Quadratic form in 3 variables over Rational Field with coefficients:
        [ -1 0 0 ]
        [ * 1 0 ]
        [ * * -2 ]

        sage: quadratic_form_from_invariants(QQ,4,2,[2],4)
        Traceback (most recent call last):
        ...
        ValueError: invariants do not define a rational quadratic form
    """
    from sage.arith.misc import hilbert_symbol
    # normalize input
    if F!=QQ:
        raise NotImplementedError('base field must be QQ. If you want this over any field, implement weak approximation.')
    P = [ZZ(p) for p in P]
    rk = ZZ(rk)
    d = QQ(det).squarefree_part()
    sminus = ZZ(sminus)
    # check if the invariants define a global quadratic form:
    # determinant sign must match the signature ...
    if d.sign() != (-1)**sminus:
        raise ValueError("invariants do not define a rational quadratic form")
    # ... a rank-1 form has trivial Hasse invariant everywhere ...
    if rk == 1 and len(P) != 0:
        raise ValueError("invariants do not define a rational quadratic form")
    # ... and a rank-2 form with -d a p-adic square cannot have Hasse
    # invariant -1 at p.
    if rk == 2:
        for p in P:
            if QQ(-d).is_padic_square(p):
                raise ValueError("invariants do not define a rational quadratic form")
    # Parity constraint: the number of places with negative Hasse invariant
    # (counting the real place when sminus % 4 in (2, 3)) must be even.
    f = 0
    if sminus % 4 in (2, 3):
        f = 1
    if (f + len(P)) % 2 == 1:
        raise ValueError("invariants do not define a rational quadratic form")
    # Build a diagonal form <a_1, ..., a_n> by repeatedly splitting off a
    # one-dimensional form <a> and updating the invariants (d, sminus, P)
    # of the remaining space of rank rk - 1.
    D = []
    while rk >= 2:
        if rk >= 4:
            # choose the sign of a to match the remaining signature
            if sminus > 0:
                a = ZZ(-1)
            else:
                a = ZZ(1)
        elif rk == 3:
            # choose a so that a*d has odd valuation at every prime in
            # Pprime (verified by the assert below)
            Pprime = [p for p in P if hilbert_symbol(-1, -d, p)==1]
            Pprime += [p for p in (2*d).prime_divisors()
                       if hilbert_symbol(-1, -d, p)==-1 and p not in P]
            if sminus > 0:
                a = ZZ(-1)
            else:
                a = ZZ(1)
            for p in Pprime:
                if d.valuation(p) % 2 == 0:
                    a *= p
            assert all((a*d).valuation(p)%2==1 for p in Pprime)
        elif rk == 2:
            # pick a with prescribed negative Hilbert symbols (a, -d) at
            # the places in S
            S = P
            if sminus == 2:
                S += [-1]
            a = QQ.hilbert_symbol_negative_at_S(S,-d)
            a = ZZ(a)
        # update the invariants of the orthogonal complement of <a>
        P = ([p for p in P if hilbert_symbol(a, -d, p) == 1]
             +[p for p in (2*a*d).prime_divisors()
               if hilbert_symbol(a, -d, p)==-1 and p not in P])
        sminus = max(0, sminus-1)
        rk = rk - 1
        d = a*d
        D.append(a.squarefree_part())
    # the final diagonal entry is the remaining (squarefree) determinant
    d = d.squarefree_part()
    D.append(d)
    return DiagonalQuadraticForm(QQ,D)
class QuadraticForm(SageObject):
r"""
The ``QuadraticForm`` class represents a quadratic form in n variables with
coefficients in the ring R.
INPUT:
The constructor may be called in any of the following ways.
#. ``QuadraticForm(R, n, entries)``, where
- `R` -- ring for which the quadratic form is defined
- `n` -- an integer >= 0
- ``entries`` -- a list of `n(n+1)/2` coefficients of the quadratic form
in `R` (given lexicographically, or equivalently, by rows of the
matrix)
#. ``QuadraticForm(R, n)``, where
- `R` -- a ring
- `n` -- a symmetric `n \times n` matrix with even diagonal (relative to
`R`)
#. ``QuadraticForm(R)``, where
- `R` -- a symmetric `n \times n` matrix with even diagonal (relative to
its base ring)
If the keyword argument ``unsafe_initialize`` is True, then the subsequent
fields may by used to force the external initialization of various fields
of the quadratic form. Currently the only fields which can be set are:
- ``number_of_automorphisms``
- ``determinant``
OUTPUT:
quadratic form
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
sage: Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 1 2 3 ]
[ * 4 5 ]
[ * * 6 ]
::
sage: Q = QuadraticForm(QQ, 3, [1,2,3,4/3 ,5,6])
sage: Q
Quadratic form in 3 variables over Rational Field with coefficients:
[ 1 2 3 ]
[ * 4/3 5 ]
[ * * 6 ]
sage: Q[0,0]
1
sage: Q[0,0].parent()
Rational Field
::
sage: Q = QuadraticForm(QQ, 7, range(28))
sage: Q
Quadratic form in 7 variables over Rational Field with coefficients:
[ 0 1 2 3 4 5 6 ]
[ * 7 8 9 10 11 12 ]
[ * * 13 14 15 16 17 ]
[ * * * 18 19 20 21 ]
[ * * * * 22 23 24 ]
[ * * * * * 25 26 ]
[ * * * * * * 27 ]
::
sage: Q = QuadraticForm(QQ, 2, range(1,4))
sage: A = Matrix(ZZ,2,2,[-1,0,0,1])
sage: Q(A)
Quadratic form in 2 variables over Rational Field with coefficients:
[ 1 -2 ]
[ * 3 ]
::
sage: m = matrix(2,2,[1,2,3,4])
sage: m + m.transpose()
[2 5]
[5 8]
sage: QuadraticForm(m + m.transpose())
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 1 5 ]
[ * 4 ]
::
sage: QuadraticForm(ZZ, m + m.transpose())
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 1 5 ]
[ * 4 ]
::
sage: QuadraticForm(QQ, m + m.transpose())
Quadratic form in 2 variables over Rational Field with coefficients:
[ 1 5 ]
[ * 4 ]
"""
## Import specialized methods:
## ---------------------------
## Routines to compute the p-adic local normal form
from sage.quadratic_forms.quadratic_form__local_normal_form import \
find_entry_with_minimal_scale_at_prime, \
local_normal_form, \
jordan_blocks_by_scale_and_unimodular, \
jordan_blocks_in_unimodular_list_by_scale_power
## Routines to perform elementary variable substitutions
from sage.quadratic_forms.quadratic_form__variable_substitutions import \
swap_variables, \
multiply_variable, \
divide_variable, \
scale_by_factor, \
extract_variables, \
elementary_substitution, \
add_symmetric
## Routines to compute p-adic field invariants
from sage.quadratic_forms.quadratic_form__local_field_invariants import \
rational_diagonal_form, \
_rational_diagonal_form_and_transformation, \
signature_vector, \
signature, \
hasse_invariant, \
hasse_invariant__OMeara, \
is_hyperbolic, \
is_anisotropic, \
is_isotropic, \
anisotropic_primes, \
compute_definiteness, \
compute_definiteness_string_by_determinants, \
is_positive_definite, \
is_negative_definite, \
is_indefinite, \
is_definite
## Routines to compute local densities by the reduction procedure
from sage.quadratic_forms.quadratic_form__local_density_congruence import \
count_modp_solutions__by_Gauss_sum, \
local_good_density_congruence_odd, \
local_good_density_congruence_even, \
local_good_density_congruence, \
local_zero_density_congruence, \
local_badI_density_congruence, \
local_badII_density_congruence, \
local_bad_density_congruence, \
local_density_congruence, \
local_primitive_density_congruence
## Routines to compute local densities by counting solutions of various types
from sage.quadratic_forms.quadratic_form__count_local_2 import \
count_congruence_solutions_as_vector, \
count_congruence_solutions, \
count_congruence_solutions__good_type, \
count_congruence_solutions__zero_type, \
count_congruence_solutions__bad_type, \
count_congruence_solutions__bad_type_I, \
count_congruence_solutions__bad_type_II
## Routines to be called by the user to compute local densities
from sage.quadratic_forms.quadratic_form__local_density_interfaces import \
local_density, \
local_primitive_density
## Routines for computing with ternary forms
from sage.quadratic_forms.quadratic_form__ternary_Tornaria import \
disc, \
content, \
adjoint, \
antiadjoint, \
is_adjoint, \
reciprocal, \
omega, \
delta, \
level__Tornaria, \
discrec, \
hasse_conductor, \
clifford_invariant, \
clifford_conductor, \
basiclemma, \
basiclemmavec, \
xi, \
xi_rec, \
lll, \
representation_number_list, \
representation_vector_list, \
is_zero, \
is_zero_nonsingular, \
is_zero_singular
## Routines to compute the theta function
from sage.quadratic_forms.quadratic_form__theta import \
theta_series, \
theta_series_degree_2, \
theta_by_pari, \
theta_by_cholesky
## Routines to compute the product of all local densities
from sage.quadratic_forms.quadratic_form__siegel_product import \
siegel_product
## Routines to compute p-neighbors
from sage.quadratic_forms.quadratic_form__neighbors import \
find_primitive_p_divisible_vector__random, \
find_primitive_p_divisible_vector__next, \
find_p_neighbor_from_vec
## Routines to reduce a given quadratic form
from sage.quadratic_forms.quadratic_form__reduction_theory import \
reduced_binary_form1, \
reduced_ternary_form__Dickson, \
reduced_binary_form, \
minkowski_reduction, \
minkowski_reduction_for_4vars__SP
## Wrappers for Conway-Sloane genus routines (in ./genera/)
from sage.quadratic_forms.quadratic_form__genus import \
global_genus_symbol, \
local_genus_symbol, \
CS_genus_symbol_list
## Routines to compute local masses for ZZ.
from sage.quadratic_forms.quadratic_form__mass import \
shimura_mass__maximal, \
GHY_mass__maximal
from sage.quadratic_forms.quadratic_form__mass__Siegel_densities import \
mass__by_Siegel_densities, \
Pall_mass_density_at_odd_prime, \
Watson_mass_at_2, \
Kitaoka_mass_at_2, \
mass_at_two_by_counting_mod_power
from sage.quadratic_forms.quadratic_form__mass__Conway_Sloane_masses import \
parity, \
is_even, \
is_odd, \
conway_species_list_at_odd_prime, \
conway_species_list_at_2, \
conway_octane_of_this_unimodular_Jordan_block_at_2, \
conway_diagonal_factor, \
conway_cross_product_doubled_power, \
conway_type_factor, \
conway_p_mass, \
conway_standard_p_mass, \
conway_standard_mass, \
conway_mass
# conway_generic_mass, \
# conway_p_mass_adjustment
## Routines to check local representability of numbers
from sage.quadratic_forms.quadratic_form__local_representation_conditions import \
local_representation_conditions, \
is_locally_universal_at_prime, \
is_locally_universal_at_all_primes, \
is_locally_universal_at_all_places, \
is_locally_represented_number_at_place, \
is_locally_represented_number
## Routines to make a split local covering of the given quadratic form.
from sage.quadratic_forms.quadratic_form__split_local_covering import \
cholesky_decomposition, \
vectors_by_length, \
complementary_subform_to_vector, \
split_local_cover
## Routines to make automorphisms of the given quadratic form.
from sage.quadratic_forms.quadratic_form__automorphisms import \
basis_of_short_vectors, \
short_vector_list_up_to_length, \
short_primitive_vector_list_up_to_length, \
_compute_automorphisms, \
automorphism_group, \
automorphisms, \
number_of_automorphisms, \
set_number_of_automorphisms
## Routines to test the local and global equivalence/isometry of two quadratic forms.
from sage.quadratic_forms.quadratic_form__equivalence_testing import \
is_globally_equivalent_to, \
is_locally_equivalent_to, \
has_equivalent_Jordan_decomposition_at_prime, \
is_rationally_isometric
## Routines for solving equations of the form Q(x) = c.
from sage.quadratic_forms.qfsolve import solve
def __init__(self, R, n=None, entries=None, unsafe_initialization=False, number_of_automorphisms=None, determinant=None):
    """
    Construct a quadratic form, either from a ring together with a size
    and a list of upper-triangular coefficients, from a ring together
    with a symmetric matrix with even diagonal, or from such a matrix
    alone (see the class docstring for the accepted call signatures).

    EXAMPLES::

        sage: s = QuadraticForm(ZZ, 4, range(10))
        sage: s.dim()
        4

    TESTS::

        sage: s == loads(dumps(s))
        True
        sage: QuadraticForm(ZZ, -1)
        Traceback (most recent call last):
        ...
        ValueError: the size must be a non-negative integer, not -1
        sage: x = polygen(ZZ, 'x')
        sage: QuadraticForm(x**2)
        Traceback (most recent call last):
        ...
        TypeError: wrong input for QuadraticForm
    """
    # Deal with: QuadraticForm(ring, matrix)
    matrix_init_flag = False
    if isinstance(R, Ring):
        if is_Matrix(n):
            # Test if n is symmetric and has even diagonal
            if not self._is_even_symmetric_matrix_(n, R):
                raise TypeError("Oops! The matrix is not a symmetric with even diagonal defined over R.")
            # Rename the matrix and ring
            M = n
            M_ring = R
            matrix_init_flag = True
        # NOTE: if n is not a matrix here, we fall through to the
        # (ring, size, entries) initialization path below.
    elif not is_Matrix(R):
        # first argument, if not a ring, must be a matrix
        raise TypeError('wrong input for QuadraticForm')
    else:
        # Deal with: QuadraticForm(matrix)
        # Test if R is symmetric and has even diagonal
        if not self._is_even_symmetric_matrix_(R):
            raise TypeError("Oops! The matrix is not a symmetric with even diagonal.")
        # Rename the matrix and ring
        M = R
        M_ring = R.base_ring()
        matrix_init_flag = True

    ## Perform the quadratic form initialization from a matrix:
    ## read off the upper-triangular coefficients, halving the diagonal
    ## Hessian entries since Q(x) = (1/2) x^t M x.
    if matrix_init_flag:
        self.__n = ZZ(M.nrows())
        self.__base_ring = M_ring
        self.__coeffs = []
        for i in range(M.nrows()):
            for j in range(i, M.nrows()):
                if (i == j):
                    self.__coeffs += [ M_ring(M[i,j] / 2) ]
                else:
                    self.__coeffs += [ M_ring(M[i,j]) ]
        return

    ## -----------------------------------------------------------
    ## Verify the size of the matrix is an integer >= 0
    n = ZZ(n)
    if n < 0:
        raise ValueError("the size must be a non-negative integer, not {}".format(n))

    # Store the relevant variables
    N = n * (n + 1) // 2    # number of upper-triangular coefficients
    self.__n = n
    self.__base_ring = R
    self.__coeffs = [self.__base_ring.zero() for i in range(N)]

    # Check if entries is a list, tuple or iterator for the
    # current size, and if so, write the upper-triangular matrix
    if entries is not None:
        try:
            entries = list(entries)
        except TypeError:
            raise TypeError('entries must be an iterable')
        if len(entries) == N:
            for i in range(N):
                self.__coeffs[i] = self.__base_ring(entries[i])
        else:
            raise TypeError("Oops! The entries " + str(entries) + " must be a list of size n(n+1)/2.")

    ## -----------------------------------------------------------
    ## Process possible forced initialization of various fields.
    ## These externally supplied values are recorded in
    ## _external_initialization_list since they are unverified.
    self._external_initialization_list = []
    if unsafe_initialization:
        ## Set the number of automorphisms
        if number_of_automorphisms is not None:
            self.set_number_of_automorphisms(number_of_automorphisms)
            #self.__number_of_automorphisms = number_of_automorphisms
            #self.__external_initialization_list.append('number_of_automorphisms')
        ## Set the determinant
        if determinant is not None:
            self.__det = determinant
            self._external_initialization_list.append('determinant')
def list_external_initializations(self):
    """
    Return the names of the fields that were supplied externally at
    creation time (via ``unsafe_initialization``) rather than computed by
    the usual QuadraticForm methods.  Such fields are only as reliable as
    the external process that produced them, so they are not guaranteed
    to be correct.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,0,5])
        sage: Q.list_external_initializations()
        []
        sage: T = Q.theta_series()
        sage: Q.list_external_initializations()
        []
        sage: Q = QuadraticForm(ZZ, 2, [1,0,5], unsafe_initialization=False, number_of_automorphisms=3, determinant=0)
        sage: Q.list_external_initializations()
        []
        sage: Q = QuadraticForm(ZZ, 2, [1,0,5], unsafe_initialization=True, number_of_automorphisms=3, determinant=0)
        sage: Q.list_external_initializations()
        ['number_of_automorphisms', 'determinant']
    """
    # Hand back an independent copy so callers cannot mutate our record.
    recorded = self._external_initialization_list
    return deepcopy(recorded)
def __pari__(self):
    """
    Return a PARI-formatted Hessian matrix for Q.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,0,5])
        sage: Q.__pari__()
        [2, 0; 0, 10]
    """
    # PARI represents a quadratic form by its (even) Hessian matrix.
    hessian = self.matrix()
    return hessian.__pari__()
def _pari_init_(self):
    """
    Return a PARI-formatted Hessian matrix for Q, as string.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,0,5])
        sage: Q._pari_init_()
        'Mat([2,0;0,10])'
    """
    # Delegate to the Hessian matrix, which knows its own PARI string form.
    hessian = self.matrix()
    return hessian._pari_init_()
def _repr_(self):
    """
    Give a text representation for the quadratic form given as an
    upper-triangular table of coefficients, with ``*`` below the diagonal.

    EXAMPLES::

        sage: QuadraticForm(ZZ, 2, [1,3,5])
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 1 3 ]
        [ * 5 ]
    """
    n = self.dim()
    header = ("Quadratic form in " + str(n) + " variables over "
              + str(self.base_ring()) + " with coefficients: \n")
    rows = []
    for r in range(n):
        # Entries strictly below the diagonal are suppressed as '*'.
        cells = ["* " if r > c else str(self[r, c]) + " " for c in range(n)]
        rows.append("[ " + "".join(cells) + "]")
    return header + "\n".join(rows)
def _latex_(self):
    """
    Give a LaTeX representation for the quadratic form given as an
    upper-triangular matrix of coefficients.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [2,3,5])
        sage: Q._latex_()
        'Quadratic form in 2 variables over Integer Ring with coefficients: \\newline\\left[ \\begin{array}{cc}2 & 3 & * & 5 & \\end{array} \\right]'
    """
    n = self.dim()
    pieces = ["Quadratic form in " + str(n) + " variables over " + str(self.base_ring()),
              " with coefficients: \\newline",
              "\\left[ \\begin{array}{" + n * "c" + "}"]
    for r in range(n):
        for c in range(n):
            # Entries strictly below the diagonal are suppressed as '*'.
            pieces.append(" * & " if r > c else str(self[r, c]) + " & ")
    pieces.append("\\end{array} \\right]")
    return "".join(pieces)
def __getitem__(self, ij):
    """
    Return the coefficient `a_{ij}` of `x_i * x_j`.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: matrix(ZZ, 3, 3, [Q[i,j] for i in range(3) for j in range(3)])
        [1 2 3]
        [2 4 5]
        [3 5 6]
    """
    row, col = ij
    row = int(row)
    col = int(col)
    # Storage is upper-triangular, so normalize the index pair.
    if row > col:
        row, col = col, row
    # Row-major offset into the flattened upper triangle.
    return self.__coeffs[row*self.__n - row*(row-1)//2 + col - row]
def __setitem__(self, ij, coeff):
    """
    Set the coefficient `a_{ij}` in front of `x_i * x_j`.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Q[2,1] = 17
        sage: Q
        Quadratic form in 3 variables over Integer Ring with coefficients:
        [ 1 2 3 ]
        [ * 4 17 ]
        [ * * 6 ]
    """
    row, col = ij
    row = int(row)
    col = int(col)
    ## TO DO: Verify that 0 <= i, j <= (n-1)
    # Storage is upper-triangular, so normalize the index pair.
    if row > col:
        row, col = col, row
    # Coerce the new coefficient into the base ring before storing it.
    try:
        self.__coeffs[row*self.__n - row*(row-1)//2 + col - row] = self.__base_ring(coeff)
    except Exception:
        raise RuntimeError("Oops! This coefficient can't be coerced to an element of the base ring for the quadratic form.")
def __hash__(self):
    r"""
    Hash compatible with ``__eq__``: forms with the same base ring and
    the same coefficient list hash identically.

    TESTS::

        sage: Q1 = QuadraticForm(QQ, 2, [1,1,1])
        sage: Q2 = QuadraticForm(QQ, 2, [1,1,1])
        sage: Q3 = QuadraticForm(QuadraticField(2), 2, [1,1,1])
        sage: hash(Q1) == hash(Q2)
        True
        sage: hash(Q1) == hash(Q3)
        False
    """
    # Coefficients are stored in a (mutable) list; freeze them for hashing.
    coeff_key = tuple(self.__coeffs)
    return hash(self.__base_ring) ^ hash(coeff_key)
def __eq__(self, right):
    """
    Determine if two quadratic forms are equal, i.e. have the same base
    ring and the same coefficients.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,4,10])
        sage: Q == Q
        True
        sage: Q1 = QuadraticForm(QQ, 2, [1,4,10])
        sage: Q == Q1
        False
        sage: Q2 = QuadraticForm(ZZ, 2, [1,4,-10])
        sage: Q == Q2
        False
        sage: Q1 == Q2
        False
    """
    if not isinstance(right, QuadraticForm):
        return False
    # Equal forms must agree on both the base ring and every coefficient.
    if self.__base_ring != right.__base_ring:
        return False
    return self.__coeffs == right.__coeffs
def __add__(self, right):
    """
    Return the direct (orthogonal) sum of two quadratic forms.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,4,10])
        sage: Q2 = QuadraticForm(ZZ, 2, [1,4,-10])
        sage: Q + Q2
        Quadratic form in 4 variables over Integer Ring with coefficients:
        [ 1 4 0 0 ]
        [ * 10 0 0 ]
        [ * * 1 4 ]
        [ * * * -10 ]
    """
    if not isinstance(right, QuadraticForm):
        raise TypeError("Oops! Can't add these objects since they're not both quadratic forms. =(")
    if self.base_ring() != right.base_ring():
        raise TypeError("Oops! Can't add these since the quadratic forms don't have the same base rings... =(")
    d1 = self.dim()
    d2 = right.dim()
    total = QuadraticForm(self.base_ring(), d1 + d2)
    # Copy self into the upper-left block of the new form...
    for a in range(d1):
        for b in range(a, d1):
            total[a, b] = self[a, b]
    # ...and right into the lower-right block; the cross terms stay zero.
    for a in range(d2):
        for b in range(a, d2):
            total[d1 + a, d1 + b] = right[a, b]
    return total
def sum_by_coefficients_with(self, right):
    """
    Return the coefficient-wise sum of two quadratic forms of the same
    size over the same base ring.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,4,10])
        sage: Q2 = QuadraticForm(ZZ, 2, [1,4,-10])
        sage: Q.sum_by_coefficients_with(Q2)
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 2 8 ]
        [ * 0 ]
    """
    if not isinstance(right, QuadraticForm):
        raise TypeError("Oops! Can't add these objects since they're not both quadratic forms. =(")
    if self.__n != right.__n:
        raise TypeError("Oops! Can't add these since the quadratic forms don't have the same sizes... =(")
    if self.__base_ring != right.__base_ring:
        raise TypeError("Oops! Can't add these since the quadratic forms don't have the same base rings... =(")
    # Equal dimensions guarantee equal-length coefficient lists.
    summed = [a + b for a, b in zip(self.__coeffs, right.__coeffs)]
    return QuadraticForm(self.__base_ring, self.__n, summed)
## ======================== CHANGE THIS TO A TENSOR PRODUCT?!? Even in Characteristic 2?!? =======================
# def __mul__(self, right):
# """
# Multiply (on the right) the quadratic form Q by an element of the ring that Q is defined over.
#
# EXAMPLES::
#
# sage: Q = QuadraticForm(ZZ, 2, [1,4,10])
# sage: Q*2
# Quadratic form in 2 variables over Integer Ring with coefficients:
# [ 2 8 ]
# [ * 20 ]
#
# sage: Q+Q == Q*2
# True
#
# """
# try:
# c = self.base_ring()(right)
# except Exception:
# raise TypeError, "Oh no! The multiplier cannot be coerced into the base ring of the quadratic form. =("
#
# return QuadraticForm(self.base_ring(), self.dim(), [c * self.__coeffs[i] for i in range(len(self.__coeffs))])
# =========================================================================================================================
def __call__(self, v):
    """
    Evaluate this quadratic form Q on a vector or matrix of elements
    coercible to the base ring of the quadratic form.  For a vector the
    output is the ring element Q(`v`); for a matrix `A` the output is the
    quadratic form `Q'` given in matrix notation by

    .. MATH::

        Q' = A^t * Q * A.

    EXAMPLES:

    Evaluate a quadratic form at a vector::

        sage: Q = QuadraticForm(QQ, 3, range(6))
        sage: Q([1,2,3])
        89
        sage: Q([1,0,0])
        0
        sage: Q([1,1,1])
        15

    Evaluate a quadratic form using a matrix (a change of variables)::

        sage: Q = QuadraticForm(ZZ, 2, [1,0,1])
        sage: M = Matrix(ZZ, 2, 2, [1,1,0,1])
        sage: Q(M)
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 1 2 ]
        [ * 2 ]

    Vectors and tuples also work::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,1,1])
        sage: Q(vector([1,2,3]))
        14
        sage: Q(tuple([1,2,3]))
        14
    """
    n = self.dim()

    # Matrix argument: substitute x -> A*x, giving a new form in ncols variables.
    if is_Matrix(v):
        if v.nrows() != n:
            raise TypeError("the matrix must have {} rows".format(n))
        image_form = QuadraticForm(self.base_ring(), v.ncols())
        return QFEvaluateMatrix(self, v, image_form)

    # Vector-like argument: evaluate Q at the point.
    if is_Vector(v) or isinstance(v, (list, tuple)):
        if len(v) != n:
            raise TypeError("your vector needs to have length {}".format(n))
        # Spot-check coercibility on the first entry only, as a cheap sanity test.
        if len(v) > 0:
            try:
                self.base_ring()(v[0])
            except Exception:
                raise TypeError("your vector is not coercible to the base ring of the quadratic form")
        return QFEvaluateVector(self, v)

    raise TypeError
## =====================================================================================================
def _is_even_symmetric_matrix_(self, A, R=None):
    """
    Test whether the matrix ``A`` is symmetric, defined over ``R``, and
    has even diagonal entries in ``R``.

    INPUT:

    - ``A`` -- a matrix

    - ``R`` -- a ring (optional; defaults to the base ring of ``A``)

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [2,3,5])
        sage: A = Q.matrix()
        sage: A
        [ 4  3]
        [ 3 10]
        sage: Q._is_even_symmetric_matrix_(A)
        True
        sage: A[0,0] = 1
        sage: Q._is_even_symmetric_matrix_(A)
        False
    """
    if not is_Matrix(A):
        raise TypeError("A is not a matrix.")

    # Remember whether R was supplied explicitly; if not, take it from A,
    # in which case the coercion test below is vacuous and skipped.
    ring_coerce_test = (R is not None)
    if R is None:
        R = A.base_ring()
    if not isinstance(R, Ring):
        raise TypeError("R is not a ring.")

    if not A.is_square():
        return False

    ## Test that the matrix is symmetric
    n = A.nrows()
    for i in range(n):
        for j in range(i + 1, n):
            if A[i, j] != A[j, i]:
                return False

    ## Test that all entries coerce to R.
    ## BUGFIX: the old condition `not ((A.base_ring() == R) or ring_coerce_test)`
    ## could never be True (when R was omitted it equals A.base_ring(), and when
    ## R was supplied the flag short-circuited the test), so the coercion check
    ## was dead code.  Run it exactly when an explicit R differs from A's ring.
    if ring_coerce_test and A.base_ring() != R:
        try:
            for i in range(n):
                for j in range(i, n):
                    R(A[i, j])
        except Exception:
            return False

    ## Test that the diagonal is even (if 1/2 isn't in R)
    if not R(2).is_unit():
        for i in range(n):
            if not is_even(R(A[i, i])):
                return False

    return True
## =====================================================================================================
def matrix(self):
    """
    Return the Hessian matrix A for which Q(X) = `(1/2) * X^t * A * X`.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, range(6))
        sage: Q.matrix()
        [ 0  1  2]
        [ 1  6  4]
        [ 2  4 10]
    """
    # This is just an alias for the Hessian matrix of the form.
    hessian = self.Hessian_matrix()
    return hessian
def Hessian_matrix(self):
    """
    Return the Hessian matrix A for which Q(X) = `(1/2) * X^t * A * X`.

    EXAMPLES::

        sage: Q = QuadraticForm(QQ, 2, range(1,4))
        sage: Q.Hessian_matrix()
        [2 2]
        [2 6]
        sage: Q.matrix().base_ring()
        Rational Field
    """
    n = self.dim()
    # Diagonal entries are doubled since Q(x) = (1/2) x^t A x; off-diagonal
    # Hessian entries are the mixed coefficients a_{ij} themselves.
    hessian_entries = [2 * self[r, c] if r == c else self[r, c]
                       for r in range(n) for c in range(n)]
    return matrix(self.base_ring(), n, n, hessian_entries)
def Gram_matrix_rational(self):
    """
    Return a (symmetric) Gram matrix A for the quadratic form Q,
    meaning that

    .. MATH::

        Q(x) = x^t * A * x,

    defined over the fraction field of the base ring.

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,3,5,7])
        sage: A = Q.Gram_matrix_rational(); A
        [1 0 0 0]
        [0 3 0 0]
        [0 0 5 0]
        [0 0 0 7]
        sage: A.base_ring()
        Rational Field
    """
    # Halving the Hessian gives the symmetric Gram matrix over Frac(R).
    one_half = ZZ(1) / ZZ(2)
    return one_half * self.matrix()
def Gram_matrix(self):
    """
    Return a (symmetric) Gram matrix A for the quadratic form Q,
    meaning that

    .. MATH::

        Q(x) = x^t * A * x,

    defined over the base ring of Q.  If this is not possible,
    then a TypeError is raised.

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,3,5,7])
        sage: A = Q.Gram_matrix(); A
        [1 0 0 0]
        [0 3 0 0]
        [0 0 5 0]
        [0 0 0 7]
        sage: A.base_ring()
        Integer Ring
    """
    A = (ZZ(1) / ZZ(2)) * self.matrix()
    n = self.dim()
    # The Gram matrix is integral iff every (upper-triangular, by symmetry)
    # entry of A lies in the base ring.
    is_integral = all(A[r, c] in self.base_ring()
                      for r in range(n) for c in range(r, n))
    if not is_integral:
        raise TypeError("Oops! This form does not have an integral Gram matrix. =(")
    return MatrixSpace(self.base_ring(), n, n)(A)
def has_integral_Gram_matrix(self):
    """
    Return whether the quadratic form has an integral Gram matrix (with
    respect to its base ring).

    A warning is issued if the form is defined over a field, since in
    that case the return is trivially true.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [7,8,9])
        sage: Q.has_integral_Gram_matrix()
        True
        sage: Q = QuadraticForm(ZZ, 2, [4,5,6])
        sage: Q.has_integral_Gram_matrix()
        False
    """
    ## Over a field the answer is trivially True, so warn the user.
    if is_field(self.base_ring()):
        warn("Warning -- A quadratic form over a field always has integral Gram matrix. Do you really want to do this?!?")

    # Integrality holds exactly when Gram_matrix() succeeds.
    try:
        self.Gram_matrix()
    except Exception:
        return False
    return True
def gcd(self):
    """
    Return the greatest common divisor of the coefficients of the
    quadratic form (as a polynomial).

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 4, range(1, 21, 2))
        sage: Q.gcd()
        1
        sage: Q = QuadraticForm(ZZ, 4, range(0, 20, 2))
        sage: Q.gcd()
        2
    """
    # The content is only well-defined for integer-valued forms.
    if self.base_ring() != ZZ:
        raise TypeError("Oops! The given quadratic form must be defined over ZZ.")
    coeff_list = self.coefficients()
    return GCD(coeff_list)
def polynomial(self, names='x'):
    r"""
    Return the quadratic form as a polynomial in `n` variables over the
    polynomial ring `R[names]`.

    INPUT:

    - ``names`` -- the name of the variables; digits are appended to the
      name for each canonical variable, e.g. x0, x1, x2, ...

    OUTPUT: the polynomial form of the quadratic form.

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(QQ,[1, 3, 5, 7])
        sage: P = Q.polynomial(); P
        x0^2 + 3*x1^2 + 5*x2^2 + 7*x3^2

    ::

        sage: B.<i,j,k> = QuaternionAlgebra(F,-1,-1)
        sage: Q = QuadraticForm(B, 3, [2*a, 3*a, i, 1 - a, 0, 2*a + 4])
        sage: Q.polynomial()
        Traceback (most recent call last):
        ...
        ValueError: Can only create polynomial rings over commutative rings.
    """
    base = self.base_ring()
    n = self.dim()
    # Upper-triangular coefficient matrix of the form.
    upper = matrix(base, n)
    for r in range(n):
        for c in range(r, n):
            upper[r, c] = self[r, c]
    # Polynomial rings only exist over commutative base rings.
    try:
        poly_ring = PolynomialRing(base, names, n)
    except Exception:
        raise ValueError('Can only create polynomial rings over commutative rings.')
    gens_vec = vector(poly_ring.gens())
    # Q(x) = x * U * x for the upper-triangular coefficient matrix U.
    return (gens_vec * upper).dot_product(gens_vec)
def is_primitive(self):
    """
    Determine if the given integer-valued form is primitive
    (i.e. not an integer (>1) multiple of another integer-valued
    quadratic form).

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [2,3,4])
        sage: Q.is_primitive()
        True
        sage: Q = QuadraticForm(ZZ, 2, [2,4,8])
        sage: Q.is_primitive()
        False
    """
    # Primitive exactly when the content (gcd of the coefficients) is 1.
    return self.gcd() == 1
def primitive(self):
    """
    Return a primitive version of an integer-valued quadratic form,
    defined over `ZZ`, by dividing out the gcd of the coefficients.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [2,3,4])
        sage: Q.primitive()
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 2 3 ]
        [ * 4 ]
        sage: Q = QuadraticForm(ZZ, 2, [2,4,8])
        sage: Q.primitive()
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 1 2 ]
        [ * 4 ]
    """
    if self.base_ring() != ZZ:
        raise TypeError("Oops! The given quadratic form must be defined over ZZ.")
    # Divide every coefficient by the content of the form.
    g = self.gcd()
    scaled = [ZZ(c / g) for c in self.coefficients()]
    return QuadraticForm(self.base_ring(), self.dim(), scaled)
def adjoint_primitive(self):
    """
    Return the primitive adjoint of the quadratic form, which is the
    smallest discriminant integer-valued quadratic form whose matrix is a
    scalar multiple of the inverse of the matrix of the given quadratic
    form.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.adjoint_primitive()
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 3 -2 ]
        [ * 1 ]
    """
    # The classical adjoint of the Hessian gives the adjoint form, up to content.
    adj = self.Hessian_matrix().adjoint_classical()
    return QuadraticForm(adj).primitive()
def dim(self):
    """
    Give the number of variables of the quadratic form.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.dim()
        2
        sage: parent(Q.dim())
        Integer Ring
    """
    # The number of variables is fixed at construction time.
    return self.__n
def base_ring(self):
    """
    Give the ring over which the quadratic form is defined.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.base_ring()
        Integer Ring
    """
    # The base ring is fixed at construction time.
    return self.__base_ring
def coefficients(self):
    """
    Give the list of upper-triangular coefficients, read across the rows
    starting from the main diagonal.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.coefficients()
        [1, 2, 3]
    """
    # NOTE: this returns the internal list itself, not a copy.
    return self.__coeffs
def det(self):
    """
    Give the determinant of the Gram matrix of 2*Q, or equivalently the
    determinant of the Hessian matrix of Q.

    (Note: This is always defined over the same ring as the
    quadratic form.)

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.det()
        8
    """
    # Return the cached value when available (possibly forced at construction).
    try:
        return self.__det
    except AttributeError:
        pass
    # A 0-dimensional form has an empty Hessian, whose determinant is 1.
    if self.dim() == 0:
        value = self.base_ring()(1)
    else:
        value = self.matrix().det()
    # Cache for subsequent calls.
    self.__det = value
    return value
def Gram_det(self):
    """
    Give the determinant of the Gram matrix of Q.

    (Note: This is defined over the fraction field of the ring of the
    quadratic form, but is often not defined over the same ring as the
    quadratic form.)

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, [1,2,3])
        sage: Q.Gram_det()
        2
    """
    # The Gram matrix is Hessian/2, so its determinant is det(Hessian)/2^n.
    scale = ZZ(2**self.dim())
    return self.det() / scale
def base_change_to(self, R):
    """
    Return the quadratic form with all coefficients coerced into the new
    base ring ``R``.  The current base ring must coerce canonically into
    ``R``.

    This is preferable to an explicit coercion through the
    ``base_ring()`` method, which does not affect the individual
    coefficients; it is particularly useful for fast modular arithmetic
    evaluations.

    INPUT:

    - ``R`` -- a ring

    OUTPUT: a quadratic form

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ,[1,1]); Q
        Quadratic form in 2 variables over Integer Ring with coefficients:
        [ 1 0 ]
        [ * 1 ]
        sage: Q1 = Q.base_change_to(IntegerModRing(5)); Q1
        Quadratic form in 2 variables over Ring of integers modulo 5 with coefficients:
        [ 1 0 ]
        [ * 1 ]
        sage: Q1([35,11])
        1
    """
    ## Check that a canonical coercion is possible
    if not is_Ring(R):
        raise TypeError("Oops! R is not a ring. =(")
    if not R.has_coerce_map_from(self.base_ring()):
        raise TypeError("Oops! There is no canonical coercion from " + str(self.base_ring()) + " to R.")

    # Coerce every coefficient individually and rebuild the form over R.
    new_coeffs = [R(c) for c in self.coefficients()]
    return QuadraticForm(R, self.dim(), new_coeffs)
def level(self):
    r"""
    Determine the level of the quadratic form over a PID, which is a
    generator for the smallest ideal `N` of `R` such that N * (the matrix
    of 2*Q)^(-1) is in R with diagonal in 2*R.

    Over `\ZZ` this returns a non-negative number.

    (Caveat: This always returns the unit ideal when working over a field!)

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, range(1,4))
        sage: Q.level()
        8
        sage: Q = DiagonalQuadraticForm(ZZ, [1,3,5,7])
        sage: Q.level()
        420
    """
    ## Return the cached level when available
    try:
        return self.__level
    except AttributeError:
        pass

    ## The level as a number only makes sense over a PID.
    if not isinstance(self.base_ring(), PrincipalIdealDomain):
        raise TypeError("Oops! The level (as a number) is only defined over a Principal Ideal Domain. Try using level_ideal().")

    ## Over a field the level is trivially 1; warn rather than fail.
    if self.base_ring().is_field():
        warn("Warning -- The level of a quadratic form over a field is always 1. Do you really want to do this?!?")

    ## A degenerate form has no inverse Hessian, hence no level.
    try:
        mat_inv = self.matrix()**(-1)
    except ZeroDivisionError:
        raise TypeError("Oops! The quadratic form is degenerate (i.e. det = 0). =(")

    ## Collect the denominators of the inverse Hessian entries,
    ## halving the diagonal ones first.
    inv_denoms = []
    for r in range(self.dim()):
        for c in range(r, self.dim()):
            entry = mat_inv[r, c]
            if r == c:
                entry = entry / 2
            inv_denoms.append(denominator(entry))
    ## NOTE: to do this properly, the level should be the inverse of the
    ## fractional ideal (over R) generated by the entries whose
    ## denominators we take above. =)
    lvl = Ideal(self.base_ring()(LCM(inv_denoms))).gen()

    ## Normalize the result over ZZ
    if self.base_ring() == IntegerRing():
        lvl = abs(lvl)

    ## Cache and return the level
    self.__level = lvl
    return lvl
def level_ideal(self):
    """
    Determine the level of the quadratic form (over R), which is the
    smallest ideal N of R such that N * (the matrix of 2*Q)^(-1) is
    in R with diagonal in 2*R.

    (Caveat: This always returns the principal ideal when working over a field!)

    WARNING: THIS ONLY WORKS OVER A PID RING OF INTEGERS FOR NOW!
    (Waiting for Sage fractional ideal support.)

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 2, range(1,4))
        sage: Q.level_ideal()
        Principal ideal (8) of Integer Ring
        sage: Q1 = QuadraticForm(QQ, 2, range(1,4))
        sage: Q1.level_ideal()
        Principal ideal (1) of Rational Field
        sage: Q = DiagonalQuadraticForm(ZZ, [1,3,5,7])
        sage: Q.level_ideal()
        Principal ideal (420) of Integer Ring
    """
    ## NOTE: to do this properly, the level should be the inverse of the
    ## fractional ideal (over R) generated by the relevant entries; for
    ## now we rely on level(), which assumes a PID base ring.
    lvl = self.level()
    return Ideal(self.base_ring()(lvl))
def bilinear_map(self, v, w):
r"""
Return the value of the associated bilinear map on two vectors
Given a quadratic form `Q` over some base ring `R` with
characteristic not equal to 2, this gives the image of two
vectors with coefficients in `R` under the associated bilinear
map `B`, given by the relation `2 B(v,w) = Q(v) + Q(w) - Q(v+w)`.
INPUT:
`v, w` -- two vectors
OUTPUT:
an element of the base ring `R`.
EXAMPLES:
First, an example over `\ZZ`::
sage: Q = QuadraticForm(ZZ,3,[1,4,0,1,4,1])
sage: v = vector(ZZ,(1,2,0))
sage: w = vector(ZZ,(0,1,1))
sage: Q.bilinear_map(v,w)
8
This also works over `\QQ`::
sage: Q = QuadraticForm(QQ,2,[1/2,2,1])
sage: v = vector(QQ,(1,1))
sage: w = vector(QQ,(1/2,2))
sage: Q.bilinear_map(v,w)
19/4
The vectors must have the correct length::
sage: Q = DiagonalQuadraticForm(ZZ,[1,7,7])
sage: v = vector((1,2))
sage: w = vector((1,1,1))
sage: Q.bilinear_map(v,w)
Traceback (most recent call last):
...
TypeError: vectors must have length 3
This does not work if the characteristic is 2::
sage: Q = DiagonalQuadraticForm(GF(2),[1,1,1])
sage: v = vector((1,1,1))
sage: w = vector((1,1,1))
sage: Q.bilinear_map(v,w)
Traceback (most recent call last):
...
TypeError: not defined for rings of characteristic 2
"""
if len(v) != self.dim() or len(w) != self.dim():
raise TypeError("vectors must have length " + str(self.dim()))
if self.base_ring().characteristic() == 2:
raise TypeError("not defined for rings of characteristic 2")
return (self(v+w) - self(v) - self(w))/2
genera = staticmethod(genera)
## ============================================================================
def DiagonalQuadraticForm(R, diag):
    """
    Return the diagonal quadratic form over `R` with the given diagonal entries.

    INPUT:

    - `R` -- ring

    - ``diag`` -- list/tuple of elements coercible to R

    OUTPUT:

    quadratic form

    EXAMPLES::

        sage: Q = DiagonalQuadraticForm(ZZ, [1,3,5,7])
        sage: Q
        Quadratic form in 4 variables over Integer Ring with coefficients:
        [ 1 0 0 0 ]
        [ * 3 0 0 ]
        [ * * 5 0 ]
        [ * * * 7 ]
    """
    form = QuadraticForm(R, len(diag))
    for index, entry in enumerate(diag):
        form[index, index] = entry
    return form
| 32.059302
| 154
| 0.529633
|
4a0f5e9141703eaef1155641e17c6f1dd1fe0577
| 3,475
|
py
|
Python
|
nnunet/utilities/task_name_id_conversion.py
|
yuan-xiaohan/nnUnet_OnWindows
|
5cb561df75f22ee564592393a26837bd6b9e6fef
|
[
"Apache-2.0"
] | 2
|
2021-11-02T03:42:28.000Z
|
2022-02-23T14:58:23.000Z
|
nnunet/utilities/task_name_id_conversion.py
|
yuan-xiaohan/nnUnet_OnWindows
|
5cb561df75f22ee564592393a26837bd6b9e6fef
|
[
"Apache-2.0"
] | 1
|
2022-03-02T02:03:57.000Z
|
2022-03-02T02:03:57.000Z
|
nnunet/utilities/task_name_id_conversion.py
|
yuan-xiaohan/nnUnet_OnWindows
|
5cb561df75f22ee564592393a26837bd6b9e6fef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.paths import nnUNet_raw_data, preprocessing_output_dir, nnUNet_cropped_data, network_training_output_dir
from nnunet.common import *
import numpy as np
def convert_id_to_task_name(task_id: int):
    """
    Resolve a numeric task id to its unique task folder name ("TaskXXX_...").

    The id is looked up in the preprocessed, raw and cropped data directories
    as well as in the trained-model directories (whichever are configured).
    Exactly one distinct name must be found across all locations.

    :param task_id: integer id of the task, e.g. 4 for Task004_...
    :return: the matching task name
    :raises RuntimeError: if no match, or more than one distinct match, is found
    """
    startswith = "Task%03.0d" % task_id

    if preprocessing_output_dir is not None:
        candidates_preprocessed = subdirs(preprocessing_output_dir, prefix=startswith, join=False)
    else:
        candidates_preprocessed = []

    if nnUNet_raw_data is not None:
        candidates_raw = subdirs(nnUNet_raw_data, prefix=startswith, join=False)
    else:
        candidates_raw = []

    if nnUNet_cropped_data is not None:
        candidates_cropped = subdirs(nnUNet_cropped_data, prefix=startswith, join=False)
    else:
        candidates_cropped = []

    candidates_trained_models = []
    if network_training_output_dir is not None:
        for m in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres']:
            if isdir(join(network_training_output_dir, m)):
                candidates_trained_models += subdirs(join(network_training_output_dir, m),
                                                     prefix=startswith, join=False)

    all_candidates = candidates_cropped + candidates_preprocessed + candidates_raw + candidates_trained_models
    unique_candidates = np.unique(all_candidates)
    if len(unique_candidates) > 1:
        raise RuntimeError("More than one task name found for task id %d. Please correct that. (I looked in the "
                           "following folders:\n%s\n%s\n%s" % (task_id, nnUNet_raw_data, preprocessing_output_dir,
                                                               nnUNet_cropped_data))
    if len(unique_candidates) == 0:
        # os.environ.get(key, 'None') replaces the original redundant
        # `get(key) if get(key) is not None else 'None'` double lookup.
        raise RuntimeError("Could not find a task with the ID %d. Make sure the requested task ID exists and that "
                           "nnU-Net knows where raw and preprocessed data are located (see Documentation - "
                           "Installation). Here are your currently defined folders:\nnnUNet_preprocessed=%s\nRESULTS_"
                           "FOLDER=%s\nnnUNet_raw_data_base=%s\nIf something is not right, adapt your environemnt "
                           "variables." %
                           (task_id,
                            os.environ.get('nnUNet_preprocessed', 'None'),
                            os.environ.get('RESULTS_FOLDER', 'None'),
                            os.environ.get('nnUNet_raw_data_base', 'None'),
                            ))
    return unique_candidates[0]
def convert_task_name_to_id(task_name: str):
    """Extract the numeric id from a task name such as "Task005_Prostate"."""
    assert task_name.startswith("Task")
    # Characters 4..6 hold the zero-padded three-digit id.
    return int(task_name[4:7])
| 51.102941
| 133
| 0.670504
|
4a0f5ec1c472674d8d94ab914e4a378916c1b0c4
| 443
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_dressed_official.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_dressed_official.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_dressed_official.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build and return the Creature for this object template.

    ``kernel`` is supplied by the template loader as part of the required
    template interface; it is not used in this autogenerated body.
    """
    result = Creature()

    result.template = "object/mobile/shared_dressed_official.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name", "human_base_female")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
| 26.058824
| 62
| 0.72912
|
4a0f5f78f16935a261abe17129cf467890fb3714
| 4,602
|
py
|
Python
|
trajectometry/convert.py
|
farid-fari/epidemics
|
4e24b79e67ae8e7c91ba4abefe7f20a7b3720064
|
[
"MIT"
] | null | null | null |
trajectometry/convert.py
|
farid-fari/epidemics
|
4e24b79e67ae8e7c91ba4abefe7f20a7b3720064
|
[
"MIT"
] | null | null | null |
trajectometry/convert.py
|
farid-fari/epidemics
|
4e24b79e67ae8e7c91ba4abefe7f20a7b3720064
|
[
"MIT"
] | 1
|
2022-01-29T15:52:40.000Z
|
2022-01-29T15:52:40.000Z
|
"""Outil permettant de convertir le format des données de trajectométrie."""
import sqlite3
import csv

# Quarter-hour column labels '0000', '0015', ..., '2345' (96 entries),
# generated instead of hand-written to avoid copy/paste mistakes.
TIME_SLOTS = ["%02d%02d" % (h, m) for h in range(24) for m in (0, 15, 30, 45)]

try:
    # BUGFIX: 'ANSI' is not a registered Python codec (it raises LookupError,
    # which the original except clause did not catch). On a French Windows
    # machine "ANSI" means Windows-1252, so use cp1252 explicitly.
    f = open('trajecto.csv', encoding='cp1252')
except FileNotFoundError:
    raise FileNotFoundError("Le fichier n'existe pas")

cols = f.readline().strip().split(';')       # Les étiquettes
cols = [t.replace(":", "") for t in cols]    # On enlève les séparateurs des heures

ancien = csv.reader(f, delimiter=";")
nouveau = sqlite3.connect('trajecto_other.db')
curs = nouveau.cursor()

try:
    time_columns = ", ".join("'%s' INTEGER" % slot for slot in TIME_SLOTS)
    curs.execute("CREATE TABLE Personnes"
                 " (cle BIGINT PRIMARY KEY, secteur INTEGER, age INTEGER,"
                 " redressement FLOAT, occupation INTEGER, " + time_columns + ")")
except sqlite3.OperationalError:
    raise FileExistsError("La table de données convertie existe déja.")

# 5 fixed columns + 96 time slots = 101 placeholders.
placeholders = "(" + ",".join(["?"] * (5 + len(TIME_SLOTS))) + ")"

# On prendra garde aux lignes nulles (y compris les lignes totalement vides,
# qui faisaient planter l'accès k[0] dans la version d'origine).
curs.executemany("INSERT INTO Personnes VALUES " + placeholders,
                 (tuple(k) for k in ancien if k and k[0]))

curs.execute("SELECT COUNT(*) FROM Personnes")
pers = curs.fetchone()[0]
print(f"{pers} personnes converties.")

nouveau.commit()
curs.close()
nouveau.close()
f.close()
| 33.347826
| 80
| 0.394394
|
4a0f5f88d40a7866cf8f23a2d5d9e6bc12066568
| 94,314
|
py
|
Python
|
airflow/models/dag.py
|
alexlshon/airflow
|
8eddc8b5019890a712810b8e5b1185997adb9bf4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-03T01:44:04.000Z
|
2021-03-03T01:44:04.000Z
|
airflow/models/dag.py
|
alexlshon/airflow
|
8eddc8b5019890a712810b8e5b1185997adb9bf4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/models/dag.py
|
alexlshon/airflow
|
8eddc8b5019890a712810b8e5b1185997adb9bf4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-03-03T01:44:08.000Z
|
2021-03-03T01:44:08.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import functools
import logging
import os
import pickle
import re
import sys
import traceback
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta
from inspect import signature
from typing import (
TYPE_CHECKING,
Callable,
Collection,
Dict,
FrozenSet,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
import jinja2
import pendulum
from croniter import croniter
from dateutil.relativedelta import relativedelta
from sqlalchemy import Boolean, Column, ForeignKey, Index, Integer, String, Text, func, or_
from sqlalchemy.orm import backref, joinedload, relationship
from sqlalchemy.orm.session import Session
from airflow import settings, utils
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DuplicateTaskIdFound, TaskNotFound
from airflow.models.base import ID_LEN, Base
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagbag import DagBag
from airflow.models.dagcode import DagCode
from airflow.models.dagparam import DagParam
from airflow.models.dagpickle import DagPickle
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import Context, TaskInstance, clear_task_instances
from airflow.security import permissions
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.file import correct_maybe_zipped
from airflow.utils.helpers import validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import Interval, UtcDateTime, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
if TYPE_CHECKING:
from airflow.utils.task_group import TaskGroup
# Before Py 3.7, there is no re.Pattern class; fall back to the concrete type
# of a compiled pattern so isinstance() checks work on older Pythons too.
try:
    from re import Pattern as PatternType  # type: ignore
except ImportError:
    PatternType = type(re.compile('', 0))

log = logging.getLogger(__name__)

# A schedule interval may be a cron string/preset, a timedelta or a relativedelta.
ScheduleInterval = Union[str, timedelta, relativedelta]

# Values accepted by the webserver for the DAG default view and graph orientation.
DEFAULT_VIEW_PRESETS = ['tree', 'graph', 'duration', 'gantt', 'landing_times']
ORIENTATION_PRESETS = ['LR', 'TB', 'RL', 'BT']

# Signature shared by on_success_callback / on_failure_callback.
DagStateChangeCallback = Callable[[Context], None]
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
    """
    Return the most recent DagRun for ``dag_id``, or None if there is none.

    The last dag run can be any type of run, e.g. scheduled or backfilled.
    Overridden DagRuns are ignored.
    """
    filters = [DagRun.dag_id == dag_id]
    if not include_externally_triggered:
        filters.append(DagRun.external_trigger == False)  # noqa pylint: disable=singleton-comparison
    return (
        session.query(DagRun)
        .filter(*filters)
        .order_by(DagRun.execution_date.desc())
        .first()
    )
@functools.total_ordering
class DAG(LoggingMixin):
"""
A dag (directed acyclic graph) is a collection of tasks with directional
dependencies. A dag also has a schedule, a start date and an end date
(optional). For each schedule, (say daily or hourly), the DAG needs to run
each individual tasks as their dependencies are met. Certain tasks have
the property of depending on their own past, meaning that they can't run
until their previous schedule (and upstream tasks) are completed.
DAGs essentially act as namespaces for tasks. A task_id can only be
added once to a DAG.
:param dag_id: The id of the DAG; must consist exclusively of alphanumeric
characters, dashes, dots and underscores (all ASCII)
:type dag_id: str
:param description: The description for the DAG to e.g. be shown on the webserver
:type description: str
:param schedule_interval: Defines how often that DAG runs, this
timedelta object gets added to your latest task instance's
execution_date to figure out the next schedule
:type schedule_interval: datetime.timedelta or
dateutil.relativedelta.relativedelta or str that acts as a cron
expression
:param start_date: The timestamp from which the scheduler will
attempt to backfill
:type start_date: datetime.datetime
:param end_date: A date beyond which your DAG won't run, leave to None
for open ended scheduling
:type end_date: datetime.datetime
:param template_searchpath: This list of folders (non relative)
defines where jinja will look for your templates. Order matters.
Note that jinja/airflow includes the path of your DAG file by
default
:type template_searchpath: str or list[str]
:param template_undefined: Template undefined type.
:type template_undefined: jinja2.StrictUndefined
:param user_defined_macros: a dictionary of macros that will be exposed
in your jinja templates. For example, passing ``dict(foo='bar')``
to this argument allows you to ``{{ foo }}`` in all jinja
templates related to this DAG. Note that you can pass any
type of object here.
:type user_defined_macros: dict
:param user_defined_filters: a dictionary of filters that will be exposed
in your jinja templates. For example, passing
``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
you to ``{{ 'world' | hello }}`` in all jinja templates related to
this DAG.
:type user_defined_filters: dict
:param default_args: A dictionary of default parameters to be used
as constructor keyword parameters when initialising operators.
Note that operators have the same hook, and precede those defined
here, meaning that if your dict contains `'depends_on_past': True`
here and `'depends_on_past': False` in the operator's call
`default_args`, the actual value will be `False`.
:type default_args: dict
:param params: a dictionary of DAG level parameters that are made
accessible in templates, namespaced under `params`. These
params can be overridden at the task level.
:type params: dict
:param concurrency: the number of task instances allowed to run
concurrently
:type concurrency: int
:param max_active_runs: maximum number of active DAG runs, beyond this
number of DAG runs in a running state, the scheduler won't create
new active DAG runs
:type max_active_runs: int
:param dagrun_timeout: specify how long a DagRun should be up before
timing out / failing, so that new DagRuns can be created. The timeout
is only enforced for scheduled DagRuns.
:type dagrun_timeout: datetime.timedelta
:param sla_miss_callback: specify a function to call when reporting SLA
timeouts.
:type sla_miss_callback: types.FunctionType
:param default_view: Specify DAG default view (tree, graph, duration,
gantt, landing_times), default tree
:type default_view: str
:param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT), default LR
:type orientation: str
:param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
:type catchup: bool
:param on_failure_callback: A function to be called when a DagRun of this dag fails.
A context dictionary is passed as a single parameter to this function.
:type on_failure_callback: callable
:param on_success_callback: Much like the ``on_failure_callback`` except
that it is executed when the dag succeeds.
:type on_success_callback: callable
:param access_control: Specify optional DAG-level permissions, e.g.,
"{'role1': {'can_read'}, 'role2': {'can_read', 'can_edit'}}"
:type access_control: dict
:param is_paused_upon_creation: Specifies if the dag is paused when created for the first time.
If the dag exists already, this flag will be ignored. If this optional parameter
is not specified, the global config setting will be used.
:type is_paused_upon_creation: bool or None
:param jinja_environment_kwargs: additional configuration options to be passed to Jinja
``Environment`` for template rendering
**Example**: to avoid Jinja from removing a trailing newline from template strings ::
DAG(dag_id='my-dag',
jinja_environment_kwargs={
'keep_trailing_newline': True,
# some other jinja2 Environment options here
}
)
**See**: `Jinja Environment documentation
<https://jinja.palletsprojects.com/en/master/api/#jinja2.Environment>`_
:type jinja_environment_kwargs: dict
:param tags: List of tags to help filtering DAGS in the UI.
:type tags: List[str]
"""
_comps = {
'dag_id',
'task_ids',
'parent_dag',
'start_date',
'schedule_interval',
'full_filepath',
'template_searchpath',
'last_loaded',
}
__serialized_fields: Optional[FrozenSet[str]] = None
    def __init__(
        self,
        dag_id: str,
        description: Optional[str] = None,
        schedule_interval: Optional[ScheduleInterval] = timedelta(days=1),
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        full_filepath: Optional[str] = None,
        template_searchpath: Optional[Union[str, Iterable[str]]] = None,
        template_undefined: Type[jinja2.StrictUndefined] = jinja2.StrictUndefined,
        user_defined_macros: Optional[Dict] = None,
        user_defined_filters: Optional[Dict] = None,
        default_args: Optional[Dict] = None,
        concurrency: int = conf.getint('core', 'dag_concurrency'),
        max_active_runs: int = conf.getint('core', 'max_active_runs_per_dag'),
        dagrun_timeout: Optional[timedelta] = None,
        sla_miss_callback: Optional[Callable] = None,
        default_view: str = conf.get('webserver', 'dag_default_view').lower(),
        orientation: str = conf.get('webserver', 'dag_orientation'),
        catchup: bool = conf.getboolean('scheduler', 'catchup_by_default'),
        on_success_callback: Optional[DagStateChangeCallback] = None,
        on_failure_callback: Optional[DagStateChangeCallback] = None,
        doc_md: Optional[str] = None,
        params: Optional[Dict] = None,
        access_control: Optional[Dict] = None,
        is_paused_upon_creation: Optional[bool] = None,
        jinja_environment_kwargs: Optional[Dict] = None,
        tags: Optional[List[str]] = None,
    ):
        # Local import to avoid a circular dependency at module import time.
        from airflow.utils.task_group import TaskGroup

        self.user_defined_macros = user_defined_macros
        self.user_defined_filters = user_defined_filters
        # Deep-copy so mutating self.default_args never affects the caller's dict.
        self.default_args = copy.deepcopy(default_args or {})
        self.params = params or {}

        # merging potentially conflicting default_args['params'] into params
        if 'params' in self.default_args:
            self.params.update(self.default_args['params'])
            del self.default_args['params']

        validate_key(dag_id)

        self._dag_id = dag_id
        self._full_filepath = full_filepath if full_filepath else ''
        self._concurrency = concurrency
        self._pickle_id: Optional[int] = None

        self._description = description
        # set file location to caller source path
        back = sys._getframe().f_back
        self.fileloc = back.f_code.co_filename if back else ""
        self.task_dict: Dict[str, BaseOperator] = {}

        # set timezone from start_date; resolution order matters:
        # explicit start_date tz > default_args start_date tz > settings.TIMEZONE
        if start_date and start_date.tzinfo:
            self.timezone = start_date.tzinfo
        elif 'start_date' in self.default_args and self.default_args['start_date']:
            if isinstance(self.default_args['start_date'], str):
                self.default_args['start_date'] = timezone.parse(self.default_args['start_date'])
            self.timezone = self.default_args['start_date'].tzinfo

        if not hasattr(self, 'timezone') or not self.timezone:
            self.timezone = settings.TIMEZONE

        # Apply the timezone we settled on to end_date if it wasn't supplied
        if 'end_date' in self.default_args and self.default_args['end_date']:
            if isinstance(self.default_args['end_date'], str):
                self.default_args['end_date'] = timezone.parse(
                    self.default_args['end_date'], timezone=self.timezone
                )

        # All dates are stored internally in UTC.
        self.start_date = timezone.convert_to_utc(start_date)
        self.end_date = timezone.convert_to_utc(end_date)

        # also convert tasks
        if 'start_date' in self.default_args:
            self.default_args['start_date'] = timezone.convert_to_utc(self.default_args['start_date'])
        if 'end_date' in self.default_args:
            self.default_args['end_date'] = timezone.convert_to_utc(self.default_args['end_date'])

        self.schedule_interval = schedule_interval

        if isinstance(template_searchpath, str):
            template_searchpath = [template_searchpath]
        self.template_searchpath = template_searchpath
        self.template_undefined = template_undefined
        self.parent_dag: Optional[DAG] = None  # Gets set when DAGs are loaded
        self.last_loaded = timezone.utcnow()
        self.safe_dag_id = dag_id.replace('.', '__dot__')
        self.max_active_runs = max_active_runs
        self.dagrun_timeout = dagrun_timeout
        self.sla_miss_callback = sla_miss_callback
        if default_view in DEFAULT_VIEW_PRESETS:
            self._default_view: str = default_view
        else:
            raise AirflowException(
                f'Invalid values of dag.default_view: only support '
                f'{DEFAULT_VIEW_PRESETS}, but get {default_view}'
            )
        if orientation in ORIENTATION_PRESETS:
            self.orientation = orientation
        else:
            raise AirflowException(
                f'Invalid values of dag.orientation: only support '
                f'{ORIENTATION_PRESETS}, but get {orientation}'
            )
        self.catchup = catchup
        self.is_subdag = False  # DagBag.bag_dag() will set this to True if appropriate

        self.partial = False
        self.on_success_callback = on_success_callback
        self.on_failure_callback = on_failure_callback

        # To keep it in parity with Serialized DAGs
        # and identify if DAG has on_*_callback without actually storing them in Serialized JSON
        self.has_on_success_callback = self.on_success_callback is not None
        self.has_on_failure_callback = self.on_failure_callback is not None

        self.doc_md = doc_md

        self._access_control = DAG._upgrade_outdated_dag_access_control(access_control)
        self.is_paused_upon_creation = is_paused_upon_creation

        self.jinja_environment_kwargs = jinja_environment_kwargs
        self.tags = tags
        self._task_group = TaskGroup.create_root(self)
def __repr__(self):
return f"<DAG: {self.dag_id}>"
def __eq__(self, other):
if type(self) == type(other):
# Use getattr() instead of __dict__ as __dict__ doesn't return
# correct values for properties.
return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
return False
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.dag_id < other.dag_id
def __hash__(self):
hash_components = [type(self)]
for c in self._comps:
# task_ids returns a list and lists can't be hashed
if c == 'task_ids':
val = tuple(self.task_dict.keys())
else:
val = getattr(self, c, None)
try:
hash(val)
hash_components.append(val)
except TypeError:
hash_components.append(repr(val))
return hash(tuple(hash_components))
    # Context Manager -----------------------------------------------
    def __enter__(self):
        # ``with DAG(...) as dag:`` makes this DAG the current context-managed
        # DAG, so operators created inside the block attach to it automatically.
        DagContext.push_context_managed_dag(self)
        return self

    def __exit__(self, _type, _value, _tb):
        # Pop unconditionally, even when the block raised.
        DagContext.pop_context_managed_dag()

    # /Context Manager ----------------------------------------------
@staticmethod
def _upgrade_outdated_dag_access_control(access_control=None):
"""
Looks for outdated dag level permissions (can_dag_read and can_dag_edit) in DAG
access_controls (for example, {'role1': {'can_dag_read'}, 'role2': {'can_dag_read', 'can_dag_edit'}})
and replaces them with updated permissions (can_read and can_edit).
"""
if not access_control:
return None
new_perm_mapping = {
permissions.DEPRECATED_ACTION_CAN_DAG_READ: permissions.ACTION_CAN_READ,
permissions.DEPRECATED_ACTION_CAN_DAG_EDIT: permissions.ACTION_CAN_EDIT,
}
updated_access_control = {}
for role, perms in access_control.items():
updated_access_control[role] = {new_perm_mapping.get(perm, perm) for perm in perms}
if access_control != updated_access_control:
warnings.warn(
"The 'can_dag_read' and 'can_dag_edit' permissions are deprecated. "
"Please use 'can_read' and 'can_edit', respectively.",
DeprecationWarning,
stacklevel=3,
)
return updated_access_control
def date_range(
self,
start_date: datetime,
num: Optional[int] = None,
end_date: Optional[datetime] = timezone.utcnow(),
) -> List[datetime]:
if num is not None:
end_date = None
return utils_date_range(
start_date=start_date, end_date=end_date, num=num, delta=self.normalized_schedule_interval
)
def is_fixed_time_schedule(self):
"""
Figures out if the DAG schedule has a fixed time (e.g. 3 AM).
:return: True if the schedule has a fixed time, False if not.
"""
now = datetime.now()
cron = croniter(self.normalized_schedule_interval, now)
start = cron.get_next(datetime)
cron_next = cron.get_next(datetime)
if cron_next.minute == start.minute and cron_next.hour == start.hour:
return True
return False
    def following_schedule(self, dttm):
        """
        Calculates the following schedule for this dag in UTC.

        :param dttm: utc datetime
        :return: utc datetime (or None, implicitly, when there is no
            schedule interval at all)
        """
        if isinstance(self.normalized_schedule_interval, str):
            # Cron schedule: compute in the DAG's own timezone so DST is
            # handled correctly, then convert the result back to UTC.
            # we don't want to rely on the transitions created by
            # croniter as they are not always correct
            dttm = pendulum.instance(dttm)
            naive = timezone.make_naive(dttm, self.timezone)
            cron = croniter(self.normalized_schedule_interval, naive)

            # We assume that DST transitions happen on the minute/hour
            if not self.is_fixed_time_schedule():
                # relative offset (eg. every 5 minutes)
                delta = cron.get_next(datetime) - naive
                following = dttm.in_timezone(self.timezone) + delta
            else:
                # absolute (e.g. 3 AM)
                naive = cron.get_next(datetime)
                tz = pendulum.timezone(self.timezone.name)
                following = timezone.make_aware(naive, tz)
            return timezone.convert_to_utc(following)
        elif self.normalized_schedule_interval is not None:
            # timedelta / relativedelta interval: plain date arithmetic.
            return timezone.convert_to_utc(dttm + self.normalized_schedule_interval)
    def previous_schedule(self, dttm):
        """
        Calculates the previous schedule for this dag in UTC.

        Mirror image of :meth:`following_schedule`.

        :param dttm: utc datetime
        :return: utc datetime (or None, implicitly, when there is no
            schedule interval at all)
        """
        if isinstance(self.normalized_schedule_interval, str):
            # Cron schedule: compute in the DAG's own timezone so DST is
            # handled correctly, then convert the result back to UTC.
            # we don't want to rely on the transitions created by
            # croniter as they are not always correct
            dttm = pendulum.instance(dttm)
            naive = timezone.make_naive(dttm, self.timezone)
            cron = croniter(self.normalized_schedule_interval, naive)

            # We assume that DST transitions happen on the minute/hour
            if not self.is_fixed_time_schedule():
                # relative offset (eg. every 5 minutes)
                delta = naive - cron.get_prev(datetime)
                previous = dttm.in_timezone(self.timezone) - delta
            else:
                # absolute (e.g. 3 AM)
                naive = cron.get_prev(datetime)
                tz = pendulum.timezone(self.timezone.name)
                previous = timezone.make_aware(naive, tz)
            return timezone.convert_to_utc(previous)
        elif self.normalized_schedule_interval is not None:
            # timedelta / relativedelta interval: plain date arithmetic.
            return timezone.convert_to_utc(dttm - self.normalized_schedule_interval)
def next_dagrun_info(
self,
date_last_automated_dagrun: Optional[pendulum.DateTime],
) -> Tuple[Optional[pendulum.DateTime], Optional[pendulum.DateTime]]:
"""
Get information about the next DagRun of this dag after ``date_last_automated_dagrun`` -- the
execution date, and the earliest it could be scheduled
:param date_last_automated_dagrun: The max(execution_date) of existing
"automated" DagRuns for this dag (scheduled or backfill, but not
manual)
"""
if (
self.schedule_interval == "@once" and date_last_automated_dagrun
) or self.schedule_interval is None:
# Manual trigger, or already created the run for @once, can short circuit
return (None, None)
next_execution_date = self.next_dagrun_after_date(date_last_automated_dagrun)
if next_execution_date is None:
return (None, None)
if self.schedule_interval == "@once":
# For "@once" it can be created "now"
return (next_execution_date, next_execution_date)
return (next_execution_date, self.following_schedule(next_execution_date))
    def next_dagrun_after_date(self, date_last_automated_dagrun: Optional[pendulum.DateTime]):
        """
        Get the next execution date after the given ``date_last_automated_dagrun``, according to
        schedule_interval, start_date, end_date etc. This doesn't check max active run or any other
        "concurrency" type limits, it only performs calculations based on the various date and interval fields
        of this dag and it's tasks.

        .. note:: When catchup is disabled, this method MUTATES
            ``self.start_date`` — it moves the start date forward so that only
            the most recent schedule period is considered.

        :param date_last_automated_dagrun: The execution_date of the last scheduler or
            backfill triggered run for this dag
        :type date_last_automated_dagrun: pendulum.Pendulum
        """
        if not self.schedule_interval or self.is_subdag:
            return None

        # don't schedule @once again
        if self.schedule_interval == '@once' and date_last_automated_dagrun:
            return None

        # don't do scheduler catchup for dag's that don't have dag.catchup = True
        if not (self.catchup or self.schedule_interval == '@once'):
            # The logic is that we move start_date up until
            # one period before, so that timezone.utcnow() is AFTER
            # the period end, and the job can be created...
            now = timezone.utcnow()
            next_start = self.following_schedule(now)
            last_start = self.previous_schedule(now)
            if next_start <= now or isinstance(self.schedule_interval, timedelta):
                new_start = last_start
            else:
                new_start = self.previous_schedule(last_start)

            # Only ever move the start date forward, never backward.
            if self.start_date:
                if new_start >= self.start_date:
                    self.start_date = new_start
            else:
                self.start_date = new_start

        next_run_date = None
        if not date_last_automated_dagrun:
            # First run: base the schedule on the earliest task start date.
            task_start_dates = [t.start_date for t in self.tasks if t.start_date]
            if task_start_dates:
                next_run_date = self.normalize_schedule(min(task_start_dates))
                self.log.debug("Next run date based on tasks %s", next_run_date)
        else:
            next_run_date = self.following_schedule(date_last_automated_dagrun)

        # Skip forward until we are strictly after the last automated run.
        if date_last_automated_dagrun and next_run_date:
            while next_run_date <= date_last_automated_dagrun:
                next_run_date = self.following_schedule(next_run_date)

        # don't ever schedule prior to the dag's start_date
        if self.start_date:
            next_run_date = self.start_date if not next_run_date else max(next_run_date, self.start_date)
            if next_run_date == self.start_date:
                next_run_date = self.normalize_schedule(self.start_date)
            self.log.debug("Dag start date: %s. Next run date: %s", self.start_date, next_run_date)

        # Don't schedule a dag beyond its end_date (as specified by the dag param)
        if next_run_date and self.end_date and next_run_date > self.end_date:
            return None

        # Don't schedule a dag beyond its end_date (as specified by the task params)
        # Get the min task end date, which may come from the dag.default_args
        task_end_dates = [t.end_date for t in self.tasks if t.end_date]
        if task_end_dates and next_run_date:
            min_task_end_date = min(task_end_dates)
            if next_run_date > min_task_end_date:
                return None

        return next_run_date
def get_run_dates(self, start_date, end_date=None):
"""
Returns a list of dates between the interval received as parameter using this
dag's schedule interval. Returned dates can be used for execution dates.
:param start_date: the start date of the interval
:type start_date: datetime
:param end_date: the end date of the interval, defaults to timezone.utcnow()
:type end_date: datetime
:return: a list of dates within the interval following the dag's schedule
:rtype: list
"""
run_dates = []
using_start_date = start_date
using_end_date = end_date
# dates for dag runs
using_start_date = using_start_date or min([t.start_date for t in self.tasks])
using_end_date = using_end_date or timezone.utcnow()
# next run date for a subdag isn't relevant (schedule_interval for subdags
# is ignored) so we use the dag run's start date in the case of a subdag
next_run_date = self.normalize_schedule(using_start_date) if not self.is_subdag else using_start_date
while next_run_date and next_run_date <= using_end_date:
run_dates.append(next_run_date)
next_run_date = self.following_schedule(next_run_date)
return run_dates
def normalize_schedule(self, dttm):
"""Returns dttm + interval unless dttm is first interval then it returns dttm"""
following = self.following_schedule(dttm)
# in case of @once
if not following:
return dttm
if self.previous_schedule(following) != dttm:
return following
return dttm
    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        """Return the most recent DagRun for this DAG (delegates to the module-level helper)."""
        return get_last_dagrun(
            self.dag_id, session=session, include_externally_triggered=include_externally_triggered
        )
    @provide_session
    def has_dag_runs(self, session=None, include_externally_triggered=True) -> bool:
        """Return True if at least one DagRun exists for this DAG."""
        return (
            get_last_dagrun(
                self.dag_id, session=session, include_externally_triggered=include_externally_triggered
            )
            is not None
        )
    @property
    def dag_id(self) -> str:
        """Unique identifier of this DAG."""
        return self._dag_id
    @dag_id.setter
    def dag_id(self, value: str) -> None:
        self._dag_id = value
    @property
    def full_filepath(self) -> str:
        """Path of the file this DAG was loaded from."""
        return self._full_filepath
    @full_filepath.setter
    def full_filepath(self, value) -> None:
        self._full_filepath = value
    @property
    def concurrency(self) -> int:
        """Maximum number of this DAG's task instances allowed to run concurrently."""
        return self._concurrency
    @concurrency.setter
    def concurrency(self, value: int):
        self._concurrency = value
    @property
    def access_control(self):
        """Role-to-permissions mapping for this DAG."""
        return self._access_control
    @access_control.setter
    def access_control(self, value):
        # Run the value through the upgrade helper so outdated permission
        # names are normalized before being stored.
        self._access_control = DAG._upgrade_outdated_dag_access_control(value)
    @property
    def description(self) -> Optional[str]:
        """Human-readable description of the DAG (read-only)."""
        return self._description
    @property
    def default_view(self) -> str:
        """Default UI view for this DAG (read-only)."""
        return self._default_view
    @property
    def pickle_id(self) -> Optional[int]:
        """Id of the DagPickle row this DAG was last pickled to, if any (see :meth:`pickle`)."""
        return self._pickle_id
    @pickle_id.setter
    def pickle_id(self, value: int) -> None:
        self._pickle_id = value
    def param(self, name: str, default=None) -> DagParam:
        """
        Return a DagParam object for current dag.
        :param name: dag parameter name.
        :param default: fallback value for dag parameter.
        :return: DagParam instance for specified name and current dag.
        """
        # NOTE(review): resolution semantics live in DagParam itself; this
        # merely binds the name and default to this DAG instance.
        return DagParam(current_dag=self, name=name, default=default)
    @property
    def tasks(self) -> List[BaseOperator]:
        """All operators of this DAG as a list (backed by ``task_dict``)."""
        return list(self.task_dict.values())
    @tasks.setter
    def tasks(self, val):
        # Guard against accidental wholesale replacement of the task collection.
        raise AttributeError('DAG.tasks can not be modified. Use dag.add_task() instead.')
    @property
    def task_ids(self) -> List[str]:
        """Ids of all tasks in this DAG."""
        return list(self.task_dict.keys())
    @property
    def task_group(self) -> "TaskGroup":
        """Root TaskGroup of this DAG."""
        return self._task_group
    @property
    def filepath(self) -> str:
        """File location of where the dag object is instantiated"""
        # Strip the DAGS_FOLDER prefix so the path is relative to the dag folder,
        # then also strip this module's own directory prefix.
        fn = self.full_filepath.replace(settings.DAGS_FOLDER + '/', '')
        fn = fn.replace(os.path.dirname(__file__) + '/', '')
        return fn
@property
def folder(self) -> str:
"""Folder location of where the DAG object is instantiated."""
return os.path.dirname(self.full_filepath)
@property
def owner(self) -> str:
"""
Return list of all owners found in DAG tasks.
:return: Comma separated list of owners in DAG tasks
:rtype: str
"""
return ", ".join({t.owner for t in self.tasks})
    @property
    def allow_future_exec_dates(self) -> bool:
        """True only when the deployment opts in via settings AND this DAG has no schedule interval."""
        return settings.ALLOW_FUTURE_EXEC_DATES and self.schedule_interval is None
    @provide_session
    def get_concurrency_reached(self, session=None) -> bool:
        """
        Returns a boolean indicating whether the concurrency limit for this DAG
        has been reached
        """
        TI = TaskInstance
        # Count this DAG's RUNNING task instances and compare against the cap.
        qry = session.query(func.count(TI.task_id)).filter(
            TI.dag_id == self.dag_id,
            TI.state == State.RUNNING,
        )
        return qry.scalar() >= self.concurrency
    @property
    def concurrency_reached(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_concurrency_reached()
    @provide_session
    def get_is_paused(self, session=None) -> Optional[bool]:
        """Returns a boolean indicating whether this DAG is paused"""
        # NOTE: presumably returns None when no DagModel row exists for this
        # dag_id (Query.value on an empty result) — hence the Optional return.
        qry = session.query(DagModel).filter(DagModel.dag_id == self.dag_id)
        return qry.value(DagModel.is_paused)
    @property
    def is_paused(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_is_paused()
@property
def normalized_schedule_interval(self) -> Optional[ScheduleInterval]:
"""
Returns Normalized Schedule Interval. This is used internally by the Scheduler to
schedule DAGs.
1. Converts Cron Preset to a Cron Expression (e.g ``@monthly`` to ``0 0 1 * *``)
2. If Schedule Interval is "@once" return "None"
3. If not (1) or (2) returns schedule_interval
"""
if isinstance(self.schedule_interval, str) and self.schedule_interval in cron_presets:
_schedule_interval = cron_presets.get(self.schedule_interval) # type: Optional[ScheduleInterval]
elif self.schedule_interval == '@once':
_schedule_interval = None
else:
_schedule_interval = self.schedule_interval
return _schedule_interval
    @provide_session
    def handle_callback(self, dagrun, success=True, reason=None, session=None):
        """
        Triggers the appropriate callback depending on the value of success, namely the
        on_failure_callback or on_success_callback. This method gets the context of a
        single TaskInstance part of this DagRun and passes that to the callable along
        with a 'reason', primarily to differentiate DagRun failures.
        .. note: The logs end up in
            ``$AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log``
        :param dagrun: DagRun object
        :param success: Flag to specify if failure or success callback should be called
        :param reason: Completion reason
        :param session: Database session
        """
        callback = self.on_success_callback if success else self.on_failure_callback
        if callback:
            self.log.info('Executing dag callback function: %s', callback)
            tis = dagrun.get_task_instances()
            ti = tis[-1]  # use the last TaskInstance of the DagRun to build the callback context
            ti.task = self.get_task(ti.task_id)
            context = ti.get_template_context(session=session)
            context.update({'reason': reason})
            try:
                callback(context)
            except Exception:
                # A failing user callback must not break scheduling; log and count it.
                self.log.exception("failed to invoke dag state update callback")
                Stats.incr("dag.callback_exceptions")
def get_active_runs(self):
"""
Returns a list of dag run execution dates currently running
:return: List of execution dates
"""
runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)
active_dates = []
for run in runs:
active_dates.append(run.execution_date)
return active_dates
    @provide_session
    def get_num_active_runs(self, external_trigger=None, session=None):
        """
        Returns the number of active "running" dag runs
        :param external_trigger: True for externally triggered active dag runs
        :type external_trigger: bool
        :param session:
        :return: number greater than 0 for active dag runs
        """
        # .count() is inefficient
        # func.count() lets the DB return the scalar without loading DagRun rows.
        query = (
            session.query(func.count())
            .filter(DagRun.dag_id == self.dag_id)
            .filter(DagRun.state == State.RUNNING)
        )
        if external_trigger is not None:
            query = query.filter(DagRun.external_trigger == external_trigger)
        return query.scalar()
    @provide_session
    def get_dagrun(self, execution_date, session=None):
        """
        Returns the dag run for a given execution date if it exists, otherwise
        none.
        :param execution_date: The execution date of the DagRun to find.
        :type execution_date: datetime.datetime
        :param session:
        :return: The DagRun if found, otherwise None.
        """
        dagrun = (
            session.query(DagRun)
            .filter(DagRun.dag_id == self.dag_id, DagRun.execution_date == execution_date)
            .first()
        )
        return dagrun
    @provide_session
    def get_dagruns_between(self, start_date, end_date, session=None):
        """
        Returns the list of dag runs between start_date (inclusive) and end_date (inclusive).
        :param start_date: The starting execution date of the DagRun to find.
        :type start_date: datetime.datetime
        :param end_date: The ending execution date of the DagRun to find.
        :type end_date: datetime.datetime
        :param session:
        :return: The list of DagRuns found.
        """
        dagruns = (
            session.query(DagRun)
            .filter(
                DagRun.dag_id == self.dag_id,
                DagRun.execution_date >= start_date,
                DagRun.execution_date <= end_date,
            )
            .all()
        )
        return dagruns
    @provide_session
    def get_latest_execution_date(self, session=None):
        """Returns the latest date for which at least one dag run exists"""
        # SQL MAX over no rows yields NULL, so this is None for a never-run DAG.
        return session.query(func.max(DagRun.execution_date)).filter(DagRun.dag_id == self.dag_id).scalar()
    @property
    def latest_execution_date(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_latest_execution_date()
    @property
    def subdags(self):
        """Returns a list of the subdag objects associated to this DAG"""
        # Check SubDag for class but don't check class directly
        from airflow.operators.subdag import SubDagOperator
        subdag_lst = []
        for task in self.tasks:
            if (
                isinstance(task, SubDagOperator)
                or
                # TODO remove in Airflow 2.0
                type(task).__name__ == 'SubDagOperator'
                or task.task_type == 'SubDagOperator'
            ):
                subdag_lst.append(task.subdag)
                # Recurse so nested subdags are included too.
                subdag_lst += task.subdag.subdags
        return subdag_lst
def resolve_template_files(self):
for t in self.tasks:
t.resolve_template_files()
    def get_template_env(self) -> jinja2.Environment:
        """Build a Jinja2 environment."""
        # Collect directories to search for template files
        searchpath = [self.folder]
        if self.template_searchpath:
            searchpath += self.template_searchpath
        # Default values (for backward compatibility)
        jinja_env_options = {
            'loader': jinja2.FileSystemLoader(searchpath),
            'undefined': self.template_undefined,
            'extensions': ["jinja2.ext.do"],
            # cache_size=0 disables Jinja's template cache so edited template
            # files are always re-read from disk.
            'cache_size': 0,
        }
        if self.jinja_environment_kwargs:
            jinja_env_options.update(self.jinja_environment_kwargs)
        env = jinja2.Environment(**jinja_env_options)  # type: ignore
        # Add any user defined items. Safe to edit globals as long as no templates are rendered yet.
        # http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment.globals
        if self.user_defined_macros:
            env.globals.update(self.user_defined_macros)
        if self.user_defined_filters:
            env.filters.update(self.user_defined_filters)
        return env
def set_dependency(self, upstream_task_id, downstream_task_id):
"""
Simple utility method to set dependency between two tasks that
already have been added to the DAG using add_task()
"""
self.get_task(upstream_task_id).set_downstream(self.get_task(downstream_task_id))
    @provide_session
    def get_task_instances(self, start_date=None, end_date=None, state=None, session=None):
        """Return this DAG's TaskInstances, filtered by execution-date window and (optionally) state."""
        if not start_date:
            # Default window: the last 30 days, anchored at midnight (aware).
            start_date = (timezone.utcnow() - timedelta(30)).date()
            start_date = timezone.make_aware(datetime.combine(start_date, datetime.min.time()))
        tis = session.query(TaskInstance).filter(
            TaskInstance.dag_id == self.dag_id,
            TaskInstance.execution_date >= start_date,
            TaskInstance.task_id.in_([t.task_id for t in self.tasks]),
        )
        # This allows allow_trigger_in_future config to take affect, rather than mandating exec_date <= UTC
        if end_date or not self.allow_future_exec_dates:
            end_date = end_date or timezone.utcnow()
            tis = tis.filter(TaskInstance.execution_date <= end_date)
        if state:
            if isinstance(state, str):
                tis = tis.filter(TaskInstance.state == state)
            else:
                # this is required to deal with NULL values
                if None in state:
                    if all(x is None for x in state):
                        tis = tis.filter(TaskInstance.state.is_(None))
                    else:
                        not_none_state = [s for s in state if s]
                        tis = tis.filter(
                            or_(TaskInstance.state.in_(not_none_state), TaskInstance.state.is_(None))
                        )
                else:
                    tis = tis.filter(TaskInstance.state.in_(state))
        tis = tis.order_by(TaskInstance.execution_date).all()
        return tis
@property
def roots(self) -> List[BaseOperator]:
"""Return nodes with no parents. These are first to execute and are called roots or root nodes."""
return [task for task in self.tasks if not task.upstream_list]
@property
def leaves(self) -> List[BaseOperator]:
"""Return nodes with no children. These are last to execute and are called leaves or leaf nodes."""
return [task for task in self.tasks if not task.downstream_list]
    def topological_sort(self, include_subdag_tasks: bool = False):
        """
        Sorts tasks in topographical order, such that a task comes after any of its
        upstream dependencies.
        Heavily inspired by:
        http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/
        :param include_subdag_tasks: whether to include tasks in subdags, default to False
        :return: list of tasks in topological order
        """
        from airflow.operators.subdag import SubDagOperator  # Avoid circular import
        # convert into an OrderedDict to speedup lookup while keeping order the same
        graph_unsorted = OrderedDict((task.task_id, task) for task in self.tasks)
        graph_sorted = []  # type: List[BaseOperator]
        # special case
        if len(self.tasks) == 0:
            return tuple(graph_sorted)
        # Run until the unsorted graph is empty.
        while graph_unsorted:
            # Go through each of the node/edges pairs in the unsorted
            # graph. If a set of edges doesn't contain any nodes that
            # haven't been resolved, that is, that are still in the
            # unsorted graph, remove the pair from the unsorted graph,
            # and append it to the sorted graph. Note here that by
            # iterating over a copy of the unsorted graph's values, we
            # can safely modify the unsorted graph as we move through
            # it. We also keep a flag for
            # checking that that graph is acyclic, which is true if any
            # nodes are resolved during each pass through the graph. If
            # not, we need to exit as the graph therefore can't be
            # sorted.
            acyclic = False
            for node in list(graph_unsorted.values()):
                for edge in node.upstream_list:
                    if edge.task_id in graph_unsorted:
                        break
                # no edges in upstream tasks
                else:
                    acyclic = True
                    del graph_unsorted[node.task_id]
                    graph_sorted.append(node)
                    if include_subdag_tasks and isinstance(node, SubDagOperator):
                        graph_sorted.extend(node.subdag.topological_sort(include_subdag_tasks=True))
            if not acyclic:
                raise AirflowException(f"A cyclic dependency occurred in dag: {self.dag_id}")
        return tuple(graph_sorted)
    @provide_session
    def set_dag_runs_state(
        self,
        state: str = State.RUNNING,
        session: Session = None,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
    ) -> None:
        """Bulk-update the state of this DAG's DagRuns, optionally bounded by execution date."""
        query = session.query(DagRun).filter_by(dag_id=self.dag_id)
        if start_date:
            query = query.filter(DagRun.execution_date >= start_date)
        if end_date:
            query = query.filter(DagRun.execution_date <= end_date)
        query.update({DagRun.state: state})
    @provide_session
    def clear(
        self,
        start_date=None,
        end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        include_parentdag=True,
        dag_run_state: str = State.RUNNING,
        dry_run=False,
        session=None,
        get_tis=False,
        recursion_depth=0,
        max_recursion_depth=None,
        dag_bag=None,
        visited_external_tis=None,
    ):
        """
        Clears a set of task instances associated with the current dag for
        a specified date range.
        :param start_date: The minimum execution_date to clear
        :type start_date: datetime.datetime or None
        :param end_date: The maximum execution_date to clear
        :type end_date: datetime.datetime or None
        :param only_failed: Only clear failed tasks
        :type only_failed: bool
        :param only_running: Only clear running tasks.
        :type only_running: bool
        :param confirm_prompt: Ask for confirmation
        :type confirm_prompt: bool
        :param include_subdags: Clear tasks in subdags and clear external tasks
            indicated by ExternalTaskMarker
        :type include_subdags: bool
        :param include_parentdag: Clear tasks in the parent dag of the subdag.
        :type include_parentdag: bool
        :param dag_run_state: state to set DagRun to
        :param dry_run: Find the tasks to clear but don't clear them.
        :type dry_run: bool
        :param session: The sqlalchemy session to use
        :type session: sqlalchemy.orm.session.Session
        :param get_tis: Return the sqlalchemy query for finding the TaskInstance without clearing the tasks
        :type get_tis: bool
        :param recursion_depth: The recursion depth of nested calls to DAG.clear().
        :type recursion_depth: int
        :param max_recursion_depth: The maximum recursion depth allowed. This is determined by the
            first encountered ExternalTaskMarker. Default is None indicating no ExternalTaskMarker
            has been encountered.
        :type max_recursion_depth: int
        :param dag_bag: The DagBag used to find the dags
        :type dag_bag: airflow.models.dagbag.DagBag
        :param visited_external_tis: A set used internally to keep track of the visited TaskInstance when
            clearing tasks across multiple DAGs linked by ExternalTaskMarker to avoid redundant work.
        :type visited_external_tis: set
        """
        TI = TaskInstance
        tis = session.query(TI)
        if include_subdags:
            # Crafting the right filter for dag_id and task_ids combo
            conditions = []
            for dag in self.subdags + [self]:
                conditions.append((TI.dag_id == dag.dag_id) & TI.task_id.in_(dag.task_ids))
            tis = tis.filter(or_(*conditions))
        else:
            tis = session.query(TI).filter(TI.dag_id == self.dag_id)
            tis = tis.filter(TI.task_id.in_(self.task_ids))
        if include_parentdag and self.is_subdag and self.parent_dag is not None:
            # Also clear the parent dag's section rooted at this subdag's
            # operator (downstream only), collecting its TIs via get_tis=True.
            p_dag = self.parent_dag.sub_dag(
                task_ids_or_regex=r"^{}$".format(self.dag_id.split('.')[1]),
                include_upstream=False,
                include_downstream=True,
            )
            tis = tis.union(
                p_dag.clear(
                    start_date=start_date,
                    end_date=end_date,
                    only_failed=only_failed,
                    only_running=only_running,
                    confirm_prompt=confirm_prompt,
                    include_subdags=include_subdags,
                    include_parentdag=False,
                    dag_run_state=dag_run_state,
                    get_tis=True,
                    session=session,
                    recursion_depth=recursion_depth,
                    max_recursion_depth=max_recursion_depth,
                    dag_bag=dag_bag,
                    visited_external_tis=visited_external_tis,
                )
            )
        if start_date:
            tis = tis.filter(TI.execution_date >= start_date)
        if end_date:
            tis = tis.filter(TI.execution_date <= end_date)
        if only_failed:
            tis = tis.filter(or_(TI.state == State.FAILED, TI.state == State.UPSTREAM_FAILED))
        if only_running:
            tis = tis.filter(TI.state == State.RUNNING)
        if include_subdags:
            from airflow.sensors.external_task import ExternalTaskMarker
            # Recursively find external tasks indicated by ExternalTaskMarker
            instances = tis.all()
            for ti in instances:
                if ti.operator == ExternalTaskMarker.__name__:
                    if visited_external_tis is None:
                        visited_external_tis = set()
                    ti_key = ti.key.primary
                    if ti_key not in visited_external_tis:
                        # Only clear this ExternalTaskMarker if it's not already visited by the
                        # recursive calls to dag.clear().
                        task: ExternalTaskMarker = cast(
                            ExternalTaskMarker, copy.copy(self.get_task(ti.task_id))
                        )
                        ti.task = task
                        if recursion_depth == 0:
                            # Maximum recursion depth allowed is the recursion_depth of the first
                            # ExternalTaskMarker in the tasks to be cleared.
                            max_recursion_depth = task.recursion_depth
                        if recursion_depth + 1 > max_recursion_depth:
                            # Prevent cycles or accidents.
                            raise AirflowException(
                                "Maximum recursion depth {} reached for {} {}. "
                                "Attempted to clear too many tasks "
                                "or there may be a cyclic dependency.".format(
                                    max_recursion_depth, ExternalTaskMarker.__name__, ti.task_id
                                )
                            )
                        ti.render_templates()
                        external_tis = session.query(TI).filter(
                            TI.dag_id == task.external_dag_id,
                            TI.task_id == task.external_task_id,
                            TI.execution_date == pendulum.parse(task.execution_date),
                        )
                        for tii in external_tis:
                            if not dag_bag:
                                dag_bag = DagBag(read_dags_from_db=True)
                            external_dag = dag_bag.get_dag(tii.dag_id)
                            if not external_dag:
                                raise AirflowException(f"Could not find dag {tii.dag_id}")
                            # Clear everything downstream of the marked task in
                            # the external dag, recursing one level deeper.
                            downstream = external_dag.sub_dag(
                                task_ids_or_regex=fr"^{tii.task_id}$",
                                include_upstream=False,
                                include_downstream=True,
                            )
                            tis = tis.union(
                                downstream.clear(
                                    start_date=tii.execution_date,
                                    end_date=tii.execution_date,
                                    only_failed=only_failed,
                                    only_running=only_running,
                                    confirm_prompt=confirm_prompt,
                                    include_subdags=include_subdags,
                                    include_parentdag=False,
                                    dag_run_state=dag_run_state,
                                    get_tis=True,
                                    session=session,
                                    recursion_depth=recursion_depth + 1,
                                    max_recursion_depth=max_recursion_depth,
                                    dag_bag=dag_bag,
                                    visited_external_tis=visited_external_tis,
                                )
                            )
                        visited_external_tis.add(ti_key)
        if get_tis:
            # Internal/recursive mode: hand back the query for union-ing.
            return tis
        tis = tis.all()
        if dry_run:
            session.expunge_all()
            return tis
        # Do not use count() here, it's actually much slower than just retrieving all the rows when
        # tis has multiple UNION statements.
        count = len(tis)
        do_it = True
        if count == 0:
            return 0
        if confirm_prompt:
            ti_list = "\n".join([str(t) for t in tis])
            question = (
                "You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? (yes/no): "
            ).format(count=count, ti_list=ti_list)
            do_it = utils.helpers.ask_yesno(question)
        if do_it:
            clear_task_instances(
                tis,
                session,
                dag=self,
                activate_dag_runs=False,  # We will set DagRun state later.
            )
            self.set_dag_runs_state(
                session=session,
                start_date=start_date,
                end_date=end_date,
                state=dag_run_state,
            )
        else:
            count = 0
            print("Cancelled, nothing was cleared.")
        session.commit()
        return count
    @classmethod
    def clear_dags(
        cls,
        dags,
        start_date=None,
        end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        include_parentdag=False,
        dag_run_state=State.RUNNING,
        dry_run=False,
    ):
        """
        Clear task instances across several DAGs in one pass.
        First computes, via per-dag dry runs, the full set of task instances
        that would be affected; returns that set when *dry_run* is True,
        otherwise optionally asks for one combined confirmation and then
        performs the clear on each dag. Returns the number of task instances
        cleared (0 when nothing matched or the prompt was declined).
        """
        all_tis = []
        for dag in dags:
            tis = dag.clear(
                start_date=start_date,
                end_date=end_date,
                only_failed=only_failed,
                only_running=only_running,
                confirm_prompt=False,
                include_subdags=include_subdags,
                include_parentdag=include_parentdag,
                dag_run_state=dag_run_state,
                dry_run=True,
            )
            all_tis.extend(tis)
        if dry_run:
            return all_tis
        count = len(all_tis)
        do_it = True
        if count == 0:
            print("Nothing to clear.")
            return 0
        if confirm_prompt:
            ti_list = "\n".join([str(t) for t in all_tis])
            question = f"You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? (yes/no): "
            do_it = utils.helpers.ask_yesno(question)
        if do_it:
            for dag in dags:
                # Real pass: confirmation already handled above.
                dag.clear(
                    start_date=start_date,
                    end_date=end_date,
                    only_failed=only_failed,
                    only_running=only_running,
                    confirm_prompt=False,
                    include_subdags=include_subdags,
                    dag_run_state=dag_run_state,
                    dry_run=False,
                )
        else:
            count = 0
            print("Cancelled, nothing was cleared.")
        return count
    def __deepcopy__(self, memo):
        # Switcharoo to get around deepcopying objects coming through the
        # backdoor: user-supplied macros/filters/params and the logger are
        # carried over by reference instead of being deep-copied.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('user_defined_macros', 'user_defined_filters', 'params', '_log'):
                setattr(result, k, copy.deepcopy(v, memo))
        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        result.params = self.params
        if hasattr(self, '_log'):
            result._log = self._log
        return result
def sub_dag(self, *args, **kwargs):
"""This method is deprecated in favor of partial_subset"""
warnings.warn(
"This method is deprecated and will be removed in a future version. Please use partial_subset",
DeprecationWarning,
stacklevel=2,
)
return self.partial_subset(*args, **kwargs)
    def partial_subset(
        self,
        task_ids_or_regex: Union[str, PatternType, Iterable[str]],
        include_downstream=False,
        include_upstream=True,
        include_direct_upstream=False,
    ):
        """
        Returns a subset of the current dag as a deep copy of the current dag
        based on a regex that should match one or many tasks, and includes
        upstream and downstream neighbours based on the flag passed.
        :param task_ids_or_regex: Either a list of task_ids, or a regex to
            match against task ids (as a string, or compiled regex pattern).
        :type task_ids_or_regex: [str] or str or re.Pattern
        :param include_downstream: Include all downstream tasks of matched
            tasks, in addition to matched tasks.
        :param include_upstream: Include all upstream tasks of matched tasks,
            in addition to matched tasks.
        :param include_direct_upstream: Include tasks directly upstream of matched
            tasks (only consulted when ``include_upstream`` is False).
        """
        # deep-copying self.task_dict and self._task_group takes a long time, and we don't want all
        # the tasks anyway, so we copy the tasks manually later
        task_dict = self.task_dict
        task_group = self._task_group
        self.task_dict = {}
        self._task_group = None  # type: ignore
        dag = copy.deepcopy(self)
        # Restore the originals on self now that the cheap deepcopy is done.
        self.task_dict = task_dict
        self._task_group = task_group
        if isinstance(task_ids_or_regex, (str, PatternType)):
            matched_tasks = [t for t in self.tasks if re.findall(task_ids_or_regex, t.task_id)]
        else:
            matched_tasks = [t for t in self.tasks if t.task_id in task_ids_or_regex]
        also_include = []
        for t in matched_tasks:
            if include_downstream:
                also_include += t.get_flat_relatives(upstream=False)
            if include_upstream:
                also_include += t.get_flat_relatives(upstream=True)
            elif include_direct_upstream:
                also_include += t.upstream_list
        # Compiling the unique list of tasks that made the cut
        # Make sure to not recursively deepcopy the dag while copying the task
        dag.task_dict = {
            t.task_id: copy.deepcopy(t, {id(t.dag): dag})  # type: ignore
            for t in matched_tasks + also_include
        }
        def filter_task_group(group, parent_group):
            """Exclude tasks not included in the subdag from the given TaskGroup."""
            copied = copy.copy(group)
            copied.used_group_ids = set(copied.used_group_ids)
            copied._parent_group = parent_group
            copied.children = {}
            for child in group.children.values():
                if isinstance(child, BaseOperator):
                    if child.task_id in dag.task_dict:
                        copied.children[child.task_id] = dag.task_dict[child.task_id]
                else:
                    filtered_child = filter_task_group(child, copied)
                    # Only include this child TaskGroup if it is non-empty.
                    if filtered_child.children:
                        copied.children[child.group_id] = filtered_child
            return copied
        dag._task_group = filter_task_group(self._task_group, None)
        # Removing upstream/downstream references to tasks and TaskGroups that did not make
        # the cut.
        subdag_task_groups = dag.task_group.get_task_group_dict()
        for group in subdag_task_groups.values():
            group.upstream_group_ids = group.upstream_group_ids.intersection(subdag_task_groups.keys())
            group.downstream_group_ids = group.downstream_group_ids.intersection(subdag_task_groups.keys())
            group.upstream_task_ids = group.upstream_task_ids.intersection(dag.task_dict.keys())
            group.downstream_task_ids = group.downstream_task_ids.intersection(dag.task_dict.keys())
        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # make the cut
            t._upstream_task_ids = t.upstream_task_ids.intersection(dag.task_dict.keys())
            t._downstream_task_ids = t.downstream_task_ids.intersection(dag.task_dict.keys())
        if len(dag.tasks) < len(self.tasks):
            # Mark the copy as partial so consumers know tasks were dropped.
            dag.partial = True
        return dag
def has_task(self, task_id: str):
return task_id in (t.task_id for t in self.tasks)
def get_task(self, task_id: str, include_subdags: bool = False) -> BaseOperator:
if task_id in self.task_dict:
return self.task_dict[task_id]
if include_subdags:
for dag in self.subdags:
if task_id in dag.task_dict:
return dag.task_dict[task_id]
raise TaskNotFound(f"Task {task_id} not found")
    def pickle_info(self):
        """Check whether this DAG can be pickled; return size/duration details or failure info."""
        d = {}
        d['is_picklable'] = True
        try:
            dttm = timezone.utcnow()
            pickled = pickle.dumps(self)
            d['pickle_len'] = len(pickled)
            d['pickling_duration'] = str(timezone.utcnow() - dttm)
        except Exception as e:
            # Pickling failures are reported in the result, not raised.
            self.log.debug(e)
            d['is_picklable'] = False
            d['stacktrace'] = traceback.format_exc()
        return d
    @provide_session
    def pickle(self, session=None) -> DagPickle:
        """Persist a pickled copy of this DAG, reusing the existing DagPickle row when unchanged."""
        dag = session.query(DagModel).filter(DagModel.dag_id == self.dag_id).first()
        dp = None
        if dag and dag.pickle_id:
            dp = session.query(DagPickle).filter(DagPickle.id == dag.pickle_id).first()
        if not dp or dp.pickle != self:
            # No usable pickle yet, or the DAG changed: write a fresh row.
            dp = DagPickle(dag=self)
            session.add(dp)
            self.last_pickled = timezone.utcnow()
            session.commit()
            self.pickle_id = dp.id
        return dp
def tree_view(self) -> None:
"""Print an ASCII tree representation of the DAG."""
def get_downstream(task, level=0):
print((" " * level * 4) + str(task))
level += 1
for t in task.downstream_list:
get_downstream(t, level)
for t in self.roots:
get_downstream(t)
    @property
    def task(self):
        """Return the ``task`` decorator from airflow.operators.python pre-bound to this DAG."""
        from airflow.operators.python import task
        return functools.partial(task, dag=self)
def add_task(self, task):
"""
Add a task to the DAG
:param task: the task you want to add
:type task: task
"""
if not self.start_date and not task.start_date:
raise AirflowException("Task is missing the start_date parameter")
# if the task has no start date, assign it the same as the DAG
elif not task.start_date:
task.start_date = self.start_date
# otherwise, the task will start on the later of its own start date and
# the DAG's start date
elif self.start_date:
task.start_date = max(task.start_date, self.start_date)
# if the task has no end date, assign it the same as the dag
if not task.end_date:
task.end_date = self.end_date
# otherwise, the task will end on the earlier of its own end date and
# the DAG's end date
elif task.end_date and self.end_date:
task.end_date = min(task.end_date, self.end_date)
if (
task.task_id in self.task_dict and self.task_dict[task.task_id] is not task
) or task.task_id in self._task_group.used_group_ids:
raise DuplicateTaskIdFound(f"Task id '{task.task_id}' has already been added to the DAG")
else:
self.task_dict[task.task_id] = task
task.dag = self
# Add task_id to used_group_ids to prevent group_id and task_id collisions.
self._task_group.used_group_ids.add(task.task_id)
self.task_count = len(self.task_dict)
    def add_tasks(self, tasks):
        """
        Add a list of tasks to the DAG
        :param tasks: a list of tasks you want to add
        :type tasks: list of tasks
        """
        for task in tasks:
            self.add_task(task)
    def run(
        self,
        start_date=None,
        end_date=None,
        mark_success=False,
        local=False,
        executor=None,
        donot_pickle=conf.getboolean('core', 'donot_pickle'),
        ignore_task_deps=False,
        ignore_first_depends_on_past=True,
        pool=None,
        delay_on_limit_secs=1.0,
        verbose=False,
        conf=None,
        rerun_failed_tasks=False,
        run_backwards=False,
    ):
        """
        Runs the DAG.
        :param start_date: the start date of the range to run
        :type start_date: datetime.datetime
        :param end_date: the end date of the range to run
        :type end_date: datetime.datetime
        :param mark_success: True to mark jobs as succeeded without running them
        :type mark_success: bool
        :param local: True to run the tasks using the LocalExecutor
        :type local: bool
        :param executor: The executor instance to run the tasks
        :type executor: airflow.executor.base_executor.BaseExecutor
        :param donot_pickle: True to avoid pickling DAG object and send to workers
        :type donot_pickle: bool
        :param ignore_task_deps: True to skip upstream tasks
        :type ignore_task_deps: bool
        :param ignore_first_depends_on_past: True to ignore depends_on_past
            dependencies for the first set of tasks only
        :type ignore_first_depends_on_past: bool
        :param pool: Resource pool to use
        :type pool: str
        :param delay_on_limit_secs: Time in seconds to wait before next attempt to run
            dag run when max_active_runs limit has been reached
        :type delay_on_limit_secs: float
        :param verbose: Make logging output more verbose
        :type verbose: bool
        :param conf: user defined dictionary passed from CLI
        :type conf: dict
        :param rerun_failed_tasks:
        :type: bool
        :param run_backwards:
        :type: bool
        """
        from airflow.jobs.backfill_job import BackfillJob
        # Pick an executor: explicit argument > LocalExecutor when local=True
        # > the configured default executor.
        if not executor and local:
            from airflow.executors.local_executor import LocalExecutor
            executor = LocalExecutor()
        elif not executor:
            from airflow.executors.executor_loader import ExecutorLoader
            executor = ExecutorLoader.get_default_executor()
        # Delegate the actual work to a BackfillJob over the requested range.
        job = BackfillJob(
            self,
            start_date=start_date,
            end_date=end_date,
            mark_success=mark_success,
            executor=executor,
            donot_pickle=donot_pickle,
            ignore_task_deps=ignore_task_deps,
            ignore_first_depends_on_past=ignore_first_depends_on_past,
            pool=pool,
            delay_on_limit_secs=delay_on_limit_secs,
            verbose=verbose,
            conf=conf,
            rerun_failed_tasks=rerun_failed_tasks,
            run_backwards=run_backwards,
        )
        job.run()
def cli(self):
"""Exposes a CLI specific to this DAG"""
from airflow.cli import cli_parser
parser = cli_parser.get_parser(dag_parser=True)
args = parser.parse_args()
args.func(args, self)
    @provide_session
    def create_dagrun(
        self,
        state: State,
        execution_date: Optional[datetime] = None,
        run_id: Optional[str] = None,
        start_date: Optional[datetime] = None,
        external_trigger: Optional[bool] = False,
        conf: Optional[dict] = None,
        run_type: Optional[DagRunType] = None,
        session=None,
        dag_hash: Optional[str] = None,
        creating_job_id: Optional[int] = None,
    ):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        Either ``run_id`` must be given, or both ``run_type`` and ``execution_date``
        (in which case the run id is derived from them).

        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param run_type: type of DagRun
        :type run_type: airflow.utils.types.DagRunType
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: airflow.utils.state.State
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param conf: Dict containing configuration/parameters to pass to the DAG
        :type conf: dict
        :param creating_job_id: id of the job creating this DagRun
        :type creating_job_id: int
        :param session: database session
        :type session: sqlalchemy.orm.session.Session
        :param dag_hash: Hash of Serialized DAG
        :type dag_hash: str
        :raises ValueError: if ``run_id`` / ``run_type`` have the wrong type
        :raises AirflowException: if neither ``run_id`` nor (``run_type`` and
            ``execution_date``) were supplied
        """
        # Derive run_type from run_id, or run_id from (run_type, execution_date).
        if run_id and not run_type:
            if not isinstance(run_id, str):
                raise ValueError(f"`run_id` expected to be a str is {type(run_id)}")
            run_type: DagRunType = DagRunType.from_run_id(run_id)
        elif run_type and execution_date:
            if not isinstance(run_type, DagRunType):
                raise ValueError(f"`run_type` expected to be a DagRunType is {type(run_type)}")
            run_id = DagRun.generate_run_id(run_type, execution_date)
        elif not run_id:
            raise AirflowException(
                "Creating DagRun needs either `run_id` or both `run_type` and `execution_date`"
            )
        run = DagRun(
            dag_id=self.dag_id,
            run_id=run_id,
            execution_date=execution_date,
            start_date=start_date,
            external_trigger=external_trigger,
            conf=conf,
            state=state,
            run_type=run_type,
            dag_hash=dag_hash,
            creating_job_id=creating_job_id,
        )
        session.add(run)
        # Flush so the DagRun gets its DB identity before task instances are created.
        session.flush()
        run.dag = self
        # create the associated task instances
        # state is None at the moment of creation
        run.verify_integrity(session=session)
        return run
@classmethod
@provide_session
def bulk_sync_to_db(cls, dags: Collection["DAG"], session=None):
"""This method is deprecated in favor of bulk_write_to_db"""
warnings.warn(
"This method is deprecated and will be removed in a future version. Please use bulk_write_to_db",
DeprecationWarning,
stacklevel=2,
)
return cls.bulk_write_to_db(dags, session)
@classmethod
@provide_session
def bulk_write_to_db(cls, dags: Collection["DAG"], session=None):
"""
Ensure the DagModel rows for the given dags are up-to-date in the dag table in the DB, including
calculated fields.
Note that this method can be called for both DAGs and SubDAGs. A SubDag is actually a SubDagOperator.
:param dags: the DAG objects to save to the DB
:type dags: List[airflow.models.dag.DAG]
:return: None
"""
if not dags:
return
log.info("Sync %s DAGs", len(dags))
dag_by_ids = {dag.dag_id: dag for dag in dags}
dag_ids = set(dag_by_ids.keys())
query = (
session.query(DagModel)
.options(joinedload(DagModel.tags, innerjoin=False))
.filter(DagModel.dag_id.in_(dag_ids))
)
orm_dags = with_row_locks(query, of=DagModel).all()
existing_dag_ids = {orm_dag.dag_id for orm_dag in orm_dags}
missing_dag_ids = dag_ids.difference(existing_dag_ids)
for missing_dag_id in missing_dag_ids:
orm_dag = DagModel(dag_id=missing_dag_id)
dag = dag_by_ids[missing_dag_id]
if dag.is_paused_upon_creation is not None:
orm_dag.is_paused = dag.is_paused_upon_creation
orm_dag.tags = []
log.info("Creating ORM DAG for %s", dag.dag_id)
session.add(orm_dag)
orm_dags.append(orm_dag)
# Get the latest dag run for each existing dag as a single query (avoid n+1 query)
most_recent_dag_runs = dict(
session.query(DagRun.dag_id, func.max_(DagRun.execution_date))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
or_(
DagRun.run_type == DagRunType.BACKFILL_JOB,
DagRun.run_type == DagRunType.SCHEDULED,
),
)
.group_by(DagRun.dag_id)
.all()
)
# Get number of active dagruns for all dags we are processing as a single query.
num_active_runs = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_(existing_dag_ids),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
dag = dag_by_ids[orm_dag.dag_id]
if dag.is_subdag:
orm_dag.is_subdag = True
orm_dag.fileloc = dag.parent_dag.fileloc # type: ignore
orm_dag.root_dag_id = dag.parent_dag.dag_id # type: ignore
orm_dag.owners = dag.parent_dag.owner # type: ignore
else:
orm_dag.is_subdag = False
orm_dag.fileloc = dag.fileloc
orm_dag.owners = dag.owner
orm_dag.is_active = True
orm_dag.default_view = dag.default_view
orm_dag.description = dag.description
orm_dag.schedule_interval = dag.schedule_interval
orm_dag.concurrency = dag.concurrency
orm_dag.has_task_concurrency_limits = any(t.task_concurrency is not None for t in dag.tasks)
orm_dag.calculate_dagrun_date_fields(
dag,
most_recent_dag_runs.get(dag.dag_id),
num_active_runs.get(dag.dag_id, 0),
)
for orm_tag in list(orm_dag.tags):
if orm_tag.name not in orm_dag.tags:
session.delete(orm_tag)
orm_dag.tags.remove(orm_tag)
if dag.tags:
orm_tag_names = [t.name for t in orm_dag.tags]
for dag_tag in list(dag.tags):
if dag_tag not in orm_tag_names:
dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
orm_dag.tags.append(dag_tag_orm)
session.add(dag_tag_orm)
if settings.STORE_DAG_CODE:
DagCode.bulk_sync_to_db([dag.fileloc for dag in orm_dags])
# Issue SQL/finish "Unit of Work", but let @provide_session commit (or if passed a session, let caller
# decide when to commit
session.flush()
for dag in dags:
cls.bulk_write_to_db(dag.subdags, session=session)
@provide_session
def sync_to_db(self, session=None):
"""
Save attributes about this DAG to the DB. Note that this method
can be called for both DAGs and SubDAGs. A SubDag is actually a
SubDagOperator.
:return: None
"""
self.bulk_write_to_db([self], session)
def get_default_view(self):
"""This is only there for backward compatible jinja2 templates"""
if self.default_view is None:
return conf.get('webserver', 'dag_default_view').lower()
else:
return self.default_view
@staticmethod
@provide_session
def deactivate_unknown_dags(active_dag_ids, session=None):
"""
Given a list of known DAGs, deactivate any other DAGs that are
marked as active in the ORM
:param active_dag_ids: list of DAG IDs that are active
:type active_dag_ids: list[unicode]
:return: None
"""
if len(active_dag_ids) == 0:
return
for dag in session.query(DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
dag.is_active = False
session.merge(dag)
session.commit()
@staticmethod
@provide_session
def deactivate_stale_dags(expiration_date, session=None):
"""
Deactivate any DAGs that were last touched by the scheduler before
the expiration date. These DAGs were likely deleted.
:param expiration_date: set inactive DAGs that were touched before this
time
:type expiration_date: datetime
:return: None
"""
for dag in (
session.query(DagModel)
.filter(DagModel.last_scheduler_run < expiration_date, DagModel.is_active)
.all()
):
log.info(
"Deactivating DAG ID %s since it was last touched by the scheduler at %s",
dag.dag_id,
dag.last_scheduler_run.isoformat(),
)
dag.is_active = False
session.merge(dag)
session.commit()
@staticmethod
@provide_session
def get_num_task_instances(dag_id, task_ids=None, states=None, session=None):
"""
Returns the number of task instances in the given DAG.
:param session: ORM session
:param dag_id: ID of the DAG to get the task concurrency of
:type dag_id: unicode
:param task_ids: A list of valid task IDs for the given DAG
:type task_ids: list[unicode]
:param states: A list of states to filter by if supplied
:type states: list[state]
:return: The number of running tasks
:rtype: int
"""
qry = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == dag_id,
)
if task_ids:
qry = qry.filter(
TaskInstance.task_id.in_(task_ids),
)
if states:
if None in states:
if all(x is None for x in states):
qry = qry.filter(TaskInstance.state.is_(None))
else:
not_none_states = [state for state in states if state]
qry = qry.filter(
or_(TaskInstance.state.in_(not_none_states), TaskInstance.state.is_(None))
)
else:
qry = qry.filter(TaskInstance.state.in_(states))
return qry.scalar()
@classmethod
def get_serialized_fields(cls):
"""Stringified DAGs and operators contain exactly these fields."""
if not cls.__serialized_fields:
cls.__serialized_fields = frozenset(vars(DAG(dag_id='test')).keys()) - {
'parent_dag',
'_old_context_manager_dags',
'safe_dag_id',
'last_loaded',
'_full_filepath',
'user_defined_filters',
'user_defined_macros',
'partial',
'_old_context_manager_dags',
'_pickle_id',
'_log',
'is_subdag',
'task_dict',
'template_searchpath',
'sla_miss_callback',
'on_success_callback',
'on_failure_callback',
'template_undefined',
'jinja_environment_kwargs',
# has_on_*_callback are only stored if the value is True, as the default is False
'has_on_success_callback',
'has_on_failure_callback',
}
return cls.__serialized_fields
class DagTag(Base):
    """A tag name per dag, to allow quick filtering in the DAG view."""

    __tablename__ = "dag_tag"
    # Composite primary key: a tag belongs to exactly one DAG, and a DAG
    # cannot carry the same tag name twice.
    name = Column(String(100), primary_key=True)
    dag_id = Column(String(ID_LEN), ForeignKey('dag.dag_id'), primary_key=True)

    def __repr__(self):
        # The tag text itself is the most useful representation.
        return self.name
class DagModel(Base):
    """Table containing DAG properties"""

    __tablename__ = "dag"
    """
    These items are stored in the database for state related information
    """
    dag_id = Column(String(ID_LEN), primary_key=True)
    root_dag_id = Column(String(ID_LEN))
    # A DAG can be paused from the UI / DB
    # Set this default value of is_paused based on a configuration value!
    is_paused_at_creation = conf.getboolean('core', 'dags_are_paused_at_creation')
    is_paused = Column(Boolean, default=is_paused_at_creation)
    # Whether the DAG is a subdag
    is_subdag = Column(Boolean, default=False)
    # Whether that DAG was seen on the last DagBag load
    is_active = Column(Boolean, default=False)
    # Last time the scheduler started
    last_scheduler_run = Column(UtcDateTime)
    # Last time this DAG was pickled
    last_pickled = Column(UtcDateTime)
    # Time when the DAG last received a refresh signal
    # (e.g. the DAG's "refresh" button was clicked in the web UI)
    last_expired = Column(UtcDateTime)
    # Whether (one of) the scheduler is scheduling this DAG at the moment
    scheduler_lock = Column(Boolean)
    # Foreign key to the latest pickle_id
    pickle_id = Column(Integer)
    # The location of the file containing the DAG object
    # Note: Do not depend on fileloc pointing to a file; in the case of a
    # packaged DAG, it will point to the subpath of the DAG within the
    # associated zip.
    fileloc = Column(String(2000))
    # String representing the owners
    owners = Column(String(2000))
    # Description of the dag
    description = Column(Text)
    # Default view of the inside the webserver
    default_view = Column(String(25))
    # Schedule interval
    schedule_interval = Column(Interval)
    # Tags for view filter
    tags = relationship('DagTag', cascade='all,delete-orphan', backref=backref('dag'))
    concurrency = Column(Integer, nullable=False)
    has_task_concurrency_limits = Column(Boolean, nullable=False)
    # The execution_date of the next dag run
    next_dagrun = Column(UtcDateTime)
    # Earliest time at which this ``next_dagrun`` can be created
    next_dagrun_create_after = Column(UtcDateTime)
    __table_args__ = (
        Index('idx_root_dag_id', root_dag_id, unique=False),
        Index('idx_next_dagrun_create_after', next_dagrun_create_after, unique=False),
    )
    # Cap on how many DagRuns one scheduler loop will create (see dags_needing_dagruns).
    NUM_DAGS_PER_DAGRUN_QUERY = conf.getint('scheduler', 'max_dagruns_to_create_per_loop', fallback=10)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fill config-driven defaults when the caller didn't supply them.
        if self.concurrency is None:
            self.concurrency = conf.getint('core', 'dag_concurrency')
        if self.has_task_concurrency_limits is None:
            # Be safe -- this will be updated later once the DAG is parsed
            self.has_task_concurrency_limits = True

    def __repr__(self):
        return f"<DAG: {self.dag_id}>"

    @property
    def timezone(self):
        # DagModel has no per-DAG timezone column; the process-wide setting is used.
        return settings.TIMEZONE

    @staticmethod
    @provide_session
    def get_dagmodel(dag_id, session=None):
        """Return the DagModel row for ``dag_id``, or None if absent."""
        return session.query(DagModel).filter(DagModel.dag_id == dag_id).first()

    @classmethod
    @provide_session
    def get_current(cls, dag_id, session=None):
        """Return the current DagModel row for ``dag_id``, or None if absent."""
        return session.query(cls).filter(cls.dag_id == dag_id).first()

    @provide_session
    def get_last_dagrun(self, session=None, include_externally_triggered=False):
        """Return the last DagRun for this DAG (see module-level ``get_last_dagrun``)."""
        return get_last_dagrun(
            self.dag_id, session=session, include_externally_triggered=include_externally_triggered
        )

    @staticmethod
    @provide_session
    def get_paused_dag_ids(dag_ids: List[str], session: Session = None) -> Set[str]:
        """
        Given a list of dag_ids, get a set of Paused Dag Ids

        :param dag_ids: List of Dag ids
        :param session: ORM Session
        :return: Paused Dag_ids
        """
        paused_dag_ids = (
            session.query(DagModel.dag_id)
            .filter(DagModel.is_paused.is_(True))
            .filter(DagModel.dag_id.in_(dag_ids))
            .all()
        )
        # Query rows are 1-tuples; unpack them into a plain set of ids.
        paused_dag_ids = {paused_dag_id for paused_dag_id, in paused_dag_ids}
        return paused_dag_ids

    def get_default_view(self) -> str:
        """
        Get the Default DAG View, returns the default config value if DagModel does not
        have a value
        """
        # This is for backwards-compatibility with old dags that don't have None as default_view
        return self.default_view or conf.get('webserver', 'dag_default_view').lower()

    @property
    def safe_dag_id(self):
        # Dots are not valid in some identifier contexts (e.g. metrics); escape them.
        return self.dag_id.replace('.', '__dot__')

    @provide_session
    def set_is_paused(self, is_paused: bool, including_subdags: bool = True, session=None) -> None:
        """
        Pause/Un-pause a DAG.

        :param is_paused: Is the DAG paused
        :param including_subdags: whether to include the DAG's subdags
        :param session: session
        """
        filter_query = [
            DagModel.dag_id == self.dag_id,
        ]
        if including_subdags:
            # Subdags reference their parent through root_dag_id.
            filter_query.append(DagModel.root_dag_id == self.dag_id)
        session.query(DagModel).filter(or_(*filter_query)).update(
            {DagModel.is_paused: is_paused}, synchronize_session='fetch'
        )
        session.commit()

    @classmethod
    @provide_session
    def deactivate_deleted_dags(cls, alive_dag_filelocs: List[str], session=None):
        """
        Set ``is_active=False`` on the DAGs for which the DAG files have been removed.
        Additionally change ``is_active=False`` to ``True`` if the DAG file exists.

        :param alive_dag_filelocs: file paths of alive DAGs
        :param session: ORM Session
        """
        log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__)
        dag_models = session.query(cls).all()
        try:
            for dag_model in dag_models:
                if dag_model.fileloc is not None:
                    if correct_maybe_zipped(dag_model.fileloc) not in alive_dag_filelocs:
                        dag_model.is_active = False
                    else:
                        # If is_active is set as False and the DAG File still exists
                        # Change is_active=True
                        if not dag_model.is_active:
                            dag_model.is_active = True
                else:
                    # Rows without a fileloc cannot be matched to a file; leave them untouched.
                    continue
            session.commit()
        except Exception:
            session.rollback()
            raise

    @classmethod
    def dags_needing_dagruns(cls, session: Session):
        """
        Return (and lock) a list of Dag objects that are due to create a new DagRun.

        This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query,
        you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
        transaction is committed it will be unlocked.
        """
        # TODO[HA]: Bake this query, it is run _A lot_
        # We limit so that _one_ scheduler doesn't try to do all the creation
        # of dag runs
        query = (
            session.query(cls)
            .filter(
                cls.is_paused.is_(False),
                cls.is_active.is_(True),
                cls.next_dagrun_create_after <= func.now(),
            )
            .order_by(cls.next_dagrun_create_after)
            .limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
        )
        # skip_locked lets other schedulers pass over rows this one has locked.
        return with_row_locks(query, of=cls, **skip_locked(session=session))

    def calculate_dagrun_date_fields(
        self, dag: DAG, most_recent_dag_run: Optional[pendulum.DateTime], active_runs_of_dag: int
    ) -> None:
        """
        Calculate ``next_dagrun`` and ``next_dagrun_create_after``

        :param dag: The DAG object
        :param most_recent_dag_run: DateTime of most recent run of this dag, or none if not yet scheduled.
        :param active_runs_of_dag: Number of currently active runs of this dag
        """
        self.next_dagrun, self.next_dagrun_create_after = dag.next_dagrun_info(most_recent_dag_run)
        if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
            # Since this happens every time the dag is parsed it would be quite spammy at info
            log.debug(
                "DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
                dag.dag_id,
                active_runs_of_dag,
                dag.max_active_runs,
            )
            # Blocks run creation until the count drops below the limit again.
            self.next_dagrun_create_after = None
        log.info("Setting next_dagrun for %s to %s", dag.dag_id, self.next_dagrun)
def dag(*dag_args, **dag_kwargs):
    """
    Python dag decorator. Wraps a function into an Airflow DAG.
    Accepts kwargs for operator kwarg. Can be used to parametrize DAGs.

    :param dag_args: Arguments for DAG object
    :type dag_args: list
    :param dag_kwargs: Kwargs for DAG object.
    :type dag_kwargs: dict
    """

    def wrapper(f: Callable):
        # Get dag initializer signature and bind it to validate that dag_args, and dag_kwargs are correct
        dag_sig = signature(DAG.__init__)
        dag_bound_args = dag_sig.bind_partial(*dag_args, **dag_kwargs)

        @functools.wraps(f)
        def factory(*args, **kwargs):
            # Generate signature for decorated function and bind the arguments when called
            # we do this to extract parameters so we can annotate them on the DAG object.
            # In addition, this fails if we are missing any args/kwargs with TypeError as expected.
            f_sig = signature(f).bind(*args, **kwargs)
            # Apply defaults to capture default values if set.
            f_sig.apply_defaults()
            # Set function name as dag_id if not set
            dag_id = dag_bound_args.arguments.get('dag_id', f.__name__)
            dag_bound_args.arguments['dag_id'] = dag_id
            # Initialize DAG with bound arguments
            with DAG(*dag_bound_args.args, **dag_bound_args.kwargs) as dag_obj:
                # Set DAG documentation from function documentation.
                if f.__doc__:
                    dag_obj.doc_md = f.__doc__
                # Generate DAGParam for each function arg/kwarg and replace it for calling the function.
                # All args/kwargs for function will be DAGParam object and replaced on execution time.
                f_kwargs = {}
                for name, value in f_sig.arguments.items():
                    f_kwargs[name] = dag_obj.param(name, value)
                # Invoke function to create operators in the DAG scope.
                f(**f_kwargs)
            # Return dag object such that it's accessible in Globals.
            return dag_obj

        return factory

    return wrapper
# 'STATICA_HACK': static analyzers see STATICA_HACK = True (so the imports and
# relationship below look reachable), while at runtime the next line rebinds
# STATICA_HACK to False via globals() — 'kcah_acitats'[::-1].upper() evaluates
# to the string 'STATICA_HACK' — so the block is never executed.
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
    # Let pylint know about these relationships, without introducing an import cycle
    from sqlalchemy.orm import relationship

    from airflow.models.serialized_dag import SerializedDagModel

    DagModel.serialized_dag = relationship(SerializedDagModel)
class DagContext:
    """
    Tracks which DAG is currently "in scope" when ``DAG`` is used as a context manager.

    .. code-block:: python

        with DAG(
            dag_id='example_dag',
            default_args=default_args,
            schedule_interval='0 0 * * *',
            dagrun_timeout=timedelta(minutes=60)
        ) as dag:

    While the ``with`` block is active, newly created tasks pick up the stored DAG
    as their parent. Nested ``with DAG(...)`` blocks are supported via a stack.
    """

    _context_managed_dag: Optional[DAG] = None
    _previous_context_managed_dags: List[DAG] = []

    @classmethod
    def push_context_managed_dag(cls, dag: DAG):
        """Make ``dag`` current, stashing any previously current DAG."""
        current = cls._context_managed_dag
        if current:
            cls._previous_context_managed_dags.append(current)
        cls._context_managed_dag = dag

    @classmethod
    def pop_context_managed_dag(cls) -> Optional[DAG]:
        """Drop the current DAG and restore the one stashed before it (if any)."""
        popped = cls._context_managed_dag
        stack = cls._previous_context_managed_dags
        cls._context_managed_dag = stack.pop() if stack else None
        return popped

    @classmethod
    def get_current_dag(cls) -> Optional[DAG]:
        """Return the DAG currently in scope, or None outside any ``with DAG(...)``."""
        return cls._context_managed_dag
| 39.627731
| 110
| 0.623099
|
4a0f61523b834f076426ed3f4d5c7ea721e09cfc
| 759
|
py
|
Python
|
lib/generator/superfaker.py
|
vikkio88/pyDsManager
|
018e08f7db0852f4653c4da6db851551783584a1
|
[
"MIT"
] | null | null | null |
lib/generator/superfaker.py
|
vikkio88/pyDsManager
|
018e08f7db0852f4653c4da6db851551783584a1
|
[
"MIT"
] | null | null | null |
lib/generator/superfaker.py
|
vikkio88/pyDsManager
|
018e08f7db0852f4653c4da6db851551783584a1
|
[
"MIT"
] | null | null | null |
from .providers import providers
from lib.config.roles import roles
from .providers.football import football
import random
class SuperFaker(object):
    """Generates random, locale-aware fake data (names, teams, ages) for the game."""

    providers = None
    locale = 'it_IT'

    def __init__(self, locale='it_IT'):
        self.providers = providers
        self.locale = locale

    def _pool(self, key):
        # Value list for the configured locale.
        return self.providers[self.locale][key]

    def name(self):
        """Return a random first name."""
        return random.choice(self._pool('names'))

    def surname(self):
        """Return a random surname."""
        return random.choice(self._pool('surnames'))

    def age(self, mn=16, mx=38):
        """Return a random integer age between ``mn`` and ``mx`` (both inclusive)."""
        return random.randint(mn, mx)

    def player_role(self):
        """Return the name of a randomly chosen player role."""
        return random.choice(roles)['name']

    def team_name(self):
        """Return a random team name: '<city> <club>'."""
        city = random.choice(self._pool('cities'))
        club = random.choice(football['clubs'])
        return city + " " + club
| 26.172414
| 108
| 0.667984
|
4a0f61e2f1da707ceff520cceeefa5a457589d86
| 67
|
py
|
Python
|
tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | 5,249
|
2019-06-17T17:20:34.000Z
|
2022-03-31T17:56:05.000Z
|
tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | 1,721
|
2019-06-17T18:13:29.000Z
|
2022-03-31T16:09:53.000Z
|
tools/onnx-graphsurgeon/onnx_graphsurgeon/importers/__init__.py
|
martellz/TensorRT
|
f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4
|
[
"Apache-2.0"
] | 1,414
|
2019-06-18T04:01:17.000Z
|
2022-03-31T09:16:53.000Z
|
from onnx_graphsurgeon.importers.base_importer import BaseImporter
| 33.5
| 66
| 0.910448
|
4a0f63911fece0c068f22747fbb1fad3a1b818f9
| 8,365
|
py
|
Python
|
lark/visitors.py
|
PJCampi/lark
|
924ce954d9f0dc4d060afd4f3b1af5e5ec9fc3ea
|
[
"MIT"
] | null | null | null |
lark/visitors.py
|
PJCampi/lark
|
924ce954d9f0dc4d060afd4f3b1af5e5ec9fc3ea
|
[
"MIT"
] | null | null | null |
lark/visitors.py
|
PJCampi/lark
|
924ce954d9f0dc4d060afd4f3b1af5e5ec9fc3ea
|
[
"MIT"
] | null | null | null |
from functools import wraps
from .utils import smart_decorator
from .tree import Tree
from .exceptions import VisitError, GrammarError
###{standalone
from inspect import getmembers, getmro
class Discard(Exception):
    """Raised inside a transformer/visitor callback to drop the current node."""
# Transformers
class Transformer:
    """Visits the tree recursively, starting with the leaves and finally the root (bottom-up)

    Calls its methods (provided by user via inheritance) according to tree.data
    The returned value replaces the old one in the structure.

    Can be used to implement map or reduce.
    """

    def _call_userfunc(self, tree, new_children=None):
        # Assumes tree is already transformed
        children = new_children if new_children is not None else tree.children
        try:
            f = getattr(self, tree.data)
        except AttributeError:
            # No user callback for this rule name; keep the node via __default__.
            return self.__default__(tree.data, children, tree.meta)
        else:
            try:
                # Flags set by the v_args decorators select the calling convention.
                if getattr(f, 'meta', False):
                    return f(children, tree.meta)
                elif getattr(f, 'inline', False):
                    return f(*children)
                elif getattr(f, 'whole_tree', False):
                    if new_children is not None:
                        tree.children = new_children
                    return f(tree)
                else:
                    return f(children)
            except (GrammarError, Discard):
                # Control-flow / grammar exceptions propagate unchanged.
                raise
            except Exception as e:
                # Wrap user errors so callers know which tree node failed.
                raise VisitError(tree, e)

    def _transform_children(self, children):
        # Children raising Discard are silently dropped from the result.
        for c in children:
            try:
                yield self._transform_tree(c) if isinstance(c, Tree) else c
            except Discard:
                pass

    def _transform_tree(self, tree):
        # Depth-first: children first, then the node's own callback.
        children = list(self._transform_children(tree.children))
        return self._call_userfunc(tree, children)

    def transform(self, tree):
        """Transform ``tree`` bottom-up and return the (possibly new) result."""
        return self._transform_tree(tree)

    def __mul__(self, other):
        # ``t1 * t2`` chains transformers: t2 runs on the output of t1.
        return TransformerChain(self, other)

    def __default__(self, data, children, meta):
        "Default operation on tree (for override)"
        return Tree(data, children, meta)

    @classmethod
    def _apply_decorator(cls, decorator, **kwargs):
        # Apply ``decorator`` to every user-defined callback on this class,
        # skipping private names, inherited library members, and functions
        # that already had v_args applied at the function level.
        mro = getmro(cls)
        assert mro[0] is cls
        libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)}
        for name, value in getmembers(cls):
            # Make sure the function isn't inherited (unless it's overwritten)
            if name.startswith('_') or (name in libmembers and name not in cls.__dict__):
                continue
            if not callable(cls.__dict__[name]):
                continue
            # Skip if v_args already applied (at the function level)
            if hasattr(cls.__dict__[name], 'vargs_applied'):
                continue
            static = isinstance(cls.__dict__[name], (staticmethod, classmethod))
            setattr(cls, name, decorator(value, static=static, **kwargs))
        return cls
class InlineTransformer(Transformer):  # XXX Deprecated
    """Deprecated: like ``Transformer`` but always splats children into the callback."""

    def _call_userfunc(self, tree, new_children=None):
        # The tree's children are assumed to be transformed already.
        children = tree.children if new_children is None else new_children
        try:
            callback = getattr(self, tree.data)
        except AttributeError:
            return self.__default__(tree.data, children, tree.meta)
        return callback(*children)
class TransformerChain(object):
    """Applies a sequence of transformers to a tree, each on the previous result."""

    def __init__(self, *transformers):
        self.transformers = transformers

    def transform(self, tree):
        result = tree
        for transformer in self.transformers:
            result = transformer.transform(result)
        return result

    def __mul__(self, other):
        # Chaining with ``*`` appends one more transformer to the pipeline.
        return TransformerChain(*(self.transformers + (other,)))
class Transformer_InPlace(Transformer):
    """Non-recursive variant of ``Transformer`` that mutates the tree in place."""

    def _transform_tree(self, tree):
        # Overridden to cancel the base class's recursive descent.
        return self._call_userfunc(tree)

    def transform(self, tree):
        for node in tree.iter_subtrees():
            node.children = list(self._transform_children(node.children))
        return self._transform_tree(tree)
class Transformer_InPlaceRecursive(Transformer):
    """Recursive variant of ``Transformer`` that mutates the tree in place."""

    def _transform_tree(self, tree):
        transformed = list(self._transform_children(tree.children))
        tree.children = transformed
        return self._call_userfunc(tree)
# Visitors
class VisitorBase:
    """Shared dispatch logic for the visitor classes below."""

    def _call_userfunc(self, tree):
        # Dispatch on the node's rule name; fall back to __default__.
        handler = getattr(self, tree.data, self.__default__)
        return handler(tree)

    def __default__(self, tree):
        "Default operation on tree (for override)"
        return tree
class Visitor(VisitorBase):
    """Bottom-up, non-recursive visitor.

    Walks every subtree and calls the method matching each node's ``data``
    (methods are provided by the user via inheritance). Returns the same tree.
    """

    def visit(self, tree):
        for node in tree.iter_subtrees():
            self._call_userfunc(node)
        return tree
class Visitor_Recursive(VisitorBase):
    """Bottom-up, recursive visitor.

    Visits children first, then calls the method matching the node's ``data``
    (methods are provided by the user via inheritance). Returns the same tree.
    """

    def visit(self, tree):
        for child in tree.children:
            if isinstance(child, Tree):
                self.visit(child)
        getattr(self, tree.data, self.__default__)(tree)
        return tree
def visit_children_decor(func):
    "See Interpreter"

    @wraps(func)
    def wrapper(cls, tree):
        # Visit all children first, then hand the results to the wrapped method.
        return func(cls, cls.visit_children(tree))

    return wrapper
class Interpreter:
    """Top-down, recursive visitor.

    Visits the root first; methods are dispatched by ``tree.data``. Unlike
    Transformer and Visitor, sub-branches are NOT visited automatically —
    the user must call ``visit_children`` (or use ``@visit_children_decor``).
    """

    def visit(self, tree):
        return getattr(self, tree.data)(tree)

    def visit_children(self, tree):
        return [self.visit(child) if isinstance(child, Tree) else child for child in tree.children]

    def __getattr__(self, name):
        # Unknown rule names fall back to the default handler.
        return self.__default__

    def __default__(self, tree):
        return self.visit_children(tree)
# Decorators
def _apply_decorator(obj, decorator, **kwargs):
try:
_apply = obj._apply_decorator
except AttributeError:
return decorator(obj, **kwargs)
else:
return _apply(decorator, **kwargs)
def _inline_args__func(func):
    """Wrap ``func`` so that it receives the children list splatted as arguments."""

    @wraps(func)
    def create_decorator(_f, with_self):
        def bound(self, children):
            return _f(self, *children)

        def unbound(self, children):
            return _f(*children)

        return bound if with_self else unbound

    return smart_decorator(func, create_decorator)
def inline_args(obj):  # XXX Deprecated
    """Deprecated: decorate ``obj`` so callbacks get children as positional args."""
    return _apply_decorator(obj, _inline_args__func)
def _visitor_args_func_dec(func, inline=False, meta=False, whole_tree=False, static=False):
    """Wrap a visitor callback and tag it with its calling-convention flags."""
    # At most one calling convention may be selected.
    assert [whole_tree, meta, inline].count(True) <= 1

    def create_decorator(_f, with_self):
        def with_self_proxy(self, *args, **kwargs):
            return _f(self, *args, **kwargs)

        def plain_proxy(self, *args, **kwargs):
            return _f(*args, **kwargs)

        return with_self_proxy if with_self else plain_proxy

    if static:
        wrapped = wraps(func)(create_decorator(func, False))
    else:
        wrapped = smart_decorator(func, create_decorator)
    # Flags read back by Transformer._call_userfunc, plus a marker so
    # class-level decoration skips already-decorated functions.
    wrapped.vargs_applied = True
    wrapped.inline = inline
    wrapped.meta = meta
    wrapped.whole_tree = whole_tree
    return wrapped
def v_args(inline=False, meta=False, tree=False):
    "A convenience decorator factory, for modifying the behavior of user-supplied visitor methods"
    flags = [tree, meta, inline]
    if flags.count(True) > 1:
        raise ValueError("Visitor functions can either accept tree, or meta, or be inlined. These cannot be combined.")

    def _visitor_args_dec(obj):
        return _apply_decorator(obj, _visitor_args_func_dec, inline=inline, meta=meta, whole_tree=tree)

    return _visitor_args_dec
###}
| 30.529197
| 119
| 0.641602
|
4a0f644c7223b3bd32f51a7616e15a89172e4261
| 12,976
|
py
|
Python
|
xero/manager.py
|
reachfh/pyxero
|
88b4cbc16daea4e70bf2da48beee08c1ca5c04da
|
[
"BSD-3-Clause"
] | null | null | null |
xero/manager.py
|
reachfh/pyxero
|
88b4cbc16daea4e70bf2da48beee08c1ca5c04da
|
[
"BSD-3-Clause"
] | null | null | null |
xero/manager.py
|
reachfh/pyxero
|
88b4cbc16daea4e70bf2da48beee08c1ca5c04da
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import requests
import six
import json
from xml.dom.minidom import parseString
from xml.etree.ElementTree import tostring, SubElement, Element
from datetime import datetime
from dateutil.parser import parse
from decimal import Decimal
from six.moves.urllib.parse import parse_qs
from .constants import XERO_API_URL
from .exceptions import *
from .utils import singular, isplural, parse_date, json_load_object_hook
class Manager(object):
    """Handles requests against one Xero API collection (e.g. 'Invoices')."""

    # Methods wrapped by _get_data in __init__ to share request/response handling.
    DECORATED_METHODS = (
        'get',
        'save',
        'filter',
        'all',
        'put',
        'get_attachments',
        'get_attachment_data',
        'put_attachment_data',
    )
    # Response fields parsed as full datetimes.
    DATETIME_FIELDS = (
        'UpdatedDateUTC',
        'Updated',
        'FullyPaidOnDate',
        'DateTimeUTC',
        'CreatedDateUTC'
    )
    # Response fields parsed as dates (no time component).
    DATE_FIELDS = (
        'DueDate',
        'Date',
        'PaymentDate',
        'StartDate',
        'EndDate',
        'PeriodLockDate',
        'DateOfBirth',
        'OpeningBalanceDate',
        'PaymentDueDate',
        'ReportingDate',
    )
    # Fields serialized as XML 'true'/'false' (see dict_to_xml).
    BOOLEAN_FIELDS = (
        'IsSupplier',
        'IsCustomer',
        'IsDemoCompany',
        'PaysTax',
        'IsAuthorisedToApproveTimesheets',
        'IsAuthorisedToApproveLeave',
        'HasHELPDebt',
        'AustralianResidentForTaxPurposes',
        'TaxFreeThresholdClaimed',
        'HasSFSSDebt',
        'EligibleToReceiveLeaveLoading',
        'IsExemptFromTax',
        'IsExemptFromSuper',
        'SentToContact',
        'IsSubscriber',
        'HasAttachments',
    )
    DECIMAL_FIELDS = (
        'Hours',
        'NumberOfUnit',
    )
    INTEGER_FIELDS = (
        'FinancialYearEndDay',
        'FinancialYearEndMonth',
    )
    # Fields never sent back to Xero (see dict_to_xml).
    NO_SEND_FIELDS = (
        'UpdatedDateUTC',
    )
    # Maps filter-keyword suffixes (e.g. amount__gt) to Xero query operators.
    OPERATOR_MAPPINGS = {
        'gt': '>',
        'lt': '<',
        'lte': '<=',
        'gte': '>=',
        'ne': '!='
    }
def __init__(self, name, credentials, unit_price_4dps=False):
self.credentials = credentials
self.name = name
self.base_url = credentials.base_url + XERO_API_URL
self.extra_params = {"unitdp": 4} if unit_price_4dps else {}
self.singular = singular(name)
for method_name in self.DECORATED_METHODS:
method = getattr(self, '_%s' % method_name)
setattr(self, method_name, self._get_data(method))
def dict_to_xml(self, root_elm, data):
for key in data.keys():
# Xero will complain if we send back these fields.
if key in self.NO_SEND_FIELDS:
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
# Key references a dict. Unroll the dict
# as it's own XML node with subnodes
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
# Key references a list/tuple
elif isinstance(sub_data, list) or isinstance(sub_data, tuple):
# key name is a plural. This means each item
# in the list needs to be wrapped in an XML
# node that is a singular version of the list name.
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
# key name isn't a plural. Just insert the content
# as an XML node with subnodes
else:
for d in sub_data:
self.dict_to_xml(elm, d)
# Normal element - just insert the data.
else:
if key in self.BOOLEAN_FIELDS:
val = 'true' if sub_data else 'false'
else:
val = six.text_type(sub_data)
elm.text = val
return root_elm
def _prepare_data_for_save(self, data):
if isinstance(data, list) or isinstance(data, tuple):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
else:
root_elm = self.dict_to_xml(Element(self.singular), data)
return tostring(root_elm)
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert data['Status'] == 'OK', "Expected the API to say OK but received %s" % data['Status']
return data[resource_name]
def _get_data(self, func):
""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""
def wrapper(*args, **kwargs):
from xero import __version__ as VERSION
timeout = kwargs.pop('timeout', None)
uri, params, method, body, headers, singleobject = func(*args, **kwargs)
cert = getattr(self.credentials, 'client_cert', None)
if headers is None:
headers = {}
# Use the JSON API by default, but remember we might request a PDF (application/pdf)
# so don't force the Accept header.
if 'Accept' not in headers:
headers['Accept'] = 'application/json'
# Set a user-agent so Xero knows the traffic is coming from pyxero
headers['User-Agent'] = 'pyxero/%s ' % VERSION + requests.utils.default_user_agent()
response = getattr(requests, method)(
uri, data=body, headers=headers, auth=self.credentials.oauth,
params=params, cert=cert, timeout=timeout)
if response.status_code == 200:
# If we haven't got XML or JSON, assume we're being returned a binary file
if not response.headers['content-type'].startswith('application/json'):
return response.content
return self._parse_api_response(response, self.name)
elif response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
return wrapper
def _get(self, id, headers=None):
uri = '/'.join([self.base_url, self.name, id])
params = self.extra_params.copy()
return uri, params, 'get', None, headers, True
def _get_attachments(self, id):
"""Retrieve a list of attachments associated with this Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments']) + '/'
return uri, {}, 'get', None, None, False
def _get_attachment_data(self, id, filename):
"""
Retrieve the contents of a specific attachment (identified by filename).
"""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
return uri, {}, 'get', None, None, False
def get_attachment(self, id, filename, file):
"""
Retrieve the contents of a specific attachment (identified by filename).
Writes data to file object, returns length of data written.
"""
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def save_or_put(self, data, method='post', headers=None, summarize_errors=True):
uri = '/'.join([self.base_url, self.name])
body = {'xml': self._prepare_data_for_save(data)}
params = self.extra_params.copy()
if not summarize_errors:
params['summarizeErrors'] = 'false'
return uri, params, method, body, headers, False
def _save(self, data):
return self.save_or_put(data, method='post')
def _put(self, data, summarize_errors=True):
return self.save_or_put(data, method='put', summarize_errors=summarize_errors)
def _put_attachment_data(self, id, filename, data, content_type, include_online=False):
"""Upload an attachment to the Xero object."""
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
params = {'IncludeOnline': 'true'} if include_online else {}
headers = {'Content-Type': content_type, 'Content-Length': len(data)}
return uri, params, 'put', data, headers, False
def put_attachment(self, id, filename, file, content_type, include_online=False):
"""Upload an attachment to the Xero object (from file object)."""
self.put_attachment_data(id, filename, file.read(), content_type,
include_online=include_online)
def prepare_filtering_date(self, val):
if isinstance(val, datetime):
val = val.strftime('%a, %d %b %Y %H:%M:%S GMT')
else:
val = '"%s"' % val
return {'If-Modified-Since': val}
def _filter(self, **kwargs):
params = self.extra_params.copy()
headers = None
uri = '/'.join([self.base_url, self.name])
if kwargs:
if 'since' in kwargs:
val = kwargs['since']
headers = self.prepare_filtering_date(val)
del kwargs['since']
def get_filter_params(key, value):
last_key = key.split('_')[-1]
if last_key.upper().endswith('ID'):
return 'Guid("%s")' % six.text_type(value)
if key in self.BOOLEAN_FIELDS:
return 'true' if value else 'false'
elif key in self.DATE_FIELDS:
return 'DateTime(%s,%s,%s)' % (value.year, value.month, value.day)
elif key in self.DATETIME_FIELDS:
return value.isoformat()
else:
return '"%s"' % six.text_type(value)
def generate_param(key, value):
parts = key.split("__")
field = key.replace('_', '.')
fmt = '%s==%s'
if len(parts) == 2:
# support filters:
# Name__Contains=John becomes Name.Contains("John")
if parts[1] in ["contains", "startswith", "endswith"]:
field = parts[0]
fmt = ''.join(['%s.', parts[1], '(%s)'])
elif parts[1] in self.OPERATOR_MAPPINGS:
field = parts[0]
key = field
fmt = '%s' + self.OPERATOR_MAPPINGS[parts[1]] + '%s'
elif parts[1] in ["isnull"]:
sign = '=' if value else '!'
return '%s%s=null' % (parts[0], sign)
return fmt % (
field,
get_filter_params(key, value)
)
# Move any known parameter names to the query string
KNOWN_PARAMETERS = ['order', 'offset', 'page']
for param in KNOWN_PARAMETERS:
if param in kwargs:
params[param] = kwargs.pop(param)
filter_params = []
if 'raw' in kwargs:
raw = kwargs.pop('raw')
filter_params.append(raw)
# Treat any remaining arguments as filter predicates
# Xero will break if you search without a check for null in the first position:
# http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
sortedkwargs = sorted(six.iteritems(kwargs),
key=lambda item: -1 if 'isnull' in item[0] else 0)
for key, value in sortedkwargs:
filter_params.append(generate_param(key, value))
if filter_params:
params['where'] = '&&'.join(filter_params)
return uri, params, 'get', None, headers, False
def _all(self):
uri = '/'.join([self.base_url, self.name])
return uri, {}, 'get', None, None, False
| 36.759207
| 105
| 0.559186
|
4a0f6455c2040bc6dc82e63b581e518ea597fbcf
| 70,569
|
py
|
Python
|
arcgishub/hub.py
|
raykendo/hub-py
|
aebcd5031a2be43c725f7453682bcb01169080fc
|
[
"Apache-2.0"
] | null | null | null |
arcgishub/hub.py
|
raykendo/hub-py
|
aebcd5031a2be43c725f7453682bcb01169080fc
|
[
"Apache-2.0"
] | null | null | null |
arcgishub/hub.py
|
raykendo/hub-py
|
aebcd5031a2be43c725f7453682bcb01169080fc
|
[
"Apache-2.0"
] | null | null | null |
from arcgis.gis import GIS
from arcgis.features import FeatureLayer
from arcgis.geocoding import geocode
from arcgis._impl.common._mixins import PropertyMap
from arcgishub.sites import Site, SiteManager, Page, PageManager
from arcgis.features.enrich_data import enrich_layer
from datetime import datetime
from collections import OrderedDict
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
import json
# Apply seaborn's default plot theme globally at import time.
sns.set(color_codes=True)
def _lazy_property(fn):
'''Decorator that makes a property lazy-evaluated.
'''
# http://stevenloria.com/lazy-evaluated-properties-in-python/
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
class Hub(object):
    """
    Entry point into the Hub module. Lets you access an individual hub and its components.
    ================    ===============================================================
    **Argument**        **Description**
    ----------------    ---------------------------------------------------------------
    url                 Required string. If no URL is provided by user while connecting
                        to the GIS, then the URL will be ArcGIS Online.
    ----------------    ---------------------------------------------------------------
    username            Optional string as entered while connecting to GIS. The login user name
                        (case-sensitive).
    ----------------    ---------------------------------------------------------------
    password            Optional string as entered while connecting to GIS. If a username is
                        provided, a password is expected. This is case-sensitive. If the password
                        is not provided, the user is prompted in the interactive dialog.
    ================    ===============================================================
    """
    def __init__(self, url, username=None, password=None):
        #self.gis = gis
        self._username = username
        self._password = password
        self.url = url
        # Connect immediately; GIS may prompt for a password interactively.
        self.gis = GIS(self.url, self._username, self._password)
        try:
            self._gis_id = self.gis.properties.id
        except AttributeError:
            # Org id is not exposed (e.g. some Enterprise portals).
            self._gis_id = None
    @property
    def _hub_enabled(self):
        """
        Returns True if Hub is enabled on this org
        """
        try:
            self.gis.properties.subscriptionInfo.hubSettings.enabled
            return True
        except:
            # NOTE(review): bare except treats *any* failure as "not
            # enabled"; AttributeError is the expected case here.
            return False
    @property
    def enterprise_org_id(self):
        """
        Returns the AGOL org id of the Enterprise Organization associated with this Hub.
        """
        if self._hub_enabled:
            try:
                _e_org_id = self.gis.properties.portalProperties.hub.settings.enterpriseOrg.orgId
                return _e_org_id
            except AttributeError:
                # No linked enterpriseOrg: this org may itself be the
                # Enterprise side of a community/enterprise pair.
                # NOTE(review): falls through to implicit None when the
                # companion type is not 'Enterprise' -- confirm intended.
                try:
                    if self.gis.properties.subscriptionInfo.companionOrganizations.type=='Enterprise':
                        return 'Enterprise org id is not available'
                except:
                    return self._gis_id
        else:
            raise Exception("Hub does not exist or is inaccessible.")
    @property
    def community_org_id(self):
        """
        Returns the AGOL org id of the Community Organization associated with this Hub.
        """
        if self._hub_enabled:
            try:
                _c_org_id = self.gis.properties.portalProperties.hub.settings.communityOrg.orgId
                return _c_org_id
            except AttributeError:
                # Mirror of enterprise_org_id for the community side.
                # NOTE(review): same implicit-None fall-through as above.
                try:
                    if self.gis.properties.subscriptionInfo.companionOrganizations.type=='Community':
                        return 'Community org id is not available'
                except:
                    return self._gis_id
        else:
            raise Exception("Hub does not exist or is inaccessible.")
    @property
    def enterprise_org_url(self):
        """
        Returns the AGOL org url of the Enterprise Organization associated with this Hub.
        """
        try:
            self.gis.properties.portalProperties.hub
            try:
                self.gis.properties.portalProperties.hub.settings.enterpriseOrg
                # Companion-org URL may live under public or private
                # subscription info depending on sharing settings.
                try:
                    _url = self.gis.properties.publicSubscriptionInfo.companionOrganizations[0]['organizationUrl']
                except:
                    _url = self.gis.properties.subscriptionInfo.companionOrganizations[0]['organizationUrl']
                return "https://"+_url
            except AttributeError:
                # No linked enterprise org: this org is its own enterprise.
                return self.gis.url
        except AttributeError:
            print("Hub does not exist or is inaccessible.")
            raise
    @property
    def community_org_url(self):
        """
        Returns the AGOL org id of the Community Organization associated with this Hub.
        """
        try:
            self.gis.properties.portalProperties.hub
            try:
                self.gis.properties.portalProperties.hub.settings.communityOrg
                try:
                    _url = self.gis.properties.publicSubscriptionInfo.companionOrganizations[0]['organizationUrl']
                except:
                    _url = self.gis.properties.subscriptionInfo.companionOrganizations[0]['organizationUrl']
                return "https://"+_url
            except AttributeError:
                return self.gis.url
        except:
            # NOTE(review): bare except here vs AttributeError in
            # enterprise_org_url -- the two should likely match.
            print("Hub does not exist or is inaccessible.")
            raise
    @_lazy_property
    def initiatives(self):
        """
        The resource manager for Hub initiatives. See :class:`~arcgis.apps.hub.InitiativeManager`.
        """
        return InitiativeManager(self)
    @_lazy_property
    def events(self):
        """
        The resource manager for Hub events. See :class:`~arcgis.apps.hub.EventManager`.
        """
        return EventManager(self)
    @_lazy_property
    def sites(self):
        """
        The resource manager for Hub sites. See :class:`~hub.sites.SiteManager`.
        """
        return SiteManager(self)
    @_lazy_property
    def pages(self):
        """
        The resource manager for Hub pages. See :class:`~hub.sites.PageManager`.
        """
        return PageManager(self.gis)
    def search(self, title=None, owner=None, created=None, modified=None, tags=None, scope=None):
        """
        Provides search functionality within the organization's hub. Results will be organized
        as either Initiatives
        ===============    ====================================================================
        **Argument**       **Description**
        ---------------    --------------------------------------------------------------------
        title              Optional string. Return hub items with provided string in title.
        ---------------    --------------------------------------------------------------------
        owner              Optional string. Return hub items owned by a username.
        ---------------    --------------------------------------------------------------------
        created            Optional string. Date the hub item was created.
                           Shown in milliseconds since UNIX epoch.
        ---------------    --------------------------------------------------------------------
        modified           Optional string. Date the hub item was last modified.
                           Shown in milliseconds since UNIX epoch
        ---------------    --------------------------------------------------------------------
        tags               Optional string. User-defined tags that describe the hub item.
        ---------------    --------------------------------------------------------------------
        scope              Optional string. Defines the scope of search.
                           Valid values are 'official', 'community' or 'all'.
        ===============    ====================================================================

        Returns a list mixing Initiative, Site, Page and plain Item
        objects depending on each result's typeKeywords.
        """
        resultList = []
        #Build search query
        query = 'typekeywords:hub'
        if title!=None:
            query += ' AND title:'+title
        if owner!=None:
            query += ' AND owner:'+owner
        if created!=None:
            query += ' AND created:'+created
        if modified!=None:
            query += ' AND modified:'+modified
        if tags!=None:
            query += ' AND tags:'+tags
        #Apply org scope and search
        if scope is None or self.gis.url=='https://www.arcgis.com':
            items = self.gis.content.search(query=query, item_type='Hub *', max_items=5000)
        elif scope.lower()=='official':
            query += ' AND access:public'
            _gis = GIS(self.enterprise_org_url)
            items = _gis.content.search(query=query, item_type='Hub *', max_items=5000)
        elif scope.lower()=='community':
            query += ' AND access:public'
            _gis = GIS(self.community_org_url)
            items = _gis.content.search(query=query, item_type='Hub *', max_items=5000)
        elif scope.lower()=='all':
            items = self.gis.content.search(query=query, item_type='Hub *', outside_org=True, max_items=5000)
        else:
            raise Exception("Invalid value for scope")
        # Wrap each item in the richest class its typeKeywords allow.
        for item in items:
            if "hubInitiative" in item.typeKeywords:
                resultList.append(Initiative(self, item))
            elif "hubSite" in item.typeKeywords:
                resultList.append(Site(self.gis, item))
            elif "hubPage" in item.typeKeywords:
                resultList.append(Page(self.gis, item))
            elif "hubInitiativeTemplate" in item.typeKeywords:
                # leaving room for an InitiativeTemplate object.
                # In the mean time, treat it as an item
                resultList.append(item)
            else:
                # not sure what this is. Will just send back the item
                resultList.append(item)
        return resultList
class Initiative(OrderedDict):
    """
    Represents an initiative within a Hub. An Initiative supports
    policy- or activity-oriented goals through workflows, tools and team collaboration.
    """
    def __init__(self, hub, initiativeItem):
        """
        Constructs an Initiative wrapper around an existing portal item.

        hub            -- the owning Hub object.
        initiativeItem -- the 'Hub Initiative' portal item.
        """
        self.item = initiativeItem
        self._hub = hub
        self._gis = self._hub.gis
        #self._gis = gis
        #self._hub = gis.hub
        try:
            # Cache the raw item data; expose it via dotted access.
            self._initiativedict = self.item.get_data()
            pmap = PropertyMap(self._initiativedict)
            self.definition = pmap
        except:
            # Best effort: item data may be unavailable or unparseable.
            self.definition = None
    def __repr__(self):
        return '<%s title:"%s" owner:%s>' % (type(self).__name__, self.title, self.owner)
    @property
    def itemid(self):
        """
        Returns the item id of the initiative item
        """
        return self.item.id
    @property
    def title(self):
        """
        Returns the title of the initiative item
        """
        return self.item.title
    @property
    def description(self):
        """
        Getter/Setter for the initiative description
        """
        return self.item.description
    @description.setter
    def description(self, value):
        self.item.description = value
    @property
    def snippet(self):
        """
        Getter/Setter for the initiative snippet
        """
        return self.item.snippet
    @snippet.setter
    def snippet(self, value):
        self.item.snippet = value
    @property
    def owner(self):
        """
        Returns the owner of the initiative item
        """
        return self.item.owner
    @property
    def tags(self):
        """
        Returns the tags of the initiative item
        """
        return self.item.tags
    @property
    def initiative_url(self):
        """
        Returns the url of the initiative editor
        """
        return self.item.properties['url']
    @property
    def site_id(self):
        """
        Returns the itemid of the initiative site
        """
        try:
            return self.item.properties['siteId']
        except:
            # Older initiatives store the site id in the first step.
            return self._initiativedict['steps'][0]['itemIds'][0]
    @property
    def site_url(self):
        """
        Getter/Setter for the url of the initiative site
        """
        return self.sites.get(self.site_id).url
    @site_url.setter
    def site_url(self, value):
        # NOTE(review): the setter writes to the initiative *item* url,
        # not the site item -- confirm this asymmetry is intended.
        self.item.url = value
    @property
    def content_group_id(self):
        """
        Returns the groupId for the content group
        """
        return self.item.properties['contentGroupId']
    @property
    def collab_group_id(self):
        """
        Returns the groupId for the collaboration group
        """
        return self.item.properties['collaborationGroupId']
    @property
    def followers_group_id(self):
        """
        Returns the groupId for the followers group
        """
        return self.item.properties['followersGroupId']
    @_lazy_property
    def indicators(self):
        """
        The resource manager for an Initiative's indicators.
        See :class:`~hub.hub.IndicatorManager`.
        """
        return IndicatorManager(self._gis, self.item)
    @_lazy_property
    def sites(self):
        """
        The resource manager for an Initiative's sites.
        See :class:`~hub.sites.SiteManager`.
        """
        return SiteManager(self._hub, self)
    @_lazy_property
    def all_events(self):
        """
        Fetches all events (past or future) pertaining to an initiative
        """
        return self._hub.events.search(initiative_id=self.item.id)
    @_lazy_property
    def followers(self, community_gis=None):
        """
        Fetches the list of followers for initiative.

        NOTE(review): because of the ``_lazy_property`` decorator this is
        accessed as an attribute, so callers can never actually supply
        ``community_gis`` and the community e-mail lookup is effectively
        unreachable -- confirm intended design.
        """
        followers = []
        _email = False
        _users_e = self._gis.users.search(query='hubInitiativeId|'+self.itemid, outside_org=True)
        if community_gis is not None:
            _users_c = community_gis.users.search(query='hubInitiativeId|'+self.itemid, outside_org=True)
            _email = True
        for _user in _users_e:
            _temp = {}
            _temp['name'] = _user.fullName
            _temp['username'] = _user.username
            if _email:
                try:
                    _temp['email'] = _user.email
                except AttributeError:
                    # Fall back to the matching community-org account.
                    for _user_c in _users_c:
                        if _user_c.username==_user.username:
                            try:
                                _temp['email'] = _user_c.email
                            except AttributeError:
                                pass
            followers.append(_temp)
        return followers
    def delete(self):
        """
        Deletes the initiative and its site.
        If unable to delete, raises a RuntimeException.
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Delete an initiative successfully
            initiative1 = myHub.initiatives.get('itemId12345')
            initiative1.delete()
            >> True
        """
        if self.item is not None:
            #Fetch Initiative Collaboration group
            _collab_group = self._gis.groups.get(self.collab_group_id)
            #Fetch Content Group
            _content_group = self._gis.groups.get(self.content_group_id)
            #Fetch Followers Group
            _followers_group = self._gis.groups.get(self.followers_group_id)
            #Fetch initiative site
            try:
                _site = self._hub.sites.get(self.site_id)
                _site.protected = False
                _site.delete()
            except:
                # Best effort: the site may already be gone or inaccessible.
                pass
            #Disable delete protection on groups and site
            _collab_group.protected = False
            _content_group.protected = False
            _followers_group.protected = False
            #Delete groups, site and initiative
            _collab_group.delete()
            _content_group.delete()
            _followers_group.delete()
            return self.item.delete()
    def update(self, initiative_properties=None, data=None, thumbnail=None, metadata=None):
        """ Updates the initiative.
        .. note::
            For initiative_properties, pass in arguments for only the properties you want to be updated.
            All other properties will be untouched. For example, if you want to update only the
            initiative's description, then only provide the description argument in initiative_properties.
        =====================    ====================================================================
        **Argument**             **Description**
        ---------------------    --------------------------------------------------------------------
        initiative_properties    Required dictionary. See URL below for the keys and values.
        ---------------------    --------------------------------------------------------------------
        data                     Optional string. Either a path or URL to the data.
        ---------------------    --------------------------------------------------------------------
        thumbnail                Optional string. Either a path or URL to a thumbnail image.
        ---------------------    --------------------------------------------------------------------
        metadata                 Optional string. Either a path or URL to the metadata.
        =====================    ====================================================================
        To find the list of applicable options for argument initiative_properties -
        https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.gis.toc.html#arcgis.gis.Item.update
        :return:
            A boolean indicating success (True) or failure (False).
        .. code-block:: python
            USAGE EXAMPLE: Update an initiative successfully
            initiative1 = myHub.initiatives.get('itemId12345')
            initiative1.update(initiative_properties={'description':'Create your own initiative to organize people around a shared goal.'})
            >> True
        """
        if initiative_properties:
            _initiative_data = self.definition
            # Track whether the title changed; the group names mirror it.
            # (Fix: previously `title` was only bound when 'title' was in
            # initiative_properties, so updating e.g. only the description
            # raised NameError at the group renames below.)
            title = None
            for key, value in initiative_properties.items():
                _initiative_data[key] = value
                if key=='title':
                    title = value
            if title is not None:
                #Fetch Initiative Collaboration group
                _collab_group = self._gis.groups.get(self.collab_group_id)
                #Fetch Content Group
                _content_group = self._gis.groups.get(self.content_group_id)
                #Fetch Followers Group
                _followers_group = self._gis.groups.get(self.followers_group_id)
                #Update title for all groups
                _collab_group.update(title=title+' Core Team')
                _content_group.update(title=title+' Content')
                _followers_group.update(title=title+' Followers')
            return self.item.update(_initiative_data, data, thumbnail, metadata)
class InitiativeManager(object):
"""
Helper class for managing initiatives within a Hub. This class is not created by users directly.
An instance of this class, called 'initiatives', is available as a property of the Hub object. Users
call methods on this 'initiatives' object to manipulate (add, get, search, etc) initiatives.
"""
def __init__(self, hub, initiative=None):
self._hub = hub
self._gis = self._hub.gis
def add(self, title, description=None, site=None, data=None, thumbnail=None):
"""
Adds a new initiative to the Hub.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
title Required string.
--------------- --------------------------------------------------------------------
description Optional string.
--------------- --------------------------------------------------------------------
site Optional Site object.
--------------- --------------------------------------------------------------------
data Optional string. Either a path or URL to the data.
--------------- --------------------------------------------------------------------
thumbnail Optional string. Either a path or URL to a thumbnail image.
=============== ====================================================================
:return:
The initiative if successfully added, None if unsuccessful.
.. code-block:: python
USAGE EXAMPLE: Add an initiative successfully
initiative1 = myHub.initiatives.add(title='Vision Zero Analysis')
initiative1.item
"""
#Define initiative
if description is None:
description = 'Create your own initiative to organize people around a shared goal.'
_item_dict = {"type":"Hub Initiative", "snippet":title + " Custom initiative", "typekeywords":"OpenData, Hub, hubInitiative", "title":title, "description": description, "licenseInfo": "CC-BY-SA","culture": "{{culture}}", "properties":{'schemaVersion':2}}
#Defining content, collaboration and followers groups
_content_group_title = title + ' Content'
_content_group_dict = {"title": _content_group_title, "tags": ["Hub Group", "Hub Content Group", "Hub Site Group", "Hub Initiative Group"], "access":"public"}
_collab_group_title = title + ' Core Team'
_collab_group_dict = {"title": _collab_group_title, "tags": ["Hub Group", "Hub Initiative Group", "Hub Site Group", "Hub Core Team Group", "Hub Team Group"], "access":"org"}
_followers_group_title = title + ' Followers'
_followers_group_dict = {"title": _followers_group_title, "tags": ["Hub Initiative Group", " Hub Initiative Followers Group", "Hub Initiative Group"], "access":"public"}
#Create groups
content_group = self._gis.groups.create_from_dict(_content_group_dict)
collab_group = self._gis.groups.create_from_dict(_collab_group_dict)
followers_group = self._gis.groups.create_from_dict(_followers_group_dict)
#Protect groups from accidental deletion
content_group.protected = True
collab_group.protected = True
followers_group.protected = True
#Adding it to _item_dict
if content_group is not None and collab_group is not None and followers_group is not None:
_item_dict['properties']['collaborationGroupId'] = collab_group.id
_item_dict['properties']['contentGroupId'] = content_group.id
_item_dict['properties']['followersGroupId'] = followers_group.id
#Create initiative and share it with collaboration group
item = self._gis.content.add(_item_dict, owner=self._gis.users.me.username)
item.share(groups=[collab_group])
#Create initiative site and set initiative properties
_initiative = Initiative(self._hub, item)
if site is None:
site = _initiative.sites.add(title=title)
else:
site = _initiative.sites.clone(site, pages=True, title=title)
item.update(item_properties={'url': site.url, 'culture': self._gis.properties.user.culture})
_initiative.site_url = site.item.url
item.properties['site_id'] = site.itemid
#update initiative data
_item_data = {"assets": [{"id": "bannerImage","url": self._hub.enterprise_org_url+"/sharing/rest/content/items/"+item.id+"/resources/detail-image.jpg","properties": {"type": "resource","fileName": "detail-image.jpg","mimeType": "image/jepg"},"license": {"type": "none"},"display": {"position": {"x": "center","y": "center"}}},{"id": "iconDark","url": self._hub.enterprise_org_url+"/sharing/rest/content/items/"+item.id+"/resources/icon-dark.png","properties": {"type": "resource","fileName": "icon-dark.png","mimeType": "image/png"},"license": {"type": "none"}},{"id": "iconLight","url": self._hub.enterprise_org_url+"/sharing/rest/content/items/"+item.id+"/resources/icon-light.png","properties": {"type": "resource","fileName": "icon-light.png","mimeType": "image/png"},"license": {"type": "none"}}],"steps": [{"id": "informTools","title": "Inform the Public","description": "Share data about your initiative with the public so people can easily find, download and use your data in different formats.","templateIds": [],"itemIds": [site.itemid]},{"id": "listenTools","title": "Listen to the Public","description": "Create ways to gather citizen feedback to help inform your city officials.","templateIds": [],"itemIds": []},{"id": "monitorTools","title": "Monitor Progress","description": "Establish performance measures that incorporate the publics perspective.","templateIds": [],"itemIds": []}],"indicators": [],"values": {"collaborationGroupId": collab_group.id,"contentGroupId": content_group.id,"followersGroupId": followers_group.id,"bannerImage": {"source": "bannerImage","display": {"position": {"x": "center","y": "center"}}}}}
_data = json.dumps(_item_data)
item.update(item_properties={'text': _data})
return Initiative(self._hub, item)
def clone(self, initiative, origin_hub=None, title=None):
"""
Clone allows for the creation of an initiative that is derived from the current initiative.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
initiative Required Initiative object of initiative to be cloned.
--------------- --------------------------------------------------------------------
origin_hub Optional Hub object. Required only for cross-org clones where the
initiative being cloned is not an item with public access.
--------------- --------------------------------------------------------------------
title Optional String.
=============== ====================================================================
:return:
Initiative.
"""
from datetime import timezone
now = datetime.now(timezone.utc)
#Checking if item of correct type has been passed
if 'hubInitiative' not in initiative.item.typeKeywords:
raise Exception("Incorrect item type. Initiative item needed for cloning.")
#New title
if title is None:
title = initiative.title + "-copy-%s" % int(now.timestamp() * 1000)
#If cloning within same org
if origin_hub is None:
origin_hub = self._hub
#Fetch site (checking if origin_hub is correct or if initiative is public)
try:
site = origin_hub.sites.get(initiative.site_id)
except:
raise Exception("Please provide origin_hub of the initiative object, if the initiative is not publicly shared")
#Create new initiative if destination hub is premium
if self._hub._hub_enabled:
#new initiative
new_initiative = self._hub.initiatives.add(title=title, site=site)
return new_initiative
else:
#Create new site if destination hub is basic/enterprise
new_site = self._hub.sites.clone(site, pages=True, title=title)
return new_site
def get(self, initiative_id):
"""
Returns the initiative object for the specified initiative_id.
======================= =============================================================
**Argument** **Description**
----------------------- -------------------------------------------------------------
initiative_id Required string. The initiative itemid.
======================= =============================================================
:return:
The initiative object if the item is found, None if the item is not found.
.. code-block:: python
USAGE EXAMPLE: Fetch an initiative successfully
initiative1 = myHub.initiatives.get('itemId12345')
initiative1.item
"""
initiativeItem = self._gis.content.get(initiative_id)
if 'hubInitiative' in initiativeItem.typeKeywords:
return Initiative(self._hub, initiativeItem)
else:
raise TypeError("Item is not a valid initiative or is inaccessible.")
def search(self, scope=None, title=None, owner=None, created=None, modified=None, tags=None):
"""
Searches for initiatives.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
scope Optional string. Defines the scope of search.
Valid values are 'official', 'community' or 'all'.
--------------- --------------------------------------------------------------------
title Optional string. Return initiatives with provided string in title.
--------------- --------------------------------------------------------------------
owner Optional string. Return initiatives owned by a username.
--------------- --------------------------------------------------------------------
created Optional string. Date the initiative was created.
Shown in milliseconds since UNIX epoch.
--------------- --------------------------------------------------------------------
modified Optional string. Date the initiative was last modified.
Shown in milliseconds since UNIX epoch
--------------- --------------------------------------------------------------------
tags Optional string. User-defined tags that describe the initiative.
=============== ====================================================================
:return:
A list of matching initiatives.
"""
initiativelist = []
#Build search query
query = 'typekeywords:hubInitiative'
if title!=None:
query += ' AND title:'+title
if owner!=None:
query += ' AND owner:'+owner
if created!=None:
query += ' AND created:'+created
if modified!=None:
query += ' AND modified:'+modified
if tags!=None:
query += ' AND tags:'+tags
#Apply org scope and search
if scope is None or self._gis.url=='https://www.arcgis.com':
items = self._gis.content.search(query=query, max_items=5000)
elif scope.lower()=='official':
query += ' AND access:public'
_gis = GIS(self._hub.enterprise_org_url)
items = _gis.content.search(query=query, max_items=5000)
elif scope.lower()=='community':
query += ' AND access:public'
_gis = GIS(self._hub.community_org_url)
items = _gis.content.search(query=query, max_items=5000)
elif scope.lower()=='all':
items = self._gis.content.search(query=query, outside_org=True, max_items=5000)
else:
raise Exception("Invalid value for scope")
#Return searched initiatives
for item in items:
initiativelist.append(Initiative(self._hub, item))
return initiativelist
class Indicator(OrderedDict):
    """
    Represents an indicator within an initiative. Initiatives use Indicators to standardize
    data sources for ready-to-use analysis and comparison. Indicators are measurements of a system
    including features, calculated metrics, or quantified goals.
    """
    def __init__(self, gis, initiativeItem, indicatorObject):
        """
        Constructs an empty Indicator object

        :param gis: Authenticated GIS connection used to fetch items and build maps.
        :param initiativeItem: The initiative item this indicator belongs to.
        :param indicatorObject: Raw indicator dictionary from the initiative's data JSON.
        """
        self._gis = gis
        self._initiativeItem = initiativeItem
        # Full initiative data JSON is kept so delete()/update() can rewrite the
        # 'indicators' list and push the whole document back to the initiative item.
        self._initiativedata = self._initiativeItem.get_data()
        self._indicatordict = indicatorObject
        pmap = PropertyMap(self._indicatordict)
        # Read-style view of the raw indicator definition, exposed via get_data().
        self.definition = pmap
    def __repr__(self):
        return '<%s id:"%s" optional:%s>' % (type(self).__name__, self.indicatorid, self.optional)
    @property
    def indicatorid(self):
        """
        Returns the id of the indicator
        """
        return self._indicatordict['id']
    @property
    def indicator_type(self):
        """
        Returns the type (Data/Parameter) of the indicator
        """
        return self._indicatordict['type']
    @property
    def optional(self):
        """
        Status if the indicator is optional (True/False)
        """
        return self._indicatordict['optional']
    @property
    def url(self):
        """
        Returns the data layer url (if configured) of the indicator
        """
        # NOTE(review): this and the sibling properties below return a sentinel
        # string (instead of raising) when 'source' is not configured; callers
        # cannot distinguish a real value from the sentinel except by content.
        try:
            return self._indicatordict['source']['url']
        except:
            return 'Url not available for this indicator'
    @property
    def name(self):
        """
        Returns the layer name (if configured) of the indicator
        """
        try:
            return self._indicatordict['source']['name']
        except:
            return 'Name not available for this indicator'
    @property
    def itemid(self):
        """
        Returns the item id of the data layer (if configured) of the indicator
        """
        try:
            return self._indicatordict['source']['itemId']
        except:
            return 'Item Id not available for this indicator'
    @property
    def indicator_item(self):
        """
        Returns the item of the data layer (if configured) of the indicator
        """
        # If itemid returned its sentinel string, content.get will simply fail
        # and the sentinel message below is returned instead.
        try:
            return self._gis.content.get(self.itemid)
        except:
            return 'Item not configured for this indicator'
    @_lazy_property
    def data_sdf(self):
        """
        Returns the data for the indicator as a Spatial DataFrame.
        """
        # Lazy: the feature layer is queried at most once per Indicator instance.
        try:
            _indicator_flayer = self.indicator_item.layers[0]
            return pd.DataFrame.spatial.from_layer(_indicator_flayer)
        except:
            return 'Data not configured for this indicator'
    @property
    def mappings(self):
        """
        Returns the attribute mapping from data layer (if configured) of the indicator
        """
        try:
            return self._indicatordict['source']['mappings']
        except:
            return 'Attribute mapping not available for this indicator'
    def delete(self):
        """
        Deletes an indicator from the initiative
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Delete an indicator successfully
            indicator1 = initiative1.indicators.get('streetCrashes')
            indicator1.delete()
            >> True
        """
        if self._indicatordict is not None:
            _indicator_id = self._indicatordict['id']
            # Drop this indicator from the initiative's list and persist the
            # modified JSON back onto the initiative item.
            self._initiativedata['indicators'] = list(filter(lambda indicator: indicator.get('id')!=_indicator_id, self._initiativedata['indicators']))
            _new_initiativedata = json.dumps(self._initiativedata)
            return self._initiativeItem.update(item_properties={'text': _new_initiativedata})
    def _format_date(self, date):
        """
        Return date in Y-M-D
        """
        # NOTE(review): despite the docstring, this is currently a plain
        # str() pass-through; presumably the input already renders as an
        # ISO-like timestamp string — confirm against the layer's date fields.
        epoch_time = str(date)
        return epoch_time
    def _week_day(self, num):
        """
        Return Weekday/Weekend
        """
        # num is datetime.weekday(): Monday=0 .. Sunday=6.
        # NOTE(review): num >= 4 classifies Friday as 'Weekend' — confirm this
        # is intentional (conventional weekend would be num >= 5).
        if num < 4:
            return 'Weekday'
        if num >= 4:
            return 'Weekend'
    def _month(self, date):
        """
        Return month number
        """
        # Relies on an ISO-like 'YYYY-MM-DD...' string layout (chars 5-6).
        return str(date)[5:7]
    def _hour(self, date):
        """
        Return hour number
        """
        # Relies on an ISO-like 'YYYY-MM-DDTHH...' string layout (chars 11-12).
        return str(date)[11:13]
    def _bar_chart(self, df, attribute):
        """
        Generates a bar chart for given attribute if number of categories >= 7.

        Renders inline via plt.show(); returns nothing.
        """
        #Bar chart for 1st category
        counts1 = df[attribute].value_counts()
        #Generates bar graph
        ax = counts1.plot(kind='barh', figsize=(12, 12), legend=True, fontsize=12, alpha=0.5)
        #X axis text and display style of categories
        ax.set_xlabel("Count", fontsize=12)
        #Y axis text
        ax.set_ylabel(attribute, fontsize=14)
        #Title
        ax.set_title("Bar chart for attribute "+attribute, fontsize=20)
        #Annotations
        for i in ax.patches:
            # get_width pulls left or right; get_y pushes up or down
            ax.text(i.get_width()+.1, i.get_y()+.31, str(round((i.get_width()), 2)), fontsize=10, color='dimgrey')
        #results.append(plt)
        plt.show()
    def _pie_chart(self, df, attribute):
        """
        Generates a pie chart for given attribute if number of categories < 7.

        Renders inline via plt.show(); returns nothing.
        """
        #Data to plot
        types = list(df[attribute].unique())
        # Drop falsy categories (None/empty) so they don't get a pie slice label.
        types = [category for category in types if category]
        sizes = df[attribute].value_counts()
        #Plot
        plt.figure(figsize=(6,6))
        plt.title('Pie chart for '+attribute)
        plt.pie(sizes, labels=types,
                autopct='%1.2f%%', shadow=True, startangle=100)
        plt.axis('equal')
        #results.append(plt)
        plt.show()
    def _histogram_chart(self, df, attribute):
        """
        Generates a histogram for numerical attributes and datetime attributes.

        Renders inline via plt.show(); returns nothing.
        """
        plt.figure(figsize=(8,8))
        bins=None
        # Fixed 12 bins for months so empty months still appear on the axis.
        if attribute=='month':
            bins=range(1,13)
        n, bins, patches = plt.hist(df[attribute], bins=bins, alpha=0.5)
        plt.title("Distribution for "+attribute, fontsize=16)
        plt.xlabel(attribute, fontsize=16)
        plt.ylabel("Frequency", fontsize=16)
        #results.append(plt)
        plt.show()
    def _line_chart(self, df, attribute):
        """
        Generates a line chart for datetime attribute.

        Renders inline via plt.show(); returns nothing.
        """
        hours = df[attribute].unique().tolist()
        hours.sort()
        # normalize=True gives relative frequencies; sort=False preserves the
        # hour ordering established above.
        frequency = df[attribute].value_counts(normalize=True, sort=False)
        plt.plot(hours, frequency, color='red')
        plt.xlim(0, 24)
        plt.xlabel(attribute)
        plt.ylabel('Average count')
        plt.title('Average frequency for every '+attribute)
        #results.append(plt)
        plt.show()
    def _scatter_chart_boundary(self):
        """
        Generates a scatter chart for variables used to enrich boundaries.

        :return: The enriched layer item produced by enrich_layer (also used by
            the caller to add a map layer). Note this creates a new portal item
            on every call (timestamped output_name).
        """
        enrich_variables = ['TOTPOP_CY', 'MEDHINC_CY']
        enriched = enrich_layer(self.url, analysis_variables=enrich_variables, output_name='boundaryEnriched_'+self.itemid+str(int(time.time())))
        #Convert enriched to table
        enriched_flayer = enriched.layers[0]
        enriched_df = pd.DataFrame.spatial.from_layer(enriched_flayer)
        #Scatter plot
        fig, ax = plt.subplots(figsize=(8,8))
        scatter = plt.scatter(enriched_df['TOTPOP_CY'], enriched_df['MEDHINC_CY'], c='blue', alpha=0.6)
        #X axis text and display style of categories
        ax.set_xlabel("Population per boundary", fontsize=14)
        #Y axis text
        ax.set_ylabel("Median household income per boundary", fontsize=14)
        #Title
        ax.set_title("Population v/s Median Household Income", fontsize=20)
        #results.append(plt)
        plt.show()
        return enriched
    def explore(self, subclass, display=True):
        """ Returns exploratory analyses (statistics, charts, map) for the indicator.
        ======================= =============================================================
        **Argument**            **Description**
        ----------------------- -------------------------------------------------------------
        subclass                Required string. Defines the conceptual classification.
                                Valid values are 'measure', 'place', 'boundary'.
        ----------------------- -------------------------------------------------------------
        display                 Optional boolean. Indicates if the infographics should be
                                displayed inline or returned in a list. Default is True.
        ======================= =============================================================
        :return:
            List of generated analyses if `display=False` else displays results in the notebook.

        NOTE(review): the current implementation always returns the map widget;
        the `results` list is accumulated but never returned and `display` is
        never consulted — confirm intended behavior.
        """
        results = []
        if subclass.lower() not in ['measure', 'place', 'boundary']:
            raise Exception("Indicator not of valid subclass")
        #Calculating total number of features
        indicator_df = self.data_sdf
        total = 'Total number of '+self.indicatorid+': '+str(indicator_df.shape[0])
        results.append(total)
        #Getting column names
        category_columnNames = [field['name'] for field in self.mappings if field['type']=='esriFieldTypeString']
        date_columnNames = [field['name'] for field in self.mappings if field['type']=='esriFieldTypeDate']
        value_columnNames = [field['name'] for field in self.mappings if field['type']=='esriFieldTypeInteger']
        #Call necessary charting methods for numerical variables
        if value_columnNames:
            for value in value_columnNames:
                #Average of value field
                results.append('Average number of '+value+ ' is: '+str(indicator_df[value].mean()))
                self._histogram_chart(indicator_df, value)
        #Call necessary charting methods for categorical variables
        if category_columnNames:
            for category in category_columnNames:
                # Pie chart for few categories, bar chart for up to 50;
                # attributes with 50+ distinct values are skipped entirely.
                if len(indicator_df[category].unique()) < 7:
                    self._pie_chart(indicator_df, category)
                elif len(indicator_df[category].unique()) < 50:
                    self._bar_chart(indicator_df, category)
        #Call necessary charting methods for datetime variables
        if date_columnNames:
            # NOTE(review): the loop variable shadows the imported `datetime`
            # name inside this method body — rename if datetime is needed here.
            for datetime in date_columnNames:
                indicator_df['date'] = indicator_df[datetime].apply(self._format_date)
                indicator_df['hour'] = indicator_df['date'].apply(self._hour)
                #Line chart for hourly distribution
                self._line_chart(indicator_df, 'hour')
                indicator_df['date'] = pd.to_datetime(indicator_df['date']).dt.date
                indicator_df['day_of_week'] = indicator_df['date'].apply(lambda x: x.weekday())
                indicator_df['day'] = indicator_df['day_of_week'].apply(self._week_day)
                #Pie chart for weekday-weekend distribution
                self._pie_chart(indicator_df, 'day')
                indicator_df['month'] = indicator_df['date'].apply(self._month)
                try:
                    indicator_df['month'] = indicator_df['month'].astype(int)
                except:
                    pass
                #Histogram for monthly distribution
                self._histogram_chart(indicator_df, 'month')
        #Map for this indicator
        indicator_map = self._gis.map()
        indicator_map.basemap = 'dark-gray'
        if subclass.lower()=='place':
            indicator_map.add_layer(self.indicator_item.layers[0], {'title':'Locations for '+self.indicatorid,'opacity':0.7})
        elif subclass.lower()=='measure':
            # NOTE(review): "Desnity" is a typo in this user-visible layer
            # title — should read "Density" (left unchanged here).
            indicator_map.add_layer(self.indicator_item.layers[0], {'title':'Desnity based on occurrence','renderer':'HeatmapRenderer','opacity':0.7})
        elif subclass.lower()=='boundary':
            #Scatter plot of variables enriching boundary
            enriched = self._scatter_chart_boundary()
            #Map of the enriched layer
            indicator_map.add_layer({"type":"FeatureLayer",
                        "url": enriched.url,
                        "renderer":"ClassedColorRenderer",
                        "field_name":"TOTPOP_CY",
                        "opacity":0.75
                       })
        #results.append(indicator_map)
        return indicator_map
    def get_data(self):
        """
        Retrieves the data associated with an indicator

        :return: PropertyMap of the raw indicator definition.
        """
        return self.definition
    def update(self, indicator_properties=None):
        """
        Updates properties of an indicator
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Update an indicator successfully
            indicator1_data = indicator1.get_data()
            indicator1_data['optional'] = False
            indicator1.update(indicator_properties = indicator1_data)
            >> True
        Refer the indicator definition (`get_data()`) to learn about fields that can be
        updated and their acceptable data format.
        """
        # The replacement properties must carry the indicator's id so the right
        # entry in the initiative's 'indicators' list can be swapped out.
        try:
            _indicatorId = indicator_properties['id']
        except:
            return 'Indicator properties must include id of indicator'
        if indicator_properties is not None:
            # Replace the matching indicator wholesale, keep all others as-is,
            # then persist the rewritten initiative JSON.
            self._initiativedata['indicators'] = [dict(indicator_properties) if indicator['id']==_indicatorId else indicator for indicator in self._initiativedata['indicators']]
            _new_initiativedata = json.dumps(self._initiativedata)
            status = self._initiativeItem.update(item_properties={'text': _new_initiativedata})
            if status:
                # Keep the in-memory definition in sync with what was persisted.
                self.definition = PropertyMap(indicator_properties)
            return status
class IndicatorManager(object):
    """Helper class for managing indicators within an initiative. This class is not created by users directly.
    An instance of this class, called 'indicators', is available as a property of the Initiative object. Users
    call methods on this 'indicators' object to manipulate (add, get, search, etc) indicators of a particular
    initiative.
    """
    def __init__(self, gis, initiativeItem):
        # gis: authenticated GIS connection; initiativeItem: the initiative item
        # whose data JSON holds the 'indicators' list managed here.
        self._gis = gis
        self._hub = self._gis.hub
        self._initiativeItem = initiativeItem
        # Snapshot of the initiative data at construction time; not refreshed
        # automatically if the item changes on the server afterwards.
        self._initiativedata = self._initiativeItem.get_data()
        self._indicators = self._initiativedata['indicators']
    def add(self, indicator_properties):
        """
        Adds a new indicator to given initiative.
        *Key:Value Dictionary Options for Argument indicator_properties*
        ================= =====================================================================
        **Key**           **Value**
        ----------------- ---------------------------------------------------------------------
        id                Required string. Indicator identifier within initiative template
        ----------------- ---------------------------------------------------------------------
        name              Optional string. Indicator name
        ----------------- ---------------------------------------------------------------------
        type              Optional string. Valid values are Data, Parameter.
        ----------------- ---------------------------------------------------------------------
        optional          Required boolean
        ----------------- ---------------------------------------------------------------------
        definition        Optional dictionary. Specification of the Indicator - types, fields
        ----------------- ---------------------------------------------------------------------
        source            Optional dictionary. Reference to an API or collection of data along
                          with mapping between schemas
        ================= =====================================================================
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Add an indicator successfully
            indicator1_data = {'id': 'streetCrashes', 'type': 'Data', 'optional':False}
            initiative1.indicators.add(indicator_properties = indicator1_data)
            >> True
        """
        _stemplates = []
        _id = indicator_properties['id']
        _added = False
        #Fetch initiative template data
        # The indicator id must be declared by one of the solution templates
        # referenced by the initiative's source template to be considered valid.
        _itemplateid = self._initiativedata['source']
        _itemplate = self._gis.content.get(_itemplateid)
        _itemplatedata = _itemplate.get_data()
        #Fetch solution templates associated with initiative template
        for step in _itemplatedata['steps']:
            for _stemplateid in step['templateIds']:
                _stemplates.append(_stemplateid)
        #Fetch data for each solution template
        for _stemplateid in _stemplates:
            _stemplate = self._gis.content.get(_stemplateid)
            _stemplatedata = _stemplate.get_data()
            #Check if indicator exists in solution
            for indicator in _stemplatedata['indicators']:
                #add indicator to initiative
                if indicator['id']==_id:
                    # Reject duplicates (returns a message string, not a bool).
                    if self.get(_id) is not None:
                        return 'Indicator already exists'
                    else:
                        self._initiativedata['indicators'].append(indicator_properties)
                        _new_initiativedata = json.dumps(self._initiativedata)
                        self._initiativeItem.update(item_properties={'text': _new_initiativedata})
                        _added = True
                        #Share indicator item with content (open data) group
                        # Best-effort: failures to share (e.g. no source itemId)
                        # are deliberately swallowed.
                        try:
                            item = self._gis.content.get(indicator_properties['source']['itemId'])
                            initiative = self._hub.initiatives.get(self._initiativeItem.id)
                            content_group = self._gis.groups.get(initiative.content_group_id)
                            item.share(groups=[content_group])
                        except:
                            pass
                        return Indicator(self._gis, self._initiativeItem, indicator_properties)
        if not _added:
            return 'Invalid indicator id for this initiative'
    def get(self, indicator_id):
        """ Returns the indicator object for the specified indicator_id.
        ======================= =============================================================
        **Argument**            **Description**
        ----------------------- -------------------------------------------------------------
        indicator_id            Required string. The indicator identifier.
        ======================= =============================================================
        :return:
            The indicator object if the indicator is found, None if the indicator is not found.
        """
        for indicator in self._indicators:
            if indicator['id']==indicator_id:
                _indicator = indicator
        # NOTE(review): when no id matches, _indicator is never bound and the
        # NameError below is masked by the bare except, yielding the documented
        # None — fragile; an explicit not-found branch would be clearer.
        try:
            return Indicator(self._gis, self._initiativeItem, _indicator)
        except:
            return None
    def search(self, url=None, item_id=None, name=None):
        """
        Searches for indicators within an initiative.
        =============== ====================================================================
        **Argument**    **Description**
        --------------- --------------------------------------------------------------------
        url             Optional string. url registered for indicator in `source` dictionary.
        --------------- --------------------------------------------------------------------
        item_id         Optional string. itemid registered for indicator in `source` dictionary.
        --------------- --------------------------------------------------------------------
        name            Optional string. name registered for indicator in `source` dictionary.
        =============== ====================================================================
        :return:
            A list of matching indicators.
        """
        _indicators = []
        indicatorlist = []
        for indicator in self._indicators:
            _indicators.append(indicator)
        # NOTE(review): these filters assume every indicator has a 'source'
        # dict with the relevant key; indicators without one raise KeyError.
        if url!=None:
            _indicators = [indicator for indicator in _indicators if indicator['source']['url']==url]
        if item_id!=None:
            _indicators = [indicator for indicator in _indicators if indicator['source']['itemId']==item_id]
        if name!=None:
            _indicators = [indicator for indicator in _indicators if indicator['source']['name']==name]
        for indicator in _indicators:
            indicatorlist.append(Indicator(self._gis, self._initiativeItem, indicator))
        return indicatorlist
class Event(OrderedDict):
    """
    Represents an event in a Hub. A Hub has many Events that can be associated with an Initiative.
    Events are meetings for people to support an Initiative. Events are scheduled by an organizer
    and have many attendees. An Event has a Group so that they can include content for preparation
    as well as gather and archive content during the event for later retrieval or analysis.
    """
    def __init__(self, gis, eventObject):
        """
        Constructs an empty Event object

        :param gis: Authenticated GIS connection.
        :param eventObject: Raw event feature (dict with 'attributes' and
            optionally 'geometry') from the Hub Events feature service.
        """
        self._gis = gis
        self._hub = self._gis.hub
        self._eventdict = eventObject['attributes']
        # Fold the geometry into the attribute dict; fall back to a null
        # point when the feature carries no geometry.
        try:
            self._eventdict['geometry'] = eventObject['geometry']
        except KeyError:
            self._eventdict['geometry'] = {'x':0.00, 'y':0.00}
        pmap = PropertyMap(self._eventdict)
        self.definition = pmap
    def __repr__(self):
        return '<%s title:"%s" venue:%s>' % (type(self).__name__, self.title, self.venue)
    @property
    def event_id(self):
        """
        Returns the unique identifier of the event
        """
        # OBJECTID of the feature in the Hub Events feature layer.
        return self._eventdict['OBJECTID']
    @property
    def title(self):
        """
        Returns the title of the event
        """
        return self._eventdict['title']
    @property
    def venue(self):
        """
        Returns the location of the event
        """
        return self._eventdict['venue']
    @property
    def address(self):
        """
        Returns the street address for the venue of the event
        """
        return self._eventdict['address1']
    @property
    def initiative_id(self):
        """
        Returns the initiative id of the initiative the event belongs to
        """
        return self._eventdict['initiativeId']
    @property
    def site_id(self):
        """
        Returns the site id of the initiative site
        """
        return self._eventdict['siteId']
    @property
    def organizers(self):
        """
        Returns the name and email of the event organizers
        """
        return self._eventdict['organizers']
    @property
    def description(self):
        """
        Returns description of the event
        """
        return self._eventdict['description']
    @property
    def start_date(self):
        """
        Returns start date of the event in milliseconds since UNIX epoch
        """
        return self._eventdict['startDate']
    @property
    def end_date(self):
        """
        Returns end date of the event in milliseconds since UNIX epoch
        """
        return self._eventdict['endDate']
    @property
    def creator(self):
        """
        Returns creator of the event
        """
        return self._eventdict['Creator']
    @property
    def capacity(self):
        """
        Returns attendance capacity for attendees of the event
        """
        return self._eventdict['capacity']
    @property
    def attendance(self):
        """
        Returns attendance count for a past event
        """
        return self._eventdict['attendance']
    @property
    def access(self):
        """
        Returns access permissions of the event
        """
        # Stored under the 'status' attribute (private/planned/public/draft).
        return self._eventdict['status']
    @property
    def group_id(self):
        """
        Returns groupId for the event
        """
        return self._eventdict['groupId']
    @property
    def is_cancelled(self):
        """
        Check if event is Cancelled
        """
        return self._eventdict['isCancelled']
    @property
    def geometry(self):
        """
        Returns co-ordinates of the event location
        """
        return self._eventdict['geometry']
    def delete(self):
        """
        Deletes an event
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Delete an event successfully
            event1 = myhub.events.get(24)
            event1.delete()
            >> True
        """
        # The event's companion group is protected at creation time; unprotect
        # it first so it can be removed along with the event feature.
        _group = self._gis.groups.get(self.group_id)
        _group.protected = False
        _group.delete()
        params = {'f': 'json', 'objectIds': self.event_id}
        # NOTE(review): endpoint URL is hard-coded to hub.arcgis.com — will not
        # work against non-production Hub environments.
        delete_event = self._gis._con.post(path='https://hub.arcgis.com/api/v3/events/'+self._hub.enterprise_org_id+'/Hub Events/FeatureServer/0/deleteFeatures', postdata=params)
        return delete_event['deleteResults'][0]['success']
    def update(self, event_properties):
        """
        Updates properties of an event
        :return:
            A bool containing True (for success) or False (for failure).
        .. code-block:: python
            USAGE EXAMPLE: Update an event successfully
            event1 = myhub.events.get(id)
            event_properties = {'status': 'planned', 'description': 'Test'}
            event1.update(event_properties)
            >> True
        """
        _feature = {}
        #Build event feature
        # Start from the current attributes and overlay the caller's changes;
        # OBJECTID must be present for updateFeatures to target this feature.
        event_properties['OBJECTID'] = self.event_id
        _feature["attributes"] = self._eventdict
        for key,value in event_properties.items():
            _feature["attributes"][key] = value
        _feature["geometry"] = self.geometry
        event_data = [_feature]
        #Update event
        url = 'https://hub.arcgis.com/api/v3/events/'+self._hub.enterprise_org_id+'/Hub Events/FeatureServer/0/updateFeatures'
        params = {'f': 'json', 'features': event_data}
        update_event = self._gis._con.post(path=url, postdata=params)
        return update_event['updateResults'][0]['success']
class EventManager(object):
    """Helper class for managing events within a Hub. This class is not created by users directly.
    An instance of this class, called 'events', is available as a property of the Hub object. Users
    call methods on this 'events' object to manipulate (add, search, get_map etc) events
    of a particular Hub.
    """
    def __init__(self, hub, event=None):
        # hub: the owning Hub object; event: optional single event to wrap.
        self._hub = hub
        self._gis = self._hub.gis
        if event:
            self._event = event
    def _all_events(self):
        """
        Fetches all events for particular hub.

        :return: List of Event objects, queried from the Hub Events feature
            layer (endpoint hard-coded to hub.arcgis.com).
        """
        events = []
        url = 'https://hub.arcgis.com/api/v3/events/'+self._hub.enterprise_org_id+'/Hub Events/FeatureServer/0/query'
        params = {'f' :'json', 'outFields': '*', 'where': '1=1'}
        all_events = self._gis._con.get(url, params)
        _events_data = all_events['features']
        for event in _events_data:
            events.append(Event(self._gis, event))
        return events
    def add(self, event_properties):
        """
        Adds an event for an initiative.
        =============== ====================================================================
        **Argument**    **Description**
        --------------- --------------------------------------------------------------------
        event_properties Required dictionary. See table below for the keys and values.
        =============== ====================================================================
        *Key:Value Dictionary Options for Argument event_properties*
        ================= =====================================================================
        **Key**           **Value**
        ----------------- ---------------------------------------------------------------------
        title             Required string. Name of event.
        ----------------- ---------------------------------------------------------------------
        description       Required string. Description of the event.
        ----------------- ---------------------------------------------------------------------
        initiativeId      Required string. Item id of the initiative the event belongs to.
        ----------------- ---------------------------------------------------------------------
        venue             Required string. Venue name for the event.
        ----------------- ---------------------------------------------------------------------
        address1          Required string. Street address for the venue.
        ----------------- ---------------------------------------------------------------------
        status            Required string. Access of event. Valid values are private, planned,
                          public, draft.
        ----------------- ---------------------------------------------------------------------
        startDate         Required start date of the event in milliseconds since UNIX epoch.
        ----------------- ---------------------------------------------------------------------
        endDate           Required end date of the event in milliseconds since UNIX epoch.
        ----------------- ---------------------------------------------------------------------
        isAllDay          Required boolean. Indicates if the event is a day long event.
        ----------------- ---------------------------------------------------------------------
        capacity          Optional integer. The attendance capacity of the event venue.
        ----------------- ---------------------------------------------------------------------
        address2          Optional string. Additional information about event venue street address.
        ----------------- ---------------------------------------------------------------------
        onlineLocation    Optional string. Web URL or other details for online event.
        ----------------- ---------------------------------------------------------------------
        organizers        Optional list of dictionary of keys `name` and `contact` for each organizer's
                          name and email. Default values are name, email, username of event creator.
        ----------------- ---------------------------------------------------------------------
        sponsors          Optional list of dictionary of keys `name` and `contact` for each sponsor's
                          name and contact.
        ================= =====================================================================
        :return:
            Event if successfully added.
        .. code-block:: python
            USAGE EXAMPLE: Add an event successfully
            event_properties = {
                'title':'Test Event',
                'description': 'Testing with python',
                'initiativeId': '43f..',
                'venue': 'Washington Monument',
                'address1': '2 15th St NW, Washington, District of Columbia, 20024',
                'status': 'planned',
                'startDate': 1562803200,
                'endDate': 1562889600,
                'isAllDay': 1
            }
            new_event = myhub.events.add(event_properties)
        """
        _feature = {}
        #Fetch initiative site id
        _initiative = self._hub.initiatives.get(event_properties['initiativeId'])
        event_properties['siteId'] = _initiative.site_id
        #Set organizers if not provided
        # Default organizer is the signed-in user.
        try:
            event_properties['organizers']
        except:
            _organizers_list = [{"name":self._gis.users.me.fullName, "contact": self._gis.users.me.email, "username": self._gis.users.me.username}]
            _organizers = json.dumps(_organizers_list)
            event_properties['organizers'] = _organizers
        #Set sponsors if not provided
        # Sponsors are stored serialized as a JSON string in the feature.
        try:
            event_properties['sponsors']
            event_properties['sponsors'] = json.dumps(event_properties['sponsors'])
        except:
            _sponsors = []
            event_properties['sponsors'] = json.dumps(_sponsors)
        #Set onlineLocation if not provided
        try:
            event_properties['onlineLocation']
        except:
            _onlineLocation = ''
            event_properties['onlineLocation'] = _onlineLocation
        #Set geometry if not provided
        # Geometry is carried separately from attributes in the feature, so
        # it is popped out here; falls back to geocoding the street address.
        try:
            event_properties['geometry']
            geometry = event_properties['geometry']
            del event_properties['geometry']
        except:
            geometry = geocode(event_properties['address1'])[0]['location']
        event_properties['schemaVersion'] = 2
        event_properties['location'] = ''
        # URL slug derived from the title (spaces -> hyphens, lowercased).
        event_properties['url'] = event_properties['title'].replace(' ', '-').lower()
        #Generate event id for new event
        # NOTE(review): max(existing)+1 is racy under concurrent adds and
        # queries every event; it also fails on an empty event layer.
        event_id = max([event.event_id for event in self._all_events()]) + 1
        #Create event group
        # The "hubEvent|<id>" tag links the group back to the event feature.
        _event_group_dict = {'title': event_properties['title'], 'access': 'public', 'tags': ["Hub Event Group", "Open Data", "hubEvent|"+str(event_id)]}
        _event_group = self._gis.groups.create_from_dict(_event_group_dict)
        _event_group.protected = True
        event_properties['groupId'] = _event_group.id
        #Build new event feature and create it
        _feature["attributes"] = event_properties
        _feature["geometry"] = geometry
        event_data = [_feature]
        url = 'https://hub.arcgis.com/api/v3/events/'+self._hub.enterprise_org_id+'/Hub Events/FeatureServer/0/addFeatures'
        params = {'f': 'json', 'features': event_data}
        add_event = self._gis._con.post(path=url, postdata=params)
        # On success return the freshly created Event; otherwise surface the
        # raw service response for debugging.
        try:
            add_event['addResults']
            return self.get(add_event['addResults'][0]['objectId'])
        except:
            return add_event
    def search(self, initiative_id=None, title=None, venue=None, organizer_name=None):
        """
        Searches for events within a Hub.
        =============== ====================================================================
        **Argument**    **Description**
        --------------- --------------------------------------------------------------------
        initiative_id   Optional string. Initiative itemid.
        --------------- --------------------------------------------------------------------
        title           Optional string. Title of the event.
        --------------- --------------------------------------------------------------------
        venue           Optional string. Venue where event is held.
        --------------- --------------------------------------------------------------------
        organizer_name  Optional string. Name of the organizer of the event.
        =============== ====================================================================
        :return:
            A list of matching events.
        """
        events = []
        events = self._all_events()
        # Filters narrow the full event list client-side; title/venue/organizer
        # use substring matching, initiative_id matches exactly.
        if initiative_id!=None:
            events = [event for event in events if initiative_id==event.initiative_id]
        if title!=None:
            events = [event for event in events if title in event.title]
        if venue!=None:
            events = [event for event in events if venue in event.venue]
        if organizer_name!=None:
            events = [event for event in events if organizer_name in event.organizers]
        return events
    def get(self, event_id):
        """ Get the event for the specified event_id.
        ======================= =============================================================
        **Argument**            **Description**
        ----------------------- -------------------------------------------------------------
        event_id                Required integer. The event identifier.
        ======================= =============================================================
        :return:
            The event object.
        """
        # Direct feature fetch by OBJECTID from the Hub Events layer.
        url = 'https://hub.arcgis.com/api/v3/events/'+self._hub.enterprise_org_id+'/Hub Events/FeatureServer/0/'+str(event_id)
        params = {'f':'json'}
        feature = self._gis._con.get(url, params)
        return Event(self._gis, feature['feature'])
    def get_map(self):
        """
        Plot all events for a Hub in an embedded webmap within the notebook.

        :return: A map widget with the Hub's events layer added.
        """
        # Locates the org's events layer item by its type keyword.
        _events_layer = self._gis.content.search(query="typekeywords:hubEventsLayer", max_items=5000)[0]
        event_map = self._gis.map(zoomlevel=2)
        event_map.basemap = 'dark-gray'
        event_map.add_layer(_events_layer, {'title':'Event locations for this Hub','opacity':0.7})
        return event_map
| 44.438917
| 1,647
| 0.522581
|
4a0f64e03a18e58d489d15d91addcc9fa3b5170b
| 33,985
|
py
|
Python
|
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/aio/operations/_open_shift_clusters_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/aio/operations/_open_shift_clusters_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/redhatopenshift/azure-mgmt-redhatopenshift/azure/mgmt/redhatopenshift/v2022_04_01/aio/operations/_open_shift_clusters_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._open_shift_clusters_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_admin_credentials_request, build_list_by_resource_group_request, build_list_credentials_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OpenShiftClustersOperations:
    """OpenShiftClustersOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.redhatopenshift.v2022_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: This class is machine generated by AutoRest; manual edits will be
    # lost when the client is regenerated.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.OpenShiftClusterList"]:
        """Lists OpenShift clusters in the specified subscription.
        The operation returns properties of each OpenShift cluster.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OpenShiftClusterList or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftClusterList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftClusterList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; later pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging continuation links are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OpenShiftClusterList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.RedHatOpenShift/openShiftClusters"}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.OpenShiftClusterList"]:
        """Lists OpenShift clusters in the specified subscription and resource group.
        The operation returns properties of each OpenShift cluster.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OpenShiftClusterList or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftClusterList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftClusterList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation's URL template; later pages reuse the
            # service-provided next_link verbatim.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    api_version=api_version,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging continuation links are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OpenShiftClusterList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters"}  # type: ignore
    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.OpenShiftCluster":
        """Gets a OpenShift cluster with the specified subscription, resource group and resource name.
        The operation returns properties of a OpenShift cluster.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenShiftCluster, or the result of cls(response)
        :rtype: ~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftCluster
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.OpenShiftCluster",
        **kwargs: Any
    ) -> "_models.OpenShiftCluster":
        """Send the initial create-or-update request for the long-running operation
        and deserialize its first response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'OpenShiftCluster')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the cluster body.
        if response.status_code == 200:
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.OpenShiftCluster",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.OpenShiftCluster"]:
        """Creates or updates a OpenShift cluster with the specified subscription, resource group and
        resource name.
        The operation returns properties of a OpenShift cluster.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :param parameters: The OpenShift cluster resource.
        :type parameters: ~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either OpenShiftCluster or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftCluster]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda returns the raw PipelineResponse so the poller can drive the LRO.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    async def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial delete request for the long-running delete operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    @distributed_trace_async
    async def begin_delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a OpenShift cluster with the specified subscription, resource group and resource name.
        The operation returns nothing.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda returns the raw PipelineResponse so the poller can drive the LRO.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.OpenShiftClusterUpdate",
        **kwargs: Any
    ) -> "_models.OpenShiftCluster":
        """Send the initial update request for the long-running update operation
        and deserialize its first response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'OpenShiftClusterUpdate')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 and 201 both carry the cluster body.
        if response.status_code == 200:
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.OpenShiftClusterUpdate",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.OpenShiftCluster"]:
        """Creates or updates a OpenShift cluster with the specified subscription, resource group and
        resource name.
        The operation returns properties of a OpenShift cluster.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :param parameters: The OpenShift cluster resource.
        :type parameters: ~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftClusterUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either OpenShiftCluster or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftCluster]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # cls=lambda returns the raw PipelineResponse so the poller can drive the LRO.
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('OpenShiftCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}"}  # type: ignore
    @distributed_trace_async
    async def list_admin_credentials(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.OpenShiftClusterAdminKubeconfig":
        """Lists admin kubeconfig of an OpenShift cluster with the specified subscription, resource group
        and resource name.
        The operation returns the admin kubeconfig.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenShiftClusterAdminKubeconfig, or the result of cls(response)
        :rtype: ~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftClusterAdminKubeconfig
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftClusterAdminKubeconfig"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_list_admin_credentials_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            template_url=self.list_admin_credentials.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('OpenShiftClusterAdminKubeconfig', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_admin_credentials.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}/listAdminCredentials"}  # type: ignore
    @distributed_trace_async
    async def list_credentials(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.OpenShiftClusterCredentials":
        """Lists credentials of an OpenShift cluster with the specified subscription, resource group and
        resource name.
        The operation returns the credentials.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param resource_name: The name of the OpenShift cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: OpenShiftClusterCredentials, or the result of cls(response)
        :rtype: ~azure.mgmt.redhatopenshift.v2022_04_01.models.OpenShiftClusterCredentials
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OpenShiftClusterCredentials"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = kwargs.pop('api_version', "2022-04-01")  # type: str
        request = build_list_credentials_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            api_version=api_version,
            template_url=self.list_credentials.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('OpenShiftClusterCredentials', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_credentials.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RedHatOpenShift/openShiftClusters/{resourceName}/listCredentials"}  # type: ignore
| 45.072944
| 303
| 0.668619
|
4a0f6561d1da0c2c7fb3fae85a9ec0760460b7c5
| 338
|
py
|
Python
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/lite/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 2
|
2020-09-30T00:11:09.000Z
|
2021-10-04T13:00:38.000Z
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/lite/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | null | null | null |
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/lite/experimental/__init__.py
|
Lube-Project/ProgettoLube
|
cbf33971e2c2e865783ec1a2302625539186a338
|
[
"MIT"
] | 1
|
2021-01-28T01:57:41.000Z
|
2021-01-28T01:57:41.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.lite.experimental namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.lite.python.lite import load_delegate
# Remove the compatibility alias so it does not leak into the public namespace.
del _print_function
| 26
| 82
| 0.810651
|
4a0f65d328409d18de16b6cdbf871d3a05f4f6d5
| 938
|
py
|
Python
|
PrimeFactor(e3).py
|
Ritz-19/Project-Euler
|
c6840ff079e197e53bb95fd41f37c17e6277e0bd
|
[
"MIT"
] | null | null | null |
PrimeFactor(e3).py
|
Ritz-19/Project-Euler
|
c6840ff079e197e53bb95fd41f37c17e6277e0bd
|
[
"MIT"
] | null | null | null |
PrimeFactor(e3).py
|
Ritz-19/Project-Euler
|
c6840ff079e197e53bb95fd41f37c17e6277e0bd
|
[
"MIT"
] | null | null | null |
# Euler 3: Prime Factor of a number
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
import math
#Find Factors
def find_Factors(num):
    """Return the divisors of ``num`` that do not exceed sqrt(num), ascending.

    Note: only the "small half" of each divisor pair is returned; the
    complementary divisors above sqrt(num) are deliberately omitted.
    """
    limit = int(math.sqrt(num) + 1)
    return [candidate for candidate in range(1, limit) if num % candidate == 0]
#Check if prime
def check_Prime(num):
    """Return True if ``num`` is a prime number.

    Fixes the original logic, which inferred primality from the length of the
    sqrt-limited factor list: it misreported 1 as prime and raised on
    non-positive input. Trial division up to sqrt(num) is sufficient because
    any composite number has a divisor in that range.
    """
    if num < 2:
        return False
    return all(num % i != 0 for i in range(2, int(math.sqrt(num)) + 1))
#main
def _largest_prime_factor(value):
    """Return the largest prime factor of ``value`` (0 if there is none)."""
    largest = 0
    remaining = value
    divisor = 2
    while divisor * divisor <= remaining:
        if remaining % divisor == 0:
            largest = divisor
            # Strip this prime out completely so later divisors found are prime too.
            while remaining % divisor == 0:
                remaining //= divisor
        else:
            divisor += 1
    if remaining > 1:
        # Whatever remains is a prime factor larger than sqrt of the original value.
        largest = remaining
    return largest

num = int(input("Enter a Number: "))
# BUG FIX: the original scanned only find_Factors(num), i.e. divisors <= sqrt(num),
# so it missed any prime factor above the square root (e.g. it reported 2 instead
# of 5 for num = 10). Factor directly instead.
print("The Largest Prime Factor is: ", _largest_prime_factor(num))
'''
While checking for the factors, you can use sqrt because the distinct
"small" factors of a number only occur up to its sqrt, after which every
remaining factor is the complementary quotient of an earlier one.
Example: num = 24,
factors = 1, 2, 3, 4, 6, 8, 12, 24
sqrt(24) ~ 4.9
and every factor above 4 pairs with one at or below 4 (24 = 4*6 = 3*8 = 2*12),
so limiting the loop to sqrt shortens the runtime.
'''
| 22.878049
| 77
| 0.684435
|
4a0f66ae5ab2c7fd899f1eb68141593b2d9f5b69
| 192
|
py
|
Python
|
cinema/movies/apps.py
|
kevinGarcia15/cinemaAPI
|
d83a9f57223842378e413936d4ccdba0463f1f0a
|
[
"MIT"
] | null | null | null |
cinema/movies/apps.py
|
kevinGarcia15/cinemaAPI
|
d83a9f57223842378e413936d4ccdba0463f1f0a
|
[
"MIT"
] | null | null | null |
cinema/movies/apps.py
|
kevinGarcia15/cinemaAPI
|
d83a9f57223842378e413936d4ccdba0463f1f0a
|
[
"MIT"
] | null | null | null |
"""users apps"""
#django
from django.apps import AppConfig
class MoviesAppConfig(AppConfig):
    """Django AppConfig for the movies application.

    Registers the app under the dotted path ``cinema.movies`` and gives
    it the human-readable label "Movies".
    """

    # Full Python path to the application package.
    name = 'cinema.movies'
    # Display name used e.g. in the Django admin.
    verbose_name = 'Movies'
| 17.454545
| 33
| 0.635417
|
4a0f6766897cc6dd4413d78e3f6ea93486f3d944
| 4,816
|
py
|
Python
|
polsalt/blksmooth2d.py
|
Richard-Tarbell/polsalt
|
e953985ffbc786fd071d0b48ebca5bd1dac9a960
|
[
"BSD-3-Clause"
] | 1
|
2017-09-22T17:04:06.000Z
|
2017-09-22T17:04:06.000Z
|
polsalt/blksmooth2d.py
|
Richard-Tarbell/polsalt
|
e953985ffbc786fd071d0b48ebca5bd1dac9a960
|
[
"BSD-3-Clause"
] | 14
|
2015-12-22T17:56:38.000Z
|
2021-07-30T15:36:23.000Z
|
polsalt/blksmooth2d.py
|
Richard-Tarbell/polsalt
|
e953985ffbc786fd071d0b48ebca5bd1dac9a960
|
[
"BSD-3-Clause"
] | 12
|
2015-12-21T15:12:44.000Z
|
2021-08-12T18:58:12.000Z
|
"""
blksmooth2d
General purpose 2d smoothing
"""
import os, sys, glob, shutil, inspect
import numpy as np
import pyfits
from scipy.interpolate import griddata
np.set_printoptions(threshold=np.nan)
# ---------------------------------------------------------------------------------
def blksmooth2d(ar_rc,ok_rc,rblk,cblk,blklim,mode="mean",debug=False):
    """Block-average the masked 2-D array, then spline-interpolate the
    result back onto the full pixel grid.

    ar_rc     : data array (rows x cols)
    ok_rc     : mask of good pixels (multiplied into the data)
    rblk,cblk : block size in rows / columns
    blklim    : a block is used only when more than this fraction of its
                pixels are good
    mode      : "mean" (default) or "median" block statistic
    debug     : dump intermediate arrays to text/FITS files

    NOTE(review): Python 2 source (print statement; ``/`` on ints is
    floor division, e.g. ``rblk/2``) -- keep as-is unless the whole
    module is ported to Python 3.
    """
    # Zero out masked pixels, then carve the array into rblk x cblk blocks.
    arr_rc = ar_rc*ok_rc
    rows,cols = ar_rc.shape
    r_rc,c_rc = np.indices((rows,cols)).astype(float)
    rblks,cblks = int(rows/rblk),int(cols/cblk)
    # equalize block scaling to avoid triangularization failure
    rfac,cfac = max(rblk,cblk)/rblk, max(rblk,cblk)/cblk
    # Center the block grid on the array; leftover pixels are split evenly.
    r0,c0 = (rows % rblk)/2,(cols % cblk)/2
    # *_RCb arrays hold each block's pixels flattened along the last axis.
    arr_RCb = arr_rc[r0:(r0+rblk*rblks),c0:(c0+cblk*cblks)] \
        .reshape(rblks,rblk,cblks,cblk).transpose(0,2,1,3).reshape(rblks,cblks,rblk*cblk)
    ok_RCb = ok_rc[r0:(r0+rblk*rblks),c0:(c0+cblk*cblks)] \
        .reshape(rblks,rblk,cblks,cblk).transpose(0,2,1,3).reshape(rblks,cblks,rblk*cblk)
    r_RCb = rfac*((ok_rc*r_rc)[r0:(r0+rblk*rblks),c0:(c0+cblk*cblks)]) \
        .reshape(rblks,rblk,cblks,cblk).transpose(0,2,1,3).reshape(rblks,cblks,rblk*cblk)
    c_RCb = cfac*((ok_rc*c_rc)[r0:(r0+rblk*rblks),c0:(c0+cblk*cblks)]) \
        .reshape(rblks,rblk,cblks,cblk).transpose(0,2,1,3).reshape(rblks,cblks,rblk*cblk)
    # Keep only blocks with enough good pixels.
    ok_RC = ok_RCb.sum(axis=-1) > rblk*cblk*blklim
    arr_RC = np.zeros((rblks,cblks))
    if mode == "mean":
        arr_RC[ok_RC] = arr_RCb[ok_RC].sum(axis=-1)/ok_RCb[ok_RC].sum(axis=-1)
    elif mode == "median":
        arr_RC[ok_RC] = np.median(arr_RCb[ok_RC],axis=-1)
    else:
        print "Illegal mode "+mode+" for smoothing"
        exit()
    # Mean good-pixel position of each block (in the rescaled coordinates).
    r_RC = np.zeros_like(arr_RC); c_RC = np.zeros_like(arr_RC)
    r_RC[ok_RC] = r_RCb[ok_RC].sum(axis=-1)/ok_RCb[ok_RC].sum(axis=-1)
    c_RC[ok_RC] = c_RCb[ok_RC].sum(axis=-1)/ok_RCb[ok_RC].sum(axis=-1)
    # evaluate slopes at edge for edge extrapolation
    dar_RC = arr_RC[1:,:] - arr_RC[:-1,:]
    dac_RC = arr_RC[:,1:] - arr_RC[:,:-1]
    dr_RC = r_RC[1:,:] - r_RC[:-1,:]
    dc_RC = c_RC[:,1:] - c_RC[:,:-1]
    dadr_RC = np.zeros_like(dar_RC); dadc_RC = np.zeros_like(dac_RC)
    dadr_RC[dr_RC!=0] = rfac*dar_RC[dr_RC!=0]/dr_RC[dr_RC!=0]
    dadc_RC[dc_RC!=0] = cfac*dac_RC[dc_RC!=0]/dc_RC[dc_RC!=0]
    # Rows/columns of the block grid that contain at least one good block.
    argR = np.where(ok_RC.sum(axis=1)>0)[0]
    argC = np.where(ok_RC.sum(axis=0)>0)[0]
    # Zero the edge slopes where the edge block itself is empty.
    dadr_RC[argR[0],argC] *= (arr_RC[argR[0,],argC] > 0)
    dadr_RC[argR[-1]-1,argC] *= (arr_RC[argR[-1],argC] > 0)
    dadc_RC[argR,argC[0]] *= (arr_RC[argR,argC[0]] > 0)
    dadc_RC[argR,argC[-1]-1] *= (arr_RC[argR,argC[-1]] > 0)
    if debug:
        np.savetxt('arr_RC.txt',arr_RC,fmt="%14.9f")
        np.savetxt('dadr_RC.txt',dadr_RC,fmt="%14.9f")
        np.savetxt('dadc_RC.txt',dadc_RC,fmt="%14.9f")
        np.savetxt('r_RC_0.txt',r_RC,fmt="%9.2f")
        np.savetxt('c_RC_0.txt',c_RC,fmt="%9.2f")
    # force outer block positions into a rectangle to avoid edge effects, spline interpolate
    r_RC[argR[[0,-1]][:,None],argC] = rfac*(r0+(rblk-1)/2.+rblk*argR[[0,-1]])[:,None]
    c_RC[argR[:,None],argC[[0,-1]]] = cfac*(c0+(cblk-1)/2.+cblk*argC[[0,-1]])
    if debug:
        np.savetxt('r_RC_1.txt',r_RC,fmt="%9.2f")
        np.savetxt('c_RC_1.txt',c_RC,fmt="%9.2f")
    # Cubic spline back onto the full (rescaled) pixel grid; outside the
    # convex hull of good blocks the result is 0.
    arr_rc = griddata((r_RC[ok_RC],c_RC[ok_RC]),arr_RC[ok_RC], \
        tuple(np.mgrid[:rfac*rows:rfac,:cfac*cols:cfac].astype(float)),method='cubic',fill_value=0.)
    if debug:
        pyfits.PrimaryHDU(arr_rc.astype('float32')).writeto('arr_rc_0.fits',clobber=True)
    # extrapolate to original array size
    argR_r = ((np.arange(rows) - r0)/rblk).clip(0,rblks-1).astype(int)
    argC_c = ((np.arange(cols) - c0)/cblk).clip(0,cblks-1).astype(int)
    # r0..c1 are reused here as the bounding box of the interpolated region.
    r0,r1 = np.where(arr_rc.sum(axis=1)>0)[0][[0,-1]]
    c0,c1 = np.where(arr_rc.sum(axis=0)>0)[0][[0,-1]]
    # Linear extrapolation of each edge using the edge slopes computed above.
    arr_rc[r0-rblk/2:r0,c0:c1+1] += arr_rc[r0,c0:c1+1] + \
        dadr_RC[argR[0],argC_c[c0:c1+1]]*(np.arange(-rblk/2,0)[:,None])
    arr_rc[r1+1:r1+rblk/2,c0:c1+1] += arr_rc[r1,c0:c1+1] + \
        dadr_RC[argR[-1]-1,argC_c[c0:c1+1]]*(np.arange(1,rblk/2)[:,None])
    arr_rc[r0-rblk/2:r1+rblk/2,c0-cblk/2:c0] += arr_rc[r0-rblk/2:r1+rblk/2,c0][:,None] + \
        dadc_RC[argR_r[r0-rblk/2:r1+rblk/2],argC[0]][:,None]*np.arange(-cblk/2,0)
    arr_rc[r0-rblk/2:r1+rblk/2,c1+1:c1+cblk/2] += arr_rc[r0-rblk/2:r1+rblk/2,c1][:,None] + \
        dadc_RC[argR_r[r0-rblk/2:r1+rblk/2],argC[-1]-1][:,None]*np.arange(1,cblk/2)
    if debug:
        pyfits.PrimaryHDU(arr_rc.astype('float32')).writeto('arr_rc_1.fits',clobber=True)
    return arr_rc
| 44.183486
| 100
| 0.600291
|
4a0f68afaf2174068e83672bc60f128b78789e00
| 3,973
|
py
|
Python
|
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
backend/tests/baserow/contrib/database/migrations/test_remove_field_by_id_migration.py
|
ashishdhngr/baserow
|
b098678d2165eb7c42930ee24dc6753a3cb520c3
|
[
"MIT"
] | null | null | null |
# noinspection PyPep8Naming
import pytest
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
# noinspection PyPep8Naming
@pytest.mark.django_db(transaction=True)
def test_forwards_migration(data_fixture, reset_schema_after_module):
    """Migration 0040 rewrites field_by_id(<id>) formulas to field('name'),
    keeping the original text in ``old_formula_with_field_by_id``; unknown
    field ids become a field('unknown field <id>') placeholder.

    Fix: dropped ``f`` prefixes from string literals with no placeholders
    (flake8/ruff F541).
    """
    migrate_from = [("database", "0039_formulafield")]
    migrate_to = [("database", "0040_formulafield_remove_field_by_id")]
    old_state = migrate(migrate_from)
    # The models used by the data_fixture below are not touched by this migration so
    # it is safe to use the latest version in the test.
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    text_field = data_fixture.create_text_field(user=user, table=table, name="text")
    FormulaField = old_state.apps.get_model("database", "FormulaField")
    ContentType = old_state.apps.get_model("contenttypes", "ContentType")
    content_type_id = ContentType.objects.get_for_model(FormulaField).id
    formula_field = FormulaField.objects.create(
        table_id=table.id,
        formula_type="text",
        formula=f"field_by_id({text_field.id})",
        content_type_id=content_type_id,
        order=0,
        name="a",
    )
    unknown_field_by_id = FormulaField.objects.create(
        table_id=table.id,
        formula_type="text",
        formula="field_by_id(9999)",
        content_type_id=content_type_id,
        order=0,
        name="b",
    )
    new_state = migrate(migrate_to)
    NewFormulaField = new_state.apps.get_model("database", "FormulaField")
    new_formula_field = NewFormulaField.objects.get(id=formula_field.id)
    assert new_formula_field.formula == "field('text')"
    assert (
        new_formula_field.old_formula_with_field_by_id
        == f"field_by_id({text_field.id})"
    )
    new_unknown_field_by_id = NewFormulaField.objects.get(id=unknown_field_by_id.id)
    assert new_unknown_field_by_id.formula == "field('unknown field 9999')"
    assert new_unknown_field_by_id.old_formula_with_field_by_id == "field_by_id(9999)"
# noinspection PyPep8Naming
@pytest.mark.django_db(transaction=True)
def test_backwards_migration(data_fixture, reset_schema_after_module):
    """Reversing migration 0040 restores field('name') formulas to the old
    field_by_id(<id>) syntax; references to unknown field names are left
    unchanged.

    Fix: dropped ``f`` prefixes from string literals with no placeholders
    (flake8/ruff F541).
    """
    migrate_from = [("database", "0040_formulafield_remove_field_by_id")]
    migrate_to = [("database", "0039_formulafield")]
    old_state = migrate(migrate_from)
    # The models used by the data_fixture below are not touched by this migration so
    # it is safe to use the latest version in the test.
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    text_field = data_fixture.create_text_field(user=user, table=table, name="text")
    FormulaField = old_state.apps.get_model("database", "FormulaField")
    ContentType = old_state.apps.get_model("contenttypes", "ContentType")
    content_type_id = ContentType.objects.get_for_model(FormulaField).id
    formula_field = FormulaField.objects.create(
        table_id=table.id,
        formula_type="text",
        formula="field('text')",
        content_type_id=content_type_id,
        order=0,
        name="a",
    )
    unknown_field = FormulaField.objects.create(
        table_id=table.id,
        formula_type="text",
        formula="field('unknown')",
        content_type_id=content_type_id,
        order=0,
        name="b",
    )
    new_state = migrate(migrate_to)
    NewFormulaField = new_state.apps.get_model("database", "FormulaField")
    new_formula_field = NewFormulaField.objects.get(id=formula_field.id)
    assert new_formula_field.formula == f"field_by_id({text_field.id})"
    new_unknown_field_by_id = NewFormulaField.objects.get(id=unknown_field.id)
    assert new_unknown_field_by_id.formula == "field('unknown')"
assert new_unknown_field_by_id.formula == "field('unknown')"
def migrate(target):
    """Apply Django migrations to ``target`` (forwards or backwards) and
    return the resulting project state."""
    executor = MigrationExecutor(connection)
    # Rebuild the migration graph so earlier migrate() calls in the same
    # test do not leave the loader stale.
    executor.loader.build_graph()
    executor.migrate(target)
    return executor.loader.project_state(target)
| 38.95098
| 87
| 0.725648
|
4a0f69e3a73bab7fca87fb7c3e5395b92788268c
| 95
|
py
|
Python
|
test/rest/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/rest/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/rest/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | 1
|
2019-06-09T23:51:13.000Z
|
2019-06-09T23:51:13.000Z
|
import pytest
@pytest.fixture
def test_client(flask_app):
    """Return a Flask/Werkzeug test client bound to the ``flask_app`` fixture."""
    return flask_app.test_client()
| 13.571429
| 34
| 0.778947
|
4a0f6a2c1db6f1185dafe7c3559abd3a4990b4cb
| 3,513
|
py
|
Python
|
tests/test_compare.py
|
TommasoPino/oem
|
0b6567accb2dfc4475b4655ce7b07ff85fed32aa
|
[
"MIT"
] | 3
|
2020-08-31T10:15:04.000Z
|
2021-12-18T03:00:11.000Z
|
tests/test_compare.py
|
TommasoPino/oem
|
0b6567accb2dfc4475b4655ce7b07ff85fed32aa
|
[
"MIT"
] | 26
|
2020-05-17T02:28:28.000Z
|
2021-12-20T03:06:05.000Z
|
tests/test_compare.py
|
TommasoPino/oem
|
0b6567accb2dfc4475b4655ce7b07ff85fed32aa
|
[
"MIT"
] | 2
|
2020-11-15T19:33:07.000Z
|
2021-10-01T08:58:39.000Z
|
import pytest
import numpy as np
from astropy.time import Time
from pathlib import Path
from oem import OrbitEphemerisMessage
from oem.compare import StateCompare
from oem.components import State
SAMPLE_DIR = Path(__file__).parent / "samples"
def test_state_self_difference():
    """Comparing a state against itself yields all-zero differences."""
    state = State(Time.now(), "ICRF", "EARTH", [1, 0, 0], [0, 1, 0], [0, 0, 1])
    compare = state - state
    assert compare.range == 0
    assert compare.range_rate == 0
    assert all(compare.position == 0)
    assert all(compare.velocity == 0)
    assert all(compare.position_ric == 0)
    assert all(compare.velocity_ric == 0)
def test_state_compare_frame_mismatch():
    """Mismatched reference frame or central body raises ValueError."""
    epoch = Time.now()
    origin = State(epoch, "ICRF", "EARTH", [1, 0, 0], [0, 1, 0])
    target1 = State(epoch, "GRC", "EARTH", [1, 0, 0], [0, 1, 0])  # frame differs
    target2 = State(epoch, "ICRF", "MARS", [1, 0, 0], [0, 1, 0])  # body differs
    with pytest.raises(ValueError):
        StateCompare(origin, target1)
    with pytest.raises(ValueError):
        StateCompare(origin, target2)
def test_state_compare_noninertial():
    """Velocity comparison in a rotating (GRC) frame is unsupported."""
    state = State(Time.now(), "GRC", "EARTH", [1, 0, 0], [0, 1, 0])
    with pytest.raises(NotImplementedError):
        StateCompare(state, state).velocity
def test_state_compare_nonstandard():
    """An unrecognized frame name triggers a UserWarning."""
    state = State(Time.now(), "ABCD", "EARTH", [1, 0, 0], [0, 1, 0])
    with pytest.warns(UserWarning):
        StateCompare(state, state)
def test_state_compare_epoch_mismatch():
    """States at different epochs cannot be compared."""
    # Two Time.now() calls produce distinct epochs.
    origin = State(Time.now(), "ICRF", "EARTH", [1, 0, 0], [0, 1, 0])
    target = State(Time.now(), "ICRF", "EARTH", [1, 0, 0], [0, 1, 0])
    with pytest.raises(ValueError):
        StateCompare(origin, target)
def test_segment_self_compare():
    """A segment differenced with itself gives zero range everywhere."""
    test_file_path = SAMPLE_DIR / "real" / "GEO_20s.oem"
    segment = OrbitEphemerisMessage.open(test_file_path).segments[0]
    compare = segment - segment
    assert not compare.is_empty
    for state_compare in compare.steps(600):
        assert state_compare.range == 0 and state_compare.range_rate == 0
def test_segment_compare_mismatch():
    """Differencing segments with different central bodies raises ValueError."""
    test_file_path = SAMPLE_DIR / "real" / "GEO_20s.oem"
    segment1 = OrbitEphemerisMessage.open(test_file_path).segments[0]
    segment2 = segment1.copy()
    # Identical copies compare fine...
    _ = segment1 - segment2
    # ...but a differing CENTER_NAME must be rejected.
    segment2.metadata["CENTER_NAME"] = "MARS"
    with pytest.raises(ValueError):
        _ = segment1 - segment2
def test_ephemeris_self_compare():
    """A full ephemeris differenced with itself is zero in RIC as well."""
    test_file_path = SAMPLE_DIR / "real" / "GEO_20s.oem"
    oem = OrbitEphemerisMessage.open(test_file_path)
    compare = oem - oem
    assert not compare.is_empty
    for state_compare in compare.steps(600):
        assert state_compare.range == 0 and state_compare.range_rate == 0
        np.testing.assert_almost_equal(state_compare.position_ric, 0)
        np.testing.assert_almost_equal(state_compare.velocity_ric, 0)
def test_real_reference_ric():
    """RIC differences of two real sample ephemerides match reference values."""
    test_origin_path = SAMPLE_DIR / "real" / "CompareExample1.oem"
    test_target_path = SAMPLE_DIR / "real" / "CompareExample2.oem"
    origin = OrbitEphemerisMessage.open(test_origin_path)
    target = OrbitEphemerisMessage.open(test_target_path)
    compare = target - origin
    assert not compare.is_empty
    for state_compare in compare.steps(600):
        # Constant offset between the two sample files.
        np.testing.assert_almost_equal(state_compare.range, 1.165554784013)
        np.testing.assert_almost_equal(
            state_compare.position_ric,
            np.array([-0.000101713843, -1.165554779575, 0.0]),
            decimal=6
        )
        np.testing.assert_almost_equal(state_compare.velocity_ric, 0)
| 35.13
| 79
| 0.683177
|
4a0f6b55b227bc932da9ee910aa02f085bed75e3
| 11,934
|
py
|
Python
|
homeassistant/components/soundtouch/media_player.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/soundtouch/media_player.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:59:36.000Z
|
2022-03-12T00:52:11.000Z
|
homeassistant/components/soundtouch/media_player.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 10
|
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Support for interface with a Bose Soundtouch."""
import logging
import re
from libsoundtouch import soundtouch_device
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
)
import homeassistant.helpers.config_validation as cv
from .const import (
DOMAIN,
SERVICE_ADD_ZONE_SLAVE,
SERVICE_CREATE_ZONE,
SERVICE_PLAY_EVERYWHERE,
SERVICE_REMOVE_ZONE_SLAVE,
)
_LOGGER = logging.getLogger(__name__)
MAP_STATUS = {
"PLAY_STATE": STATE_PLAYING,
"BUFFERING_STATE": STATE_PLAYING,
"PAUSE_STATE": STATE_PAUSED,
"STOP_STATE": STATE_OFF,
}
DATA_SOUNDTOUCH = "soundtouch"
SOUNDTOUCH_PLAY_EVERYWHERE = vol.Schema({vol.Required("master"): cv.entity_id})
SOUNDTOUCH_CREATE_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_ADD_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
SOUNDTOUCH_REMOVE_ZONE_SCHEMA = vol.Schema(
{vol.Required("master"): cv.entity_id, vol.Required("slaves"): cv.entity_ids}
)
DEFAULT_NAME = "Bose Soundtouch"
DEFAULT_PORT = 8090
SUPPORT_SOUNDTOUCH = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_SET
| SUPPORT_TURN_ON
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Bose Soundtouch platform.

    Creates one SoundTouchDevice entity, either from discovery info or
    from YAML config, tracks it in ``hass.data[DATA_SOUNDTOUCH]``, and
    registers the four multi-room zone services (play everywhere,
    create/add/remove zone slaves).
    """
    if DATA_SOUNDTOUCH not in hass.data:
        hass.data[DATA_SOUNDTOUCH] = []
    if discovery_info:
        host = discovery_info["host"]
        port = int(discovery_info["port"])
        # if device already exists by config
        if host in [device.config["host"] for device in hass.data[DATA_SOUNDTOUCH]]:
            return
        remote_config = {"id": "ha.component.soundtouch", "host": host, "port": port}
        bose_soundtouch_entity = SoundTouchDevice(None, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
        add_entities([bose_soundtouch_entity])
    else:
        name = config.get(CONF_NAME)
        remote_config = {
            "id": "ha.component.soundtouch",
            "port": config.get(CONF_PORT),
            "host": config.get(CONF_HOST),
        }
        bose_soundtouch_entity = SoundTouchDevice(name, remote_config)
        hass.data[DATA_SOUNDTOUCH].append(bose_soundtouch_entity)
        add_entities([bose_soundtouch_entity])

    def service_handle(service):
        """Handle the applying of a service.

        Resolves the master/slave entity ids in the service data against
        the tracked SoundTouch entities, then dispatches to the matching
        zone operation on the master.
        """
        master_device_id = service.data.get("master")
        slaves_ids = service.data.get("slaves")
        slaves = []
        if slaves_ids:
            slaves = [
                device
                for device in hass.data[DATA_SOUNDTOUCH]
                if device.entity_id in slaves_ids
            ]
        # First tracked entity whose entity_id matches, or None.
        master = next(
            [
                device
                for device in hass.data[DATA_SOUNDTOUCH]
                if device.entity_id == master_device_id
            ].__iter__(),
            None,
        )
        if master is None:
            _LOGGER.warning(
                "Unable to find master with entity_id: %s", str(master_device_id)
            )
            return
        if service.service == SERVICE_PLAY_EVERYWHERE:
            # "Play everywhere" treats every other tracked device as a slave.
            slaves = [
                d for d in hass.data[DATA_SOUNDTOUCH] if d.entity_id != master_device_id
            ]
            master.create_zone(slaves)
        elif service.service == SERVICE_CREATE_ZONE:
            master.create_zone(slaves)
        elif service.service == SERVICE_REMOVE_ZONE_SLAVE:
            master.remove_zone_slave(slaves)
        elif service.service == SERVICE_ADD_ZONE_SLAVE:
            master.add_zone_slave(slaves)

    hass.services.register(
        DOMAIN,
        SERVICE_PLAY_EVERYWHERE,
        service_handle,
        schema=SOUNDTOUCH_PLAY_EVERYWHERE,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_CREATE_ZONE,
        service_handle,
        schema=SOUNDTOUCH_CREATE_ZONE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_REMOVE_ZONE_SLAVE,
        service_handle,
        schema=SOUNDTOUCH_REMOVE_ZONE_SCHEMA,
    )
    hass.services.register(
        DOMAIN,
        SERVICE_ADD_ZONE_SLAVE,
        service_handle,
        schema=SOUNDTOUCH_ADD_ZONE_SCHEMA,
    )
class SoundTouchDevice(MediaPlayerDevice):
    """Representation of a SoundTouch Bose device.

    Wraps a ``libsoundtouch`` device; every command refreshes the cached
    ``_status`` or ``_volume`` snapshot so properties stay current.
    """

    def __init__(self, name, config):
        """Create Soundtouch Entity.

        ``config`` must carry "host" and "port".  When ``name`` is None the
        name configured on the device itself is used.
        """
        self._device = soundtouch_device(config["host"], config["port"])
        if name is None:
            self._name = self._device.config.name
        else:
            self._name = name
        self._status = self._device.status()
        self._volume = self._device.volume()
        self._config = config

    @property
    def config(self):
        """Return specific soundtouch configuration."""
        return self._config

    @property
    def device(self):
        """Return Soundtouch device."""
        return self._device

    def update(self):
        """Retrieve the latest data."""
        self._status = self._device.status()
        self._volume = self._device.volume()

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume.actual / 100

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        # A "STANDBY" source means the unit is powered off.
        if self._status.source == "STANDBY":
            return STATE_OFF
        return MAP_STATUS.get(self._status.play_status, STATE_UNAVAILABLE)

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._volume.muted

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_SOUNDTOUCH

    def turn_off(self):
        """Turn off media player."""
        self._device.power_off()
        self._status = self._device.status()

    def turn_on(self):
        """Turn on media player."""
        self._device.power_on()
        self._status = self._device.status()

    def volume_up(self):
        """Volume up the media player."""
        self._device.volume_up()
        self._volume = self._device.volume()

    def volume_down(self):
        """Volume down media player."""
        self._device.volume_down()
        self._volume = self._device.volume()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._device.set_volume(int(volume * 100))
        self._volume = self._device.volume()

    def mute_volume(self, mute):
        """Send mute command.

        NOTE(review): ``libsoundtouch``'s ``mute()`` appears to be a
        toggle and the requested ``mute`` value is ignored here --
        confirm this matches the media_player mute contract.
        """
        self._device.mute()
        self._volume = self._device.volume()

    def media_play_pause(self):
        """Simulate play pause media player."""
        self._device.play_pause()
        self._status = self._device.status()

    def media_play(self):
        """Send play command."""
        self._device.play()
        self._status = self._device.status()

    def media_pause(self):
        """Send media pause command to media player."""
        self._device.pause()
        self._status = self._device.status()

    def media_next_track(self):
        """Send next track command."""
        self._device.next_track()
        self._status = self._device.status()

    def media_previous_track(self):
        """Send the previous track command."""
        self._device.previous_track()
        self._status = self._device.status()

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._status.image

    @property
    def media_title(self):
        """Title of current playing media."""
        # Radio stations report a station name; otherwise fall back to
        # "artist - track".
        if self._status.station_name is not None:
            return self._status.station_name
        if self._status.artist is not None:
            return self._status.artist + " - " + self._status.track
        return None

    @property
    def media_duration(self):
        """Duration of current playing media in seconds."""
        return self._status.duration

    @property
    def media_artist(self):
        """Artist of current playing media."""
        return self._status.artist

    @property
    def media_track(self):
        """Track of current playing media."""
        return self._status.track

    @property
    def media_album_name(self):
        """Album name of current playing media."""
        return self._status.album

    def play_media(self, media_type, media_id, **kwargs):
        """Play a piece of media.

        An http(s) URL is streamed directly; any other id is looked up
        among the device presets.
        """
        _LOGGER.debug("Starting media with media_id: %s", media_id)
        if re.match(r"http?://", str(media_id)):
            # URL
            _LOGGER.debug("Playing URL %s", str(media_id))
            self._device.play_url(str(media_id))
        else:
            # Preset
            presets = self._device.presets()
            preset = next(
                [
                    preset for preset in presets if preset.preset_id == str(media_id)
                ].__iter__(),
                None,
            )
            if preset is not None:
                _LOGGER.debug("Playing preset: %s", preset.name)
                self._device.select_preset(preset)
            else:
                _LOGGER.warning("Unable to find preset with id %s", media_id)

    def create_zone(self, slaves):
        """
        Create a zone (multi-room) and play on selected devices.

        :param slaves: slaves on which to play
        """
        if not slaves:
            _LOGGER.warning("Unable to create zone without slaves")
        else:
            _LOGGER.info("Creating zone with master %s", self._device.config.name)
            self._device.create_zone([slave.device for slave in slaves])

    def remove_zone_slave(self, slaves):
        """
        Remove slave(s) from and existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.
        Note: If removing last slave, the zone will be deleted and you'll have
        to create a new one. You will not be able to add a new slave anymore

        :param slaves: slaves to remove from the zone
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to remove")
        else:
            _LOGGER.info(
                "Removing slaves from zone with master %s", self._device.config.name
            )
            self._device.remove_zone_slave([slave.device for slave in slaves])

    def add_zone_slave(self, slaves):
        """
        Add slave(s) to and existing zone (multi-room).

        Zone must already exist and slaves array can not be empty.

        :param slaves:slaves to add
        """
        if not slaves:
            _LOGGER.warning("Unable to find slaves to add")
        else:
            _LOGGER.info(
                "Adding slaves to zone with master %s", self._device.config.name
            )
            self._device.add_zone_slave([slave.device for slave in slaves])
| 29.760599
| 88
| 0.621585
|
4a0f6bd1daf95694be00eb82722069596820cca2
| 19,392
|
py
|
Python
|
theresa/lib/model.py
|
rychallener/theresa
|
886c6b74bee2edef7df9b6b54ce6d97de4aa4421
|
[
"MIT"
] | 2
|
2021-09-16T19:37:26.000Z
|
2022-01-30T20:16:05.000Z
|
theresa/lib/model.py
|
rychallener/theresa
|
886c6b74bee2edef7df9b6b54ce6d97de4aa4421
|
[
"MIT"
] | null | null | null |
theresa/lib/model.py
|
rychallener/theresa
|
886c6b74bee2edef7df9b6b54ce6d97de4aa4421
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import theano
import scipy.interpolate as sci
import matplotlib.pyplot as plt
import mc3
import gc
import sys
from numba import jit
# Lib imports
import cf
import atm
import utils
import constants as c
import taurexclass as trc
# Taurex imports
import taurex
from taurex import chemistry
from taurex import planet
from taurex import stellar
from taurex import model
from taurex import pressure
from taurex import temperature
from taurex import cache
from taurex import contributions
from taurex import optimizer
# This import is explicit because it's not included in taurex.temperature. Bug?
from taurex.data.profiles.temperature.temparray import TemperatureArray
@jit(nopython=True)
def fit_2d(params, ecurves, t, y00, sflux, ncurves, intens, baseline):
    """
    Basic 2D fitting routine for a single wavelength.

    Parameter layout (indices into ``params``):
      [0:ncurves]   eigencurve weights
      [ncurves]     uniform (Y00) map weight
      [ncurves+1]   constant offset
      remaining     baseline terms: slope/t0 for 'linear',
                    quad/slope/t0 for 'quadratic'

    Returns the model lightcurve evaluated at times ``t``.
    """
    # Check for negative intensities: if any map location would have
    # non-positive total intensity, return a constant, deliberately bad
    # lightcurve so the sampler rejects this parameter set.
    if intens is not None:
        nloc = intens.shape[1]
        totint = np.zeros(nloc)
        for j in range(nloc):
            # Weighted eigenmap intensity
            totint[j] = np.sum(intens[:,j] * params[:ncurves])
            # Contribution from uniform map
            totint[j] += params[ncurves] / np.pi
        if np.any(totint <= 0):
            f = np.ones(len(t)) * np.min(totint)
            return f
    f = np.zeros(len(t))
    for i in range(ncurves):
        f += ecurves[i] * params[i]
    # NOTE(review): these lines deliberately reuse the leaked loop variable
    # (i == ncurves-1 after the loop), so params[i+1] is the Y00 weight and
    # params[i+2] the offset.  This breaks if ncurves == 0 -- confirm
    # callers always pass ncurves >= 1.
    f += params[i+1] * y00
    f += params[i+2]
    f += sflux
    if baseline == 'linear':
        f += params[i+3] * (t - params[i+4])
    elif baseline == 'quadratic':
        f += params[i+3] * (t - params[i+5])**2 + \
             params[i+4] * (t - params[i+5])
    return f
def specgrid(params, fit):
    """
    Calculate emission from each cell of a planetary grid, as a
    fraction of stellar flux, NOT
    accounting for visibility. Observer is assumed to be looking
    directly at each grid cell. For efficiency, never-visible cells
    are not calculated. Function returns a spectrum of zeros for those
    grid cells.

    Returns (fluxgrid, tgrid, taugrid, p, wn, pmaps).
    NOTE(review): the bad-fit early return below returns only
    (fluxgrid, wngrid) -- two values instead of six; confirm callers
    (specvtime) handle or never hit that path.
    """
    cfg = fit.cfg
    # Determine which grid cells to use
    # Only considers longitudes currently
    nlat, nlon = fit.lat.shape
    ilat, ilon = fit.ivislat, fit.ivislon
    # Initialize to a list because we don't know the native wavenumber
    # resolution a priori of creating the model
    nlat, nlon = fit.lat.shape
    fluxgrid = np.empty((nlat, nlon), dtype=list)
    taugrid = np.empty((nlat, nlon), dtype=list)
    # Per-filter pressure maps and the 3-D temperature grid.
    pmaps = atm.pmaps(params, fit)
    tgrid, p = atm.tgrid(cfg.threed.nlayers, cfg.twod.nlat,
                         cfg.twod.nlon, fit.tmaps, pmaps,
                         cfg.threed.pbot, cfg.threed.ptop, params,
                         fit.nparams3d, fit.modeltype3d,
                         interptype=cfg.threed.interp,
                         oob=cfg.threed.oob, smooth=cfg.threed.smooth)
    # Metallicity is either a fitted parameter or fixed in the config.
    if cfg.threed.z == 'fit':
        izmodel = np.where(fit.modeltype3d == 'z')[0][0]
        istart = np.sum(fit.nparams3d[:izmodel])
        z = params[istart]
    else:
        z = cfg.threed.z
    abn, spec = atm.atminit(cfg.threed.atmtype, cfg.threed.mols, p,
                            tgrid, cfg.planet.m, cfg.planet.r,
                            cfg.planet.p0, cfg.threed.elemfile,
                            cfg.outdir, z, ilat=ilat, ilon=ilon,
                            cheminfo=fit.cheminfo)
    negativeT = False
    if cfg.threed.rtfunc == 'taurex':
        # Cell-independent Tau-REx objects
        rtplan = taurex.planet.Planet(
            planet_mass=cfg.planet.m*c.Msun/c.Mjup,
            planet_radius=cfg.planet.r*c.Rsun/c.Rjup,
            planet_distance=cfg.planet.a,
            impact_param=cfg.planet.b,
            orbital_period=cfg.planet.porb,
            transit_time=cfg.planet.t0)
        rtstar = taurex.stellar.Star(
            temperature=cfg.star.t,
            radius=cfg.star.r,
            distance=cfg.star.d,
            metallicity=cfg.star.z)
        rtp = taurex.pressure.SimplePressureProfile(
            nlayers=cfg.threed.nlayers,
            atm_min_pressure=cfg.threed.ptop * 1e5,
            atm_max_pressure=cfg.threed.pbot * 1e5)
        # Latitudes (all visible) and Longitudes
        for i, j in zip(ilat, ilon):
            # Check for nonphysical atmosphere and return a bad fit
            # if so
            if not np.all(tgrid[:,i,j] >= 0):
                msg = "WARNING: Nonphysical TP profile at Lat: {}, Lon: {}"
                print(msg.format(fit.lat[i,j], fit.lon[i,j]))
                negativeT = True
            rtt = TemperatureArray(
                tp_array=tgrid[:,i,j])
            rtchem = taurex.chemistry.TaurexChemistry()
            for k in range(len(spec)):
                # H2 and He are Tau-REx fill gases; only add requested mols.
                if (spec[k] not in ['H2', 'He']) and \
                   (spec[k] in fit.cfg.threed.mols):
                    gas = trc.ArrayGas(spec[k], abn[k,:,i,j])
                    rtchem.addGas(gas)
            rt = trc.EmissionModel3D(
                planet=rtplan,
                star=rtstar,
                pressure_profile=rtp,
                temperature_profile=rtt,
                chemistry=rtchem,
                nlayers=cfg.threed.nlayers)
            rt.add_contribution(taurex.contributions.AbsorptionContribution())
            rt.add_contribution(taurex.contributions.CIAContribution())
            #rt.add_contribution(trc.LeeMieVaryMixContribution(
            #    lee_mie_radius=0.1*np.ones(cfg.threed.nlayers),
            #    lee_mie_q=40*np.ones(cfg.threed.nlayers),
            #    lee_mie_mix_ratio=1e-5*np.ones(cfg.threed.nlayers),
            #    lee_mie_bottomP=cfg.threed.pbot*1e5,
            #    lee_mie_topP=cfg.threed.ptop*1e5))
            if 'H-' in fit.cfg.threed.mols:
                rt.add_contribution(trc.HMinusContribution())
            rt.build()
            # If we have negative temperatures, don't run the model
            # (it will fail). Return a bad fit instead.
            if negativeT:
                fluxgrid = -1 * np.ones((nlat, nlon,
                                         len(rt.nativeWavenumberGrid)))
                return fluxgrid, rt.nativeWavenumberGrid
            wn, flux, tau, ex = rt.model(wngrid=fit.wngrid)
            fluxgrid[i,j] = flux
            taugrid[i,j] = tau
        # Fill in non-visible cells with zeros
        # (np.where doesn't work because of broadcasting issues)
        nwn = len(wn)
        for i in range(nlat):
            for j in range(nlon):
                if type(fluxgrid[i,j]) == type(None):
                    fluxgrid[i,j] = np.zeros(nwn)
                if type(taugrid[i,j]) == type(None):
                    taugrid[i,j] = np.zeros((cfg.threed.nlayers, nwn))
    else:
        print("ERROR: Unrecognized RT function.")
    return fluxgrid, tgrid, taugrid, p, wn, pmaps
def specvtime(params, fit):
    """
    Calculate spectra emitted by each grid cell, integrate over filters,
    account for line-of-sight and stellar visibility (as functions of time),
    and sum over the grid cells. Returns an array of (nfilt, nt). Units
    are fraction of stellar flux, Fp/Fs.

    Also passes through (tgrid, taugrid, p, wn, pmaps) from specgrid().
    """
    tic = time.time()
    # Calculate grid of spectra without visibility correction
    fluxgrid, tgrid, taugrid, p, wn, pmaps = specgrid(params, fit)
    print("Spectrum generation: {} seconds".format(time.time() - tic))
    tic = time.time()
    nt = len(fit.t)
    nlat, nlon = fit.lat.shape
    nfilt = len(fit.cfg.twod.filtfiles)
    # Integrate to filters
    intfluxgrid = np.zeros((nlat, nlon, nfilt))
    for i in range(nlat):
        for j in range(nlon):
            intfluxgrid[i,j] = utils.specint(wn, fluxgrid[i,j],
                                             fit.filtwn, fit.filttrans)
    fluxvtime = np.zeros((nfilt, nt))
    # Account for vis and sum over grid cells
    # fit.vis[it] weights each cell by its visibility at time it.
    for it in range(nt):
        for ifilt in range(nfilt):
            fluxvtime[ifilt,it] = np.sum(intfluxgrid[:,:,ifilt] * fit.vis[it])
    # There is a very small memory leak somewhere, but this seems to
    # fix it. Not an elegant solution, but ¯\_(ツ)_/¯
    gc.collect()
    return fluxvtime, tgrid, taugrid, p, wn, pmaps
def sysflux(params, fit):
    """Return the flattened total system lightcurve (star + planet,
    normalized to the corrected stellar flux) for every filter, plus the
    intermediate products passed through from specvtime()."""
    # Calculate Fp/Fs
    fpfs, tgrid, taugrid, p, wn, pmaps = specvtime(params, fit)
    nfilt, nt = fpfs.shape
    systemflux = np.zeros((nfilt, nt))
    # Account for stellar correction
    # Transform fp/fs -> fp/(fs + corr) -> (fp + fs + corr)/(fs + corr)
    for i in range(nfilt):
        fpfscorr = fpfs[i] * fit.sflux / (fit.sflux + fit.scorr[i])
        systemflux[i] = fpfscorr + 1
    return systemflux.flatten(), tgrid, taugrid, p, wn, pmaps
def mcmc_wrapper(params, fit):
    """MC3-compatible model function: return the system lightcurve,
    optionally extended with contribution-function sigma distances when
    cfg.threed.fitcf is enabled (these are fit as pseudo-data)."""
    systemflux, tgrid, taugrid, p, wn, pmaps = sysflux(params, fit)
    # Integrate cf if asked for
    if fit.cfg.threed.fitcf:
        cfsd = cfsigdiff(fit, tgrid, wn, taugrid, p, pmaps)
        return np.concatenate((systemflux, cfsd))
    else:
        return systemflux
def cfsigdiff(fit, tgrid, wn, taugrid, p, pmaps):
    '''
    Computes the distance between a 2D pressure/temperature map
    and the corresponding contribution function, in units of
    "sigma". Sigma is estimated by finding the 68.3% credible region of
    the contribution function and calculating the +/- distances from
    the edges of this region to the pressure of maximum contribution.
    The sigma distance is computed for every visible grid cell
    and returned in a flattened array.

    Arguments:
      fit     -- fit object; provides filter info (filtwn, filttrans),
                 the 2D config, and visible cell indices (ivislat, ivislon)
      tgrid   -- temperature grid, shape (nlev, nlat, nlon)
      wn      -- wavenumber array
      taugrid -- optical-depth grid matching tgrid
      p       -- pressure array (one value per layer)
      pmaps   -- per-filter pressure maps, indexed [filter, lat, lon]

    Returns a flat array of length nfilt * (number of visible cells),
    ordered cell-major (all filters for a cell, then the next cell).
    '''
    # Contribution functions for every cell/layer/filter.
    cfs = cf.contribution_filters(tgrid, wn, taugrid, p, fit.filtwn,
                                  fit.filttrans)
    # Where the maps "should" be
    # Find the roots of the derivative of a spline fit to
    # the contribution functions, then calculate some sort
    # of goodness of fit
    nlev, nlat, nlon = tgrid.shape
    nfilt = len(fit.cfg.twod.filtfiles)
    cfsigdiff = np.zeros(nfilt * fit.ivislat.size)
    # Work in log-pressure; layers are sorted because p's ordering is
    # not guaranteed here.
    logp = np.log10(p)
    order = np.argsort(logp)
    # Where to interpolate later
    xpdf = np.linspace(np.amin(logp),
                       np.amax(logp),
                       10*len(logp))
    count = 0
    for i, j in zip(fit.ivislat, fit.ivislon):
        for k in range(nfilt):
            # Where the map is placed
            xval = np.log10(pmaps[k,i,j])
            # Interpolate CF to 10x finer atmospheric layers
            pdf = np.interp(xpdf, logp[order], cfs[i,j,order,k])
            # Compute minimum density of 68.3% region
            # NOTE(review): xpdf is re-bound to cred_region's returned
            # grid, so later iterations interpolate onto that grid —
            # confirm mc3.stats.cred_region returns xpdf unchanged.
            pdf, xpdf, HPDmin = mc3.stats.cred_region(pdf=pdf, xpdf=xpdf)
            # Calculate 68.3% boundaries
            siglo = np.amin(xpdf[pdf>HPDmin])
            sighi = np.amax(xpdf[pdf>HPDmin])
            # Assume CF is approx. Gaussian
            # (the "peak" is taken as the midpoint of the credible
            # region, not the literal argmax of the CF)
            xpeak = (sighi + siglo) / 2
            sig = (sighi - siglo) / 2
            cfsigdiff[count] = (xval - xpeak) / sig
            count += 1
    return cfsigdiff
def get_par_2d(fit, m):
    """
    Build starting parameters, step sizes, bounds, and names for a 2D fit.

    The parameter set is one coefficient per eigencurve, a constant
    offset (C0), and a stellar flux correction (scorr), optionally
    followed by baseline-model parameters selected by cfg.twod.baseline.

    Returns (params, pstep, pmin, pmax, pnames, texnames).
    """
    cfg = fit.cfg
    # Eigencurve coefficients + C0 + scorr
    npar = m.ncurves + 2
    params = np.zeros(npar)
    params[m.ncurves] = 0.001  # small nonzero start for C0
    pstep = np.full(npar,  0.01)
    pmin  = np.full(npar, -1.0)
    pmax  = np.full(npar,  1.0)
    pnames   = ["C{}".format(k + 1) for k in range(m.ncurves)]
    texnames = ["$C_{{{}}}$".format(k + 1) for k in range(m.ncurves)]
    pnames   += ["C0", "scorr"]
    texnames += ["$C_0$", "$s_{corr}$"]
    # Append baseline-model parameters, if configured
    if cfg.twod.baseline is None:
        pass
    elif cfg.twod.baseline == 'linear':
        # Slope b1 (free) and reference time t0 (fixed; step 0)
        params   = np.concatenate((params,   (  0.0,   0.0)))
        pstep    = np.concatenate((pstep,    (  0.01,  0.0)))
        pmin     = np.concatenate((pmin,     ( -1.0, -10.0)))
        pmax     = np.concatenate((pmax,     (  1.0,  10.0)))
        pnames   = np.concatenate((pnames,   ('b1', 't0')))
        texnames = np.concatenate((texnames, ('$b_1$', '$t_0$')))
    elif cfg.twod.baseline == 'quadratic':
        # Quadratic b2, slope b1 (free) and reference time t0 (fixed)
        params   = np.concatenate((params,   (  0.0,   0.0,   0.0)))
        pstep    = np.concatenate((pstep,    (  0.01,  0.01,  0.0)))
        pmin     = np.concatenate((pmin,     ( -1.0,  -1.0, -10.0)))
        pmax     = np.concatenate((pmax,     (  1.0,   1.0,  10.0)))
        pnames   = np.concatenate((pnames,   ('b2', 'b1', 't0')))
        texnames = np.concatenate((texnames, ('$b_2$', '$b_1$', '$t_0$')))
    else:
        print("Unrecognized baseline model.")
        sys.exit()
    return params, pstep, pmin, pmax, pnames, texnames
def get_par_3d(fit):
    '''
    Build starting parameters, steps, bounds, and names for a 3D fit.

    The parameter vector is assembled from up to three components:

      1. A pressure-map model chosen by cfg.threed.mapfunc
         ('isobaric', 'sinusoidal', 'flexible', 'quadratic', 'cubic').
      2. Optional out-of-bounds temperatures (cfg.threed.oob:
         'both', 'top', or 'bot').
      3. An optional spectral offset parameter (cfg.threed.z == 'fit').

    Returns (par, pstep, pmin, pmax, pnames, nparams, modeltype), where
    nparams holds the number of parameters consumed by each component
    and modeltype labels each component ('pmap', 'oob', or 'z').

    Raises ValueError for an unrecognized mapping function (previously
    this printed a warning and then crashed with a NameError).
    '''
    nmaps = len(fit.maps)
    nparams = []
    modeltype = []
    if fit.cfg.threed.mapfunc == 'isobaric':
        # One pressure level per 2D temperature map
        npar = nmaps
        # Guess that higher temps are deeper
        ipar = np.argsort(np.max(fit.tmaps, axis=(1,2)))
        par = np.linspace(-2, 0, npar)[ipar]
        pstep = np.ones(npar) * 1e-3
        pmin = np.ones(npar) * np.log10(fit.cfg.threed.ptop)
        pmax = np.ones(npar) * np.log10(fit.cfg.threed.pbot)
        pnames = ['log(p{})'.format(a) for a in np.arange(1, nmaps+1)]
        nparams.append(npar)
        modeltype.append('pmap')
    elif fit.cfg.threed.mapfunc == 'sinusoidal':
        # For a single wavelength: base pressure, latitudinal amplitude,
        # longitudinal amplitude, and longitudinal phase
        npar = 4
        par = np.zeros(npar)
        pstep = np.ones(npar) * 1e-3
        pmin = np.array([np.log10(fit.cfg.threed.ptop),
                         -np.inf, -np.inf, -180.0])
        pmax = np.array([np.log10(fit.cfg.threed.pbot),
                         np.inf, np.inf, 180.0])
        pnames = ['log(p{})',
                  'Lat. Amp. {}',
                  'Lon. Amp. {}',
                  'Lon. Phase {}']
        # Repeat for each wavelength
        nwl = len(fit.maps)
        par = np.tile(par, nwl)
        pstep = np.tile(pstep, nwl)
        pmin = np.tile(pmin, nwl)
        pmax = np.tile(pmax, nwl)
        pnames = np.concatenate([[pname.format(a) for pname in pnames]
                                 for a in np.arange(1, nmaps+1)])
        # Guess that the longitudinal sinusoid follows the hotspot
        for i in range(nwl):
            par[3+i*npar] = fit.maps[i].hslocbest[1]
        # Guess that higher temps are deeper
        ipar = np.argsort(np.max(fit.tmaps, axis=(1,2)))
        for i in range(nwl):
            par[i*npar] = np.linspace(-2, 0, nwl)[ipar][i]
        nparams.append(npar * nwl)
        modeltype.append('pmap')
    elif fit.cfg.threed.mapfunc == 'flexible':
        # One free pressure per visible grid cell, per 2D map
        ilat, ilon = np.where((fit.lon + fit.dlon / 2. > fit.minvislon) &
                              (fit.lon - fit.dlon / 2. < fit.maxvislon))
        nvislat = len(np.unique(ilat))
        nvislon = len(np.unique(ilon))
        npar = nvislat * nvislon * nmaps
        par = np.zeros(npar)
        pstep = np.ones(npar) * 1e-3
        pmin = np.ones(npar) * np.log10(fit.cfg.threed.ptop)
        pmax = np.ones(npar) * np.log10(fit.cfg.threed.pbot)
        # Name one parameter per (map, visible-lat, visible-lon) cell.
        # Iterating over the unique indices keeps len(pnames) == npar
        # (iterating over ilat/ilon directly would duplicate names,
        # since np.where returns one entry per matching cell).
        pnames = ['log(p{},{},{})'.format(i, j, k)
                  for i in np.arange(1, nmaps+1)
                  for j in np.unique(ilat)
                  for k in np.unique(ilon)]
        # npar already counts every map, so no extra multiplier here
        # (the old ``npar * nwl`` referenced a name undefined in this
        # branch and raised a NameError).
        nparams.append(npar)
        modeltype.append('pmap')
    elif fit.cfg.threed.mapfunc == 'quadratic':
        # For a single wavelength: base pressure plus five quadratic
        # terms in latitude/longitude
        npar = 6
        par = np.zeros(npar)
        pstep = np.ones(npar) * 1e-3
        pmin = np.array([np.log10(fit.cfg.threed.ptop),
                         -np.inf, -np.inf, -np.inf, -np.inf, -np.inf])
        pmax = np.array([np.log10(fit.cfg.threed.pbot),
                         np.inf, np.inf, np.inf, np.inf, np.inf])
        pnames = ['log(p{})',
                  'LatLat {}',
                  'LonLon {}',
                  'Lat {}',
                  'Lon {}',
                  'LatLon {}']
        # Repeat for each wavelength
        nwl = len(fit.maps)
        par = np.tile(par, nwl)
        pstep = np.tile(pstep, nwl)
        pmin = np.tile(pmin, nwl)
        pmax = np.tile(pmax, nwl)
        pnames = np.concatenate([[pname.format(a) for pname in pnames]
                                 for a in np.arange(1, nmaps+1)])
        nparams.append(npar * nwl)
        modeltype.append('pmap')
    elif fit.cfg.threed.mapfunc == 'cubic':
        # For a single wavelength: base pressure plus nine cubic terms
        npar = 10
        par = np.zeros(npar)
        pstep = np.ones(npar) * 1e-3
        pmin = np.array([np.log10(fit.cfg.threed.ptop),
                         -np.inf, -np.inf, -np.inf, -np.inf, -np.inf,
                         -np.inf, -np.inf, -np.inf, -np.inf])
        pmax = np.array([np.log10(fit.cfg.threed.pbot),
                         np.inf, np.inf, np.inf, np.inf, np.inf,
                         np.inf, np.inf, np.inf, np.inf])
        pnames = ['log(p{})',
                  'LatLatLat {}',
                  'LonLonLon {}',
                  'LatLat {}',
                  'LonLon {}',
                  'Lat {}',
                  'Lon {}',
                  'LatLatLon {}',
                  'LatLonLon {}',
                  'LatLon {}']
        # Repeat for each wavelength
        nwl = len(fit.maps)
        par = np.tile(par, nwl)
        pstep = np.tile(pstep, nwl)
        pmin = np.tile(pmin, nwl)
        pmax = np.tile(pmax, nwl)
        pnames = np.concatenate([[pname.format(a) for pname in pnames]
                                 for a in np.arange(1, nmaps+1)])
        nparams.append(npar * nwl)
        modeltype.append('pmap')
    else:
        raise ValueError("Unrecognized mapping function: {}".format(
            fit.cfg.threed.mapfunc))
    # Optional out-of-bounds temperatures above/below the mapped range.
    # Temperatures start at 1000 K (top) and 2000 K (bot), bounded [0, 4000].
    if fit.cfg.threed.oob == 'both':
        par    = np.concatenate((par,    (1000., 2000.)))
        pstep  = np.concatenate((pstep,  (   1.,    1.)))
        pmin   = np.concatenate((pmin,   (   0.,    0.)))
        pmax   = np.concatenate((pmax,   (4000., 4000.)))
        pnames = np.concatenate((pnames, ('Ttop', 'Tbot')))
        nparams.append(2)
        modeltype.append('oob')
    elif fit.cfg.threed.oob == 'top':
        par    = np.concatenate((par,    (1000.,)))
        pstep  = np.concatenate((pstep,  (   1.,)))
        pmin   = np.concatenate((pmin,   (   0.,)))
        pmax   = np.concatenate((pmax,   (4000.,)))
        pnames = np.concatenate((pnames, ('Ttop',)))
        nparams.append(1)
        modeltype.append('oob')
    elif fit.cfg.threed.oob == 'bot':
        par    = np.concatenate((par,    (2000.,)))
        pstep  = np.concatenate((pstep,  (   1.,)))
        pmin   = np.concatenate((pmin,   (   0.,)))
        pmax   = np.concatenate((pmax,   (4000.,)))
        pnames = np.concatenate((pnames, ('Tbot',)))
        nparams.append(1)
        modeltype.append('oob')
    else:
        # Any other value means no out-of-bounds parameters; keep the
        # historical behavior of warning and continuing.
        print("Unrecognized out-of-bounds rule.")
    # Optional free spectral offset
    if fit.cfg.threed.z == 'fit':
        par    = np.concatenate((par,    (  0. ,)))
        pstep  = np.concatenate((pstep,  (  0.1,)))
        pmin   = np.concatenate((pmin,   ( -1.0,)))
        pmax   = np.concatenate((pmax,   (  1.0,)))
        pnames = np.concatenate((pnames, ('z',)))
        nparams.append(1)
        modeltype.append('z')
    nparams = np.array(nparams)
    modeltype = np.array(modeltype)
    return par, pstep, pmin, pmax, pnames, nparams, modeltype
| 36.86692
| 79
| 0.544245
|
4a0f6c007adc12140fdf2e9669744eee7f831feb
| 98,604
|
py
|
Python
|
airflow/providers/google/cloud/operators/bigquery.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3
|
2021-06-26T13:42:13.000Z
|
2021-08-03T13:51:36.000Z
|
airflow/providers/google/cloud/operators/bigquery.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 22
|
2020-12-13T07:33:35.000Z
|
2022-02-27T17:55:01.000Z
|
airflow/providers/google/cloud/operators/bigquery.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-08-28T09:47:31.000Z
|
2021-08-28T09:47:31.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google BigQuery operators."""
import enum
import hashlib
import json
import re
import uuid
import warnings
from datetime import datetime
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, SupportsAbs, Union
import attr
from google.api_core.exceptions import Conflict
from google.cloud.bigquery import TableReference
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.operators.sql import SQLCheckOperator, SQLIntervalCheckOperator, SQLValueCheckOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
# Deep-link template for the BigQuery console page of a given job id.
BIGQUERY_JOB_DETAILS_LINK_FMT = "https://console.cloud.google.com/bigquery?j={job_id}"
# Shared warning text for the deprecated ``bigquery_conn_id`` constructor
# argument accepted (for backwards compatibility) by the operators below.
_DEPRECATION_MSG = (
    "The bigquery_conn_id parameter has been deprecated. You should pass the gcp_conn_id parameter."
)
class BigQueryUIColors(enum.Enum):
    """Hex colors for BigQuery operators"""
    # Each member is used as the ``ui_color`` of one family of operators
    # in the Airflow UI.
    CHECK = "#C0D7FF"    # check / value-check / interval-check operators
    QUERY = "#A1BBFF"    # query / get-data operators
    TABLE = "#81A0FF"    # table-level operators
    DATASET = "#5F86FF"  # dataset-level operators
class BigQueryConsoleLink(BaseOperatorLink):
    """Operator extra link pointing at the BigQuery console page for a job.

    The job id is read back from XCom (pushed there by the operator under
    the ``job_id`` key); when no job id is available, an empty link is
    returned.
    """
    name = 'BigQuery Console'
    def get_link(self, operator, dttm):
        # Rebuild the task instance for this execution date so we can pull
        # the job id the operator pushed to XCom at run time.
        task_instance = TaskInstance(task=operator, execution_date=dttm)
        job_id = task_instance.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_id:
            return ''
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
@attr.s(auto_attribs=True)
class BigQueryConsoleIndexableLink(BaseOperatorLink):
    """Helper class for constructing a BigQuery console link for one of
    several queries run by a single operator.

    ``index`` is the 0-based position of the query whose job this link
    points at; the corresponding job id is pulled from the list the
    operator pushed to XCom under the ``job_id`` key.
    """
    index: int = attr.ib()
    @property
    def name(self) -> str:
        # 1-based label for display purposes
        return f'BigQuery Console #{self.index + 1}'
    def get_link(self, operator: BaseOperator, dttm: datetime):
        ti = TaskInstance(task=operator, execution_date=dttm)
        job_ids = ti.xcom_pull(task_ids=operator.task_id, key='job_id')
        if not job_ids:
            return None
        # ``self.index`` is 0-based, so any index >= len(job_ids) has no
        # matching job. (The previous check, ``len(job_ids) < self.index``,
        # let index == len(job_ids) through and raised IndexError.)
        if self.index >= len(job_ids):
            return None
        job_id = job_ids[self.index]
        return BIGQUERY_JOB_DETAILS_LINK_FMT.format(job_id=job_id)
class _BigQueryDbHookMixin:
    # Mixin shared by the BigQuery SQL-check operators: builds the DB-API
    # hook from attributes the concrete operator's __init__ must store
    # (gcp_conn_id, use_legacy_sql, location, impersonation_chain, labels).
    def get_db_hook(self) -> BigQueryHook:
        """Get BigQuery DB Hook"""
        return BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            use_legacy_sql=self.use_legacy_sql,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
            labels=self.labels,
        )
class BigQueryCheckOperator(_BigQueryDbHookMixin, SQLCheckOperator):
    """
    Performs checks against BigQuery. The ``BigQueryCheckOperator`` expects
    a sql query that will return a single row. Each value on that
    first row is evaluated using python ``bool`` casting. If any of the
    values return ``False`` the check is failed and errors out.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCheckOperator`
    Note that Python bool casting evals the following as ``False``:
    * ``False``
    * ``0``
    * Empty string (``""``)
    * Empty list (``[]``)
    * Empty dictionary or set (``{}``)
    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviation for the 7 day average.
    This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you have the choice to
    stop the critical path, preventing from
    publishing dubious data, or on the side and receive email alerts
    without stopping the progress of the DAG.
    :param sql: the sql to be executed
    :type sql: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """
    template_fields = (
        'sql',
        'gcp_conn_id',
        'impersonation_chain',
        'labels',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value
    def __init__(
        self,
        *,
        sql: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        # ``sql`` is stored by SQLCheckOperator.__init__; it is not
        # re-assigned here (consistent with the sibling check operators).
        super().__init__(sql=sql, **kwargs)
        # Honor the deprecated connection-id alias, preferring gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        # Attributes consumed by _BigQueryDbHookMixin.get_db_hook().
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.impersonation_chain = impersonation_chain
        self.labels = labels
class BigQueryValueCheckOperator(_BigQueryDbHookMixin, SQLValueCheckOperator):
    """
    Performs a simple value check using sql code.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryValueCheckOperator`
    :param sql: the sql to be executed
    :type sql: str
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """
    template_fields = (
        'sql',
        'gcp_conn_id',
        'pass_value',
        'impersonation_chain',
        'labels',
    )
    template_ext = ('.sql',)
    ui_color = BigQueryUIColors.CHECK.value
    def __init__(
        self,
        *,
        sql: str,
        pass_value: Any,
        tolerance: Any = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[dict] = None,
        **kwargs,
    ) -> None:
        # SQLValueCheckOperator stores sql, pass_value and tolerance itself.
        super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
        # Honor the deprecated connection-id alias, preferring gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        # Attributes consumed by _BigQueryDbHookMixin.get_db_hook().
        self.gcp_conn_id = gcp_conn_id
        self.use_legacy_sql = use_legacy_sql
        self.location = location
        self.impersonation_chain = impersonation_chain
        self.labels = labels
class BigQueryIntervalCheckOperator(_BigQueryDbHookMixin, SQLIntervalCheckOperator):
    """
    Checks that the values of metrics given as SQL expressions are within
    a certain tolerance of the ones from days_back before.
    This method constructs a query like so ::
        SELECT {metrics_threshold_dict_key} FROM {table}
        WHERE {date_filter_column}=<date>
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryIntervalCheckOperator`
    :param table: the table name
    :type table: str
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: int
    :param metrics_thresholds: a dictionary of ratios indexed by metrics, for
        example 'COUNT(*)': 1.5 would require a 50 percent or less difference
        between the current day, and the prior days_back.
    :type metrics_thresholds: dict
    :param use_legacy_sql: Whether to use legacy SQL (true)
        or standard SQL (false).
    :type use_legacy_sql: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param location: The geographic location of the job. See details at:
        https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    """
    template_fields = (
        'table',
        'gcp_conn_id',
        'sql1',
        'sql2',
        'impersonation_chain',
        'labels',
    )
    ui_color = BigQueryUIColors.CHECK.value
    def __init__(
        self,
        *,
        table: str,
        metrics_thresholds: dict,
        date_filter_column: str = 'ds',
        days_back: SupportsAbs[int] = -7,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        use_legacy_sql: bool = True,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        labels: Optional[Dict] = None,
        **kwargs,
    ) -> None:
        # SQLIntervalCheckOperator builds sql1/sql2 from these arguments.
        super().__init__(
            table=table,
            metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column,
            days_back=days_back,
            **kwargs,
        )
        # Honor the deprecated connection-id alias, preferring gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        # Attributes consumed by _BigQueryDbHookMixin.get_db_hook().
        self.gcp_conn_id = gcp_conn_id
        self.location = location
        self.use_legacy_sql = use_legacy_sql
        self.labels = labels
        self.impersonation_chain = impersonation_chain
class BigQueryGetDataOperator(BaseOperator):
    """
    Fetches the data from a BigQuery table (alternatively fetch data for selected columns)
    and returns data in a python list. The number of elements in the returned list will
    be equal to the number of rows fetched. Each element in the list will again be a list
    where element would represent the columns values for that row.
    **Example Result**: ``[['Tony', '10'], ['Mike', '20'], ['Steve', '15']]``
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDataOperator`
    .. note::
        If you pass fields to ``selected_fields`` which are in different order than the
        order of columns already in
        BQ table, the data will still be in the order of BQ table.
        For example if the BQ table has 3 columns as
        ``[A,B,C]`` and you pass 'B,A' in the ``selected_fields``
        the data would still be of the form ``'A,B'``.
    **Example**: ::
        get_data = BigQueryGetDataOperator(
            task_id='get_data_from_bq',
            dataset_id='test_dataset',
            table_id='Transaction_partitions',
            max_results=100,
            selected_fields='DATE',
            gcp_conn_id='airflow-conn-id'
        )
    :param dataset_id: The dataset ID of the requested table. (templated)
    :type dataset_id: str
    :param table_id: The table ID of the requested table. (templated)
    :type table_id: str
    :param max_results: The maximum number of records (rows) to be fetched
        from the table. (templated)
    :type max_results: int
    :param selected_fields: List of fields to return (comma-separated). If
        unspecified, all fields are returned.
    :type selected_fields: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """
    template_fields = (
        'dataset_id',
        'table_id',
        'max_results',
        'selected_fields',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.QUERY.value
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        max_results: int = 100,
        selected_fields: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Honor the deprecated connection-id alias, preferring gcp_conn_id
        # (same shared message as the other operators in this module).
        if bigquery_conn_id:
            warnings.warn(_DEPRECATION_MSG, DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        # ``max_results`` is a templated field, so a Jinja string may be
        # passed here and only rendered before execute(); store it as
        # given and cast at the point of use instead of crashing in
        # __init__ with ``int("{{ ... }}")``.
        self.max_results = max_results
        self.selected_fields = selected_fields
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain
    def execute(self, context) -> list:
        """Fetch up to ``max_results`` rows and return them as a list of
        per-row value lists."""
        self.log.info(
            'Fetching Data from %s.%s max results: %s', self.dataset_id, self.table_id, self.max_results
        )
        # Pass the connection id through the non-deprecated hook parameter
        # (``bigquery_conn_id=`` would trigger a second deprecation warning
        # inside the hook).
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        rows = hook.list_rows(
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            max_results=int(self.max_results),  # templated values arrive as str
            selected_fields=self.selected_fields,
            location=self.location,
        )
        self.log.info('Total extracted rows: %s', len(rows))
        table_data = [row.values() for row in rows]
        return table_data
class BigQueryExecuteQueryOperator(BaseOperator):
"""
Executes BigQuery SQL queries in a specific BigQuery database.
This operator does not assert idempotency.
:param sql: the sql code to be executed (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'.
:param destination_dataset_table: A dotted
``(<project>.|<project>:)<dataset>.<table>`` that, if set, will store the results
of the query. (templated)
:type destination_dataset_table: str
:param write_disposition: Specifies the action that occurs if the destination table
already exists. (default: 'WRITE_EMPTY')
:type write_disposition: str
:param create_disposition: Specifies whether the job is allowed to create new tables.
(default: 'CREATE_IF_NEEDED')
:type create_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allow_large_results``
must be ``true`` if this is set to ``false``. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type bigquery_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: bool
:param maximum_billing_tier: Positive integer that serves as a multiplier
of the basic price.
Defaults to None, in which case it uses the value set in the project.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param api_resource_configs: a dictionary that contain params
'configuration' applied for Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
for example, {'query': {'useQueryCache': False}}. You could use it
if you need to provide some params that are not supported by BigQueryOperator
like args.
:type api_resource_configs: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: Optional[Union[list, tuple, set]]
:param query_params: a list of dictionary containing query parameter types and
values, passed to BigQuery. The structure of dictionary should look like
'queryParameters' in Google BigQuery Jobs API:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs.
For example, [{ 'name': 'corpus', 'parameterType': { 'type': 'STRING' },
'parameterValue': { 'value': 'romeoandjuliet' } }]. (templated)
:type query_params: list
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
:type cluster_fields: list[str]
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
'sql',
'destination_dataset_table',
'labels',
'query_params',
'impersonation_chain',
)
template_ext = ('.sql',)
ui_color = BigQueryUIColors.QUERY.value
@property
def operator_extra_links(self):
"""Return operator extra links"""
if isinstance(self.sql, str):
return (BigQueryConsoleLink(),)
return (BigQueryConsoleIndexableLink(i) for i, _ in enumerate(self.sql))
def __init__(
self,
*,
sql: Union[str, Iterable],
destination_dataset_table: Optional[str] = None,
write_disposition: str = 'WRITE_EMPTY',
allow_large_results: Optional[bool] = False,
flatten_results: Optional[bool] = None,
gcp_conn_id: str = 'google_cloud_default',
bigquery_conn_id: Optional[str] = None,
delegate_to: Optional[str] = None,
udf_config: Optional[list] = None,
use_legacy_sql: bool = True,
maximum_billing_tier: Optional[int] = None,
maximum_bytes_billed: Optional[float] = None,
create_disposition: str = 'CREATE_IF_NEEDED',
schema_update_options: Optional[Union[list, tuple, set]] = None,
query_params: Optional[list] = None,
labels: Optional[dict] = None,
priority: str = 'INTERACTIVE',
time_partitioning: Optional[dict] = None,
api_resource_configs: Optional[dict] = None,
cluster_fields: Optional[List[str]] = None,
location: Optional[str] = None,
encryption_configuration: Optional[dict] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if bigquery_conn_id:
warnings.warn(
"The bigquery_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.",
DeprecationWarning,
stacklevel=2,
)
gcp_conn_id = bigquery_conn_id
warnings.warn(
"This operator is deprecated. Please use `BigQueryInsertJobOperator`.",
DeprecationWarning,
stacklevel=2,
)
self.sql = sql
self.destination_dataset_table = destination_dataset_table
self.write_disposition = write_disposition
self.create_disposition = create_disposition
self.allow_large_results = allow_large_results
self.flatten_results = flatten_results
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.udf_config = udf_config
self.use_legacy_sql = use_legacy_sql
self.maximum_billing_tier = maximum_billing_tier
self.maximum_bytes_billed = maximum_bytes_billed
self.schema_update_options = schema_update_options
self.query_params = query_params
self.labels = labels
self.priority = priority
self.time_partitioning = time_partitioning
self.api_resource_configs = api_resource_configs
self.cluster_fields = cluster_fields
self.location = location
self.encryption_configuration = encryption_configuration
self.hook = None # type: Optional[BigQueryHook]
self.impersonation_chain = impersonation_chain
def execute(self, context):
if self.hook is None:
self.log.info('Executing: %s', self.sql)
self.hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
use_legacy_sql=self.use_legacy_sql,
delegate_to=self.delegate_to,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if isinstance(self.sql, str):
job_id = self.hook.run_query(
sql=self.sql,
destination_dataset_table=self.destination_dataset_table,
write_disposition=self.write_disposition,
allow_large_results=self.allow_large_results,
flatten_results=self.flatten_results,
udf_config=self.udf_config,
maximum_billing_tier=self.maximum_billing_tier,
maximum_bytes_billed=self.maximum_bytes_billed,
create_disposition=self.create_disposition,
query_params=self.query_params,
labels=self.labels,
schema_update_options=self.schema_update_options,
priority=self.priority,
time_partitioning=self.time_partitioning,
api_resource_configs=self.api_resource_configs,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
)
elif isinstance(self.sql, Iterable):
job_id = [
self.hook.run_query(
sql=s,
destination_dataset_table=self.destination_dataset_table,
write_disposition=self.write_disposition,
allow_large_results=self.allow_large_results,
flatten_results=self.flatten_results,
udf_config=self.udf_config,
maximum_billing_tier=self.maximum_billing_tier,
maximum_bytes_billed=self.maximum_bytes_billed,
create_disposition=self.create_disposition,
query_params=self.query_params,
labels=self.labels,
schema_update_options=self.schema_update_options,
priority=self.priority,
time_partitioning=self.time_partitioning,
api_resource_configs=self.api_resource_configs,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
)
for s in self.sql
]
else:
raise AirflowException(f"argument 'sql' of type {type(str)} is neither a string nor an iterable")
context['task_instance'].xcom_push(key='job_id', value=job_id)
def on_kill(self) -> None:
super().on_kill()
if self.hook is not None:
self.log.info('Cancelling running query')
self.hook.cancel_query()
class BigQueryCreateEmptyTableOperator(BaseOperator):
    """
    Creates a new, empty table in the specified BigQuery dataset,
    optionally with schema.
    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google Cloud Storage object name. The object in
    Google Cloud Storage must be a JSON file with the schema fields in it.
    You can also create a table without schema.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateEmptyTableOperator`
    :param project_id: The project to create the table into. (templated)
    :type project_id: str
    :param dataset_id: The dataset to create the table into. (templated)
    :type dataset_id: str
    :param table_id: The Name of the table to be created. (templated)
    :type table_id: str
    :param table_resource: Table resource as described in documentation:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided all other parameters are ignored.
    :type table_resource: Dict[str, Any]
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
    :type schema_fields: list
    :param gcs_schema_object: Full path to the JSON file containing
        schema (templated). For
        example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
    :type gcs_schema_object: str
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.
        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
    :type time_partitioning: dict
    :param bigquery_conn_id: [Optional] The connection ID used to connect to Google Cloud and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: [Optional] The connection ID used to connect to Google Cloud.
        and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param labels: a dictionary containing labels for the table, passed to BigQuery
        **Example (with schema JSON in GCS)**: ::
            CreateTable = BigQueryCreateEmptyTableOperator(
                task_id='BigQueryCreateEmptyTableOperator_task',
                dataset_id='ODS',
                table_id='Employees',
                project_id='internal-gcp-project',
                gcs_schema_object='gs://schema-bucket/employee_schema.json',
                bigquery_conn_id='airflow-conn-id',
                google_cloud_storage_conn_id='airflow-conn-id'
            )
        **Corresponding Schema file** (``employee_schema.json``): ::
            [
                {
                    "mode": "NULLABLE",
                    "name": "emp_name",
                    "type": "STRING"
                },
                {
                    "mode": "REQUIRED",
                    "name": "salary",
                    "type": "INTEGER"
                }
            ]
        **Example (with schema in the DAG)**: ::
            CreateTable = BigQueryCreateEmptyTableOperator(
                task_id='BigQueryCreateEmptyTableOperator_task',
                dataset_id='ODS',
                table_id='Employees',
                project_id='internal-gcp-project',
                schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                               {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}],
                bigquery_conn_id='airflow-conn-id-account',
                google_cloud_storage_conn_id='airflow-conn-id'
            )
    :type labels: dict
    :param view: [Optional] A dictionary containing definition for the view.
        If set, it will create a view instead of a table:
        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#ViewDefinition
    :type view: dict
    :param materialized_view: [Optional] The materialized view definition.
    :type materialized_view: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
        **Example**: ::
            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    :param cluster_fields: [Optional] The fields used for clustering.
        BigQuery supports clustering for both partitioned and
        non-partitioned tables.
        .. seealso::
            https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#clustering.fields
    :type cluster_fields: list
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param exists_ok: If ``True``, ignore "already exists" errors when creating the table.
    :type exists_ok: bool
    """
    # Fields rendered by Jinja templating before execution.
    template_fields = (
        'dataset_id',
        'table_id',
        'project_id',
        'gcs_schema_object',
        'labels',
        'view',
        'materialized_view',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json", "materialized_view": "json"}
    ui_color = BigQueryUIColors.TABLE.value
    def __init__(
        self,
        *,
        dataset_id: str,
        table_id: str,
        table_resource: Optional[Dict[str, Any]] = None,
        project_id: Optional[str] = None,
        schema_fields: Optional[List] = None,
        gcs_schema_object: Optional[str] = None,
        time_partitioning: Optional[Dict] = None,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        labels: Optional[Dict] = None,
        view: Optional[Dict] = None,
        materialized_view: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        cluster_fields: Optional[List[str]] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.schema_fields = schema_fields
        self.gcs_schema_object = gcs_schema_object
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Normalize to a dict so downstream code never has to handle ``None``.
        self.time_partitioning = {} if time_partitioning is None else time_partitioning
        self.labels = labels
        self.view = view
        self.materialized_view = materialized_view
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.cluster_fields = cluster_fields
        self.table_resource = table_resource
        self.impersonation_chain = impersonation_chain
        self.exists_ok = exists_ok
    def execute(self, context) -> None:
        """Create the (empty) table, resolving the schema from GCS if configured."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        # Explicit ``schema_fields`` win; otherwise, if a GCS schema object was
        # configured, download and parse it as JSON.
        if not self.schema_fields and self.gcs_schema_object:
            gcs_bucket, gcs_object = _parse_gcs_url(self.gcs_schema_object)
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(gcs_bucket, gcs_object))
        else:
            schema_fields = self.schema_fields
        try:
            self.log.info('Creating table')
            table = bq_hook.create_empty_table(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                table_id=self.table_id,
                schema_fields=schema_fields,
                time_partitioning=self.time_partitioning,
                cluster_fields=self.cluster_fields,
                labels=self.labels,
                view=self.view,
                materialized_view=self.materialized_view,
                encryption_configuration=self.encryption_configuration,
                table_resource=self.table_resource,
                exists_ok=self.exists_ok,
            )
            self.log.info(
                'Table %s.%s.%s created successfully', table.project, table.dataset_id, table.table_id
            )
        except Conflict:
            # ``Conflict`` signals the table already exists; log and continue
            # rather than failing the task.
            self.log.info('Table %s.%s already exists.', self.dataset_id, self.table_id)
class BigQueryCreateExternalTableOperator(BaseOperator):
    """
    Creates a new external table in the dataset with the data from Google Cloud
    Storage.
    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google Cloud Storage object name. The object in
    Google Cloud Storage must be a JSON file with the schema fields in it.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateExternalTableOperator`
    :param bucket: The bucket to point the external table to. (templated)
    :type bucket: str
    :param source_objects: List of Google Cloud Storage URIs to point
        table to. If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :type source_objects: list
    :param destination_project_dataset_table: The dotted ``(<project>.)<dataset>.<table>``
        BigQuery table to load data into (templated). If ``<project>`` is not included,
        project will be the project defined in the connection json.
    :type destination_project_dataset_table: str
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
        **Example**: ::
            schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
                           {"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
        Should not be set when source_format is 'DATASTORE_BACKUP'.
    :param table_resource: Table resource as described in documentation:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table
        If provided all other parameters are ignored. External schema from object will be resolved.
    :type table_resource: Dict[str, Any]
    :type schema_fields: list
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table. (templated)
    :type schema_object: str
    :param source_format: File format of the data.
    :type source_format: str
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable,
        Google Cloud Datastore backups and Avro formats.
    :type compression: str
    :param skip_leading_rows: Number of rows to skip when loading from a CSV.
    :type skip_leading_rows: int
    :param field_delimiter: The delimiter to use for the CSV.
    :type field_delimiter: str
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :type max_bad_records: int
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :type quote_character: str
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :type allow_quoted_newlines: bool
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :type allow_jagged_rows: bool
    :param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud and
        interact with the Bigquery service.
    :type bigquery_conn_id: str
    :param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
        and interact with the Google Cloud Storage service.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param src_fmt_configs: configure optional fields specific to the source format
    :type src_fmt_configs: dict
    :param labels: a dictionary containing labels for the table, passed to BigQuery
    :type labels: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
        **Example**: ::
            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """
    # Fields rendered by Jinja templating before execution.
    template_fields = (
        'bucket',
        'source_objects',
        'schema_object',
        'destination_project_dataset_table',
        'labels',
        'table_resource',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value
    def __init__(
        self,
        *,
        bucket: str,
        source_objects: List,
        destination_project_dataset_table: str,
        table_resource: Optional[Dict[str, Any]] = None,
        schema_fields: Optional[List] = None,
        schema_object: Optional[str] = None,
        source_format: str = 'CSV',
        compression: str = 'NONE',
        skip_leading_rows: int = 0,
        field_delimiter: str = ',',
        max_bad_records: int = 0,
        quote_character: Optional[str] = None,
        allow_quoted_newlines: bool = False,
        allow_jagged_rows: bool = False,
        bigquery_conn_id: str = 'google_cloud_default',
        google_cloud_storage_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        src_fmt_configs: Optional[dict] = None,
        labels: Optional[Dict] = None,
        encryption_configuration: Optional[Dict] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # GCS config
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object
        # BQ config
        # Bug fix: detect explicitly-passed legacy keyword arguments by
        # comparing each value with its declared default.  The previous
        # truthiness-based ``any([...])`` always evaluated True, because the
        # defaults ``source_format='CSV'``, ``compression='NONE'`` and
        # ``field_delimiter=','`` are truthy (and the required
        # ``destination_project_dataset_table`` was included as well) — so
        # supplying ``table_resource`` unconditionally raised ValueError,
        # contradicting the documented contract that other parameters are
        # simply ignored.  ``destination_project_dataset_table`` is excluded
        # here because it is still needed to address the table when
        # ``table_resource`` is used (see ``execute``).
        kwargs_passed = any(
            [
                schema_fields is not None,
                source_format != 'CSV',
                compression != 'NONE',
                skip_leading_rows != 0,
                field_delimiter != ',',
                max_bad_records != 0,
                quote_character is not None,
                allow_quoted_newlines,
                allow_jagged_rows,
                src_fmt_configs is not None,
                labels is not None,
                encryption_configuration is not None,
            ]
        )
        if not table_resource:
            warnings.warn(
                "Passing table parameters via keywords arguments will be deprecated. "
                "Please use provide table definition using `table_resource` parameter."
                "You can still use external `schema_object`. ",
                DeprecationWarning,
                stacklevel=2,
            )
        if table_resource and kwargs_passed:
            raise ValueError("You provided both `table_resource` and exclusive keywords arguments.")
        self.table_resource = table_resource
        self.destination_project_dataset_table = destination_project_dataset_table
        self.schema_fields = schema_fields
        self.source_format = source_format
        self.compression = compression
        self.skip_leading_rows = skip_leading_rows
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.bigquery_conn_id = bigquery_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        self.src_fmt_configs = src_fmt_configs or {}
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.impersonation_chain = impersonation_chain
    def execute(self, context) -> None:
        """Create the external table, resolving the schema from GCS if configured."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.bigquery_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        # Explicit ``schema_fields`` win; otherwise download the JSON schema
        # object from GCS (not applicable to Datastore backups, which carry
        # their own schema).
        if not self.schema_fields and self.schema_object and self.source_format != 'DATASTORE_BACKUP':
            gcs_hook = GCSHook(
                gcp_conn_id=self.google_cloud_storage_conn_id,
                delegate_to=self.delegate_to,
                impersonation_chain=self.impersonation_chain,
            )
            schema_fields = json.loads(gcs_hook.download(self.bucket, self.schema_object))
        else:
            schema_fields = self.schema_fields
        # A schema resolved from GCS is merged into a provided table resource.
        if schema_fields and self.table_resource:
            self.table_resource["externalDataConfiguration"]["schema"] = schema_fields
        if self.table_resource:
            tab_ref = TableReference.from_string(self.destination_project_dataset_table)
            bq_hook.create_empty_table(
                table_resource=self.table_resource,
                project_id=tab_ref.project,
                table_id=tab_ref.table_id,
                dataset_id=tab_ref.dataset_id,
            )
        else:
            source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]
            bq_hook.create_external_table(
                external_project_dataset_table=self.destination_project_dataset_table,
                schema_fields=schema_fields,
                source_uris=source_uris,
                source_format=self.source_format,
                compression=self.compression,
                skip_leading_rows=self.skip_leading_rows,
                field_delimiter=self.field_delimiter,
                max_bad_records=self.max_bad_records,
                quote_character=self.quote_character,
                allow_quoted_newlines=self.allow_quoted_newlines,
                allow_jagged_rows=self.allow_jagged_rows,
                src_fmt_configs=self.src_fmt_configs,
                labels=self.labels,
                encryption_configuration=self.encryption_configuration,
            )
class BigQueryDeleteDatasetOperator(BaseOperator):
    """
    Delete an existing dataset from your Project in Big query.
    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryDeleteDatasetOperator`
    :param project_id: The project id of the dataset.
    :type project_id: str
    :param dataset_id: The dataset to be deleted.
    :type dataset_id: str
    :param delete_contents: (Optional) Force the deletion even when the dataset still
        contains tables; all tables (if any) are dropped when set to True.
        When set to False and the dataset is not empty,
        HttpError 400: "{dataset_id} is still in use" is raised.
        Defaults to False.
    :type delete_contents: bool
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        Deprecated in favour of the gcp_conn_id parameter.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    **Example**: ::
        delete_temp_data = BigQueryDeleteDatasetOperator(
            dataset_id='temp-dataset',
            project_id='temp-project',
            delete_contents=True,  # Force the deletion of the dataset as well as its tables (if any).
            gcp_conn_id='_my_gcp_conn_',
            task_id='Deletetemp',
            dag=dag)
    """
    # Fields rendered by Jinja templating before execution.
    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.DATASET.value
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        delete_contents: bool = False,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # The legacy ``bigquery_conn_id`` argument is still honoured, but it
        # only feeds ``gcp_conn_id``.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.delete_contents = delete_contents
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)
    def execute(self, context) -> None:
        """Drop the configured dataset through the BigQuery API."""
        self.log.info('Dataset id: %s Project id: %s', self.dataset_id, self.project_id)
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        hook.delete_dataset(
            dataset_id=self.dataset_id,
            project_id=self.project_id,
            delete_contents=self.delete_contents,
        )
class BigQueryCreateEmptyDatasetOperator(BaseOperator):
    """
    Create a new, empty dataset for your Project in BigQuery.
    https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryCreateEmptyDatasetOperator`
    :param project_id: The name of the project where we want to create the dataset.
    :type project_id: str
    :param dataset_id: The id of dataset. Don't need to provide, if datasetId in dataset_reference.
    :type dataset_id: str
    :param location: The geographic location where the dataset should reside.
    :type location: str
    :param dataset_reference: Dataset reference that could be provided with request body.
        More info:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_reference: dict
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        Deprecated in favour of the gcp_conn_id parameter.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param exists_ok: If ``True``, ignore "already exists" errors when creating the dataset.
    :type exists_ok: bool
    **Example**: ::
        create_new_dataset = BigQueryCreateEmptyDatasetOperator(
            dataset_id='new-dataset',
            project_id='my-project',
            dataset_reference={"friendlyName": "New Dataset"}
            gcp_conn_id='_my_gcp_conn_',
            task_id='newDatasetCreator',
            dag=dag)
    """
    # Fields rendered by Jinja templating before execution.
    template_fields = (
        'dataset_id',
        'project_id',
        'dataset_reference',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_reference": "json"}
    ui_color = BigQueryUIColors.DATASET.value
    def __init__(
        self,
        *,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        dataset_reference: Optional[Dict] = None,
        location: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        exists_ok: bool = False,
        **kwargs,
    ) -> None:
        # The legacy ``bigquery_conn_id`` argument is still honoured, but it
        # only feeds ``gcp_conn_id``.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        # Normalize to a dict so the API call never receives ``None``.
        self.dataset_reference = dataset_reference if dataset_reference else {}
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        self.exists_ok = exists_ok
        super().__init__(**kwargs)
    def execute(self, context) -> None:
        """Create the dataset, tolerating a pre-existing one."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        try:
            hook.create_empty_dataset(
                project_id=self.project_id,
                dataset_id=self.dataset_id,
                dataset_reference=self.dataset_reference,
                location=self.location,
                exists_ok=self.exists_ok,
            )
        except Conflict:
            # ``Conflict`` means the dataset already exists; prefer the id
            # embedded in the dataset reference when reporting it.
            dataset_id = self.dataset_reference.get("datasetReference", {}).get("datasetId", self.dataset_id)
            self.log.info('Dataset %s already exists.', dataset_id)
class BigQueryGetDatasetOperator(BaseOperator):
    """
    Fetch the dataset specified by dataset_id and return its API representation.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDatasetOperator`
    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """
    # Fields rendered by Jinja templating before execution.
    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.DATASET.value
    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)
    def execute(self, context):
        """Look up the dataset and return it as a plain API-resource dict."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.log.info('Start getting dataset: %s:%s', self.project_id, self.dataset_id)
        found = hook.get_dataset(dataset_id=self.dataset_id, project_id=self.project_id)
        # Return the JSON-serializable representation so it can be XCom-pushed.
        return found.to_api_repr()
class BigQueryGetDatasetTablesOperator(BaseOperator):
    """
    Fetch the list of tables contained in a given BigQuery dataset.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryGetDatasetTablesOperator`

    :param dataset_id: dataset ID whose tables should be listed.
    :type dataset_id: str
    :param project_id: (Optional) project owning the dataset; when None the
        hook's default project is used.
    :type project_id: str
    :param max_results: (Optional) cap on the number of tables returned.
    :type max_results: int
    :param gcp_conn_id: (Optional) connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled for this to work.
    :type delegate_to: str
    :param impersonation_chain: optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which is impersonated
        in the request. A string must grant the originating account the Service
        Account Token Creator IAM role; a sequence must grant that role to each
        directly preceding identity, with the first account granting it to the
        originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.DATASET.value

    def __init__(
        self,
        *,
        dataset_id: str,
        project_id: Optional[str] = None,
        max_results: Optional[int] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # What to list.
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.max_results = max_results
        # How to connect.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Return the table listing from ``BigQueryHook.get_dataset_tables``."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        tables = hook.get_dataset_tables(
            dataset_id=self.dataset_id,
            project_id=self.project_id,
            max_results=self.max_results,
        )
        return tables
class BigQueryPatchDatasetOperator(BaseOperator):
    """
    This operator is used to patch dataset for your Project in BigQuery.
    It only replaces fields that are provided in the submitted dataset resource.

    .. deprecated::
        This operator is deprecated; please use ``BigQueryUpdateDatasetOperator``.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryPatchDatasetOperator`
    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in dataset_reference.
    :type dataset_id: str
    :param dataset_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: dict
    :param project_id: The name of the project where we want to create the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """
    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value
    def __init__(
        self,
        *,
        dataset_id: str,
        dataset_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # Warn at construction (DAG-parse) time so authors see the deprecation;
        # stacklevel=2 points the warning at the caller's instantiation site.
        warnings.warn(
            "This operator is deprecated. Please use BigQueryUpdateDatasetOperator.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.dataset_resource = dataset_resource
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)
    def execute(self, context):
        # PATCH semantics: only the keys present in dataset_resource are replaced.
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        return bq_hook.patch_dataset(
            dataset_id=self.dataset_id,
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
        )
class BigQueryUpdateTableOperator(BaseOperator):
    """
    This operator is used to update table for your Project in BigQuery.
    Use ``fields`` to specify which fields of table to update. If a field
    is listed in ``fields`` and is ``None`` in table, it will be deleted.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateTableOperator`
    :param dataset_id: The id of dataset. Don't need to provide,
        if datasetId in table_reference.
    :type dataset_id: str
    :param table_id: The id of table. Don't need to provide,
        if tableId in table_reference.
    :type table_id: str
    :param table_resource: Dataset resource that will be provided with request body.
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
    :type table_resource: Dict[str, Any]
    :param fields: The fields of ``table`` to change, spelled as the Table
        properties (e.g. "friendly_name").
    :type fields: List[str]
    :param project_id: The name of the project where we want to create the table.
        Don't need to provide, if projectId in table_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: table
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
    """
    template_fields = (
        'dataset_id',
        'table_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value
    def __init__(
        self,
        *,
        table_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.dataset_id = dataset_id
        self.table_id = table_id
        self.project_id = project_id
        self.fields = fields
        self.gcp_conn_id = gcp_conn_id
        self.table_resource = table_resource
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)
    def execute(self, context):
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # Returns the updated table resource as a dict (the hook's return value).
        return bq_hook.update_table(
            table_resource=self.table_resource,
            fields=self.fields,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
class BigQueryUpdateDatasetOperator(BaseOperator):
    """
    Update an existing BigQuery dataset.

    Use ``fields`` to pick which properties of the dataset to change; a field
    listed in ``fields`` but set to ``None`` in the resource is deleted. When
    ``fields`` is omitted, every key present in ``dataset_resource`` is updated.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateDatasetOperator`

    :param dataset_id: the dataset id; may be omitted when ``datasetId`` is part
        of ``dataset_reference``.
    :type dataset_id: str
    :param dataset_resource: dataset resource sent as the request body, see
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    :type dataset_resource: Dict[str, Any]
    :param fields: dataset properties to change (e.g. "friendly_name").
    :type fields: Sequence[str]
    :param project_id: project owning the dataset; may be omitted when
        ``projectId`` is part of ``dataset_reference``.
    :type project_id: str
    :param gcp_conn_id: (Optional) connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which is impersonated
        in the request. A string must grant the originating account the Service
        Account Token Creator IAM role; a sequence must grant that role to each
        directly preceding identity, with the first account granting it to the
        originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :rtype: dataset
        https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
    """

    template_fields = (
        'dataset_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"dataset_resource": "json"}
    ui_color = BigQueryUIColors.DATASET.value

    def __init__(
        self,
        *,
        dataset_resource: Dict[str, Any],
        fields: Optional[List[str]] = None,
        dataset_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        # What to update.
        self.dataset_resource = dataset_resource
        self.fields = fields
        self.dataset_id = dataset_id
        self.project_id = project_id
        # How to connect.
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)

    def execute(self, context):
        """Apply the update and return the dataset's API representation (dict)."""
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # No explicit field list -> update every key supplied in the resource.
        if self.fields:
            update_fields = self.fields
        else:
            update_fields = list(self.dataset_resource)
        updated = hook.update_dataset(
            dataset_resource=self.dataset_resource,
            project_id=self.project_id,
            dataset_id=self.dataset_id,
            fields=update_fields,
        )
        return updated.to_api_repr()
class BigQueryDeleteTableOperator(BaseOperator):
    """
    Delete an existing BigQuery table.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryDeleteTableOperator`

    :param deletion_dataset_table: dotted
        ``(<project>.|<project>:)<dataset>.<table>`` naming the table to
        delete. (templated)
    :type deletion_dataset_table: str
    :param gcp_conn_id: (Optional) connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) legacy alias for ``gcp_conn_id``;
        pass ``gcp_conn_id`` instead.
    :type bigquery_conn_id: str
    :param delegate_to: account to impersonate using domain-wide delegation of
        authority, if any. The service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param ignore_if_missing: when True, succeed even if the requested table
        does not exist.
    :type ignore_if_missing: bool
    :param location: location used for the operation.
    :type location: str
    :param impersonation_chain: optional service account to impersonate using
        short-term credentials, or a chained list of accounts required to get
        the access_token of the last account in the list, which is impersonated
        in the request. A string must grant the originating account the Service
        Account Token Creator IAM role; a sequence must grant that role to each
        directly preceding identity, with the first account granting it to the
        originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'deletion_dataset_table',
        'impersonation_chain',
    )
    ui_color = BigQueryUIColors.TABLE.value

    def __init__(
        self,
        *,
        deletion_dataset_table: str,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        ignore_if_missing: bool = False,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Honour the deprecated alias, but tell the user about it.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.deletion_dataset_table = deletion_dataset_table
        self.ignore_if_missing = ignore_if_missing
        self.location = location
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Delete the configured table, optionally tolerating a missing one."""
        self.log.info('Deleting: %s', self.deletion_dataset_table)
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        bq_hook.delete_table(
            table_id=self.deletion_dataset_table,
            not_found_ok=self.ignore_if_missing,
        )
class BigQueryUpsertTableOperator(BaseOperator):
    """
    Upsert BigQuery table: create it when absent, otherwise update it in place.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpsertTableOperator`

    :param dataset_id: A dotted
        ``(<project>.|<project>:)<dataset>`` that indicates which dataset
        will be updated. (templated)
    :type dataset_id: str
    :param table_resource: a table resource. see
        https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
    :type table_resource: dict
    :param project_id: The name of the project where we want to update the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param location: The location used for the operation.
    :type location: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """

    template_fields = (
        'dataset_id',
        'table_resource',
        'impersonation_chain',
    )
    template_fields_renderers = {"table_resource": "json"}
    ui_color = BigQueryUIColors.TABLE.value

    def __init__(
        self,
        *,
        dataset_id: str,
        table_resource: dict,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        bigquery_conn_id: Optional[str] = None,
        delegate_to: Optional[str] = None,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Honour the deprecated alias, but tell the user about it.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.",
                DeprecationWarning,
                stacklevel=3,
            )
            gcp_conn_id = bigquery_conn_id
        self.dataset_id = dataset_id
        self.table_resource = table_resource
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context) -> None:
        """Run the upsert through ``BigQueryHook.run_table_upsert``."""
        self.log.info('Upserting Dataset: %s with table_resource: %s', self.dataset_id, self.table_resource)
        hook = BigQueryHook(
            # Fix: this previously passed `bigquery_conn_id=self.gcp_conn_id`,
            # the hook's deprecated alias, which is inconsistent with every
            # sibling operator and triggers a spurious DeprecationWarning.
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        hook.run_table_upsert(
            dataset_id=self.dataset_id,
            table_resource=self.table_resource,
            project_id=self.project_id,
        )
class BigQueryUpdateTableSchemaOperator(BaseOperator):
    """
    Update BigQuery Table Schema
    Updates fields on a table schema based on contents of the supplied schema_fields_updates
    parameter. The supplied schema does not need to be complete, if the field
    already exists in the schema you only need to supply keys & values for the
    items you want to patch, just ensure the "name" key is set.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryUpdateTableSchemaOperator`
    :param schema_fields_updates: a partial schema resource. see
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#TableSchema
        **Example**: ::
            schema_fields_updates=[
                {"name": "emp_name", "description": "Some New Description"},
                {"name": "salary", "policyTags": {'names': ['some_new_policy_tag']},},
                {"name": "departments", "fields": [
                    {"name": "name", "description": "Some New Description"},
                    {"name": "type", "description": "Some New Description"}
                ]},
            ]
    :type schema_fields_updates: List[dict]
    :param include_policy_tags: (Optional) If set to True policy tags will be included in
        the update request which requires special permissions even if unchanged (default False)
        see https://cloud.google.com/bigquery/docs/column-level-security#roles
    :type include_policy_tags: bool
    :param dataset_id: A dotted
        ``(<project>.|<project>:)<dataset>`` that indicates which dataset
        will be updated. (templated)
    :type dataset_id: str
    :param table_id: The table ID of the requested table. (templated)
    :type table_id: str
    :param project_id: The name of the project where we want to update the dataset.
        Don't need to provide, if projectId in dataset_reference.
    :type project_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """
    template_fields = (
        'schema_fields_updates',
        'dataset_id',
        'table_id',
        'project_id',
        'impersonation_chain',
    )
    template_fields_renderers = {"schema_fields_updates": "json"}
    ui_color = BigQueryUIColors.TABLE.value
    def __init__(
        self,
        *,
        schema_fields_updates: List[Dict[str, Any]],
        include_policy_tags: Optional[bool] = False,
        dataset_id: Optional[str] = None,
        table_id: Optional[str] = None,
        project_id: Optional[str] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        self.schema_fields_updates = schema_fields_updates
        self.include_policy_tags = include_policy_tags
        self.table_id = table_id
        self.dataset_id = dataset_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.impersonation_chain = impersonation_chain
        super().__init__(**kwargs)
    def execute(self, context):
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        # Delegates the merge of the partial schema into the live one to the hook.
        return bq_hook.update_table_schema(
            schema_fields_updates=self.schema_fields_updates,
            include_policy_tags=self.include_policy_tags,
            dataset_id=self.dataset_id,
            table_id=self.table_id,
            project_id=self.project_id,
        )
class BigQueryInsertJobOperator(BaseOperator):
    """
    Executes a BigQuery job. Waits for the job to complete and returns job id.
    This operator work in the following way:
    - it calculates a unique hash of the job using job's configuration or uuid if ``force_rerun`` is True
    - creates ``job_id`` in form of
        ``[provided_job_id | airflow_{dag_id}_{task_id}_{exec_date}]_{uniqueness_suffix}``
    - submits a BigQuery job using the ``job_id``
    - if job with given id already exists then it tries to reattach to the job if its not done and its
        state is in ``reattach_states``. If the job is done the operator will raise ``AirflowException``.
    Using ``force_rerun`` will submit a new job every time without attaching to already existing ones.
    For job definition see here:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BigQueryInsertJobOperator`
    :param configuration: The configuration parameter maps directly to BigQuery's
        configuration field in the job object. For more details see
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
    :type configuration: Dict[str, Any]
    :param job_id: The ID of the job. It will be suffixed with hash of job configuration
        unless ``force_rerun`` is True.
        The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
        dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
        be generated.
    :type job_id: str
    :param force_rerun: If True then operator will use hash of uuid as job id suffix
    :type force_rerun: bool
    :param reattach_states: Set of BigQuery job's states in case of which we should reattach
        to the job. Should be other than final states.
    :param project_id: Google Cloud Project where the job is running
    :type project_id: str
    :param location: location the job is running
    :type location: str
    :param gcp_conn_id: The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    :param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
    :type cancel_on_kill: bool
    """
    template_fields = (
        "configuration",
        "job_id",
        "impersonation_chain",
    )
    template_ext = (".json",)
    template_fields_renderers = {"configuration": "json", "configuration.query.query": "sql"}
    ui_color = BigQueryUIColors.QUERY.value
    def __init__(
        self,
        configuration: Dict[str, Any],
        project_id: Optional[str] = None,
        location: Optional[str] = None,
        job_id: Optional[str] = None,
        force_rerun: bool = True,
        reattach_states: Optional[Set[str]] = None,
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        cancel_on_kill: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.configuration = configuration
        self.location = location
        self.job_id = job_id
        self.project_id = project_id
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.force_rerun = force_rerun
        # Empty set by default: never reattach unless the user opts in.
        self.reattach_states: Set[str] = reattach_states or set()
        self.impersonation_chain = impersonation_chain
        self.cancel_on_kill = cancel_on_kill
        # Kept so on_kill() can cancel the job submitted by execute().
        self.hook: Optional[BigQueryHook] = None
    def prepare_template(self) -> None:
        # If .json is passed then we have to read the file
        # (template_ext renders the file *path* into self.configuration as a str).
        if isinstance(self.configuration, str) and self.configuration.endswith('.json'):
            with open(self.configuration) as file:
                self.configuration = json.loads(file.read())
    def _submit_job(
        self,
        hook: BigQueryHook,
        job_id: str,
    ) -> BigQueryJob:
        """Insert the job under *job_id* and block until it finishes."""
        # Submit a new job
        job = hook.insert_job(
            configuration=self.configuration,
            project_id=self.project_id,
            location=self.location,
            job_id=job_id,
        )
        # Start the job and wait for it to complete and get the result.
        job.result()
        return job
    @staticmethod
    def _handle_job_error(job: BigQueryJob) -> None:
        # A job can finish without raising yet still carry an error_result.
        if job.error_result:
            raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
    def _job_id(self, context):
        """Build a deterministic-or-unique job id per the ``force_rerun`` contract."""
        if self.force_rerun:
            # Fresh uuid -> new hash -> brand-new job id every run.
            hash_base = str(uuid.uuid4())
        else:
            # Stable hash of the configuration -> same id for the same job.
            hash_base = json.dumps(self.configuration, sort_keys=True)
        # md5 is used only for id uniqueness here, not for security.
        uniqueness_suffix = hashlib.md5(hash_base.encode()).hexdigest()
        if self.job_id:
            return f"{self.job_id}_{uniqueness_suffix}"
        exec_date = context['execution_date'].isoformat()
        job_id = f"airflow_{self.dag_id}_{self.task_id}_{exec_date}_{uniqueness_suffix}"
        # BigQuery job ids allow only letters, digits, '_' and '-'; sanitize.
        return re.sub(r"[:\-+.]", "_", job_id)
    def execute(self, context: Any):
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.impersonation_chain,
        )
        self.hook = hook
        job_id = self._job_id(context)
        try:
            job = self._submit_job(hook, job_id)
            self._handle_job_error(job)
        except Conflict:
            # If the job already exists retrieve it
            job = hook.get_job(
                project_id=self.project_id,
                location=self.location,
                job_id=job_id,
            )
            if job.state in self.reattach_states:
                # We are reattaching to a job
                job.result()
                self._handle_job_error(job)
            else:
                # Same job configuration so we need force_rerun
                raise AirflowException(
                    f"Job with id: {job_id} already exists and is in {job.state} state. If you "
                    f"want to force rerun it consider setting `force_rerun=True`."
                    f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
                )
        self.job_id = job.job_id
        return job.job_id
    def on_kill(self) -> None:
        # Best-effort cancellation of the in-flight job when the task is killed.
        if self.job_id and self.cancel_on_kill:
            self.hook.cancel_job(  # type: ignore[union-attr]
                job_id=self.job_id, project_id=self.project_id, location=self.location
            )
| 42.667244
| 109
| 0.668695
|
4a0f6cb2052e137dd0a02ff3805ea5b2414adfda
| 24,059
|
py
|
Python
|
Cogs/Welcome.py
|
holgern/CommunityUpvoteBot
|
b9af069749707ae03936f8015a42a4af57aef57f
|
[
"MIT"
] | 5
|
2018-06-27T07:59:42.000Z
|
2018-07-05T20:35:24.000Z
|
Cogs/Welcome.py
|
holgern/BeemBot.py
|
b9af069749707ae03936f8015a42a4af57aef57f
|
[
"MIT"
] | null | null | null |
Cogs/Welcome.py
|
holgern/BeemBot.py
|
b9af069749707ae03936f8015a42a4af57aef57f
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from datetime import datetime
from discord.ext import commands
from shutil import copyfile
import time
import json
import os
import re
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    # The shared Settings cog is a hard dependency of Welcome.
    bot.add_cog(Welcome(bot, bot.get_cog("Settings")))
class Welcome:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.regexUserName = re.compile(r"\[\[[user]+\]\]", re.IGNORECASE)
self.regexUserPing = re.compile(r"\[\[[atuser]+\]\]", re.IGNORECASE)
self.regexServer = re.compile(r"\[\[[server]+\]\]", re.IGNORECASE)
self.regexCount = re.compile(r"\[\[[count]+\]\]", re.IGNORECASE)
self.regexPlace = re.compile(r"\[\[[place]+\]\]", re.IGNORECASE)
self.regexOnline = re.compile(r"\[\[[online]+\]\]", re.IGNORECASE)
def suppressed(self, guild, msg):
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(guild, "SuppressMentions"):
return Nullify.clean(msg)
else:
return msg
async def onjoin(self, member, server):
# Welcome
welcomeChannel = self.settings.getServerStat(server, "WelcomeChannel")
if welcomeChannel:
for channel in server.channels:
if str(channel.id) == str(welcomeChannel):
welcomeChannel = channel
break
if welcomeChannel:
await self._welcome(member, server, welcomeChannel)
else:
await self._welcome(member, server)
async def onleave(self, member, server):
# Goodbye
if not server in self.bot.guilds:
# We're not on this server - and can't say anything there
return
welcomeChannel = self.settings.getServerStat(server, "WelcomeChannel")
if welcomeChannel:
for channel in server.channels:
if str(channel.id) == str(welcomeChannel):
welcomeChannel = channel
break
if welcomeChannel:
await self._goodbye(member, server, welcomeChannel)
else:
await self._goodbye(member, server)
def _getDefault(self, server):
# Returns the default channel for the server
targetChan = server.get_channel(server.id)
targetChanID = self.settings.getServerStat(server, "DefaultChannel")
if len(str(targetChanID)):
# We *should* have a channel
tChan = self.bot.get_channel(int(targetChanID))
if tChan:
# We *do* have one
targetChan = tChan
return targetChan
@commands.command(pass_context=True)
async def setwelcome(self, ctx, *, message = None):
    """Sets the welcome message for your server (bot-admin only).
    Available Options:
    [[user]] = user name
    [[atuser]] = user mention
    [[server]] = server name
    [[count]] = user count
    [[place]] = user's place (1st, 2nd, 3rd, etc)
    [[online]] = count of users not offline"""
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
                    break  # stop scanning once a bot-admin role matches
            if isAdmin:
                break
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if message is None:  # "is None", not "== None" (PEP 8)
        # No argument clears the welcome message entirely.
        self.settings.setServerStat(ctx.message.guild, "Welcome", None)
        await ctx.channel.send('Welcome message removed!')
        return
    self.settings.setServerStat(ctx.message.guild, "Welcome", message)
    await ctx.channel.send('Welcome message updated!\n\nHere\'s a preview:')
    await self._welcome(ctx.message.author, ctx.message.guild, ctx.message.channel)
    # Print the welcome channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current welcome channel is **{}**.'.format(welcomeChannel.mention)
    else:
        default = self._getDefault(ctx.guild)  # resolve once, reuse below
        if default:
            msg = 'The current welcome channel is the default channel (**{}**).'.format(default.mention)
        else:
            msg = 'There is *no channel* set for welcome messages.'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def testwelcome(self, ctx, *, member = None):
    """Prints the current welcome message (bot-admin only)."""
    # Check if we're suppressing @here and @everyone mentions
    if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
        suppress = True
    else:
        suppress = False
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        # NOTE(review): no early break - keeps scanning roles after a match.
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if member == None:  # NOTE(review): prefer "is None"
        member = ctx.message.author
    if type(member) is str:
        # The consume-rest arg arrives as a string; resolve it to a member.
        memberName = member
        member = DisplayName.memberForName(memberName, ctx.message.guild)
        if not member:
            msg = 'I couldn\'t find *{}*...'.format(memberName)
            # Check for suppress
            if suppress:
                msg = Nullify.clean(msg)
            await ctx.channel.send(msg)
            return
    # Here we have found a member, and stuff.
    # Let's make sure we have a message
    message = self.settings.getServerStat(ctx.message.guild, "Welcome")
    if message == None:
        await ctx.channel.send('Welcome message not setup. You can do so with the `{}setwelcome [message]` command.'.format(ctx.prefix))
        return
    await self._welcome(member, ctx.message.guild, ctx.message.channel)
    # Print the welcome channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current welcome channel is **{}**.'.format(welcomeChannel.mention)
    else:
        # NOTE(review): _getDefault is called twice here; could be hoisted.
        if self._getDefault(ctx.guild):
            msg = 'The current welcome channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)
        else:
            msg = 'There is *no channel* set for welcome messages.'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def rawwelcome(self, ctx, *, member = None):
    """Prints the current welcome message's markdown (bot-admin only)."""
    # Check if we're suppressing @here and @everyone mentions
    if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
        suppress = True
    else:
        suppress = False
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if member == None:  # NOTE(review): prefer "is None"
        member = ctx.message.author
    if type(member) is str:
        memberName = member
        member = DisplayName.memberForName(memberName, ctx.message.guild)
        if not member:
            msg = 'I couldn\'t find *{}*...'.format(memberName)
            # Check for suppress
            if suppress:
                msg = Nullify.clean(msg)
            await ctx.channel.send(msg)
            return
    # Here we have found a member, and stuff.
    # Let's make sure we have a message
    message = self.settings.getServerStat(ctx.message.guild, "Welcome")
    if message == None:
        await ctx.channel.send('Welcome message not setup. You can do so with the `{}setwelcome [message]` command.'.format(ctx.prefix))
        return
    # Escape the markdown so the raw template is shown instead of rendered.
    message = message.replace('\\', '\\\\').replace('*', '\\*').replace('`', '\\`').replace('_', '\\_')
    await ctx.send(message)
    # Print the welcome channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current welcome channel is **{}**.'.format(welcomeChannel.mention)
    else:
        if self._getDefault(ctx.guild):
            msg = 'The current welcome channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)
        else:
            msg = 'There is *no channel* set for welcome messages.'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def setgoodbye(self, ctx, *, message = None):
    """Sets the goodbye message for your server (bot-admin only).
    Available Options:
    [[user]] = user name
    [[atuser]] = user mention
    [[server]] = server name
    [[count]] = user count
    [[place]] = user's place (1st, 2nd, 3rd, etc) - will be count + 1
    [[online]] = count of users not offline"""
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if message == None:  # NOTE(review): prefer "is None"
        # No argument clears the goodbye message entirely.
        self.settings.setServerStat(ctx.message.guild, "Goodbye", None)
        await ctx.channel.send('Goodbye message removed!')
        return
    self.settings.setServerStat(ctx.message.guild, "Goodbye", message)
    await ctx.channel.send('Goodbye message updated!\n\nHere\'s a preview:')
    await self._goodbye(ctx.message.author, ctx.message.guild, ctx.message.channel)
    # Print the goodbye channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current goodbye channel is **{}**.'.format(welcomeChannel.mention)
    else:
        if self._getDefault(ctx.guild):
            msg = 'The current goodbye channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)
        else:
            msg = 'There is *no channel* set for goodbye messages.'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def testgoodbye(self, ctx, *, member = None):
    """Prints the current goodbye message (bot-admin only)."""
    # Check if we're suppressing @here and @everyone mentions
    if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
        suppress = True
    else:
        suppress = False
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if member == None:  # NOTE(review): prefer "is None"
        member = ctx.message.author
    if type(member) is str:
        # The consume-rest arg arrives as a string; resolve it to a member.
        memberName = member
        member = DisplayName.memberForName(memberName, ctx.message.guild)
        if not member:
            msg = 'I couldn\'t find *{}*...'.format(memberName)
            # Check for suppress
            if suppress:
                msg = Nullify.clean(msg)
            await ctx.channel.send(msg)
            return
    # Here we have found a member, and stuff.
    # Let's make sure we have a message
    message = self.settings.getServerStat(ctx.message.guild, "Goodbye")
    if message == None:
        await ctx.channel.send('Goodbye message not setup. You can do so with the `{}setgoodbye [message]` command.'.format(ctx.prefix))
        return
    await self._goodbye(member, ctx.message.guild, ctx.message.channel)
    # Print the goodbye channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current goodbye channel is **{}**.'.format(welcomeChannel.mention)
    else:
        if self._getDefault(ctx.guild):
            msg = 'The current goodbye channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)
        else:
            msg = 'There is *no channel* set for goodbye messages.'
    await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def rawgoodbye(self, ctx, *, member = None):
    """Prints the current goodbye message's markdown (bot-admin only)."""
    # Check if we're suppressing @here and @everyone mentions
    if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
        suppress = True
    else:
        suppress = False
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if member == None:  # NOTE(review): prefer "is None"
        member = ctx.message.author
    if type(member) is str:
        memberName = member
        member = DisplayName.memberForName(memberName, ctx.message.guild)
        if not member:
            msg = 'I couldn\'t find *{}*...'.format(memberName)
            # Check for suppress
            if suppress:
                msg = Nullify.clean(msg)
            await ctx.channel.send(msg)
            return
    # Here we have found a member, and stuff.
    # Let's make sure we have a message
    message = self.settings.getServerStat(ctx.message.guild, "Goodbye")
    if message == None:
        await ctx.channel.send('Goodbye message not setup. You can do so with the `{}setgoodbye [message]` command.'.format(ctx.prefix))
        return
    # Escape the markdown so the raw template is shown instead of rendered.
    message = message.replace('\\', '\\\\').replace('*', '\\*').replace('`', '\\`').replace('_', '\\_')
    await ctx.send(message)
    # Print the goodbye channel
    welcomeChannel = self.settings.getServerStat(ctx.message.guild, "WelcomeChannel")
    if welcomeChannel:
        for channel in ctx.message.guild.channels:
            if str(channel.id) == str(welcomeChannel):
                welcomeChannel = channel
                break
    if welcomeChannel:
        msg = 'The current goodbye channel is **{}**.'.format(welcomeChannel.mention)
    else:
        if self._getDefault(ctx.guild):
            msg = 'The current goodbye channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)
        else:
            msg = 'There is *no channel* set for goodbye messages.'
    await ctx.channel.send(msg)
async def _welcome(self, member, server, channel = None):
    """Render the configured welcome template for member and send it.

    Substitutes the [[user]]/[[atuser]]/[[server]]/[[count]]/[[place]]/
    [[online]] tokens, honors the SuppressMentions stat, and falls back to
    the guild's default channel when no channel is given.
    """
    suppress = bool(self.settings.getServerStat(server, "SuppressMentions"))
    message = self.settings.getServerStat(server, "Welcome")
    if message is None:  # idiom fix: "is None" rather than "== None"
        return
    # Let's regex and replace [[user]] [[atuser]] and [[server]]
    message = re.sub(self.regexUserName, "{}".format(DisplayName.name(member)), message)
    message = re.sub(self.regexUserPing, "{}".format(member.mention), message)
    message = re.sub(self.regexServer, "{}".format(self.suppressed(server, server.name)), message)
    message = re.sub(self.regexCount, "{:,}".format(len(server.members)), message)
    # Ordinal suffix (1st/2nd/3rd/...) for the member's join position.
    place_str = str(len(server.members))
    end_str = "th"
    if place_str.endswith("1") and not place_str.endswith("11"):
        end_str = "st"
    elif place_str.endswith("2") and not place_str.endswith("12"):
        end_str = "nd"
    elif place_str.endswith("3") and not place_str.endswith("13"):
        end_str = "rd"
    message = re.sub(self.regexPlace, "{:,}{}".format(len(server.members), end_str), message)
    # Count members that are not offline.
    online_count = sum(1 for m in server.members if m.status != discord.Status.offline)
    message = re.sub(self.regexOnline, "{:,}".format(online_count), message)
    if suppress:
        message = Nullify.clean(message)
    if channel:
        await channel.send(message)
    else:
        try:
            default = self._getDefault(server)  # resolve once (was called twice)
            if default:
                # Only message if we can
                await default.send(message)
        except Exception:
            pass
async def _goodbye(self, member, server, channel = None):
    """Render the configured goodbye template for member and send it.

    Same token substitution as _welcome, except [[count]]'s [[place]]
    companion reports count + 1 (the spot the member just vacated).
    """
    suppress = bool(self.settings.getServerStat(server, "SuppressMentions"))
    message = self.settings.getServerStat(server, "Goodbye")
    if message is None:  # idiom fix: "is None" rather than "== None"
        return
    # Let's regex and replace [[user]] [[atuser]] and [[server]]
    message = re.sub(self.regexUserName, "{}".format(DisplayName.name(member)), message)
    message = re.sub(self.regexUserPing, "{}".format(member.mention), message)
    message = re.sub(self.regexServer, "{}".format(self.suppressed(server, server.name)), message)
    message = re.sub(self.regexCount, "{:,}".format(len(server.members)), message)
    # Ordinal suffix for count + 1 - the member has already left the list.
    place_str = str(len(server.members)+1)
    end_str = "th"
    if place_str.endswith("1") and not place_str.endswith("11"):
        end_str = "st"
    elif place_str.endswith("2") and not place_str.endswith("12"):
        end_str = "nd"
    elif place_str.endswith("3") and not place_str.endswith("13"):
        end_str = "rd"
    message = re.sub(self.regexPlace, "{:,}{}".format(len(server.members)+1, end_str), message)
    # Count members that are not offline.
    online_count = sum(1 for m in server.members if m.status != discord.Status.offline)
    message = re.sub(self.regexOnline, "{:,}".format(online_count), message)
    if suppress:
        message = Nullify.clean(message)
    if channel:
        await channel.send(message)
    else:
        try:
            default = self._getDefault(server)  # resolve once (was called twice)
            if default:
                # Only message if we can
                await default.send(message)
        except Exception:
            pass
@commands.command(pass_context=True)
async def setwelcomechannel(self, ctx, *, channel : discord.TextChannel = None):
    """Sets the channel for the welcome and goodbye messages (bot-admin only)."""
    isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
    if not isAdmin:
        checkAdmin = self.settings.getServerStat(ctx.message.guild, "AdminArray")
        for role in ctx.message.author.roles:
            for aRole in checkAdmin:
                # Get the role that corresponds to the id
                if str(aRole['ID']) == str(role.id):
                    isAdmin = True
                    break  # stop scanning once a bot-admin role matches
            if isAdmin:
                break
    # Only allow admins to change server stats
    if not isAdmin:
        await ctx.channel.send('You do not have sufficient privileges to access this command.')
        return
    if channel is None:  # "is None", not "== None" (PEP 8)
        # Clearing the stat falls back to the default channel (if resolvable).
        self.settings.setServerStat(ctx.message.guild, "WelcomeChannel", "")
        default = self._getDefault(ctx.guild)  # resolve once (was called twice)
        if default:
            msg = 'Welcome and goodbye messages will be displayed in the default channel (**{}**).'.format(default.mention)
        else:
            msg = "Welcome and goodbye messages will **not** be displayed."
        await ctx.channel.send(msg)
        return
    # If we made it this far - then we can add it
    self.settings.setServerStat(ctx.message.guild, "WelcomeChannel", channel.id)
    msg = 'Welcome and goodbye messages will be displayed in **{}**.'.format(channel.mention)
    await ctx.channel.send(msg)
@setwelcomechannel.error
async def setwelcomechannel_error(self, ctx, error):
    """Report setwelcomechannel conversion/check failures to the invoking channel."""
    # Bug fix: the original had ctx and error swapped - it formatted the
    # Context into the message and called error.channel.send(), but the
    # exception object has no .channel attribute (would raise AttributeError).
    msg = 'setwelcomechannel Error: {}'.format(error)
    await ctx.channel.send(msg)
| 43.823315
| 147
| 0.585145
|
4a0f6ed0beea239a71d4841345c3a1a9410528a9
| 4,168
|
py
|
Python
|
soam/models/orbit.py
|
MuttData/soam
|
65612a02552668c6721dc20e675654883391c3e9
|
[
"Apache-2.0"
] | 1
|
2021-09-17T01:14:57.000Z
|
2021-09-17T01:14:57.000Z
|
soam/models/orbit.py
|
MuttData/soam
|
65612a02552668c6721dc20e675654883391c3e9
|
[
"Apache-2.0"
] | null | null | null |
soam/models/orbit.py
|
MuttData/soam
|
65612a02552668c6721dc20e675654883391c3e9
|
[
"Apache-2.0"
] | 1
|
2021-08-09T14:22:50.000Z
|
2021-08-09T14:22:50.000Z
|
"""Orbit model wrapper."""
import logging
from typing import List, Union
import warnings
import pandas as pd
from typing_extensions import Literal
from soam.constants import SEED
from soam.models.base import SkWrapper, sk_constructor_wrapper
from soam.utilities.utils import SuppressStdOutStdErr
# pylint: disable=super-init-not-called, attribute-defined-outside-init, unnecessary-pass, no-member
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
from orbit.models.dlt import DLTFull
except ImportError:
logger.warning("No orbit support")
logger.warning("If you want to use it, ´pip install soam[orbit]´")
class SkOrbit(SkWrapper):
    """Scikit-Learn Orbit model wrapper."""

    # Constructor-only kwargs that must NOT be forwarded to the wrapped DLTFull.
    _ignore_params = ["full_output"]

    @sk_constructor_wrapper(DLTFull)
    def __init__(
        self,
        date_col: str = 'date',
        response_col: Union[str, None] = None,
        regressor_col: Union[List[str], str, None] = None,
        damped_factor: float = 0.8,
        period: int = 1,
        seasonality: int = -1,
        seed: int = SEED,
        chains: int = 1,
        global_trend_option: Literal[
            'flat', 'linear', 'loglinear', 'logistic'
        ] = "linear",
        full_output: bool = False,
    ):
        """Constructor with extra parameters in addition to the base model ones.
        Parameters
        ----------
        response_col : str
            response or y column name
        date_col : str
            date column name
        regressor_col : Union[List[str], str]
            extra regressors column names
        damped_factor : float, optional
            by default 0.8
        period : int, optional
            by default 1
        seasonality : int, optional
            by default -1
        chains : int, optional
            number of chains spawned by PyStan, by default 1
        seed : int, optional
            by default 1
        global_trend_option : Literal[, optional
            by default "linear"
        full_output : bool, default False
            Return full Orbit output or just prediction column.
        Notes:
        Since Orbit manages kwargs for everything, this is awkward for our
        constructor patching strategy, i.e we need explicit arguments in the signature.
        That's why we explicitly specify them in the wrapper's signature,
        and we mark custom params in _to_ignore.
        For more details on model specific parameters check docs.
        """
        # Body intentionally empty: sk_constructor_wrapper records the
        # signature's arguments and performs the actual attribute assignment.
        pass

    def fit(self, X: pd.DataFrame, y: pd.Series):
        """Fit estimator to data."""
        # Merge y into X under the response column, as Orbit expects one frame.
        df = self._transform_to_input_format(X, y)
        self.model = self._init_sk_model(DLTFull, ignore_params=self._ignore_params)
        # Silence PyStan's console spam and warnings during sampling.
        with warnings.catch_warnings(), SuppressStdOutStdErr():
            warnings.simplefilter("ignore")
            self.model.fit(df)
        return self

    def predict(self, X: pd.DataFrame) -> pd.DataFrame:
        """Scikit learn's predict."""
        X = self._transform_to_input_format(X)
        predictions = self.model.predict(X)  # pylint: disable=assignment-from-no-return
        predictions = self._transform_to_output_format(predictions)
        return predictions

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Scikit learn's transform."""
        # Transform is just prediction for this estimator.
        return self.predict(X)

    def fit_transform(self, X: pd.DataFrame, y: pd.Series):
        """Scikit learn's fit_transform."""
        self.fit(X, y)
        return self.transform(X)

    def _transform_to_input_format(
        self, X: pd.DataFrame, y: pd.Series = None
    ) -> pd.DataFrame:
        """Transform input to Orbit compatible df."""
        if y is not None:
            # set response col dynamically (the Series name becomes the column)
            self.response_col = y.name
            return X.assign(**{self.response_col: y})
        return X

    def _transform_to_output_format(self, predictions: pd.Series) -> pd.DataFrame:
        """Transform Orbit output to SoaM format."""
        # SoaM expects the forecast under "yhat".
        predictions = predictions.rename(columns={"prediction": "yhat"})
        if self.full_output:
            return predictions
        return predictions[[self.date_col, "yhat"]]
| 33.612903
| 100
| 0.630998
|
4a0f6f44f26ab3bac3d95f4957775a4a58e9b9e8
| 1,375
|
py
|
Python
|
hassio/__main__.py
|
InfernoEmbedded/hassio
|
a401bf0bb8d81d76924254d5b8c9c493ad343468
|
[
"Apache-2.0"
] | null | null | null |
hassio/__main__.py
|
InfernoEmbedded/hassio
|
a401bf0bb8d81d76924254d5b8c9c493ad343468
|
[
"Apache-2.0"
] | null | null | null |
hassio/__main__.py
|
InfernoEmbedded/hassio
|
a401bf0bb8d81d76924254d5b8c9c493ad343468
|
[
"Apache-2.0"
] | null | null | null |
"""Main file for Hass.io."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
import sys
from hassio import bootstrap
_LOGGER = logging.getLogger(__name__)
def attempt_use_uvloop():
    """Install the uvloop event-loop policy when the package is available."""
    try:
        import uvloop
    except ImportError:
        # uvloop is an optional speedup; fall back to the default loop.
        return
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# pylint: disable=invalid-name
if __name__ == "__main__":
    bootstrap.initialize_logging()
    attempt_use_uvloop()
    loop = asyncio.get_event_loop()
    # Bail out early if the environment checks fail.
    if not bootstrap.check_environment():
        sys.exit(1)
    # init executor pool
    executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
    loop.set_default_executor(executor)
    _LOGGER.info("Initialize Hass.io setup")
    coresys = bootstrap.initialize_coresys(loop)
    bootstrap.migrate_system_env(coresys)
    _LOGGER.info("Setup HassIO")
    loop.run_until_complete(coresys.core.setup())
    # Schedule startup and signal registration on the loop thread.
    loop.call_soon_threadsafe(loop.create_task, coresys.core.start())
    loop.call_soon_threadsafe(bootstrap.reg_signal, loop)
    try:
        _LOGGER.info("Run Hass.io")
        loop.run_forever()
    finally:
        # Always shut down the core and release the loop, even on errors.
        _LOGGER.info("Stopping Hass.io")
        loop.run_until_complete(coresys.core.stop())
        executor.shutdown(wait=False)
        loop.close()
    _LOGGER.info("Close Hass.io")
    sys.exit(0)
| 24.553571
| 69
| 0.703273
|
4a0f6f93ee254955217d5deb54e7de8f588d1ad9
| 2,714
|
py
|
Python
|
src/python/pants/backend/jvm/tasks/check_published_deps.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 1
|
2021-05-05T18:58:28.000Z
|
2021-05-05T18:58:28.000Z
|
src/python/pants/backend/jvm/tasks/check_published_deps.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/jvm/tasks/check_published_deps.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 3
|
2020-06-30T08:28:13.000Z
|
2021-07-28T09:35:57.000Z
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.tasks.jar_publish import PushDb
from pants.task.console_task import ConsoleTask
class CheckPublishedDeps(ConsoleTask):
    """Find references to outdated JVM artifacts."""

    @classmethod
    def register_options(cls, register):
        """Register the --print-uptodate flag."""
        super().register_options(register)
        register("--print-uptodate", type=bool, help="Print up-to-date dependencies.")

    def __init__(self, *args, **kwargs):
        """Index every published artifact in the build graph by (org, name)."""
        super().__init__(*args, **kwargs)
        self._print_uptodate = self.get_options().print_uptodate
        # We look at the repos for the JarPublish task.
        # TODO: Yuck. The repos should be a subsystem that both tasks use.
        self.repos = self.context.options.for_scope("publish.jar").repos
        self._artifacts_to_targets = {}

        def is_published(tgt):
            return isinstance(tgt, ExportableJvmLibrary) and tgt.provides

        for target in self.context.scan().targets(predicate=is_published):
            provided_jar, _ = target.get_artifact_info()
            artifact = (provided_jar.org, provided_jar.name)
            # First target wins when several targets provide the same artifact.
            if artifact not in self._artifacts_to_targets:  # idiom: "x not in y"
                self._artifacts_to_targets[artifact] = target

    def console_output(self, targets):
        """Yield one line per outdated referenced artifact (and, with
        --print-uptodate, per current one)."""
        push_dbs = {}  # push-db path -> loaded PushDb, cached across deps

        def get_version_and_sha(target):
            db = target.provides.repo.push_db(target)
            if db not in push_dbs:
                push_dbs[db] = PushDb.load(db)
            pushdb_entry = push_dbs[db].get_entry(target)
            return pushdb_entry.sem_ver, pushdb_entry.sha

        visited = set()  # report each (org, name) at most once
        for target in self.context.targets():
            if isinstance(target, (JarLibrary, JvmTarget)):
                for dep in target.jar_dependencies:
                    artifact = (dep.org, dep.name)
                    if artifact in self._artifacts_to_targets and artifact not in visited:
                        visited.add(artifact)
                        artifact_target = self._artifacts_to_targets[artifact]
                        semver, _ = get_version_and_sha(artifact_target)  # sha unused
                        if semver.version() != dep.rev:
                            yield f"outdated {dep.org}#{dep.name} {dep.rev} latest {semver.version()}"
                        elif self._print_uptodate:
                            yield f"up-to-date {dep.org}#{dep.name} {semver.version()}"
| 45.233333
| 102
| 0.644436
|
4a0f707e13a8e697202b9b2e9d89d7932d2e2847
| 1,420
|
py
|
Python
|
zerver/views/alert_words.py
|
Pulkit007/zulip
|
8a5f6f8d95baa55c4b28972cfc5a498f5f388e0f
|
[
"Apache-2.0"
] | 17,004
|
2015-09-25T18:27:24.000Z
|
2022-03-31T22:02:32.000Z
|
zerver/views/alert_words.py
|
Pulkit007/zulip
|
8a5f6f8d95baa55c4b28972cfc5a498f5f388e0f
|
[
"Apache-2.0"
] | 20,344
|
2015-09-25T19:02:42.000Z
|
2022-03-31T23:54:40.000Z
|
zerver/views/alert_words.py
|
Pulkit007/zulip
|
8a5f6f8d95baa55c4b28972cfc5a498f5f388e0f
|
[
"Apache-2.0"
] | 7,271
|
2015-09-25T18:48:39.000Z
|
2022-03-31T21:06:11.000Z
|
from typing import List
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import do_add_alert_words, do_remove_alert_words
from zerver.lib.alert_words import user_alert_words
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_capped_string, check_list, check_string
from zerver.models import UserProfile
def list_alert_words(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    """Return the requesting user's current alert words."""
    words = user_alert_words(user_profile)
    return json_success({"alert_words": words})
def clean_alert_words(alert_words: List[str]) -> List[str]:
    """Strip surrounding whitespace and drop entries that end up empty."""
    return [stripped for word in alert_words if (stripped := word.strip())]
@has_request_variables
def add_alert_words(
    request: HttpRequest,
    user_profile: UserProfile,
    alert_words: List[str] = REQ(json_validator=check_list(check_capped_string(100))),
) -> HttpResponse:
    """Add the given (cleaned) words to the user's alert words and return the new list."""
    cleaned = clean_alert_words(alert_words)
    do_add_alert_words(user_profile, cleaned)
    return json_success({"alert_words": user_alert_words(user_profile)})
@has_request_variables
def remove_alert_words(
    request: HttpRequest,
    user_profile: UserProfile,
    alert_words: List[str] = REQ(json_validator=check_list(check_string)),
) -> HttpResponse:
    """Remove the given words from the user's alert words and return the new list."""
    do_remove_alert_words(user_profile, alert_words)
    words = user_alert_words(user_profile)
    return json_success({"alert_words": words})
| 35.5
| 86
| 0.793662
|
4a0f713418d8115465decc98ecd6ff14a1524d66
| 2,651
|
py
|
Python
|
python/pyclaw/runclaw.py
|
geoflows/geoclaw-4.x
|
c8879d25405017b38392aa3b1ea422ff3e3604ea
|
[
"BSD-3-Clause"
] | 2
|
2016-04-26T02:32:09.000Z
|
2021-02-08T08:43:44.000Z
|
util/runclaw.py
|
che-wenchao/D-Claw
|
8ab5d971c9a7a7130e03a447a4b8642e292f4e88
|
[
"BSD-3-Clause"
] | null | null | null |
util/runclaw.py
|
che-wenchao/D-Claw
|
8ab5d971c9a7a7130e03a447a4b8642e292f4e88
|
[
"BSD-3-Clause"
] | 2
|
2019-01-17T04:34:08.000Z
|
2020-08-11T16:02:28.000Z
|
"""
Generic code for running the fortran version of Clawpack and sending the
results to subdirectory output of the directory from which this is executed.
Execute via
$ python $CLAW/python/pyclaw/runclaw.py
from a directory that contains a claw.data file and a Clawpack executable.
"""
def runclaw(xclawcmd=None, outdir=None, overwrite=True, restart=False,
            rundir=None):
    """
    Run the Fortran version of Clawpack using executable xclawcmd, which is
    typically set to 'xclaw', 'xamr', etc.
    If it is not set by the call, get it from the environment variable
    CLAW_EXE.  Default to 'xclaw' if that's not set.
    If rundir is None, all *.data is copied from current directory, if a path
    is given, data files are copied from there instead.
    """
    # NOTE: this module is Python 2 (print statements below).
    import os
    if type(overwrite) is str:
        # convert to boolean (command-line callers pass strings)
        overwrite = (overwrite.lower() in ['true','t'])
    if type(restart) is str:
        # convert to boolean
        restart = (restart.lower() in ['true','t'])
    # importing these modules requires $CLAW/python in PYTHONPATH:
    from pyclaw.controller import Controller
    if xclawcmd is None:
        # Determine what executable to use from environment variable CLAW_EXE
        # Default to 'xclaw' if it's not set:
        xclawcmd = os.environ.get('CLAW_EXE', 'xclaw')
    if outdir is None:
        outdir = '.'
    if rundir is None:
        rundir = os.getcwd()
    rundir = os.path.abspath(rundir)
    print "Will take data from ", rundir
    # directory for fort.* files:
    outdir = os.path.abspath(outdir)
    print '== runclaw: Will write output to ',outdir
    # Configure the controller that actually launches the Fortran executable.
    clawjob = Controller()
    clawjob.xdir = os.getcwd()
    clawjob.rundir = rundir          # use data files from current directory
    clawjob.outdir = outdir          # write fort files to outdir
    clawjob.xclawcmd = xclawcmd      # Clawpack executable
    clawjob.overwrite = overwrite    # Ok to overwrite outdir and plotdir?
    clawjob.restart = restart        # Restarting a previous run?
    returncode = clawjob.runxclaw()
    if returncode != 0:
        # NOTE(review): despite "aborting", the nonzero returncode is only
        # printed, not raised or returned to the caller.
        print '== runclaw: *** fortran returncode = ', returncode, ' aborting'
    print '== runclaw: Done executing %s via pyclaw.runclaw.py' % xclawcmd
    print '== runclaw: Output is in ', outdir
#----------------------------------------------------------
if __name__=='__main__':
    """
    If executed at command line prompt, simply call the function, with
    any argument used as setplot:
    """
    import sys
    # Forward any positional command-line arguments straight to runclaw().
    args = sys.argv[1:]   # any command line arguments
    runclaw(*args)
| 33.987179
| 80
| 0.6341
|
4a0f72322fb98885479c903578d9a417bd11cbe8
| 519
|
py
|
Python
|
pages/themes/beginners/exceptions/Task_and_HW/yuliyan_try_except.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/exceptions/Task_and_HW/yuliyan_try_except.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/exceptions/Task_and_HW/yuliyan_try_except.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
|
03b892a42ee1fad3d4f97e328e06a4b1573fd356
|
[
"MIT"
] | null | null | null |
def user_input(msg):
    """Prompt with msg; return (text, True) on success or ("Not OK", False)
    when the user interrupts input (Ctrl+D / Ctrl+C).

    The original used a bare ``except:``, which also swallowed SystemExit,
    GeneratorExit and genuine bugs; catch only the interrupt exceptions the
    printed message actually describes.
    """
    try:
        usr_input = input(msg)
    except (EOFError, KeyboardInterrupt):
        print("User Break - 'CTRL+D' is Disabled!!")
        return ("Not OK", False)
    return (usr_input, True)
# def user_input(msg):
# usr_input = input(msg)
# if len(usr_input)>2:
# return (usr_input, True)
# else:
# print("User Break - 'CTRL+D' is Disabled!!")
# return ("Not OK" ,False)
# Keep prompting until a read succeeds, then exit the script.
# NOTE(review): if stdin is closed (repeated EOF), status[1] stays False and
# this loop spins forever, reprinting the break message.
while True:
    status = user_input("Enter your message: ")
    # print(x)
    if status[1]:
        quit()
| 22.565217
| 54
| 0.543353
|
4a0f72457196a0ca77a3aa49d10a0e20f22fe9de
| 6,268
|
py
|
Python
|
fancyflags/_flags.py
|
corypaik/fancyflags
|
9fef9fb69bd52a81dd67cb963986be1c0b00c070
|
[
"Apache-2.0"
] | null | null | null |
fancyflags/_flags.py
|
corypaik/fancyflags
|
9fef9fb69bd52a81dd67cb963986be1c0b00c070
|
[
"Apache-2.0"
] | null | null | null |
fancyflags/_flags.py
|
corypaik/fancyflags
|
9fef9fb69bd52a81dd67cb963986be1c0b00c070
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Flag classes for defining dict, Item, MultiItem and Auto flags."""
import copy
import functools
from absl import flags
_EMPTY = ""
class DictFlag(flags.Flag):
  """A flag whose value is a dict shared with its dotted `Item` flags.

  The flag is not meant to be set from the command line itself; it simply
  exposes `shared_dict`, which the associated `ItemFlag`s mutate in place.
  See also `ItemFlag`.
  """

  def __init__(self, shared_dict, *args, **kwargs):
    self._shared_dict = shared_dict
    super().__init__(*args, **kwargs)

  def _parse(self, value):
    # `_parse()` is reached in exactly two legitimate ways:
    #   1. from the base `Flag` constructor, which parses the default value
    #      (the shared dict object itself), and
    #   2. from generic serialize/deserialize round-trips (e.g. passing
    #      flags between processes), which hand back the `_EMPTY` sentinel
    #      produced by `serialize()`.
    # Anything else is a direct command-line override, which is illegal.
    if value is not self._shared_dict and value != _EMPTY:
      raise flags.IllegalFlagValueError(
          "Can't override a dict flag directly. Did you mean to override one of "
          "its `Item`s instead?")
    return self._shared_dict

  def serialize(self):
    # The real state lives in the `Item` flags, so serializing this flag
    # only needs to emit the sentinel that `_parse()` accepts and ignores.
    return _EMPTY

  def flag_type(self):
    return "dict"
# TODO(b/170423907): Pytype doesn't correctly infer that these have type
# `property`.
# Aliases of the base classes' `value` property descriptors; the subclasses
# below reuse them to extend the setter (see https://bugs.python.org/issue14965).
_flag_value_property = flags.Flag.value  # type: property
_multi_flag_value_property = flags.MultiFlag.value  # type: property
class ItemFlag(flags.Flag):
  """Updates a shared dict whenever its own value changes.
  See also the `DictFlag` and `ff.Item` classes for usage.
  """
  def __init__(self, shared_dict, namespace, *args, **kwargs):
    # `shared_dict` is owned by the companion `DictFlag`; `namespace` is the
    # sequence of keys addressing this item's slot inside that dict.
    self._shared_dict = shared_dict
    self._namespace = namespace
    super().__init__(*args, **kwargs)
  # `super().value = value` doesn't work, see https://bugs.python.org/issue14965
  @_flag_value_property.setter
  def value(self, value):
    # Extend the inherited property setter so that any assignment to
    # `.value` is mirrored into the shared dict.
    _flag_value_property.fset(self, value)
    self._update_shared_dict()
  def parse(self, argument):
    # Command-line parsing also changes the value, so mirror it as well.
    super().parse(argument)
    self._update_shared_dict()
  def _update_shared_dict(self):
    # Walk down to the innermost dict and overwrite this item's entry.
    d = self._shared_dict
    for name in self._namespace[:-1]:
      d = d[name]
    d[self._namespace[-1]] = self.value
class MultiItemFlag(flags.MultiFlag):
  """Updates a shared dict whenever its own value changes.
  Used for flags that can appear multiple times on the command line.
  See also the `DictFlag` and `ff.Item` classes for usage.
  """
  def __init__(self, shared_dict, namespace, *args, **kwargs):
    # `shared_dict` is owned by the companion `DictFlag`; `namespace` is the
    # sequence of keys addressing this item's slot inside that dict.
    self._shared_dict = shared_dict
    self._namespace = namespace
    super().__init__(*args, **kwargs)
  # `super().value = value` doesn't work, see https://bugs.python.org/issue14965
  @_multi_flag_value_property.setter
  def value(self, value):
    # Extend the inherited property setter so that any assignment to
    # `.value` is mirrored into the shared dict.
    _multi_flag_value_property.fset(self, value)
    self._update_shared_dict()
  def parse(self, argument):
    # Command-line parsing also changes the value, so mirror it as well.
    super().parse(argument)
    self._update_shared_dict()
  def _update_shared_dict(self):
    # Walk down to the innermost dict and overwrite this item's entry.
    d = self._shared_dict
    for name in self._namespace[:-1]:
      d = d[name]
    d[self._namespace[-1]] = self.value
class AutoFlag(flags.Flag):
  """Flag whose value is `fn` partially applied with the current `fn_kwargs`.

  The kwargs themselves are driven by companion `Item` flags; this flag only
  packages them up, so it cannot be overridden from the command line.
  """

  def __init__(self, fn, fn_kwargs, *args, **kwargs):
    self._fn = fn
    self._fn_kwargs = fn_kwargs
    super().__init__(*args, **kwargs)

  @property
  def value(self):
    # Deep-copy the kwargs so callers of the returned partial cannot mutate
    # the state shared with the `Item` flags.
    return functools.partial(self._fn, **copy.deepcopy(self._fn_kwargs))

  @value.setter
  def value(self, value):
    # The base `flags.Flag` constructor assigns a default to `.value`, but
    # this flag's effective value is derived from `fn_kwargs`, so setting it
    # has unclear semantics (and `self._fn` may not be callable yet at that
    # point in construction). Deliberately ignore the assignment.
    del value

  def _parse(self, value):
    # `_parse()` is reached in exactly two legitimate ways:
    #   1. from the base `Flag` constructor parsing the default value,
    #      which is None (as set in `DEFINE_auto`), and
    #   2. from generic serialize/deserialize round-trips handing back the
    #      `_EMPTY` sentinel produced by `serialize()`.
    # Anything else is a direct command-line override, which is illegal.
    if value is not None and value != _EMPTY:
      raise flags.IllegalFlagValueError(
          "Can't override an auto flag directly. Did you mean to override one of "
          "its `Item`s instead?")
    return None

  def serialize(self):
    # The real state lives in the `Item` flags; emit the ignorable sentinel
    # so containers of flags can still be serialized wholesale.
    return _EMPTY

  def flag_type(self):
    return "auto"
| 36.870588
| 80
| 0.694959
|
4a0f7247ec556010b5f6cdc8251793eee6c1a0bb
| 2,275
|
py
|
Python
|
ml3d/vis/labellut.py
|
inkyusa/Open3D-ML
|
40b5f7ff45577bcc6fd451cf63cc366324730849
|
[
"MIT"
] | 3
|
2021-03-18T17:09:32.000Z
|
2021-06-26T20:58:12.000Z
|
ml3d/vis/labellut.py
|
inkyusa/Open3D-ML
|
40b5f7ff45577bcc6fd451cf63cc366324730849
|
[
"MIT"
] | null | null | null |
ml3d/vis/labellut.py
|
inkyusa/Open3D-ML
|
40b5f7ff45577bcc6fd451cf63cc366324730849
|
[
"MIT"
] | 1
|
2021-06-26T11:04:29.000Z
|
2021-06-26T11:04:29.000Z
|
class LabelLUT:
    """Look-up table assigning display names and RGB colors to label values."""

    class Label:
        """One table entry: display name, raw label value, and RGB color."""

        def __init__(self, name, value, color):
            self.name = name
            self.value = value
            self.color = color

    # Default palette, handed out round-robin to labels added without an
    # explicit color. NOTE(review): a few entries contain components > 1.0
    # (e.g. [0.25, 1.0, 1.25]) — possibly typos, left unchanged here.
    Colors = [
        [0., 0., 0.],
        [0.96078431, 0.58823529, 0.39215686],
        [0.96078431, 0.90196078, 0.39215686],
        [0.58823529, 0.23529412, 0.11764706],
        [0.70588235, 0.11764706, 0.31372549],
        [1., 0., 0.],
        [0.11764706, 0.11764706, 1.],
        [0.78431373, 0.15686275, 1.],
        [0.35294118, 0.11764706, 0.58823529],
        [1., 0., 1.],
        [1., 0.58823529, 1.],
        [0.29411765, 0., 0.29411765],
        [0.29411765, 0., 0.68627451],
        [0., 0.78431373, 1.],
        [0.19607843, 0.47058824, 1.],
        [0., 0.68627451, 0.],
        [0., 0.23529412, 0.52941176],
        [0.31372549, 0.94117647, 0.58823529],
        [0.58823529, 0.94117647, 1.],
        [0., 0., 1.],
        [1.0, 1.0, 0.25],
        [0.5, 1.0, 0.25],
        [0.25, 1.0, 0.25],
        [0.25, 1.0, 0.5],
        [0.25, 1.0, 1.25],
        [0.25, 0.5, 1.25],
        [0.25, 0.25, 1.0],
        [0.125, 0.125, 0.125],
        [0.25, 0.25, 0.25],
        [0.375, 0.375, 0.375],
        [0.5, 0.5, 0.5],
        [0.625, 0.625, 0.625],
        [0.75, 0.75, 0.75],
        [0.875, 0.875, 0.875],
    ]

    def __init__(self):
        self._next_color = 0  # index of the next unused palette entry
        self.labels = {}      # maps label value -> Label

    def add_label(self, name, value, color=None):
        """Add a label to the table.

        **Example:**
            The following sample creates a LUT with 3 labels::

                lut = ml3d.vis.LabelLUT()
                lut.add_label('one', 1)
                lut.add_label('two', 2)
                lut.add_label('three', 3, [0,0,1])  # use blue for 'three'

        **Args:**
            name: The label name as string.
            value: The value associated with the label.
            color: Optional RGB color. E.g., [0.2, 0.4, 1.0]. When omitted,
                the next unused entry of `Colors` is assigned.
        """
        if color is not None:
            chosen = color
        elif self._next_color < len(self.Colors):
            chosen = self.Colors[self._next_color]
            self._next_color += 1
        else:
            # Palette exhausted: fall back to a fixed color without
            # advancing the cursor, like the original round-robin did.
            chosen = [0.85, 1.0, 1.0]
        self.labels[value] = self.Label(name, value, chosen)
| 40.625
| 79
| 0.476484
|
4a0f727e0489ab5c9dbe11634154d02c1375ef49
| 35
|
py
|
Python
|
open_connect/moderation/tests/__init__.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 66
|
2015-11-30T20:35:38.000Z
|
2019-06-12T17:40:32.000Z
|
open_connect/moderation/tests/__init__.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 18
|
2015-11-30T22:03:05.000Z
|
2019-07-02T00:50:29.000Z
|
open_connect/moderation/tests/__init__.py
|
lpatmo/actionify_the_news
|
998d8ca6b35d0ef1b16efca70f50e59503f5a62d
|
[
"MIT"
] | 11
|
2015-11-30T20:56:01.000Z
|
2019-07-01T17:06:09.000Z
|
"""Tests for the moderation app"""
| 17.5
| 34
| 0.685714
|
4a0f743b856a890378fea85aaedf486e135f766a
| 11,345
|
py
|
Python
|
docs/tutorials/datasets/detection_custom.py
|
JSoothe/gluon-cv
|
bca14b75c0e8b6b1cb74447499770e0a337c1f0c
|
[
"Apache-2.0"
] | 1
|
2019-11-30T05:34:52.000Z
|
2019-11-30T05:34:52.000Z
|
docs/tutorials/datasets/detection_custom.py
|
JSoothe/gluon-cv
|
bca14b75c0e8b6b1cb74447499770e0a337c1f0c
|
[
"Apache-2.0"
] | null | null | null |
docs/tutorials/datasets/detection_custom.py
|
JSoothe/gluon-cv
|
bca14b75c0e8b6b1cb74447499770e0a337c1f0c
|
[
"Apache-2.0"
] | 1
|
2020-04-29T00:08:22.000Z
|
2020-04-29T00:08:22.000Z
|
"""Prepare custom datasets for object detection
===============================================
With GluonCV, we have already provided built-in support for widely used public datasets with zero
effort, e.g. :ref:`sphx_glr_build_examples_datasets_pascal_voc.py` and :ref:`sphx_glr_build_examples_datasets_mscoco.py`.
However it is very natural to create a custom dataset of your choice for object detection tasks.
This tutorial is intend to provide you some hints to clear the path for you.
In practice, feel free to choose whatever method that fits for your use case best.
:ref:`lst_record_dataset`
:ref:`pascal_voc_like`
"""
##############################################################################
#
# .. _lst_record_dataset:
#
# 1. Preferred Object Detection Format for GluonCV and MXNet
# ----------------------------------------------------------
# Let us walk through some fundamental backgrounds in case you are not familiar with them.
#
# Bounding Boxes
# ^^^^^^^^^^^^^^
#
# There are multiple ways to organize the label format for object detection task. We will briefly introduce the
# most widely used: ``bounding box``.
#
# GluonCV expect all bounding boxes to be encoded as (xmin, ymin, xmax, ymax), aka (left, top, right, bottom) borders of each object of interest.
#
# First of all, let us plot a real image for example:
import os, zipfile
from gluoncv import utils
import mxnet as mx
import numpy as np
from matplotlib import pyplot as plt
im_fname = utils.download('https://github.com/dmlc/web-data/blob/master/' +
'gluoncv/datasets/dog.jpg?raw=true',
path='dog.jpg')
img = mx.image.imread(im_fname)
ax = utils.viz.plot_image(img)
print(img.shape)
plt.show()
##############################################################################
# Now, let's label the image manually for demo.
#
# .. hint::
#
# In practice, a dedicated GUI labeling tool is more convenient.
#
# We expect all bounding boxes follow this format: (xmin, ymin, xmax, ymax)
dog_label = [130, 220, 320, 530]
bike_label = [115, 120, 580, 420]
car_label = [480, 80, 700, 170]
all_boxes = np.array([dog_label, bike_label, car_label])
all_ids = np.array([0, 1, 2])
class_names = ['dog', 'bike', 'car']
# see how it looks by rendering the boxes into image
ax = utils.viz.plot_bbox(img, all_boxes, labels=all_ids, class_names=class_names)
plt.show()
##############################################################################
# LST Label for GluonCV and MXNet
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Following the convention used in MXNet, we recommend a LST file which is a plain text list file to store labels.
#
# LST file was first introduced in MXNet following the `RecordIO design <https://mxnet.incubator.apache.org/architecture/note_data_loading.html>`_ and the `List file tutorial <https://mxnet.incubator.apache.org/faq/recordio.html>`_ of creating a LST file.
#
# .. hint::
#
# The benefits of using single LST file are two fold:
#
#     1. It's easier to manage a single file rather than scattered annotation files.
#
# 2. It's compatible with ``RecordFile`` binary format which we will cover in this tutorial later.
#
# The format of LST file is:
"""
integer_image_index \t label_of_variable_length \t relative_path_to_image
"""
##############################################################################
# Typically, we take the list of names of all images, shuffles them, then separates them into two lists: a training filename list and a testing filename list.
#
# Here we use compatible format for object detection task as `mxnet.image.ImageDetIter <https://mxnet.apache.org/api/python/image/image.html#image-iterator-for-object-detection>`_.
#
# `mxnet.image.ImageDetIter` is a object detection data iterator written in C++ which includes tons of augmentation choices. However, it's not flexible enough to handle all kinds of customized data augmentation.
# As a result, in GluonCV, we switched to :py:mod:`gluoncv.data.transforms` to support almost all types of data augmentations.
#
# More specifically, the label of object detection task is described as follows:
#
# .. image:: https://github.com/dmlc/web-data/blob/master/gluoncv/datasets/detection_label.png?raw=true
#
# .. image:: https://github.com/dmlc/web-data/blob/master/gluoncv/datasets/detection_label_detail.png?raw=true
#
# So, the corresponding LST file for the image we just labeled can be formatted as:
def write_line(img_path, im_shape, boxes, ids, idx):
    """Format one image's detection labels as a single LST-file line.

    Args:
        img_path: relative path to the image file.
        im_shape: (height, width, channels) of the image.
        boxes: (N, 4) array of (xmin, ymin, xmax, ymax) pixel boxes.
        ids: (N,) array of class ids, aligned with `boxes`.
        idx: integer index of this image in the list.

    Returns:
        A tab-separated line terminated with a newline.
    """
    height, width, _ = im_shape
    # Header: minimal length 2 plus image width and height —
    # (header_len=4, label_width=5, width, height).
    header = (4, 5, width, height)
    # One record per object: [class_id, xmin, ymin, xmax, ymax] as floats.
    records = np.hstack((ids.reshape(-1, 1), boxes)).astype('float')
    # Normalize box coordinates to [0, 1] (recommended by the format):
    # x coordinates by width, y coordinates by height.
    records[:, (1, 3)] /= float(width)
    records[:, (2, 4)] /= float(height)
    fields = [str(idx)]
    fields.extend(str(v) for v in header)
    fields.extend(str(v) for v in records.flatten().tolist())
    fields.append(img_path)
    return '\t'.join(fields) + '\n'
##############################################################################
# A single line may be long, but contains complete information of each image required by object detection.
#
# The length of each line varies, depending on how many objects are labeled inside the corresponding image.
#
# By stacking lines one by one, it is very nature to create ``train.lst`` and ``val.lst`` for training/validation purposes.
#
# In this tutorial, we repeat the same image 4 times to create a fake ``val.lst`` file.
with open('val.lst', 'w') as fw:
for i in range(4):
line = write_line('dog.jpg', img.shape, all_boxes, all_ids, i)
print(line)
fw.write(line)
##############################################################################
# LstDetection for Loading Raw Images in Folders
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Assume the relative root path to the image folder is current directory
from gluoncv.data import LstDetection
lst_dataset = LstDetection('val.lst', root=os.path.expanduser('.'))
print('length:', len(lst_dataset))
first_img = lst_dataset[0][0]
print('image shape:', first_img.shape)
print('Label example:')
print(lst_dataset[0][1])
print("GluonCV swaps bounding boxes to columns 0-3 by default")
##############################################################################
# RecordFileDetection for Entire Dataset Packed in Single MXNet RecordFile
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Reading scattered images in folders can be slow, due to constraint of disk random access speed.
# There's a significant gap between random/sequential access speed especially on HDDs.
# Even with modern PCI-E based Solid State Drives, sequential reading IO performance still blows
# random reading by a large margin.
#
# We will skip repeating the design of RecordIO built into MXNet, if you are interested, have a look at `RecordIO design <https://mxnet.incubator.apache.org/architecture/note_data_loading.html>`_.
#
# In this section, we go through the fundamental steps to create a record file.
#
# First of all, you will need a ``im2rec.py`` file to start with.
##############################################################################
#
# .. hint::
#
# You can find `im2rec.py` in `incubator-mxnet/tools/ <https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py>`_, or you can simply download it now.
#
# Usage:
#
# .. code-block:: bash
#
# python im2rec.py lst_file_name relative_root_to_images --pass-through --pack-label
#
# Some important arguments to the ``im2rec.py``:
#
# - ``--pass-through``: no transcode of original image, pack it to binary as is. It will preserve original quality and aspect ratio anyway.
#
# - ``--pack-label``: pack the labels in lst file to binary record file, so the ``.rec`` file is self-complete.
#
import sys
import subprocess
im2rec = utils.download('https://raw.githubusercontent.com/apache/incubator-mxnet/' +
'6843914f642c8343aaa9a09db803b6af6f5d94a2/tools/im2rec.py', 'im2rec.py')
# In this tutorial we skip generating in subprocess but instead download a prepared val.rec
# subprocess.check_output([sys.executable, 'im2rec.py', 'val', '.', '--no-shuffle', '--pass-through', '--pack-label'])
utils.download('https://gist.github.com/zhreshold/599999eab290e951fcfb26cdd59885e2/raw/0d945eeea2a71ba7bd3e39d463f39921acb786d1/val.rec', 'val.rec')
utils.download('https://gist.github.com/zhreshold/599999eab290e951fcfb26cdd59885e2/raw/0d945eeea2a71ba7bd3e39d463f39921acb786d1/val.idx', 'val.idx')
##############################################################################
# Now similarly, we can create a dataset from the binary file we just created with on line of code:
from gluoncv.data import RecordFileDetection
record_dataset = RecordFileDetection('val.rec', coord_normalized=True)
# we expect same results from LstDetection
print('length:', len(record_dataset))
first_img = record_dataset[0][0]
print('image shape:', first_img.shape)
print('Label example:')
print(record_dataset[0][1])
##############################################################################
#
# .. _pascal_voc_like:
#
# 2. Derive from PASCAL VOC format
# --------------------------------
# If you have a custom dataset that fully complies with the `Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ object detection format,
# that could be good news, because it can be adapted to GluonCV format very quickly.
#
# We provide a template for you to peek the structures
fname = utils.download('https://github.com/dmlc/web-data/blob/master/gluoncv/datasets/VOCtemplate.zip?raw=true', 'VOCtemplate.zip')
with zipfile.ZipFile(fname) as zf:
zf.extractall('.')
##############################################################################
# A VOC-like dataset will have the following structure:
#
"""
VOCtemplate
└── VOC2018
├── Annotations
│ └── 000001.xml
├── ImageSets
│ └── Main
│ └── train.txt
└── JPEGImages
└── 000001.jpg
"""
##############################################################################
# And an example of annotation file:
with open('VOCtemplate/VOC2018/Annotations/000001.xml', 'r') as fid:
print(fid.read())
##############################################################################
# As long as your dataset can match the PASCAL VOC convention, it is convenient to
# derive custom dataset from ``VOCDetection``
from gluoncv.data import VOCDetection
class VOCLike(VOCDetection):
    """PASCAL-VOC-style dataset with a custom class list.

    Only `CLASSES` differs from the stock `VOCDetection`; directory layout
    and XML annotation parsing are inherited unchanged.
    """
    CLASSES = ['person', 'dog']
    def __init__(self, root, splits, transform=None, index_map=None, preload_label=True):
        super(VOCLike, self).__init__(root, splits, transform, index_map, preload_label)
dataset = VOCLike(root='VOCtemplate', splits=((2018, 'train'),))
print('length of dataset:', len(dataset))
print('label example:')
print(dataset[0][1])
##############################################################################
# The last column indicate the difficulties of labeled object
# You can ignore the following section if it's out of your intention in the xml file:
"""<difficult>0</difficult>"""
| 42.490637
| 255
| 0.634465
|
4a0f74fe7692eac5bf5770de306a4278c387ae3c
| 1,154
|
py
|
Python
|
剑指offer/05_PrintListInReversedOrder(从尾到头打印链表).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 2,468
|
2018-04-20T02:58:20.000Z
|
2022-03-29T13:41:38.000Z
|
剑指offer/05_PrintListInReversedOrder(从尾到头打印链表).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 31
|
2018-05-12T08:40:02.000Z
|
2021-05-27T02:51:52.000Z
|
剑指offer/05_PrintListInReversedOrder(从尾到头打印链表).py
|
PegasusWang/python_data_structures_and_algorithms
|
513547526d2926f8e8bff36e9b83905085aa3ee5
|
[
"MIT"
] | 829
|
2018-04-20T05:40:18.000Z
|
2022-03-28T14:33:56.000Z
|
"""
面试题5:从尾到头打印链表
题目:输入一个链表的头结点,从尾到头反过来打印出每个结点的值。链表结点定义如下:
"""
from collections import deque
class Stack:
    """Minimal LIFO stack with O(1) push/pop, backed by collections.deque."""

    def __init__(self):
        self.items = deque()

    def push(self, val):
        """Place *val* on top of the stack."""
        self.items.append(val)

    def pop(self):
        """Remove and return the top value; raises IndexError when empty."""
        return self.items.pop()

    def empty(self):
        """Return True when the stack holds no values."""
        return not self.items
class Node:
    """Singly-linked list node: a value plus a reference to the next node."""

    def __init__(self, val, next=None):
        self.val = val
        self.next = next
class Solution:
    """Print the values of a singly linked list from tail to head."""

    def solve(self, headnode):
        """Iterative version: buffer all values, then print them reversed.

        (Equivalent to pushing every value onto a stack and popping until
        empty, as in the original formulation.)
        """
        vals = []
        node = headnode
        while node:
            vals.append(node.val)
            node = node.next
        for v in reversed(vals):
            print(v)

    def solve2(self, headnode):
        """Recursive version: recurse to the tail first, print on unwind.

        Anything expressible with a stack can be expressed with recursion;
        printing *after* the recursive call yields reverse order.
        """
        if headnode:
            self.solve2(headnode.next)
            print(headnode.val)
def test():
    # Smoke test: list 0 -> 1 should print "1" then "0".
    s = Solution()
    linklist = Node(0, Node(1))
    s.solve2(linklist)
    # linklist = Node(0)
    # s.solve2(linklist)
if __name__ == '__main__':
    test()
| 19.233333
| 55
| 0.560659
|
4a0f758c1e379d206539286c59273a6686cd4eea
| 817
|
py
|
Python
|
data/data_base.py
|
iwbn/unsupsimflow
|
64512020bd67068d527fd9b99dee4d65d18de0a0
|
[
"Apache-2.0"
] | 7
|
2020-10-08T07:34:13.000Z
|
2021-05-27T07:58:39.000Z
|
data/data_base.py
|
iwbn/unsupsimflow
|
64512020bd67068d527fd9b99dee4d65d18de0a0
|
[
"Apache-2.0"
] | 2
|
2020-11-25T08:21:51.000Z
|
2021-03-02T06:47:55.000Z
|
data/data_base.py
|
iwbn/unsupsimflow
|
64512020bd67068d527fd9b99dee4d65d18de0a0
|
[
"Apache-2.0"
] | 1
|
2020-10-12T12:20:29.000Z
|
2020-10-12T12:20:29.000Z
|
import abc
import tensorflow as tf
class Data:
    """Base container mapping string keys to tf.data.Dataset objects.

    Subclasses implement `prepare()` to populate the datasets; callers run
    `initialize()` exactly once, then use `get`/`set`/`keys`.
    """
    # NOTE(review): `__metaclass__` is Python-2 syntax and has no effect in
    # Python 3, so `prepare` is not actually enforced as abstract. Kept
    # as-is because switching the base to `abc.ABC` would make this class
    # non-instantiable and could break existing callers.
    __metaclass__ = abc.ABCMeta
    # NOT YET IMPLEMENTED. YOU MIGHT LATER IMPLEMENT THIS IF NEEDED.

    def __init__(self, name):
        self.name = name
        self._datasets = {}  # dictionary with tf.data.Dataset elements
        self._initialized = False

    def initialize(self):
        """Run `prepare()` exactly once.

        Raises:
            RuntimeError: if the instance was already initialized.
        """
        if self.initialized:
            # Bug fix: the original used a bare `raise` with no active
            # exception, which itself fails with an uninformative
            # RuntimeError("No active exception to re-raise"). Raise an
            # explicit, descriptive RuntimeError instead (same type).
            raise RuntimeError(
                "Data '%s' is already initialized" % self.name)
        self.prepare()
        self._initialized = True

    @abc.abstractmethod
    def prepare(self, *args):
        """Populate `self._datasets`; implemented by subclasses."""
        pass

    def get(self, key):
        """Return the dataset registered under `key` (KeyError if absent)."""
        return self._datasets[key]

    def set(self, key, dataset):
        """Register `dataset` under `key`, replacing any previous entry."""
        self._datasets[key] = dataset

    @property
    def keys(self):
        """List of registered dataset keys."""
        return list(self._datasets.keys())

    @property
    def initialized(self):
        """True once `initialize()` has completed successfully."""
        return self._initialized
| 21.5
| 71
| 0.611995
|
4a0f75ca9af833e7649e797740a67f56d8fc67cd
| 268
|
py
|
Python
|
data/contacts.py
|
SvetlanaPopova/python_1
|
5acc26e3d3746d7fcf48603d9ca9064e39c248ca
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
SvetlanaPopova/python_1
|
5acc26e3d3746d7fcf48603d9ca9064e39c248ca
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
SvetlanaPopova/python_1
|
5acc26e3d3746d7fcf48603d9ca9064e39c248ca
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'User'
from model.contact import Contact
# Fixture data: two fully populated contacts used by the test suite.
testdata = [
    Contact(firstname="firstname1", lastname="lastname1", address="address1", mobilephone="1111"),
    Contact(firstname="firstname2", lastname="lastname2", address="address2", mobilephone="2222")
]
| 29.777778
| 98
| 0.735075
|
4a0f760581155537d3e651954b378bc7f71d85b2
| 2,170
|
py
|
Python
|
spot/utils/HDFSutils.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | 1
|
2022-01-30T06:17:11.000Z
|
2022-01-30T06:17:11.000Z
|
spot/utils/HDFSutils.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | 2
|
2022-01-14T19:41:02.000Z
|
2022-02-02T16:04:49.000Z
|
spot/utils/HDFSutils.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
# Default HDFS block size: 128 MiB (134217728 bytes).
hdfs_block_size = 134217728
class HDFSutils:
    """Helpers for inspecting input data on HDFS via the `hdfs` CLI."""

    def __init__(self, hdfs_block_size=hdfs_block_size):
        # Falls back to the module-level default (128 MiB) unless overridden.
        self.hdfs_block_size = hdfs_block_size

    def get_input_size(self, dir_path):
        """Return (total_bytes, total_blocks) for the data files in `dir_path`.

        Files whose basename starts with '_' or '.' are skipped (Hadoop
        markers such as _SUCCESS and hidden files). Shells out to
        `hdfs dfs -ls` and `hdfs fsck`, so the Hadoop CLI must be on PATH.
        """
        print(f"Checking input dir: {dir_path}")
        listing = subprocess.run(['hdfs', 'dfs', '-ls', '-C', dir_path],
                                 check=True,
                                 stdout=subprocess.PIPE,
                                 universal_newlines=True).stdout.split('\n')
        data_files = []
        for entry in listing:
            basename = entry.split('/')[-1]
            print(basename)
            if entry and basename[0] not in ('_', '.'):
                data_files.append(entry)
        total_bytes = 0
        total_blocks = 0
        for data_file in data_files:
            fsck_out = subprocess.run(['hdfs', 'fsck', data_file, '-files'],
                                      check=True,
                                      stdout=subprocess.PIPE,
                                      universal_newlines=True).stdout
            # Second fsck output line holds: <path> <bytes> ... <blocks> ...
            stats = fsck_out.split('\n')[1].split(' ')
            n_bytes = int(stats[1])
            n_blocks = int(stats[5])
            total_bytes += n_bytes
            total_blocks += n_blocks
            print(f"{n_blocks} blocks, {n_bytes} bytes {data_file}")
        print(f"Input totals: {total_bytes} bytes, {total_blocks} HDFS blocks")
        return total_bytes, total_blocks
| 39.454545
| 81
| 0.576037
|
4a0f76db2333eb75a5249de58ad494b1bab51401
| 1,290
|
py
|
Python
|
molecule/latest/tests/test_defaults.py
|
tschoonj/ansible-role-guacamole-exporter
|
6f041058c0fc5d4e0ce9f04fea7472c082575776
|
[
"MIT"
] | null | null | null |
molecule/latest/tests/test_defaults.py
|
tschoonj/ansible-role-guacamole-exporter
|
6f041058c0fc5d4e0ce9f04fea7472c082575776
|
[
"MIT"
] | null | null | null |
molecule/latest/tests/test_defaults.py
|
tschoonj/ansible-role-guacamole-exporter
|
6f041058c0fc5d4e0ce9f04fea7472c082575776
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner

# Resolve the hosts for this scenario from the inventory file that Molecule
# generated (its path arrives via the MOLECULE_INVENTORY_FILE env var).
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_files(host):
    """The exporter's unit file, binary and config must exist as files."""
    expected = (
        "/etc/systemd/system/guacamole_exporter.service",
        "/usr/local/bin/guacamole_exporter",
        "/etc/guacamole_exporter.conf",
    )
    for path in expected:
        entry = host.file(path)
        assert entry.exists
        assert entry.is_file
def test_permissions_didnt_change(host):
    """Top-level system directories still exist and remain root-owned."""
    for path in ("/etc", "/root", "/usr", "/var"):
        entry = host.file(path)
        assert entry.exists
        assert entry.is_directory
        assert entry.user == "root"
        assert entry.group == "root"
def test_user(host):
    """Service account exists, with a nologin shell and '/' as home."""
    assert host.group("guacamole-exp").exists
    account = host.user("guacamole-exp")
    assert "guacamole-exp" in account.groups
    assert account.shell == "/usr/sbin/nologin"
    assert account.home == "/"
def test_service(host):
    """The exporter service must be running (enabled-check left disabled)."""
    service = host.service("guacamole_exporter")
    # assert service.is_enabled
    assert service.is_running
def test_socket(host):
    """The exporter must be listening on localhost:9623."""
    for address in ("tcp://127.0.0.1:9623",):
        assert host.socket(address).is_listening
| 23.454545
| 66
| 0.620155
|
4a0f770ab9a598cbd754b2fd243fd0f7fb7fba9e
| 25,330
|
py
|
Python
|
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
xfel/command_line/cspad_cbf_metrology.py
|
indu-in/cctbx_project1
|
e09447ddc2ba3aa9d91b21008b0162ab290b0c30
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cspad.cbf_metrology
#
from __future__ import absolute_import, division, print_function
from six.moves import range
import os, sys, random
from iotbx.phil import parse
from libtbx import easy_run
from libtbx.utils import Sorry
import six
from six.moves import zip
phil_scope = parse("""
method = *hierarchical expanding
.type = choice
reflections = reindexedstrong *indexed integrated
.type = choice
.help = Which subset of reflections
tag = cspad
.type = str
.help = Name of this refinement run. Output filenames will use this tag.
start_at_hierarchy_level = 0
.type = int
.help = Start refinement at this hierarchy level
refine_to_hierarchy_level = 2
.type = int
.help = maximum level to refine cspad to
refine_distance = True
.type = bool
.help = If true, allow root hierarchy level to refine in Z. Otherwise fix this \
axis. Regardless, higher hierarchy levels will refine in Z.
refine_energy = False
.type = bool
.help = If true, when refining level 0, also refine beam energy. Subsequent hierarchy \
levels will fix the energy in place.
flat_refinement = False
.type = bool
.help = If True, do not refine tilt (Tau2 and Tau3) when refining panel positions. Further, \
don't refine distance at levels 1 or higher (respects refine_distance for level 0).
flat_refinement_with_distance = False
.type = bool
.help = If True, and if using flat refinement, then use constraints to allow disance \
to refine at levels 1 and higher.
n_subset = None
.type = int
.help = Refine a random subset of the provided files
split_dataset = False
.type = bool
.help = After refining the full set of images, if split_dataset is True, the \
data will be split in two using odd and even file numbers and each half \
will be refined independently. For each half, _1 or _2 is appended to \
the tag. If used with n_subset, each half will have n_subset/2 images.
data_phil = None
.type = str
.help = Optional phil file with all experiments and reflections for use during \
refinement. If not provided, the program will use whatever directories \
were specified.
rmsd_filter {
enable = True
.type = bool
.help = If enabled, between each round of hierarchical refinement, filter \
the images by positional RMSD
iqr_multiplier = 1.5
.type = float
.help = Interquartile multiplier
}
n_subset_method = *random n_refl significance_filter
.type = choice
.help = Algorithm to be used for choosing the n_subset images/experiments for \
refinement. n_refl chooses the set with the largest numbers of reflections \
listed in the reflection table files, thus giving maximal coverage of the detector tiles \
with the fewest refineable parameters. Significance_filter chooses the subset of \
images with maximum reflections above an I/sigI cutoff
n_refl_panel_list = None
.type = ints
.help = If n_subset_method is n_refl, specify which panels to search on.
panel_filter = None
.type = ints
.help = Specify a list of panels to include during refinement. Default (None) is to use \
all panels.
output_lcls_geometry = True
.type = bool
.help = If True, convert final refined geometry to LCLS format
""", process_includes=True)
refine_defaults_scope = parse("""
output.include_unused_reflections=False
refinement {
refinery.engine = SparseLevMar
parameterisation {
beam.fix=all
auto_reduction {
action=remove
min_nref_per_parameter=3
}
}
reflections {
outlier {
algorithm=sauter_poon
separate_panels=True
separate_experiments=False
}
}
}
""")
def is_even(filename):
  """Return True when the last digit of the last number in *filename* is even.

  Used to split a dataset into halves by file numbering.
  """
  import re
  last_number = re.findall(r'\d+', filename)[-1]
  return int(last_number[-1]) % 2 == 0
refine_scope = parse("""
include scope dials.command_line.refine.phil_scope
""", process_includes=True)
def run(args):
  """Program entry point.

  Parses command-line phil arguments and directory/file paths, gathers
  experiment/reflection file pairs, and drives the metrology refinement —
  optionally twice more on odd/even half-datasets (split_dataset) so the
  two refined geometries can be compared for reproducibility.
  """
  print("Parsing input...")
  # -c/-h/--help: show the master phil and exit.
  if "-c" in args or "-h" in args or "--help" in args:
    phil_scope.show(attributes_level=2)
    return
  user_phil = []
  paths = []
  refine_phil_file = None
  # Classify each argument: a .phil file of refinement overrides, an input
  # directory, or a free-form phil assignment.
  for arg in args:
    if os.path.isfile(arg):
      try:
        if os.path.splitext(arg)[1] == ".phil":
          refine_phil_file = arg
          continue
      except Exception as e:
        raise Sorry("Unrecognized file %s"%arg)
    if os.path.isdir(arg):
      paths.append(arg)
    else:
      try:
        user_phil.append(parse(arg))
      except Exception as e:
        raise Sorry("Unrecognized argument: %s"%arg)
  params = phil_scope.fetch(sources=user_phil).extract()
  # Layer program defaults (and the optional user phil file) on top of the
  # full dials.refine scope.
  merged_scope = refine_scope.fetch(refine_defaults_scope)
  if refine_phil_file is not None:
    merged_scope = merged_scope.fetch(parse(file_name = refine_phil_file))
  print("Gathering file names...")
  all_exp = []
  all_ref = []
  if params.data_phil is None:
    # Discover experiment/reflection pairs by scanning the given directories.
    for path in paths:
      exp, ref = find_files(path, params.reflections)
      all_exp.extend(exp)
      all_ref.extend(ref)
    if params.split_dataset:
      # Partition by odd/even file number, refine the full set plus each half.
      even_exp = []
      odd_exp = []
      even_ref = []
      odd_ref = []
      for exp, ref in zip(all_exp, all_ref):
        if is_even(exp):
          even_exp.append(exp)
          even_ref.append(ref)
        else:
          odd_exp.append(exp)
          odd_ref.append(ref)
      base_tag = params.tag
      base_n_subset = params.n_subset
      # Each half gets half of the requested subset size.
      params.n_subset = base_n_subset // 2
      params.tag = base_tag + "_1"
      odd_combine_phil = write_combine_phil(params, odd_exp, odd_ref)
      params.tag = base_tag + "_2"
      even_combine_phil = write_combine_phil(params, even_exp, even_ref)
      params.tag = base_tag
      params.n_subset = base_n_subset
      full_combine_phil = write_combine_phil(params, odd_exp+even_exp, odd_ref+even_ref)
      print("Refining full dataset using tag", params.tag)
      refine(params, merged_scope, full_combine_phil)
      params.tag = base_tag + "_1"
      print("Refining odd numbered data using tag", params.tag)
      refine(params, merged_scope, odd_combine_phil)
      params.tag = base_tag + "_2"
      print("Refining even numbered data using tag", params.tag)
      refine(params, merged_scope, even_combine_phil)
    else:
      combine_phil = write_combine_phil(params, all_exp, all_ref)
      refine(params, merged_scope, combine_phil)
  else:
    # A pre-built data phil was supplied: directories and n_subset are
    # incompatible with this mode.
    assert len(paths) == 0
    assert params.n_subset is None
    print("Refining full dataset using tag", params.tag)
    refine(params, merged_scope, params.data_phil)
    if params.split_dataset:
      # Re-read the data phil to recover the file lists, then split and
      # refine each half as above.
      input_scope = parse("""
input {
experiments = None
.type = str
.multiple = True
.help = "The experiment list file path"
reflections = None
.type = str
.multiple = True
.help = "The reflection table file path"
}
""")
      input_params = input_scope.fetch(parse(file_name = params.data_phil)).extract()
      even_exp = []
      odd_exp = []
      even_ref = []
      odd_ref = []
      for f in input_params.input.experiments:
        if is_even(f):
          even_exp.append(f)
        else:
          odd_exp.append(f)
      for f in input_params.input.reflections:
        if is_even(f):
          even_ref.append(f)
        else:
          odd_ref.append(f)
      base_tag = params.tag
      params.tag = base_tag + "_1"
      odd_combine_phil = write_combine_phil(params, odd_exp, odd_ref)
      params.tag = base_tag + "_2"
      even_combine_phil = write_combine_phil(params, even_exp, even_ref)
      params.tag = base_tag + "_1"
      print("Refining odd numbered data using tag", params.tag)
      refine(params, merged_scope, odd_combine_phil)
      params.tag = base_tag + "_2"
      print("Refining even numbered data using tag", params.tag)
      refine(params, merged_scope, even_combine_phil)
def find_files(path, reflections):
  """Return matched (experiment, reflection) file path lists found in *path*.

  A reflection file matches if its name contains *reflections* and has one of
  the known extensions.  The corresponding experiment file is derived from the
  reflection file's base name, preferring the refined output and falling back
  to the indexed/original one.  Pairs with no experiment file are skipped.
  """
  def _strip_suffix(name, suffix):
    # NOTE: the original code used str.rstrip(suffix), but rstrip strips a
    # *character set*, not a suffix — e.g. "shot_reflections.refl" would be
    # over-stripped to "sh".  Remove the exact suffix instead.
    return name[:-len(suffix)] if name.endswith(suffix) else name
  all_exp = []
  all_ref = []
  for filename in os.listdir(path):
    if reflections in filename:
      extension = os.path.splitext(filename)[1]
      if extension not in ['.pickle', '.mpack', '.refl']: continue
      base = _strip_suffix(filename, "_%s%s"%(reflections, extension))
      # Legacy .pickle data uses json experiment lists; newer data uses .expt.
      if extension == ".pickle":
        exp_path = os.path.join(path, base + "_refined_experiments.json")
      else:
        exp_path = os.path.join(path, base + "_refined.expt")
      if not os.path.exists(exp_path):
        # Fall back to the pre-refinement experiment file.
        if extension == ".pickle":
          exp_path = os.path.join(path, base + "_experiments.json")
        else:
          exp_path = os.path.join(path, base + "_indexed.expt")
      if not os.path.exists(exp_path): continue
      all_exp.append(exp_path)
      all_ref.append(os.path.join(path, filename))
  return all_exp, all_ref
def write_combine_phil(params, all_exp, all_ref):
  """Write a phil file pairing each experiment path with its reflection path.

  The file is named "<tag>_combine.phil" and is consumed by
  dials.combine_experiments.  Returns the file name.
  """
  combine_phil = "%s_combine.phil"%params.tag
  chunks = []
  for exp_path, ref_path in zip(all_exp, all_ref):
    chunks.append("input {\n")
    chunks.append(" experiments = %s\n"%exp_path)
    chunks.append(" reflections = %s\n"%ref_path)
    chunks.append("}\n")
  with open(combine_phil, 'w') as handle:
    handle.write("".join(chunks))
  return combine_phil
def refine(params, merged_scope, combine_phil):
  """Combine all experiments into one file, then dispatch to the chosen
  refinement strategy (hierarchical or expanding).

  Runs dials.combine_experiments as a subprocess with an averaged detector
  so all images share a single metrology model to refine.
  """
  print("Combining experiments...")
  command = "dials.combine_experiments reference_from_experiment.average_detector=True reference_from_experiment.average_hierarchy_level=0 output.experiments_filename=%s_combined.expt output.reflections_filename=%s_combined.refl %s"%(params.tag, params.tag, combine_phil)
  # Optional subset selection and per-panel reflection counting.
  if params.n_subset is not None:
    command += " n_subset=%d n_subset_method=%s"%(params.n_subset, params.n_subset_method)
  if params.n_refl_panel_list is not None:
    command += " n_refl_panel_list=%s"%(",".join(["%d"%p for p in params.n_refl_panel_list]))
  if params.refine_energy:
    # Keep the beam from the first experiment so its energy can be refined.
    command += " reference_from_experiment.beam=0"
  print(command)
  result = easy_run.fully_buffered(command=command).raise_if_errors()
  result.show_stdout()
  if params.method == 'hierarchical':
    refine_hierarchical(params, merged_scope, combine_phil)
  elif params.method == 'expanding':
    refine_expanding(params, merged_scope, combine_phil)
def refine_hierarchical(params, merged_scope, combine_phil):
  """Refine the detector metrology level by level, coarsest first.

  For each hierarchy level a diff phil is built fixing the appropriate
  parameters, dials.refine is run as a subprocess, and (optionally) the
  images are filtered by positional RMSD between levels.
  """
  if params.panel_filter is not None:
    # Keep only reflections on the requested panels before refining.
    from libtbx import easy_pickle
    print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter])))
    combined_path = "%s_combined.refl"%params.tag
    data = easy_pickle.load(combined_path)
    sel = None
    for panel_id in params.panel_filter:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
    easy_pickle.dump(combined_path, data.select(sel))
  for i in range(params.start_at_hierarchy_level, params.refine_to_hierarchy_level+1):
    # Pick which files feed this level: the RMSD-filtered set if filtering is
    # on, otherwise the combined input (first level) or the previous level's
    # refined output.
    if params.rmsd_filter.enable:
      input_name = "filtered"
    else:
      if i == params.start_at_hierarchy_level:
        input_name = "combined"
      else:
        input_name = "refined"
    if params.rmsd_filter.enable:
      command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
      if i == params.start_at_hierarchy_level:
        command = command%("%s_combined.expt"%params.tag, "%s_combined.refl"%params.tag,
                           "%s_filtered.expt"%params.tag, "%s_filtered.refl"%params.tag)
      else:
        command = command%("%s_refined_level%d.expt"%(params.tag, i-1), "%s_refined_level%d.refl"%(params.tag, i-1),
                           "%s_filtered_level%d.expt"%(params.tag, i-1), "%s_filtered_level%d.refl"%(params.tag, i-1))
      command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
      print(command)
      result = easy_run.fully_buffered(command=command).raise_if_errors()
      result.show_stdout()
    print("Refining at hierarchy level", i)
    refine_phil_file = "%s_refine_level%d.phil"%(params.tag, i)
    if i == 0:
      fix_list = ['Tau1'] # fix detector rotz
      if not params.refine_distance:
        fix_list.append('Dist')
      if params.flat_refinement:
        fix_list.extend(['Tau2','Tau3'])
      diff_phil = "refinement.parameterisation.detector.fix_list=%s\n"%",".join(fix_list)
      if params.refine_energy:
        diff_phil += " refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
    else:
      # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
      if params.flat_refinement and params.flat_refinement_with_distance:
        diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
        diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
      elif params.flat_refinement:
        diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
      else:
        diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything
    if i == params.start_at_hierarchy_level:
      command = "dials.refine %s %s_%s.expt %s_%s.refl"%(refine_phil_file, params.tag, input_name, params.tag, input_name)
    else:
      # BUGFIX: the experiments path was "%s_%slevel%d.expt" (missing the
      # underscore before "level"); upstream steps write e.g.
      # "<tag>_filtered_level<n>.expt" / "<tag>_refined_level<n>.expt".
      command = "dials.refine %s %s_%s_level%d.expt %s_%s_level%d.refl"%(refine_phil_file, params.tag, input_name, i-1, params.tag, input_name, i-1)
    diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i
    command += " output.experiments=%s_refined_level%d.expt output.reflections=%s_refined_level%d.refl"%( \
      params.tag, i, params.tag, i)
    # Write out only the differences from the master refine scope.
    scope = merged_scope.fetch(parse(diff_phil))
    with open(refine_phil_file, 'w') as f:
      f.write(refine_scope.fetch_diff(scope).as_str())
    print(command)
    result = easy_run.fully_buffered(command=command).raise_if_errors()
    result.show_stdout()
  output_geometry(params)
def refine_expanding(params, merged_scope, combine_phil):
  """Refine the CSPAD metrology in 8 expanding steps.

  Starting from the innermost quads/panels, each step adds more panels to
  the working set, filters the reflections down to those panels, and refines
  at the hierarchy levels assigned to that step.  Must start at hierarchy
  level 0.
  """
  assert params.start_at_hierarchy_level == 0
  if params.rmsd_filter.enable:
    # Pre-filter the combined data by positional RMSD.
    input_name = "filtered"
    command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
    command = command%("%s_combined.expt"%params.tag, "%s_combined.refl"%params.tag,
                       "%s_filtered.expt"%params.tag, "%s_filtered.refl"%params.tag)
    command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
    print(command)
    result = easy_run.fully_buffered(command=command).raise_if_errors()
    result.show_stdout()
  else:
    input_name = "combined"
  # --------------------------
  if params.panel_filter is not None:
    # Keep only reflections on the requested panels.
    from libtbx import easy_pickle
    print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter])))
    combined_path = "%s_combined.refl"%params.tag
    data = easy_pickle.load(combined_path)
    sel = None
    for panel_id in params.panel_filter:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
    easy_pickle.dump(combined_path, data.select(sel))
  # ----------------------------------
  # this is the order to refine the CSPAD in
  steps = {}
  steps[0] = [2, 3]
  steps[1] = steps[0] + [0, 1]
  steps[2] = steps[1] + [14, 15]
  steps[3] = steps[2] + [6, 7]
  steps[4] = steps[3] + [4, 5]
  steps[5] = steps[4] + [12, 13]
  steps[6] = steps[5] + [8, 9]
  steps[7] = steps[6] + [10, 11]
  # Panel ids listed above are for one quadrant; replicate each into the
  # other three quadrants (offsets 16, 32 and 48).
  for s, panels in six.iteritems(steps):
    rest = []
    for p in panels:
      rest.append(p+16)
      rest.append(p+32)
      rest.append(p+48)
    panels.extend(rest)
  # Hierarchy levels refined at each step: step 0 covers levels 0 and 1,
  # all later steps refine level 2 only.
  levels = {0: (0,1)} # levels 0 and 1
  for i in range(7):
    levels[i+1] = (2,) # level 2
  previous_step_and_level = None
  for j in range(8):
    from libtbx import easy_pickle
    print("Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in steps[j]])))
    combined_path = "%s_%s.refl"%(params.tag, input_name)
    output_path = "%s_step%d.refl"%(params.tag, j)
    data = easy_pickle.load(combined_path)
    sel = None
    for panel_id in steps[j]:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print("Retaining", len(data.select(sel)), "out of", len(data), "reflections")
    easy_pickle.dump(output_path, data.select(sel))
    for i in levels[j]:
      print("Step", j , "refining at hierarchy level", i)
      refine_phil_file = "%s_refine_step%d_level%d.phil"%(params.tag, j, i)
      if i == 0:
        if params.refine_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Tau1" # fix detector rotz
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1" # fix detector rotz, distance
        if params.flat_refinement:
          diff_phil += ",Tau2,Tau3" # Also fix x and y rotations
        diff_phil += "\n"
        if params.refine_energy:
          diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
      else:
        # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
        if params.flat_refinement and params.flat_refinement_with_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
          diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
        elif params.flat_refinement:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything
      # Chain the input experiments from the previous step/level's output.
      if previous_step_and_level is None:
        command = "dials.refine %s %s_%s.expt %s_step%d.refl"%( \
          refine_phil_file, params.tag, input_name, params.tag, j)
      else:
        p_step, p_level = previous_step_and_level
        if p_step == j:
          command = "dials.refine %s %s_refined_step%d_level%d.expt %s_refined_step%d_level%d.refl"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
        else:
          command = "dials.refine %s %s_refined_step%d_level%d.expt %s_step%d.refl"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, j)
      diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i
      output_experiments = "%s_refined_step%d_level%d.expt"%(params.tag, j, i)
      command += " output.experiments=%s output.reflections=%s_refined_step%d_level%d.refl"%( \
        output_experiments, params.tag, j, i)
      scope = merged_scope.fetch(parse(diff_phil))
      f = open(refine_phil_file, 'w')
      f.write(refine_scope.fetch_diff(scope).as_str())
      f.close()
      print(command)
      result = easy_run.fully_buffered(command=command).raise_if_errors()
      result.show_stdout()
      # In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
      # panels will have been left behind. Read back the new metrology, compute the shift applied to the panels refined
      # in this step,and apply that shift to the unrefined panels in this step
      if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
        from dxtbx.model.experiment_list import ExperimentListFactory
        from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
        from scitbx.array_family import flex
        from scitbx.matrix import col
        from libtbx.test_utils import approx_equal
        experiments = ExperimentListFactory.from_json_file(output_experiments, check_format=False)
        assert len(experiments.detectors()) == 1
        detector = experiments.detectors()[0]
        # Displacements: deltas along the vector normal to the detector
        displacements = flex.double()
        # Iterate through the panel groups at this level
        for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
          # Were there panels refined in this step in this panel group?
          if params.panel_filter:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group) if list(detector).index(panel) in params.panel_filter]
          else:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
          if not any(test): continue
          # Compute the translation along the normal of this panel group. This is defined as distance in dials.refine
          displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))
        # Even though the panels are constrained to move the same amount, there is a bit a variation.
        stats = flex.mean_and_variance(displacements)
        displacement = stats.mean()
        print("Average displacement along normals: %f +/- %f"%(stats.mean(), stats.unweighted_sample_standard_deviation()))
        # Verify the variation isn't significant
        for k in range(1, len(displacements)):
          assert approx_equal(displacements[0], displacements[k])
        # If all of the panel groups in this level moved, no need to do anything.
        if len(displacements) != len(list(iterate_detector_at_level(detector.hierarchy(), 0, i))):
          for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
            if params.panel_filter:
              test = [list(detector).index(panel) in steps[j] and list(detector).index(panel) in params.panel_filter for panel in iterate_panels(panel_group)]
            else:
              test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
            # If any of the panels in this panel group moved, no need to do anything
            if any(test): continue
            # None of the panels in this panel group moved in this step, so need to apply displacement from other panel
            # groups at this level
            fast = col(panel_group.get_local_fast_axis())
            slow = col(panel_group.get_local_slow_axis())
            ori = col(panel_group.get_local_origin())
            normal = fast.cross(slow)
            panel_group.set_local_frame(fast, slow, (ori.dot(fast)*fast) + (ori.dot(slow)*slow) + (normal*displacement))
          # Check the new displacements. Should be the same across all panels.
          displacements = []
          for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
            displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))
          for k in range(1, len(displacements)):
            assert approx_equal(displacements[0], displacements[k])
          experiments.as_file(output_experiments)
      previous_step_and_level = j,i
  output_geometry(params)
def output_geometry(params):
  """Convert the final refined geometry to a CBF .def file and, optionally,
  to LCLS (psana) calibration format.

  The deployment level is capped at 2 because the psana calibration format
  does not describe hierarchy below that level.
  """
  print("Creating files to deploy to psana calibration directory...")
  if params.refine_to_hierarchy_level > 2:
    deploy_level = 2
  else:
    deploy_level = params.refine_to_hierarchy_level
  # The expanding method writes its final output under step 7.
  if params.method == 'hierarchical':
    command = "cxi.experiment_json_to_cbf_def %s_refined_level%d.expt output_def_file=%s_refined_detector_level%d.def"%(params.tag, deploy_level, params.tag, deploy_level)
  elif params.method == 'expanding':
    command = "cxi.experiment_json_to_cbf_def %s_refined_step7_level%d.expt output_def_file=%s_refined_detector_level%d.def"%(params.tag, deploy_level, params.tag, deploy_level)
  print(command)
  result = easy_run.fully_buffered(command=command).raise_if_errors()
  result.show_stdout()
  if params.output_lcls_geometry:
    command = "cxi.cbfheader2slaccalib cbf_header=%s_refined_detector_level%d.def out_metrology_file=0-end.data.%s"%(params.tag, deploy_level, params.tag)
    print(command)
    result = easy_run.fully_buffered(command=command)
    # The LCLS conversion is best-effort: tolerate a missing PSDM install
    # (ImportError on PSCalib) but re-raise anything else.
    errmsg = "\n".join(result.stderr_lines)
    if "ImportError" in errmsg and "PSCalib.GeometryAccess" in errmsg:
      print("Not converting to LCLS geometry as PSDM is not available")
      print("Done.")
    else:
      result.raise_if_errors()
      result.show_stdout()
      print("Done. Soft link 0-end.data.%s to 0-end.data in the geometry folder of your calibration folder for your experiment to deploy this metrology."%params.tag)
# Command-line entry point: forward everything after the program name to run().
if __name__ == "__main__":
  run(sys.argv[1:])
| 43.005093
| 271
| 0.680261
|
4a0f779a1ccc1478d2d3f2d1931f54979ef28877
| 2,198
|
py
|
Python
|
test/pymetry.py
|
Jahongir2007/pymetry
|
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
|
[
"MIT"
] | 1
|
2021-04-04T11:38:42.000Z
|
2021-04-04T11:38:42.000Z
|
test/pymetry.py
|
Jahongir2007/pymetry
|
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
|
[
"MIT"
] | null | null | null |
test/pymetry.py
|
Jahongir2007/pymetry
|
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
|
[
"MIT"
] | null | null | null |
'''
Author: Jahongir Sobirov
License: MIT
Version: 1.0.0
All rights reserved 2021 (c)
'''
import turtle
# Single shared turtle used by every drawing helper in this module.
pymetry = turtle.Turtle()
def square(distance, color, bold):
    """Draw a square with side length *distance* in the given pen color/width."""
    pymetry.color(color)
    pymetry.pensize(bold)
    for _side in range(4):
        pymetry.forward(distance)
        pymetry.right(90)
def rect(distancer, color, bold):
    """Draw two perpendicular segments of length *distancer* (down, then right).

    NOTE(review): despite the name, this draws only two sides of a rectangle,
    not a closed shape — confirm against callers before changing.
    """
    pymetry.color(color)
    pymetry.pensize(bold)
    pymetry.right(90)
    pymetry.forward(distancer)
    pymetry.left(90)
    pymetry.forward(distancer)
def circle(distance, color, bold):
    """Draw a circle of radius *distance* with the given pen color and width."""
    pymetry.color(color)
    pymetry.pensize(bold)
    pymetry.circle(distance)
def corner(angle, distance, color, bold):
    """Draw a corner: turn right by *angle*, draw a segment, turn back, draw again."""
    pymetry.color(color)
    pymetry.pensize(bold)
    pymetry.right(angle)
    pymetry.forward(distance)
    pymetry.left(angle)
    pymetry.forward(distance)
def triangle(a, b, distance, color, bold, closing=142):
    """Draw a triangle: two sides of length *distance* with left turns *a* and
    *b* between them, closed by a final side of length *closing*.

    *closing* defaults to 142 to preserve the original hard-coded behavior
    (142 ~= 100*sqrt(2), the hypotenuse for distance=100, a=90, b=135);
    pass a different value to close triangles of other sizes correctly.
    """
    pymetry.color(color)
    pymetry.pensize(bold)
    pymetry.forward(distance)
    pymetry.left(a)
    pymetry.forward(distance)
    pymetry.left(b)
    pymetry.forward(closing)
def trsize(a, b, c):
    """Resize the turtle cursor: stretch width *a*, stretch length *b*, outline *c*."""
    pymetry.shapesize(a, b, c)
def pentagon(distance, color, bold):
    """Draw a regular pentagon with side length *distance* (72-degree turns)."""
    pymetry.color(color)
    pymetry.pensize(bold)
    for _side in range(5):
        pymetry.forward(distance)
        pymetry.right(72)
def hexagon(distance, color, bold):
    """Draw a regular hexagon with side length *distance* (60-degree turns)."""
    pymetry.color(color)
    pymetry.pensize(bold)
    for _side in range(6):
        pymetry.forward(distance)
        pymetry.right(60)
def heptagon(distance, color, bold):
    """Draw a regular heptagon with side length *distance*.

    Uses the exact exterior angle 360/7 instead of the previous truncated
    51.42, so the shape closes precisely — consistent with polygon(), which
    also computes 360/n.
    """
    pymetry.color(color)
    pymetry.pensize(bold)
    for i in range(7):
        pymetry.forward(distance)
        pymetry.right(360 / 7)
def octagon(distance, color, bold):
    """Draw a regular octagon with side length *distance* (45-degree turns)."""
    pymetry.color(color)
    pymetry.pensize(bold)
    for _side in range(8):
        pymetry.forward(distance)
        pymetry.right(45)
def polygon(color, bold, n=None, l=None):
    """Draw a regular polygon with *n* sides of length *l*.

    Backward compatible: when *n* or *l* is omitted (the original behavior),
    the value is read interactively from stdin, so existing callers that
    relied on the prompts are unaffected while new code can pass the sizes
    directly.
    """
    pymetry.color(color)
    pymetry.pensize(bold)
    if n is None:
        n = int(input("Enter the no of the sides of the polygon : "))
    if l is None:
        l = int(input("Enter the length of the sides of the polygon : "))
    for i in range(n):
        pymetry.forward(l)
        pymetry.right(360 / n)
| 27.475
| 69
| 0.666515
|
4a0f779dde87c3e7ec6daa6407d8bc3d4934e506
| 5,366
|
py
|
Python
|
datasets/kinnews_kirnews/kinnews_kirnews.py
|
WojciechKusa/datasets
|
1406a04c3e911cec2680d8bc513653e0cafcaaa4
|
[
"Apache-2.0"
] | 10,608
|
2020-09-10T15:47:50.000Z
|
2022-03-31T22:51:47.000Z
|
datasets/kinnews_kirnews/kinnews_kirnews.py
|
realChainLife/datasets
|
98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd
|
[
"Apache-2.0"
] | 2,396
|
2020-09-10T14:55:31.000Z
|
2022-03-31T19:41:04.000Z
|
datasets/kinnews_kirnews/kinnews_kirnews.py
|
realChainLife/datasets
|
98261e8b0b7be4dbaaa71ae188b950f7fbe51bbd
|
[
"Apache-2.0"
] | 1,530
|
2020-09-10T21:43:10.000Z
|
2022-03-31T01:59:12.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kinyarwanda and Kirundi news classification datasets."""
import csv
import os
import datasets
_CITATION = """\
@article{niyongabo2020kinnews,
title={KINNEWS and KIRNEWS: Benchmarking Cross-Lingual Text Classification for Kinyarwanda and Kirundi},
author={Niyongabo, Rubungo Andre and Qu, Hong and Kreutzer, Julia and Huang, Li},
journal={arXiv preprint arXiv:2010.12174},
year={2020}
}
"""
_DESCRIPTION = """\
Kinyarwanda and Kirundi news classification datasets
"""
_HOMEPAGE = "https://github.com/Andrews2017/KINNEWS-and-KIRNEWS-Corpus"
_LICENSE = "MIT License"
_URLs = {
"kinnews": "https://github.com/saradhix/kinnews_kirnews/raw/master/KINNEWS.zip",
"kirnews": "https://github.com/saradhix/kinnews_kirnews/raw/master/KIRNEWS.zip",
}
class KinnewsKirnews(datasets.GeneratorBasedBuilder):
    """This is Kinyarwanda and Kirundi news dataset called KINNEWS and KIRNEWS."""

    VERSION = datasets.Version("1.1.0")

    # Four configs: {language} x {raw, cleaned}.  Raw configs carry extra
    # columns (native-language label, English label, URL); cleaned configs
    # keep only label/title/content.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="kinnews_raw", description="Dataset for Kinyarwanda language"),
        datasets.BuilderConfig(name="kinnews_cleaned", description="Cleaned dataset for Kinyarwanda language"),
        datasets.BuilderConfig(name="kirnews_raw", description="Dataset for Kirundi language"),
        datasets.BuilderConfig(name="kirnews_cleaned", description="Cleaned dataset for Kirundi language"),
    ]

    # The 14 topic categories; the CSV stores labels as 1-based indices into
    # this list (see _generate_examples).
    class_labels = [
        "politics",
        "sport",
        "economy",
        "health",
        "entertainment",
        "history",
        "technology",
        "tourism",
        "culture",
        "fashion",
        "religion",
        "environment",
        "education",
        "relationship",
    ]

    # Name of the language-specific label column, per raw config.
    label_columns = {"kinnews_raw": "kin_label", "kirnews_raw": "kir_label"}

    def _info(self):
        # Raw configs expose all CSV columns; cleaned configs only the
        # classification-relevant ones.
        if "raw" in self.config.name:
            features = datasets.Features(
                {
                    "label": datasets.ClassLabel(names=self.class_labels),
                    self.label_columns[self.config.name]: datasets.Value("string"),
                    "en_label": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "label": datasets.ClassLabel(names=self.class_labels),
                    "title": datasets.Value("string"),
                    "content": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Config name is "<lang>_<kind>", e.g. "kinnews_raw"; the archive
        # layout is <LANG>/<kind>/{train,test}.csv.
        lang, kind = self.config.name.split("_")
        data_dir = dl_manager.download_and_extract(_URLs[lang])
        lang_dir = lang.upper()
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang_dir, kind, "train.csv"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, lang_dir, kind, "test.csv"), "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(
                csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            # Skip the CSV header row.
            next(csv_reader)
            for id_, row in enumerate(csv_reader):
                if "raw" in self.config.name:
                    label, k_label, en_label, url, title, content = row
                    yield id_, {
                        # Labels are stored 1-based in the CSV.
                        "label": self.class_labels[int(label) - 1],
                        self.label_columns[self.config.name]: k_label,
                        "en_label": en_label,
                        "url": url,
                        "title": title,
                        "content": content,
                    }
                else:
                    label, title, content = row
                    yield id_, {
                        "label": self.class_labels[int(label) - 1],
                        "title": title,
                        "content": content,
                    }
| 35.536424
| 111
| 0.574916
|
4a0f78e258e6f7b281178fbff591ed4f188dfb45
| 1,412
|
py
|
Python
|
lib/log_utils/time_utils.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 72
|
2021-12-01T01:30:05.000Z
|
2022-03-15T18:47:44.000Z
|
lib/log_utils/time_utils.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 1
|
2021-12-18T16:08:10.000Z
|
2021-12-22T11:28:03.000Z
|
lib/log_utils/time_utils.py
|
rainwangphy/AutoDL-Projects
|
1a40948255ac3c16ee529d94144a39bf26e89bfa
|
[
"MIT"
] | 12
|
2021-12-06T16:41:03.000Z
|
2022-02-17T09:40:57.000Z
|
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
#####################################################
import time, sys
import numpy as np
def time_for_file():
  """Return the current UTC time formatted for embedding in file names."""
  fmt = '%d-%h-at-%H-%M-%S'
  stamp = time.strftime(fmt, time.gmtime(time.time()))
  return '{:}'.format(stamp)
def time_string():
  """Return the current UTC time wrapped in brackets, e.g. '[2021-01-01 12:00:00]'."""
  stamp = time.strftime('%Y-%m-%d %X', time.gmtime(time.time()))
  return '[{:}]'.format(stamp)
def time_string_short():
  """Return the current UTC date compactly as YYYYMMDD."""
  return '{:}'.format(time.strftime('%Y%m%d', time.gmtime(time.time())))
def time_print(string, is_print=True):
  """Print *string* prefixed with a timestamp unless *is_print* is falsy."""
  if not is_print:
    return
  print('{} : {}'.format(time_string(), string))
def convert_secs2time(epoch_time, return_str=False):
  """Split a duration in seconds into hours, minutes and seconds.

  Returns the tuple (hours, minutes, seconds), or the formatted string
  '[HH:MM:SS]' when *return_str* is True.
  """
  hours = int(epoch_time / 3600)
  minutes = int((epoch_time - 3600 * hours) / 60)
  seconds = int(epoch_time - 3600 * hours - 60 * minutes)
  if return_str:
    return '[{:02d}:{:02d}:{:02d}]'.format(hours, minutes, seconds)
  else:
    return hours, minutes, seconds
def print_log(print_string, log):
  """Send *print_string* to *log*.

  If *log* has a .log method it is used directly; otherwise the string is
  printed to stdout and, when *log* is a file-like object, also written to
  it (with a newline) and flushed.
  """
  if hasattr(log, 'log'):
    log.log('{:}'.format(print_string))
    return
  print("{:}".format(print_string))
  if log is not None:
    log.write('{:}\n'.format(print_string))
    log.flush()
| 32.837209
| 83
| 0.614731
|
4a0f78e74d8d13e7b0eac3d897288142f76e50cc
| 25,624
|
py
|
Python
|
scripts/LocateTs.py
|
emartineznunez/AutoMeKin
|
bc4e25782ba051b6ccda058279cfe06c740fef99
|
[
"MIT"
] | 8
|
2019-07-06T17:47:35.000Z
|
2020-05-28T21:49:55.000Z
|
scripts/LocateTs.py
|
emartineznunez/AutoMeKin
|
bc4e25782ba051b6ccda058279cfe06c740fef99
|
[
"MIT"
] | 1
|
2020-10-15T20:42:45.000Z
|
2020-10-15T20:42:45.000Z
|
scripts/LocateTs.py
|
emartineznunez/AutoMeKin
|
bc4e25782ba051b6ccda058279cfe06c740fef99
|
[
"MIT"
] | 2
|
2019-08-24T18:58:04.000Z
|
2020-02-24T11:45:36.000Z
|
#!/usr/bin/env python3
import numpy as np
import networkx as nx
from ase.autoneb import AutoNEB
from ase.constraints import ExternalForce,FixAtoms,FixBondLengths
from ase.dimer import DimerControl, MinModeAtoms, MinModeTranslate
from ase.io import read, write
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.optimize import BFGS,FIRE
from ase.vibrations import Vibrations
#from ase.build import minimize_rotation_and_translation
from ase import units
from createMat import get_G_index
from mopacamk import MOPACamk
from os import system
from shutil import copyfile
from re import search
from sys import argv, exit
from Integrators import Langevin
#from xtb.ase.calculator import XTB
from AMK_parameters import emax, label, task, prog, method, charge, gto3d,cov_rad, brrng
from subprocess import run,PIPE
#from sella import Sella
import networkx.algorithms.isomorphism as iso
from cyclic_graph import cycle
def rotation_matrix_from_points(m0, m1):
    """Return the rigid rotation matrix minimizing the RMSD between two point sets.

    *m0* and *m1* are (3, npoints) arrays with coordinates as columns, both
    already centered on the origin.  The optimal rotation is found via the
    quaternion eigenvalue method of Melander et al., J. Chem. Theory
    Comput. 2015, 11, 1055.
    """
    a = np.copy(m0)
    b = np.copy(m1)
    # Correlation sums between the two coordinate sets: diagonal terms and
    # the two off-diagonal cyclic shifts.
    R11, R22, R33 = np.sum(a * b, axis=1)
    R12, R23, R31 = np.sum(a * np.roll(b, -1, axis=0), axis=1)
    R13, R21, R32 = np.sum(a * np.roll(b, -2, axis=0), axis=1)
    # Symmetric 4x4 key matrix whose top eigenvector is the optimal quaternion.
    F = np.array([
        [R11 + R22 + R33, R23 - R32, R31 - R13, R12 - R21],
        [R23 - R32, R11 - R22 - R33, R12 + R21, R13 + R31],
        [R31 - R13, R12 + R21, -R11 + R22 - R33, R23 + R32],
        [R12 - R21, R13 + R31, R23 + R32, -R11 - R22 + R33],
    ])
    eigvals, eigvecs = np.linalg.eigh(F)
    # Quaternion = eigenvector of the most positive eigenvalue.
    best_q = eigvecs[:, np.argmax(eigvals)]
    return quaternion_to_matrix(best_q)
def quaternion_to_matrix(q):
    """Return the 3x3 rotation matrix for a unit quaternion given as a (4,) array."""
    w, x, y, z = q
    row0 = [w**2 + x**2 - y**2 - z**2,
            2 * (x * y - w * z),
            2 * (x * z + w * y)]
    row1 = [2 * (x * y + w * z),
            w**2 - x**2 + y**2 - z**2,
            2 * (y * z - w * x)]
    row2 = [2 * (x * z - w * y),
            2 * (y * z + w * x),
            w**2 - x**2 - y**2 + z**2]
    return np.array([row0, row1, row2])
def minimize_rotation_and_translation(target, atoms, weight, wa):
    """Rigidly rotate/translate *atoms* to best match *target* (min RMSD).

    Variant of the ASE routine (Melander et al., J. Chem. Theory Comput.
    2015, 11, 1055) with an extra weighting: the atoms whose indices appear
    in *wa* are scaled by *weight* before the fit, biasing the alignment
    toward them, and unscaled afterwards.
    """
    moving = atoms.get_positions()
    ref = target.get_positions()
    # Shift both centroids to the origin.
    center_moving = np.mean(moving, axis=0)
    moving -= center_moving
    center_ref = np.mean(ref, axis=0)
    ref -= center_ref
    # Up-weight the selected atoms for the fit.
    for idx in wa:
        ref[idx] *= weight
        moving[idx] *= weight
    R = rotation_matrix_from_points(moving.T, ref.T)
    # Undo the weighting before applying the transform.
    for idx in wa:
        ref[idx] /= weight
        moving[idx] /= weight
    atoms.set_positions(np.dot(moving, R.T) + center_ref)
def distort(atomos, mol):
    """If the three atoms indexed by *atomos* are collinear, push the atom on
    the longest side off the line by 0.8 Angstrom along a unit vector
    perpendicular-ish to the chain, and return True.  Otherwise return False
    and leave *mol* untouched.
    """
    pos = mol.get_positions()
    v1 = pos[atomos[2]] - pos[atomos[1]]
    v2 = pos[atomos[0]] - pos[atomos[2]]
    v3 = pos[atomos[1]] - pos[atomos[0]]
    nv1 = np.linalg.norm(v1)
    nv2 = np.linalg.norm(v2)
    nv3 = np.linalg.norm(v3)
    # Collinear iff the unit vectors are (anti)parallel to 2 decimal places.
    if abs(round(np.dot(v1 / nv1, v2 / nv2), 2)) != 1.0:
        return False
    # The atom opposite the longest side is the middle one to displace.
    sides = [nv1, nv2, nv3]
    atd = atomos[sides.index(max(sides))]
    # Solve for a unit vector u = (ux, uy, 0.5) orthogonal to v1.
    a = (v1[1] / v1[0]) ** 2 + 1
    b = v1[2] * v1[1] / v1[0] ** 2
    c = (v1[2] / (2 * v1[0])) ** 2 - 3 / 4
    uy = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
    ux = np.sqrt(3 / 4 - uy * uy)
    uz = 0.5
    pos[atd] = pos[atd] + 0.8 * np.array([ux, uy, uz])
    mol.set_positions(pos)
    return True
def vib_calc(ts):
    """Run an ASE normal-mode analysis on *ts*.

    Returns (eigenv, freq): the list of mode displacement vectors and
    the matching array of vibrational frequencies.
    """
    vib = Vibrations(ts)
    vib.run()
    vib.summary()
    vib.clean()
    freq = vib.get_frequencies()
    modes = [vib.get_mode(i) for i in range(len(freq))]
    return modes, freq
def attach_calculators(images):
    """Attach a fresh low-level calculator to every NEB image.

    Uses the module-level ``prog``/``method``/``charge`` settings; only
    the MOPAC backend is currently enabled.
    """
    if prog == 'mopac':
        for image in images:
            image.calc = MOPACamk(method=method+' threads=1 charge='+charge, relscf=0.01)
def Energy_and_forces(geom):
    """Return (energy, forces) for *geom* from its attached calculator.

    Keeps the original behavior of aborting the whole run when the
    single-point evaluation fails, but reports the reason before
    exiting instead of dying silently (the previous bare ``except:``
    also swallowed KeyboardInterrupt).
    """
    try:
        ene = geom.get_potential_energy()
    except Exception as err:
        print('ERROR: energy evaluation failed:', err)
        exit()
    forces = geom.get_forces()
    return ene, forces
def runTrajectory(geom, T, Fric, totaltime, timeStep, breakl, forml):
    """Run a Langevin MD trajectory until the target connectivity appears.

    Propagates *geom* for up to totaltime/dt steps, writing every frame
    to traj.xyz, and monitors the bond graph: the run stops (returning
    the graph G) once all bonds in *breakl* are broken and all bonds in
    *forml* are formed for ``thresh_c`` consecutive steps.  Damping and
    constraint release are staged via ``thresh_d``/``thresh_r``.
    """
    global n_images
    ene,forces = Energy_and_forces(geom)
    mdInt = Langevin(units.kB * T, Fric, forces, geom.get_velocities(), geom, timeStep)
    ene,forces = Energy_and_forces(geom)
    # Looser thresholds for pure single-bond-breaking (1,0) reactions.
    if len(breakl) == 1 and len(forml) == 0:
        thresh_c = 40 ; thresh_d = 40 ; thresh_r = 40
    else:
        thresh_c = 6 ; thresh_d = 1 ; thresh_r = 3
    # Run MD trajectory for specified number of steps
    n_stop_criteria = 0; damped = False
    write("traj.xyz",geom.copy(),format="xyz")
    for i in range(0,int(totaltime/dt)):
        # Leapfrog-style update: positions, then velocities.
        ene,forces = Energy_and_forces(geom)
        mdInt.mdStepPos(forces,timeStep,geom)
        ene,forces = Energy_and_forces(geom)
        mdInt.mdStepVel(forces,timeStep,geom,damped)
        #Print current positions to file
        write("traj.xyz",geom.copy(),format="xyz",append=True)
        #check adjacency matrix: rebuild the bond graph and drop non-bonds.
        G,ind,jnd,ibn,jbn = get_G_index(geom,1,len(geom),False)
        for n in range(n_dist):
            if G[ind[n]][jnd[n]]['weight'] == 0: G.remove_edge(ind[n],jnd[n])
        # criteria counts how many required bond changes are still missing.
        criteria = 0
        for ele in breakl: criteria += G.has_edge(ele[0],ele[1])
        for ele in forml:
            criteria += not G.has_edge(ele[0],ele[1])
            # Once a forming pair gets very close, damp the dynamics and
            # release the pulling constraints.
            if geom.get_distance(ele[0],ele[1]) < 1:
                damped = True ; geom.set_constraint()
        if criteria == 0: n_stop_criteria += 1
        # Staged shutdown: converged -> return; else release constraints,
        # then start damping, as the product persists longer.
        if n_stop_criteria >= thresh_c: return G
        elif n_stop_criteria >= thresh_r: geom.set_constraint()
        elif n_stop_criteria == thresh_d: damped = True
    geom.set_constraint()
    # NOTE(review): if the loop body never runs (totaltime/dt < 1), G is
    # unbound here — confirm callers always pass totaltime >= dt.
    return G
def runPOpt(geom, breakl, forml):
    """Drive *geom* toward the product by staged partial optimizations.

    Up to 50 cycles: nudge the atoms of each breaking pair apart (and
    each forming pair together) by 0.05 A, then relax with BFGS while
    the nudged bond lengths are frozen.  Stops early once every target
    bond is broken (or formed) past its per-pair threshold; frames are
    appended to traj.xyz.
    """
    write("traj.xyz",geom.copy(),format="xyz")
    #constraints: per-pair stop thresholds.
    cons_list = []
    sum_radb = []
    sum_radf = []
    for ele in breakl:
        cons_list.append([ele[0],ele[1]])
        # Ring bonds are considered broken at 2 A; otherwise at 5x the
        # summed covalent radii.
        if cycle(geom,ele[0],ele[1]): sum_radb.append(2.0)
        else: sum_radb.append( (cov_rad[symb[ele[0]]] + cov_rad[symb[ele[1]]]) * 5 )
    for ele in forml:
        cons_list.append([ele[0],ele[1]])
        # A bond counts as formed at 1.1x the summed covalent radii.
        sum_radf.append( (cov_rad[symb[ele[0]]] + cov_rad[symb[ele[1]]]) * 1.1 )
    #constraints
    # Atoms of already-satisfied pairs are frozen out of further nudging.
    not_move = []
    if len(forml) == 0: n_of_t_b = len(breakl)
    else: n_of_t_b = len(forml)
    for i in range(0,50):
        geom.set_constraint()
        positions = geom.get_positions()
        #Create vectors and move atoms: pull breaking pairs apart ...
        for ele in breakl:
            v = geom.get_positions()[ele[1]] - geom.get_positions()[ele[0]]
            v = v / np.linalg.norm(v)
            if ele[1] not in not_move: positions[ele[1]] += 0.05 * v
            if ele[0] not in not_move: positions[ele[0]] += -0.05 * v
        # ... and push forming pairs together.
        for ele in forml:
            v = geom.get_positions()[ele[1]] - geom.get_positions()[ele[0]]
            v = v / np.linalg.norm(v)
            if ele[1] not in not_move: positions[ele[1]] += -0.05 * v
            if ele[0] not in not_move: positions[ele[0]] += 0.05 * v
        geom.set_positions(positions)
        # Freeze the manipulated bond lengths, then relax everything else.
        geom.set_constraint(FixBondLengths(cons_list))
        opt = BFGS(geom, logfile='bfgs.log')
        opt.run(fmax=0.5)
        #Print current positions to file
        write("traj.xyz",geom.copy(),format="xyz",append=True)
        ###Check if the product has been formed
        if len(forml) == 0:
            n_of_b_b = 0
            for i,ele in enumerate(breakl):
                if geom.get_distance(ele[0],ele[1]) > sum_radb[i]:
                    n_of_b_b += 1
                    not_move.extend([ele[0],ele[1]])
            if n_of_b_b == n_of_t_b:
                geom.set_constraint()
                print(n_of_b_b,'bonds have been broken. Stop here...')
                return
        else:
            n_of_f_b = 0
            for i,ele in enumerate(forml):
                if geom.get_distance(ele[0],ele[1]) < sum_radf[i]:
                    n_of_f_b += 1
                    not_move.extend([ele[0],ele[1]])
            if n_of_f_b == n_of_t_b:
                geom.set_constraint()
                print(n_of_f_b,'bonds have been formed. Stop here...')
                return
    geom.set_constraint()
    return
# CLI: input file, index of the path in ts_bonds.inp, whether to run the
# NEB stage, and the reference (reactant) energy in kcal/mol.
inputfile = str(argv[1]) ; line = int(argv[2]) ; run_neb = int(argv[3]); e0 = float(argv[4])
# Remove images left over from any previous AutoNEB run.
system('rm -rf image*.traj')
#Default parameters
# n_max: max NEB images; fmax/fmaxi: BFGS/NEB force criteria; temp/fric:
# MD thermostat; dt: MD time step (fs); ExtForce: pulling force; weight:
# extra alignment weight on non-reacting atoms; k_neb: spring constant;
# semax: True until an explicit MaxEn overrides the emax estimate.
n_max,prefix,fmax,fmaxi,temp,fric,totaltime,dt,ExtForce,weight,k_neb,semax = 15,'image',0.1,0.025,0.,0.5,100,1,6,100,2,True
#n_max,prefix,fmax,fmaxi,temp,fric,totaltime,dt,ExtForce,weight = 10,'image',0.1,0.1,0.,0.5,100,1,6,100
#Here we should read inputfile: keyword-driven, one setting per line.
for linei in open(inputfile,'r'):
    if search("LowLevel ", linei): prog = str(linei.split()[1])
    if search("LowLevel ", linei): method = ' '.join([str(elem) for elem in linei.split()[2:] ])
    if search("molecule ", linei): molecule = str(linei.split()[1])
    # emax (maximum allowed product energy) from the Energy keyword, or
    # estimated from the Temperature, unless MaxEn overrides it below.
    if search("Energy ", linei) and semax: emax = 1.5 * float(linei.split()[1])
    if search("Temperature ", linei) and semax:
        temperature = float(linei.split()[1])
        E = int(0.064 * temperature + 0.002 * temperature * np.log(temperature))
        emax = 1.5 * max(E,100)
    if search("MaxEn ", linei):
        emax = 1.5 * float(linei.split()[1])
        semax = False
    if search("ExtForce ", linei): ExtForce = float(linei.split()[1])
    if search("fmaxi ", linei): fmaxi = float(linei.split()[1])
    if search("charge ", linei): charge = str(linei.split()[1])
    if search("Graphto3D ", linei): gto3d = str(linei.split()[1])
    if search("BreakRing ", linei):
        brrng = str(linei.split()[1])
        if brrng == "yes": brrng = True
        else: brrng = False
    if search("tsdirll ", linei):
        path = str(linei.split()[1])
# If no tsdirll keyword was present, `path` is unbound and the print
# raises NameError; fall back to the default directory name.
try:
    print('Path to files:',path)
except:
    path = 'tsdirLL_'+molecule
    print('Path to files:',path)
#check inputfile
if gto3d != 'Traj' and gto3d != 'POpt':
    print('Graphto3D valid values: Traj POpt')
    exit()
#We now read ts_bonds.inp file: line `line` holds triples
# ("b"|"f", atom_i, atom_j) describing bonds to break or form.
tsbfile = open(path+'/ts_bonds.inp', "r")
lines = tsbfile.readlines()
cs = lines[line].split() ; constr = []; breakl = []; forml = []; atoms_rxn = []
tsbfile.close()
for i in range(0,len(cs),3):
    # Collect the unique atoms taking part in the reaction.
    if int(cs[i+1]) not in atoms_rxn: atoms_rxn.append(int(cs[i+1]))
    if int(cs[i+2]) not in atoms_rxn: atoms_rxn.append(int(cs[i+2]))
    if cs[i] == "b":
        # Breaking bond: pull the pair apart with +ExtForce.
        c = ExternalForce( int(cs[i+1]) , int(cs[i+2]) ,ExtForce)
        constr.append(c) ; breakl.append([ int(cs[i+1]) , int(cs[i+2]) ])
    elif cs[i] == "f":
        # Forming bond: push the pair together with -ExtForce.
        c = ExternalForce( int(cs[i+1]) , int(cs[i+2]) ,-ExtForce)
        constr.append(c) ; forml.append([ int(cs[i+1]) , int(cs[i+2]) ])
#Instantiate rmol
rmol = read(molecule+'.xyz')
#For (1,0) rxns, check the bond does not belong to a ring if brrng is False
if len(breakl) == 1 and len(forml) == 0 and not brrng:
    if cycle(rmol,breakl[0][0],breakl[0][1]):
        print('Bond',breakl[0][0]+1,'-',breakl[0][1]+1,'belongs to a ring:')
        print('Abort...')
        exit()
# n_dist: number of unique atom pairs (upper-triangle count).
n_dist = int( len(rmol) * (len(rmol) - 1) / 2)
natom = len(rmol)
aton = rmol.get_atomic_numbers()
symb = rmol.get_chemical_symbols()
if prog == 'mopac': rmol.calc = MOPACamk(method=method+' threads=1 charge='+charge,relscf=0.01)
#elif prog == 'XTB': rmol.calc = XTB(method=method)
#atoms_not_rxn: spectators get full weight in the rigid alignment later.
latoms = [item for item in range(natom)]
atoms_not_rxn = np.setdiff1d(latoms,atoms_rxn)
#Optimization of the molecule
print('Path:',line,cs)
print('')
print('Optimizing reactant...')
opt = BFGS(rmol, trajectory='image000.traj',logfile='bfgs000.log')
opt.run(fmax=fmax)
react = rmol.copy()
write("react.xyz",react)
#e0 = rmol.get_potential_energy()
#if prog == 'XTB':
#    reactfile = open(molecule+'_freq.out', 'w')
#    reactfile.write('Energy= '+str(e0)+'\n')
print('Reactant optimized')
##################G of the reactant
# Connectivity graph of the optimized reactant; zero-weight edges are
# non-bonds and are dropped.
Gr,ind,jnd,ibn,jbn = get_G_index(rmol,1,len(rmol),False)
for n in range(n_dist):
    if Gr[ind[n]][jnd[n]]['weight'] == 0: Gr.remove_edge(ind[n],jnd[n])
A = nx.adjacency_matrix(Gr) ; Ar = A.A
# Atomic numbers on the diagonal make the adjacency matrix element-aware.
for z in range(natom): Ar[z][z] = aton[z]
##################G of the expected product
Gp = Gr.copy()
for ele in breakl: Gp.remove_edge(ele[0],ele[1])
for ele in forml: Gp.add_edge(ele[0],ele[1])
A = nx.adjacency_matrix(Gp) ; Ap = A.A
for z in range(natom): Ap[z][z] = aton[z]
##################
# Spectral fingerprints: identical sorted eigenvalues mean reactant and
# product graphs are equivalent, so no extra alignment weight is needed.
tag_p = np.array(sorted( [np.round(elem,3) for elem in np.linalg.eigvals(Ap) ] ))
tag_r = np.array(sorted( [np.round(elem,3) for elem in np.linalg.eigvals(Ar) ] ))
if np.linalg.norm(tag_p - tag_r) == 0: weight = 1
n_images = 1
#For (1,0) rxns, the products are easily generated if brrng is False
if len(breakl) == 1 and len(forml) == 0 and not brrng:
    #put the products some distance appart: split the graph at the broken
    # bond and translate the two fragments +/-2.5 A along the bond axis.
    Goneone = Gr.copy()
    Goneone.remove_edge(breakl[0][0],breakl[0][1])
    frags = []
    for c in nx.connected_components(Goneone):
        pmol = rmol.copy()
        frags.append([atom.index for atom in pmol if atom.index in Goneone.subgraph(c).nodes()])
    positions = []
    v1 = rmol.get_positions()[breakl[0][1]] - rmol.get_positions()[breakl[0][0]]
    v1 = v1 / np.linalg.norm(v1)
    # Choose the sign so each fragment moves away from the other.
    if breakl[0][1] in frags[0]: signfrag0 = 1
    else: signfrag0 = -1
    for index in range(len(rmol)):
        if index in frags[0]: sign = signfrag0
        else: sign = - signfrag0
        positions.append(rmol.get_positions()[index] + sign * v1 * 2.5)
    rmol.set_positions(positions)
    write('prod_sep.xyz',rmol)
#For (1,0) rxns, if brrng is True, then runPOpt until the bond is 2 Angstroms long
elif len(breakl) == 1 and len(forml) == 0 and brrng:
    print('Running partial optimizations to transform Graph--> 3D geometry')
    runPOpt(rmol,breakl,forml)
    print('Partial optimizations finished')
else:
    #When three atoms involved in the forces are in a line--> distort the geometry and add one more image
    if len(atoms_rxn) == 3:
        if distort(atoms_rxn,rmol):
            print('Reactant geometry distorted')
            minimize_rotation_and_translation(react,rmol,weight,atoms_not_rxn)
            write('react_distorted.xyz',rmol)
            write('image00'+str(n_images)+'.traj',rmol)
            n_images += 1
    rmol.set_constraint()
    if gto3d == 'Traj':
        #For the dynamics we give all Hs a mass of 4.0 and apply contraints
        masses = []
        for x in aton:
            if x == 1: masses.append(4.0)
            else: masses.append(None)
        rmol.set_masses(masses=masses) ; rmol.set_constraint(constr)
        ###
        MaxwellBoltzmannDistribution(rmol, temperature_K = temp )
        ##Run a constrained short traj to reach the prod.
        print('Running dynamics with External Force to transform Graph--> 3D geometry')
        G = runTrajectory(rmol,temp,fric,totaltime,dt * units.fs ,breakl,forml)
        print('Dynamics with External Force finished')
    elif gto3d == 'POpt':
        print('Running partial optimizations to transform Graph--> 3D geometry')
        runPOpt(rmol,breakl,forml)
        print('Partial optimizations finished')
print('Optimizing product...')
minimize_rotation_and_translation(react,rmol,weight,atoms_not_rxn)
#For intermediates we first move the structure along the largest negative eigenvector
if nx.is_connected(Gp):
    eigenv, freq = vib_calc(rmol)
    #We first move the structure along the largest negative eigenvector direction
    positions = rmol.get_positions() + 0.01 * eigenv[0] / np.linalg.norm(eigenv[0])
    rmol.set_positions(positions)
#EMN: for (1,0) paths, pin the breaking pair and stiffen the NEB springs.
if len(breakl) == 1 and len(forml) == 0:
    c = FixAtoms(indices=[breakl[0][0],breakl[0][1]])
    rmol.set_constraint(c)
    k_neb = 20
opt = BFGS(rmol, trajectory='image00'+str(n_images)+'.traj',logfile='bfgs00'+str(n_images)+'.log')
if len(breakl) == 1 and len(forml) == 0: opt.run(fmax=0.5)
else: opt.run(fmax=fmax)
#EMN
prod = rmol.copy()
write("prod.xyz",prod)
print('Product optimized')
###Gx is the Graph coming out of the optimization
Gx,ind,jnd,ibn,jbn = get_G_index(rmol,1,len(rmol),False)
for n in range(n_dist):
    if Gx[ind[n]][jnd[n]]['weight'] == 0: Gx.remove_edge(ind[n],jnd[n])
A = nx.adjacency_matrix(Gx) ; Ax = A.A
for z in range(natom): Ax[z][z] = aton[z]
###Check for barrierless processes: optimized product relaxed back to
### the reactant connectivity.
Adiff = Ar - Ax
if np.linalg.norm(Adiff) == 0:
    if not nx.is_connected(Gp): print('Final and initial states are the same --> Barrierless process')
    else: print('Final and initial states are the same')
    print('Abort...')
    exit()
###Check that the product is the expected
Adiff = Ap - Ax
if np.linalg.norm(Adiff) != 0:
    print('It seems that the product is not the expected one')
    print('Abort...')
    exit()
###Check that the Gx is isomorphic with Gp: every requested bond change
### must actually be present in the optimized structure.
criteria = 0
for ele in breakl: criteria += Gx.has_edge(ele[0],ele[1])
for ele in forml: criteria += not Gx.has_edge(ele[0],ele[1])
if criteria > 0:
    print('Obtained product is not the expected --> The product could not be generated')
    print('Abort...')
    exit()
#Adiff = Ap - Ax
#if np.linalg.norm(Adiff) > 0:
#    print('Obtained product is not the expected --> The product could not be generated')
#    exit()
##################
#ep = rmol.get_potential_energy()
# Product energy in kcal/mol from the MOPAC heat of formation.
ep = rmol.calc.get_final_heat_of_formation() * units.mol / units.kcal
dE = ep - e0
print('{:s} {:10.4f} {:s}'.format('Product energy rel: ',dE,'kcal/mol'))
print('{:s} {:10.4f} {:s}'.format('Product energy abs: ',ep,'kcal/mol'))
#if nx.is_connected(G):
#dE = dE * units.mol / units.kcal
# Discard products that lie above the energy window.
if dE > emax:
    print('Product energy > emax:',dE,emax)
    print('Abort...')
    exit()
print('')
if not run_neb: exit()
#Run autoneb
#autoneb = AutoNEB(attach_calculators,
#                  prefix=prefix,
#                  optimizer='BFGS',
#                  n_simul=1,
#                  n_max=n_max,
#                  fmax=fmaxi,
#                  k=0.1,
#                  parallel=False,
#                  maxsteps=[50,1000])
# Interpolate/optimize the band between react (image000) and prod.
autoneb = AutoNEB(attach_calculators,
                  prefix=prefix,
                  optimizer='BFGS',
                  n_simul=1,
                  n_max=n_max,
                  climb=False,
                  fmax=fmaxi,
                  k=k_neb,
                  parallel=False,
                  maxsteps=100)
try:
    autoneb.run()
except Exception as e:
    print(e)
    print('ERROR in autoneb calculation')
    exit()
#Get max value along the NEB: the highest interior image becomes the
# TS initial guess (three working copies for the retries below).
pot_max = -np.Inf
print('')
print('# E(kcal/mol)')
for i in range(n_max):
    pot = autoneb.all_images[i].get_potential_energy()
    write('ts_'+str(i)+'.xyz',autoneb.all_images[i].copy())
    if pot > pot_max and pot !=0 and i != n_max-1:
        pot_max = pot; imax = i
        ts = autoneb.all_images[i].copy()
        tsint = autoneb.all_images[i].copy()
        tslet = autoneb.all_images[i].copy()
        write('ts_inp.xyz',ts)
    print('{:1.0f} {:16.2f}'.format(i, pot))
print('selected image',imax)
if imax == n_max-2:
    print('The highest energy point corresponds to products')
    exit()
#TS initial guess is the maximum along the NEB
#TS optimization: try MOPAC "ts" in XYZ coordinates first; on failure
# retry in internal coordinates, and finally with the "let" keyword.
if prog == 'mopac':
    # Use mopac TS optimizer
    print("Trying opt in XYZ coordinates")
    ts.calc = MOPACamk(method=method+' threads=1 charge='+charge,relscf = 0.01,label = 'ts',task = 'ts precise cycles=1000 t=500 ddmax=0.1 denout',freq=True)
    try:
        print('{:s} {:10.4f}'.format('TS optimized energy:',ts.get_potential_energy()))
        print('Lowest vibrational frequencies:',[float(x) for x in ts.calc.get_freqs()])
        p = run("check_ts_structure.sh > ts.log",shell=True)
        print(p)
    except Exception as e:
        # Keep the failed output for inspection, then diagnose ts.out.
        p0 = run("cp ts.out ts_xyz.out",shell=True)
        print('ERROR in MOPAC "ts" calculation in XYZ coordinates:',e)
        ts_int = False; ts_let0 = False ; ts_let1 = False
        for linei in open('ts.out','r'):
            if search("Too many variables", linei): ts_int = True
        if ts_int:
            print("Trying now opt in internal coordinates")
            tsint.calc = MOPACamk(method=method+' threads=1 charge='+charge,relscf = 0.01,label = 'ts',task = 'ts int precise cycles=1000 t=500 ddmax=0.1 denout',freq=True)
            try:
                print('{:s} {:10.4f}'.format('TS optimized energy:',tsint.get_potential_energy()))
                print('Lowest vibrational frequencies:',[float(x) for x in tsint.calc.get_freqs()])
                p = run("check_ts_structure.sh > ts.log",shell=True)
                print(p)
                exit()
            except Exception as e:
                p0 = run("cp ts.out ts_int.out",shell=True)
                print('ERROR in MOPAC "ts int" calculation:',e)
        for linei in open('ts.out','r'):
            if search("NUMERICAL PROBLEMS IN BRACKETING LAMDA", linei): ts_let0 = True
            if search("Error", linei): ts_let1 = True
        if ts_let0 and ts_let1:
            print("Trying now opt with let")
            if ts_int:
                tslet.calc = MOPACamk(method=method+' threads=1 charge='+charge,relscf = 0.01,label = 'ts',task = 'ts let int precise cycles=1000 t=500 ddmax=0.1 denout',freq=True)
            else:
                tslet.calc = MOPACamk(method=method+' threads=1 charge='+charge,relscf = 0.01,label = 'ts',task = 'ts let precise cycles=1000 t=500 ddmax=0.1 denout',freq=True)
            try:
                print('{:s} {:10.4f}'.format('TS optimized energy:',tslet.get_potential_energy()))
                print('Lowest vibrational frequencies:',[float(x) for x in tslet.calc.get_freqs()])
                p = run("check_ts_structure.sh > ts.log",shell=True)
                print(p)
                exit()
            except Exception as e:
                p0 = run("cp ts.out ts_let.out",shell=True)
                print('ERROR in MOPAC "ts let" calculation:',e)
###############################
#elif prog == 'XTB':
#Dimer method for XTB (no internal optimizer)
#vib calc. to get the lowest frequency mode
# ts.calc = XTB(method=method)
# eigenv, freq = vib_calc(ts)
# lfm0 = eigenv[0] ; lfm1 = eigenv[1]
# print(lfm0)
# print(lfm1)
# print(freq)
# #We first move the ts structure in the second negative eigenvector direction to avoid second order saddles
# positions = ts.get_positions() + 0.01 * lfm1 / np.linalg.norm(lfm1)
# ts.set_positions(positions)
#
# #set up the dimer calc
# d_control = DimerControl(initial_eigenmode_method = 'displacement', \
# displacement_method = 'vector', logfile = None, mask=[True]*len(rmol))
#
# d_atoms = MinModeAtoms(ts, d_control)
#
# displacement_vector = 0.1 * lfm0 / np.linalg.norm(lfm0)
# d_atoms.displace(displacement_vector = displacement_vector)
#
# dim_rlx=MinModeTranslate(d_atoms, trajectory='dimer_method_traj', logfile=None)
# try:
# dim_rlx.run(fmax=0.001,steps=1000)
# except Exception as e:
# print('ERROR in dimer calculation')
# exit()
#
# try:
# eigenv,freq = vib_calc(ts)
# ets = ts.get_potential_energy()
# print('TS optimized energy :',ets)
# tsfile = open('ts.out', 'w')
# moldenfile = open('ts.molden', 'w')
# moldenfile.write('[Molden Format]'+'\n\n')
# tsfile.write('Energy= '+str(ets)+'\n')
# tsfile.write('Freq:'+'\n')
# moldenfile.write('[FREQ]'+'\n')
# for i,x in enumerate(freq):
# if i == 0: tsfile.write(str(-x.imag)+'\n')
# elif i >6: tsfile.write(str(x.real)+'\n')
# if i == 0: moldenfile.write(str(-x.imag)+'\n')
# elif i >6: moldenfile.write(str(x.real)+'\n')
# tsfile.write('Gibbs free energy: [0.]'+'\n')
# tsfile.write('ZPE: [0.]'+'\n')
# tsfile.write(str(natom)+'\nFinal structure:'+'\n')
# moldenfile.write('\n[FR-COORD]'+'\n')
# posit = ts.get_positions()
# for i,ele in enumerate(symb):
# tsfile.write(str(ele)+' '+str(posit[i][0])+' '+str(posit[i][1])+' '+str(posit[i][2])+'\n')
# moldenfile.write(str(ele)+' '+str(posit[i][0]/units.Bohr)+' '+str(posit[i][1]/units.Bohr)+' '+str(posit[i][2]/units.Bohr)+'\n')
## moldenfile.write('\n\n[FR-NORM-COORD]'+'\n')
# for i in range(len(freq)-6):
# moldenfile.write('vibration '+str(i+1)+'\n')
# if i == 0: ifreq = i
# else: ifreq = i + 6
# for j in range(natom):
# moldenfile.write(str(eigenv[ifreq][j][0]/units.Bohr)+' '+str(eigenv[ifreq][j][1]/units.Bohr)+' '+str(eigenv[ifreq][j][2]/units.Bohr)+'\n')
# tsfile.close()
# moldenfile.close()
# write('ts_opt.xyz',ts)
# p = run("check_ts_structure.sh > ts.log",shell=True)
# except Exception as e:
# print('ERROR in the calculation')
# exit()
#
| 38.416792
| 180
| 0.601311
|
4a0f79057964c6355b3e21a58ae403eecdf9dbe9
| 415
|
py
|
Python
|
todo_app/migrations/0003_myuser_full_name.py
|
Nigar-mr/Labrin_Todo
|
565f8b687ab938f528ddd7b344efea0022af76d8
|
[
"MIT"
] | null | null | null |
todo_app/migrations/0003_myuser_full_name.py
|
Nigar-mr/Labrin_Todo
|
565f8b687ab938f528ddd7b344efea0022af76d8
|
[
"MIT"
] | 8
|
2021-03-19T01:28:54.000Z
|
2022-03-11T23:59:07.000Z
|
todo_app/migrations/0003_myuser_full_name.py
|
Nigar-mr/Todo_App
|
565f8b687ab938f528ddd7b344efea0022af76d8
|
[
"MIT"
] | 1
|
2019-09-14T16:22:57.000Z
|
2019-09-14T16:22:57.000Z
|
# Generated by Django 2.2.5 on 2019-09-05 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``full_name`` CharField to the ``MyUser`` model."""

    # Applies on top of the migration that introduced MyUser.
    dependencies = [
        ('todo_app', '0002_myuser'),
    ]

    operations = [
        migrations.AddField(
            model_name='myuser',
            name='full_name',
            # blank=True makes the field optional in forms; existing rows
            # are backfilled with the empty string.
            field=models.CharField(blank=True, max_length=150, verbose_name='full name'),
        ),
    ]
| 21.842105
| 89
| 0.604819
|
4a0f79270151ad37ba0c00bfbf785ce51e5aa9c9
| 8,356
|
py
|
Python
|
Tests/Methods/Slot/test_SlotW21_meth.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Slot/test_SlotW21_meth.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | null | null | null |
Tests/Methods/Slot/test_SlotW21_meth.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from unittest import TestCase
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
from pyleecan.Classes.SlotW21 import SlotW21
from pyleecan.Classes.LamSlot import LamSlot
from numpy import ndarray, pi, arcsin, exp
from ddt import ddt, data
from pyleecan.Methods.Slot.Slot.comp_height import comp_height
from pyleecan.Methods.Slot.Slot.comp_surface import comp_surface
from pyleecan.Methods.Slot.Slot.comp_angle_opening import comp_angle_opening
from pyleecan.Methods.Slot.SlotWind.comp_surface_wind import comp_surface_wind
# For AlmostEqual: relative tolerance used by every assertion below.
DELTA = 1e-4

# Each entry pairs a lamination/slot configuration with its expected
# surface (S_exp), winding surface (SW_exp), height (H_exp) and average
# winding angle (Aw).
slotW21_test = list()

# Internal Slot
lam = LamSlot(is_internal=True, Rext=0.1)
lam.slot = SlotW21(
    Zs=36, H0=3e-3, H1=0, H1_is_rad=False, H2=20e-3, W0=3e-3, W1=13e-3, W2=10e-3
)
slotW21_test.append(
    {
        "test_obj": lam,
        "S_exp": 2.390225015189331e-4,
        "Aw": 0.132201,
        "SW_exp": 2.3e-4,
        "H_exp": 2.3011250632883697e-2,
    }
)

# External Slot
lam = LamSlot(is_internal=False, Rint=0.1)
lam.slot = SlotW21(
    Zs=36, H0=3e-3, H1=0, H1_is_rad=False, H2=20e-3, W0=3e-3, W1=13e-3, W2=10e-3
)
slotW21_test.append(
    {
        "test_obj": lam,
        "S_exp": 2.3897749848106692e-4,
        "Aw": 0.10168861,
        "SW_exp": 2.3e-4,
        "H_exp": 2.30903427198e-2,
    }
)

# Rad H1: same external slot but H1 given as an angle (radians).
lam = LamSlot(is_internal=False, Rint=0.1)
lam.slot = SlotW21(
    Zs=36, H0=3e-3, H1=pi / 4, H1_is_rad=True, H2=20e-3, W0=3e-3, W1=13e-3, W2=10e-3
)
slotW21_test.append(
    {
        "test_obj": lam,
        "S_exp": 2.7897749848106692e-4,
        "Aw": 0.097386,
        "SW_exp": 2.3e-4,
        "H_exp": 2.8086e-2,
    }
)
@ddt
class test_SlotW21_meth(TestCase):
    """unittest for SlotW21 methods (parameterized over slotW21_test)"""

    @data(*slotW21_test)
    def test_comp_surface(self, test_dict):
        """Check that the computation of the surface is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_surface()

        a = result
        b = test_dict["S_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

        # Check that the analytical method returns the same result as the numerical one
        b = comp_surface(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

    @data(*slotW21_test)
    def test_comp_surface_wind(self, test_dict):
        """Check that the computation of the winding surface is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_surface_wind()

        a = result
        b = test_dict["SW_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

        # Check that the analytical method returns the same result as the numerical one
        b = comp_surface_wind(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

    @data(*slotW21_test)
    def test_comp_height(self, test_dict):
        """Check that the computation of the height is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_height()

        a = result
        b = test_dict["H_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

        # Check that the analytical method returns the same result as the numerical one
        b = comp_height(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

    @data(*slotW21_test)
    def test_comp_angle_opening(self, test_dict):
        """Check that the computation of the average opening angle is correct"""
        test_obj = test_dict["test_obj"]
        a = test_obj.slot.comp_angle_opening()
        # Analytical opening angle from the chord W0 on a bore of radius 0.1.
        self.assertEqual(a, 2 * arcsin(test_obj.slot.W0 / (2 * 0.1)))
        # Check that the analytical method returns the same result as the numerical one
        b = comp_angle_opening(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

    @data(*slotW21_test)
    def test_comp_angle_wind_eq(self, test_dict):
        """Check that the computation of the average angle is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_angle_wind_eq()

        a = result
        b = test_dict["Aw"]
        msg = "Return " + str(a) + " expected " + str(b)
        self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)

    def test_build_geometry(self):
        """check that curve_list is correct"""
        test_obj = SlotW21(
            W0=0.2, H0=0.1, W1=0.4, H1=0.1, H1_is_rad=False, H2=0.1, W2=0.6
        )
        # Attaching the slot to a lamination sets its parent (bore Rbo=1).
        lam = LamSlot(is_internal=False, slot=test_obj, Rint=1)

        # Rbo=1
        Z1 = exp(1j * float(arcsin(0.1)))
        Z2 = Z1 + 0.1
        Z3 = Z1 + 0.1j + 0.2
        Z4 = Z1 + 0.2j + 0.3
        Z5 = Z1 - 0.4j + 0.3
        Z6 = Z1 - 0.3j + 0.2
        Z7 = Z1 - 0.2j + 0.1
        Z8 = Z1 - 0.2j
        # Reverse the labels so Z1..Z8 follow the slot contour order.
        [Z8, Z7, Z6, Z5, Z4, Z3, Z2, Z1] = [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8]

        # Creation of curve
        curve_list = list()
        curve_list.append(Segment(Z1, Z2))
        curve_list.append(Segment(Z2, Z3))
        curve_list.append(Segment(Z3, Z4))
        curve_list.append(Segment(Z4, Z5))
        curve_list.append(Segment(Z5, Z6))
        curve_list.append(Segment(Z6, Z7))
        curve_list.append(Segment(Z7, Z8))

        result = test_obj.build_geometry()
        self.assertEqual(len(result), len(curve_list))
        for i in range(0, len(result)):
            a = result[i].begin
            b = curve_list[i].begin
            self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)

            a = result[i].end
            b = curve_list[i].end
            self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)

    def test_build_geometry_wind(self):
        """Check if the build geometry of the winding works correctly"""
        test_obj = SlotW21(
            W0=0.2, H0=0.1, W1=0.4, H1=0.1, H1_is_rad=False, H2=0.1, W2=0.6
        )
        lam = LamSlot(is_internal=False, slot=test_obj, Rint=1)

        # Rbo=1
        Z1 = exp(1j * float(arcsin(0.1)))
        Z2 = Z1 + 0.1
        Z3 = Z1 + 0.1j + 0.2
        Z4 = Z1 + 0.2j + 0.3
        Z5 = Z1 - 0.4j + 0.3
        Z6 = Z1 - 0.3j + 0.2
        Z7 = Z1 - 0.2j + 0.1
        Z8 = Z1 - 0.2j
        [Z8, Z7, Z6, Z5, Z4, Z3, Z2, Z1] = [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8]
        # Tangential split points of the winding area (1 radial x 2 tang.).
        Ztan1 = (Z3 + Z6) / 2
        Ztan2 = Ztan1 + 0.1
        expected = list()

        # part(0, 0)
        curve_list = list()
        curve_list.append(Segment(Z3, Ztan1))
        curve_list.append(Segment(Ztan1, Ztan2))
        curve_list.append(Segment(Ztan2, Z4))
        curve_list.append(Segment(Z4, Z3))
        point_ref = (Z3 + Ztan1 + Ztan2 + Z4) / 4
        surface = SurfLine(
            line_list=curve_list, point_ref=point_ref, label="WindS_R0_T0_S0"
        )
        expected.append(surface)

        # part(0, 1)
        curve_list = list()
        curve_list.append(Segment(Ztan1, Z6))
        curve_list.append(Segment(Z6, Z5))
        curve_list.append(Segment(Z5, Ztan2))
        curve_list.append(Segment(Ztan2, Ztan1))
        point_ref = (Z5 + Ztan1 + Ztan2 + Z6) / 4
        surface = SurfLine(
            line_list=curve_list, point_ref=point_ref, label="WindS_R0_T1_S0"
        )
        expected.append(surface)

        result = test_obj.build_geometry_wind(Nrad=1, Ntan=2)
        self.assertEqual(len(result), len(expected))
        for i in range(0, len(result)):
            self.assertEqual(len(result[i].line_list), len(expected[i].line_list))
            for jj in range(len(result[i].line_list)):
                a = result[i].line_list[jj].begin
                b = expected[i].line_list[jj].begin
                self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)

                a = result[i].line_list[jj].end
                b = expected[i].line_list[jj].end
                self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
            self.assertTrue(result[i].label == expected[i].label)
| 34.528926
| 87
| 0.58832
|
4a0f7b02ea8eeafed47f8c26dcebaa60033d544e
| 344
|
py
|
Python
|
src/modules/dummy_iterator.py
|
jonassoleil/swag
|
dd480e52ae6f7cf7eabd8cef6180ee495f42c034
|
[
"MIT"
] | null | null | null |
src/modules/dummy_iterator.py
|
jonassoleil/swag
|
dd480e52ae6f7cf7eabd8cef6180ee495f42c034
|
[
"MIT"
] | 1
|
2021-03-17T22:10:15.000Z
|
2021-03-17T22:10:15.000Z
|
src/modules/dummy_iterator.py
|
jonassoleil/swag
|
dd480e52ae6f7cf7eabd8cef6180ee495f42c034
|
[
"MIT"
] | 1
|
2021-03-17T16:59:47.000Z
|
2021-03-17T16:59:47.000Z
|
from src.modules.base_model_iterator import BaseModelIterator
class DummyIterator(BaseModelIterator):
    """Degenerate iterator that yields the wrapped model exactly once.

    Useful as a no-op stand-in wherever a BaseModelIterator is expected.
    """
    def __init__(self, model):
        super().__init__()
        # length=1: the base-class iteration protocol will request
        # exactly one model from this iterator.
        self.length = 1
        self.model = model
    def get_next_model(self):
        # Always hand back the same, unmodified model instance.
        return self.model
| 24.571429
| 61
| 0.671512
|
4a0f7b2ddda85a12555114980383e3b80e4c0f2e
| 2,736
|
py
|
Python
|
test/torchtest.py
|
edisga/scalene
|
d5c190a4a205071199398948e04edbfd07ca4071
|
[
"Apache-2.0"
] | 3,952
|
2019-12-18T00:37:34.000Z
|
2022-03-31T09:59:03.000Z
|
test/torchtest.py
|
edisga/scalene
|
d5c190a4a205071199398948e04edbfd07ca4071
|
[
"Apache-2.0"
] | 171
|
2021-03-05T14:37:30.000Z
|
2022-03-30T15:15:38.000Z
|
test/torchtest.py
|
edisga/scalene
|
d5c190a4a205071199398948e04edbfd07ca4071
|
[
"Apache-2.0"
] | 148
|
2020-01-09T18:36:53.000Z
|
2022-02-28T03:22:52.000Z
|
import torch
import math
def torchtest(n=5000000, steps=2000):
    """Fit a third-order polynomial to sin(x) by manual gradient descent.

    Demonstrates torch autograd: models y = a + b*x + c*x^2 + d*x^3 and
    updates the coefficients with plain SGD.

    Parameters
    ----------
    n : int
        Number of sample points on [-pi, pi] (default keeps the
        original workload).
    steps : int
        Number of gradient-descent iterations.

    Returns
    -------
    tuple of float
        The fitted coefficients (a, b, c, d).
    """
    dtype = torch.float
    # Fall back to the CPU when no GPU is present (the original
    # hard-coded cuda:0 and crashed on CPU-only machines).
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Inputs and targets; requires_grad defaults to False for these.
    # (The original also allocated an identical unused tensor `q`,
    # wasting ~20 MB — removed.)
    x = torch.linspace(-math.pi, math.pi, n, device=device, dtype=dtype)
    y = torch.sin(x)

    # Polynomial coefficients, tracked by autograd.
    a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
    d = torch.randn((), device=device, dtype=dtype, requires_grad=True)

    learning_rate = 1e-6
    for t in range(steps):
        # Forward pass: predicted y from the current coefficients.
        y_pred = a + b * x + c * x ** 2 + d * x ** 3

        # NOTE: this is the plain (signed) residual sum, not the usual
        # squared loss — kept to preserve the original behavior.
        loss = (y_pred - y).sum()
        if t % 100 == 99:
            print(t, loss.item())

        # Backward pass populates .grad on a, b, c, d.
        loss.backward()

        # Manual SGD step; no_grad() so the updates are not traced.
        with torch.no_grad():
            a -= learning_rate * a.grad
            b -= learning_rate * b.grad
            c -= learning_rate * c.grad
            d -= learning_rate * d.grad

            # Zero the gradients before the next iteration.
            a.grad = None
            b.grad = None
            c.grad = None
            d.grad = None

    print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
    return a.item(), b.item(), c.item(), d.item()
torchtest()  # run the demo with the full-size default workload
| 42.75
| 85
| 0.622807
|
4a0f7bd003bd35df10739ae4a6d8d29b4a654e27
| 1,638
|
py
|
Python
|
python/30 days of code/day27.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | 1
|
2020-10-06T01:20:07.000Z
|
2020-10-06T01:20:07.000Z
|
python/30 days of code/day27.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
python/30 days of code/day27.py
|
angelopassaro/Hacktoberfest-1
|
21f90f5d49efba9b1a27f4d9b923f5017ab43f0e
|
[
"Apache-2.0"
] | null | null | null |
def minimum_index(seq):
    """Return the index of the smallest element of *seq*.

    Ties resolve to the first occurrence; an empty sequence raises
    ValueError.
    """
    if not seq:
        raise ValueError("Cannot get the minimum value index from an empty sequence")
    # min() with an index key returns the first position of the minimum.
    return min(range(len(seq)), key=lambda i: seq[i])
class TestDataEmptyArray(object):
    """Fixture for the degenerate empty-input case."""

    @staticmethod
    def get_array():
        # No elements at all.
        return list()
class TestDataUniqueValues(object):
    """Fixture with all-distinct values and a unique minimum."""

    @staticmethod
    def get_array():
        return [2, 1, 3, 4]

    @staticmethod
    def get_expected_result():
        # Index of the value 1 in the array above.
        return 1
class TestDataExactlyTwoDifferentMinimums(object):
    """Fixture containing exactly two tied minima."""

    @staticmethod
    def get_array():
        return [3, 1, 1]

    @staticmethod
    def get_expected_result():
        # Index of the FIRST of the two tied minima.
        return 1
def TestWithEmptyArray():
    """minimum_index() must raise ValueError on an empty sequence.

    The original bound the exception to an unused name and failed with a
    bare, message-less ``assert False``; both are cleaned up here.
    """
    seq = TestDataEmptyArray.get_array()
    try:
        minimum_index(seq)
    except ValueError:
        pass  # expected
    else:
        assert False, "minimum_index() accepted an empty sequence"
def TestWithUniqueValues():
    """minimum_index() must locate the unique smallest element."""
    seq = TestDataUniqueValues.get_array()
    # Sanity-check the fixture itself: at least two elements, all distinct.
    assert len(seq) >= 2
    assert len(set(seq)) == len(seq)
    assert minimum_index(seq) == TestDataUniqueValues.get_expected_result()
def TestiWithExactyTwoDifferentMinimums():
    """minimum_index must return the first index when the minimum repeats."""
    data = TestDataExactlyTwoDifferentMinimums.get_array()
    assert len(data) >= 2
    # Sanity-check the fixture: exactly two copies of the smallest value.
    ordered = sorted(data)
    assert ordered[0] == ordered[1]
    assert len(ordered) == 2 or ordered[1] < ordered[2]
    assert minimum_index(data) == TestDataExactlyTwoDifferentMinimums.get_expected_result()
# Run the three checks at import time; any failure raises AssertionError
# before "OK" is printed.
TestWithEmptyArray()
TestWithUniqueValues()
TestiWithExactyTwoDifferentMinimums()
print("OK")
| 20.734177
| 85
| 0.666056
|
4a0f7c0e558f5289838a6baebb8d36770b857573
| 9,527
|
py
|
Python
|
src/python/pants/backend/jvm/subsystems/jvm_platform.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:24.000Z
|
2021-11-11T14:04:24.000Z
|
src/python/pants/backend/jvm/subsystems/jvm_platform.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/jvm/subsystems/jvm_platform.py
|
lahosken/pants
|
1b0340987c9b2eab9411416803c75b80736716e4
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:12.000Z
|
2021-11-11T14:04:12.000Z
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.java.distribution.distribution import DistributionLocator
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_method, memoized_property
logger = logging.getLogger(__name__)
class JvmPlatform(Subsystem):
  """Used to keep track of repo compile settings."""
  # NB(gmalmquist): These assume a java version number N can be specified as either 'N' or '1.N'
  # (eg, '7' is equivalent to '1.7'). New versions should only be added to this list
  # if they follow this convention. If this convention is ever not followed for future
  # java releases, they can simply be omitted from this list and they will be parsed
  # strictly (eg, if Java 10 != 1.10, simply leave it out).
  SUPPORTED_CONVERSION_VERSIONS = (6, 7, 8,)
  class IllegalDefaultPlatform(TaskError):
    """The --default-platform option was set, but isn't defined in --platforms."""
  class UndefinedJvmPlatform(TaskError):
    """Platform isn't defined."""
    def __init__(self, target, platform_name, platforms_by_name):
      """Build a detailed message listing the known platforms, if any."""
      scope_name = JvmPlatform.options_scope
      messages = ['Undefined jvm platform "{}" (referenced by {}).'
                  .format(platform_name, target.address.spec if target else 'unknown target')]
      if not platforms_by_name:
        messages.append('In fact, no platforms are defined under {0}. These should typically be'
                        ' specified in [{0}] in pants.ini.'.format(scope_name))
      else:
        messages.append('Perhaps you meant one of:{}'.format(
          ''.join('\n  {}'.format(name) for name in sorted(platforms_by_name.keys()))
        ))
        messages.append('\nThese are typically defined under [{}] in pants.ini.'
                        .format(scope_name))
      super(JvmPlatform.UndefinedJvmPlatform, self).__init__(' '.join(messages))
  options_scope = 'jvm-platform'
  @classmethod
  def register_options(cls, register):
    """Register the --platforms map and the --default-platform name (both fingerprinted)."""
    super(JvmPlatform, cls).register_options(register)
    register('--platforms', advanced=True, type=dict, default={}, fingerprint=True,
             help='Compile settings that can be referred to by name in jvm_targets.')
    register('--default-platform', advanced=True, type=str, default=None, fingerprint=True,
             help='Name of the default platform to use if none are specified.')
  @classmethod
  def subsystem_dependencies(cls):
    return super(JvmPlatform, cls).subsystem_dependencies() + (DistributionLocator,)
  def _parse_platform(self, name, platform):
    # Either of 'source'/'target' may be omitted in the config; the one that
    # is present stands in for both.
    return JvmPlatformSettings(platform.get('source', platform.get('target')),
                               platform.get('target', platform.get('source')),
                               platform.get('args', ()),
                               name=name)
  @classmethod
  def preferred_jvm_distribution(cls, platforms, strict=False):
    """Returns a jvm Distribution with a version that should work for all the platforms.
    Any one of those distributions whose version is >= all requested platforms' versions
    can be returned unless strict flag is set.
    :param iterable platforms: An iterable of platform settings.
    :param bool strict: If true, only distribution whose version matches the minimum
      required version can be returned, i.e, the max target_level of all the requested
      platforms.
    :returns: Distribution one of the selected distributions.
    """
    if not platforms:
      return DistributionLocator.cached()
    min_version = max(platform.target_level for platform in platforms)
    # In strict mode only allow min_version.x distributions; 9999 acts as an
    # open upper bound on the next version component.
    max_version = Revision(*(min_version.components + [9999])) if strict else None
    return DistributionLocator.cached(minimum_version=min_version, maximum_version=max_version)
  @memoized_property
  def platforms_by_name(self):
    # Parsed lazily and memoized; the raw dict comes from the --platforms option.
    platforms = self.get_options().platforms or {}
    return {name: self._parse_platform(name, platform) for name, platform in platforms.items()}
  @property
  def _fallback_platform(self):
    # Used when --default-platform is unset: mirror the locally located
    # distribution's version for both source and target levels.
    logger.warn('No default jvm platform is defined.')
    source_level = JvmPlatform.parse_java_version(DistributionLocator.cached().version)
    target_level = source_level
    platform_name = '(DistributionLocator.cached().version {})'.format(source_level)
    return JvmPlatformSettings(source_level, target_level, [], name=platform_name)
  @memoized_property
  def default_platform(self):
    name = self.get_options().default_platform
    if not name:
      return self._fallback_platform
    platforms_by_name = self.platforms_by_name
    if name not in platforms_by_name:
      raise self.IllegalDefaultPlatform(
          "The default platform was set to '{0}', but no platform by that name has been "
          "defined. Typically, this should be defined under [{1}] in pants.ini."
          .format(name, self.options_scope)
      )
    # JvmPlatformSettings.__iter__ yields (source, target, args); re-wrap the
    # stored settings so the returned object is tagged by_default=True.
    return JvmPlatformSettings(*platforms_by_name[name], name=name, by_default=True)
  @memoized_method
  def get_platform_by_name(self, name, for_target=None):
    """Finds the platform with the given name.
    If the name is empty or None, returns the default platform.
    If not platform with the given name is defined, raises an error.
    :param str name: name of the platform.
    :param JvmTarget for_target: optionally specified target we're looking up the platform for.
      Only used in error message generation.
    :return: The jvm platform object.
    :rtype: JvmPlatformSettings
    """
    if not name:
      return self.default_platform
    if name not in self.platforms_by_name:
      raise self.UndefinedJvmPlatform(for_target, name, self.platforms_by_name)
    return self.platforms_by_name[name]
  def get_platform_for_target(self, target):
    """Find the platform associated with this target.
    :param JvmTarget target: target to query.
    :return: The jvm platform object.
    :rtype: JvmPlatformSettings
    """
    if not target.payload.platform and target.is_synthetic:
      # Synthetic targets without an explicit platform inherit the platform
      # of the target they were derived from, when it has one.
      derived_from = target.derived_from
      platform = derived_from and getattr(derived_from, 'platform', None)
      if platform:
        return platform
    return self.get_platform_by_name(target.payload.platform, target)
  @classmethod
  def parse_java_version(cls, version):
    """Parses the java version (given a string or Revision object).
    Handles java version-isms, converting things like '7' -> '1.7' appropriately.
    Truncates input versions down to just the major and minor numbers (eg, 1.6), ignoring extra
    versioning information after the second number.
    :param version: the input version, given as a string or Revision object.
    :return: the parsed and cleaned version, suitable as a javac -source or -target argument.
    :rtype: Revision
    """
    conversion = {str(i): '1.{}'.format(i) for i in cls.SUPPORTED_CONVERSION_VERSIONS}
    if str(version) in conversion:
      return Revision.lenient(conversion[str(version)])
    if not hasattr(version, 'components'):
      version = Revision.lenient(version)
    if len(version.components) <= 2:
      return version
    # Drop anything past major.minor (eg, 1.8.0_25 -> 1.8).
    return Revision(*version.components[:2])
class JvmPlatformSettings(object):
  """Simple information holder to keep track of common arguments to java compilers."""
  class IllegalSourceTargetCombination(TaskError):
    """Illegal pair of -source and -target flags to compile java."""
  def __init__(self, source_level, target_level, args, name=None, by_default=False):
    """
    :param source_level: Revision object or string for the java source level.
    :param target_level: Revision object or string for the java target level.
    :param list args: Additional arguments to pass to the java compiler.
    :param str name: name to identify this platform.
    :param by_default: True if this value was inferred by omission of a specific platform setting.
    """
    self.source_level = JvmPlatform.parse_java_version(source_level)
    self.target_level = JvmPlatform.parse_java_version(target_level)
    self.args = tuple(args or ())
    self.name = name
    self._by_default = by_default
    self._validate_source_target()
  def _validate_source_target(self):
    # A source level above the target level is rejected up front with a
    # descriptive error naming the offending platform.
    if self.source_level > self.target_level:
      if self.by_default:
        name = "{} (by default)".format(self.name)
      else:
        name = self.name
      raise self.IllegalSourceTargetCombination(
        'Platform {platform} has java source level {source_level} but target level {target_level}.'
        .format(platform=name,
                source_level=self.source_level,
                target_level=self.target_level)
      )
  @property
  def by_default(self):
    # True when this settings object stands in for an unspecified platform.
    return self._by_default
  def __iter__(self):
    # Iteration order matters: JvmPlatform.default_platform re-wraps settings
    # via JvmPlatformSettings(*settings).
    yield self.source_level
    yield self.target_level
    yield self.args
  def __eq__(self, other):
    return tuple(self) == tuple(other)
  def __ne__(self, other):
    return not self.__eq__(other)
  def __hash__(self):
    return hash(tuple(self))
  def __cmp__(self, other):
    # NOTE: Python 2 only -- cmp() and __cmp__ no longer exist in Python 3.
    return cmp(tuple(self), tuple(other))
  def __str__(self):
    return 'source={source},target={target},args=({args})'.format(
      source=self.source_level,
      target=self.target_level,
      args=' '.join(self.args)
    )
| 40.713675
| 99
| 0.708408
|
4a0f7d7d9a015ed7fd4c4edb9ef4ee623c66b6a4
| 5,292
|
py
|
Python
|
tests/model/test_utils.py
|
weiwang2330/BayesNeuralNet
|
6be81289d9bc46657a1b14ded440c8160721a464
|
[
"MIT"
] | 1
|
2019-03-30T06:20:46.000Z
|
2019-03-30T06:20:46.000Z
|
tests/model/test_utils.py
|
Li-Scottech/zhusuan
|
48c0f4e0716eb387f81ee8c3f3ca97fcf01e9d1e
|
[
"MIT"
] | null | null | null |
tests/model/test_utils.py
|
Li-Scottech/zhusuan
|
48c0f4e0716eb387f81ee8c3f3ca97fcf01e9d1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from itertools import permutations
import tensorflow as tf
from zhusuan.model.utils import *
from zhusuan.model.utils import Context
class TestContext(tf.test.TestCase):
    # Exercises the Context stack helper: get_contexts()/get_context() must
    # mirror `with Context()` nesting exactly.
    def test_Context(self):
        """The context stack grows and shrinks in lockstep with `with` nesting."""
        self.assertEqual(Context.get_contexts(), [])
        with self.assertRaisesRegexp(RuntimeError, "No contexts on the stack"):
            Context.get_context()
        with Context() as context:
            self.assertEqual(Context.get_contexts(), [context])
            self.assertEqual(Context.get_context(), context)
            with Context() as context_inner:
                self.assertEqual(Context.get_contexts(),
                                 [context, context_inner])
                self.assertEqual(Context.get_context(), context_inner)
            # Leaving the inner block must pop only the inner context.
            self.assertEqual(Context.get_contexts(), [context])
            self.assertEqual(Context.get_context(), context)
        # Stack must be empty again and get_context() must fail as before.
        self.assertEqual(Context.get_contexts(), [])
        with self.assertRaisesRegexp(RuntimeError, "No contexts on the stack"):
            Context.get_context()
class TestGetBackwardTensors(tf.test.TestCase):
    # Each test builds a small TF graph (shape sketched in the ASCII comment)
    # and checks that get_backward_ops returns the ancestor ops of the seed
    # tensors in topological order; treat_as_inputs cuts traversal at the
    # given tensors.
    def testGetBackwardOpsChain(self):
        # a -> b -> c
        a = tf.placeholder(tf.float32)
        b = tf.sqrt(a)
        c = tf.square(b)
        for n in range(4):
            for seed_tensors in permutations([a, b, c], n):
                # Expected result is determined by the deepest seed tensor.
                if c in seed_tensors:
                    truth = [a.op, b.op, c.op]
                elif b in seed_tensors:
                    truth = [a.op, b.op]
                elif a in seed_tensors:
                    truth = [a.op]
                else:
                    truth = []
                self.assertEqual(get_backward_ops(seed_tensors), truth)
        self.assertEqual(get_backward_ops([c], treat_as_inputs=[b]), [c.op])
        self.assertEqual(
            get_backward_ops([b, c], treat_as_inputs=[b]), [c.op])
        self.assertEqual(
            get_backward_ops([a, c], treat_as_inputs=[b]), [a.op, c.op])
    def testGetBackwardOpsSplit(self):
        # a -> b -> c
        #       \-> d
        a = tf.placeholder(tf.float32)
        b = tf.exp(a)
        c = tf.log(b)
        d = tf.negative(b)
        self.assertEqual(get_backward_ops([d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
        self.assertEqual(
            get_backward_ops([c, d]), [a.op, b.op, c.op, d.op])
        self.assertEqual(get_backward_ops([b, d]), [a.op, b.op, d.op])
        self.assertEqual(get_backward_ops([a, d]), [a.op, b.op, d.op])
        self.assertEqual(
            get_backward_ops([c, d], treat_as_inputs=[b]), [c.op, d.op])
        self.assertEqual(
            get_backward_ops([c], treat_as_inputs=[d]), [a.op, b.op, c.op])
    def testGetBackwardOpsMerge(self):
        # a -> c -> d
        # b ->/
        a = tf.placeholder(tf.float32)
        b = tf.constant(0, dtype=tf.int32)
        c = tf.reduce_sum(a, reduction_indices=b)
        d = tf.stop_gradient(c)
        self.assertEqual(
            get_backward_ops([d]), [a.op, b.op, c.op, d.op])
        self.assertEqual(get_backward_ops([d], treat_as_inputs=[c]), [d.op])
        self.assertEqual(
            get_backward_ops([d], treat_as_inputs=[a]), [b.op, c.op, d.op])
    def testGetBackwardOpsBridge(self):
        # a -> b -> c -> d -> e
        #       \ --- /
        a = tf.placeholder(tf.int32)
        b = tf.identity(a)
        c = tf.cast(b, tf.float32)
        d = tf.tile(c, b)
        e = tf.tanh(d)
        self.assertEqual(
            get_backward_ops([e]), [a.op, b.op, c.op, d.op, e.op])
        self.assertEqual(get_backward_ops([c]), [a.op, b.op, c.op])
        # Cutting at c must still reach b through the tile's second input.
        self.assertEqual(get_backward_ops([e], treat_as_inputs=[c]),
                         [a.op, b.op, d.op, e.op])
    def testGetBackwardOpsControlDeps(self):
        # a -> b - \
        # c -> d - e
        #       \ /
        #        f
        a = tf.placeholder(tf.float32, name='a')
        b = tf.identity(a, name='b')
        c = tf.placeholder(tf.float32, name='c')
        d = tf.identity(c, name='d')
        with tf.control_dependencies([b, d]):
            e = tf.placeholder(tf.float32, name='e')
        with tf.control_dependencies([e, d]):
            f = tf.placeholder(tf.float32, name='f')
        # Control dependencies count as graph edges for the traversal.
        self.assertEqual(get_backward_ops([f]),
                         [a.op, b.op, c.op, d.op, e.op, f.op])
        self.assertEqual(get_backward_ops([d, f]),
                         [c.op, d.op, a.op, b.op, e.op, f.op])
        self.assertEqual(get_backward_ops([f], treat_as_inputs=[b]),
                         [a.op, b.op, c.op, d.op, e.op, f.op])
        self.assertEqual(get_backward_ops([f], treat_as_inputs=[b, c]),
                         [a.op, b.op, d.op, e.op, f.op])
        self.assertEqual(get_backward_ops([f], treat_as_inputs=[d, e]),
                         [a.op, b.op, c.op, d.op, e.op, f.op])
        self.assertEqual(get_backward_ops([d, f], treat_as_inputs=[b]),
                         [c.op, d.op, a.op, b.op, e.op, f.op])
    def test_get_backward_ops_control_flow(self):
        # while_loop, scan, TensorArray
        pass
| 39.492537
| 79
| 0.546485
|
4a0f7d7e4e6ef765e76710958f62466a433b4c1d
| 965
|
py
|
Python
|
ssh-bruteforce.py
|
dasithsv/python4pentesters
|
b4b7845f5c0f844d3309a62a365819658a2bbe6c
|
[
"MIT"
] | 1
|
2022-03-28T18:15:59.000Z
|
2022-03-28T18:15:59.000Z
|
ssh-bruteforce.py
|
dasithsv/python4pentesters
|
b4b7845f5c0f844d3309a62a365819658a2bbe6c
|
[
"MIT"
] | null | null | null |
ssh-bruteforce.py
|
dasithsv/python4pentesters
|
b4b7845f5c0f844d3309a62a365819658a2bbe6c
|
[
"MIT"
] | null | null | null |
import paramiko
import sys
import os
# Gather the attack parameters interactively. input() already returns str in
# Python 3, so the previous str(...) wrappers were redundant.
target = input('Please enter target IP address: ')
username = input('Please enter username to bruteforce: ')
password_file = input('Please enter location of the password file: ')
def ssh_connect(password, code=0):
    """Attempt one SSH login against the module-level target/username.

    Returns 0 when the credentials are accepted and 1 when authentication
    fails; any other connection error (e.g. a socket error) propagates.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(target, port=22, username=username, password=password)
    except paramiko.AuthenticationException:
        code = 1
    finally:
        # Always release the client -- previously ssh.close() was skipped when
        # connect() raised anything other than AuthenticationException.
        ssh.close()
    return code
# Try each candidate password; stop on the first success.
with open(password_file, 'r') as file:
    for line in file:
        password = line.strip()
        try:
            response = ssh_connect(password)
            if response == 0:
                print('password found: ' + password)
                exit(0)
            elif response == 1:
                print('no luck')
        except Exception as e:
            # Non-auth errors (timeouts, refused connections) are reported
            # but do not abort the run.
            print(e)
# The `with` statement closes the file; the previous `input_file.close()`
# referenced an undefined name and raised NameError after the loop.
| 24.74359
| 74
| 0.617617
|
4a0f7de4b1922d5e3caad25a06f74ed3920995de
| 9,594
|
py
|
Python
|
bs4/builder/_lxml.py
|
qbosen/leetcode_file_generator
|
594d7bb1e5ac5cb3100ddbaecc3f8359c17dbbb8
|
[
"MIT"
] | 2
|
2019-02-17T11:55:41.000Z
|
2022-03-04T14:37:01.000Z
|
application/physical/otmr/scraper/lib/bs4/builder/_lxml.py
|
cprior/finance-stuff
|
6a9389456e1068e0e8fc6bd83c87b8144a6390bf
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
application/physical/otmr/scraper/lib/bs4/builder/_lxml.py
|
cprior/finance-stuff
|
6a9389456e1068e0e8fc6bd83c87b8144a6390bf
|
[
"MIT"
] | 2
|
2018-11-28T10:08:31.000Z
|
2021-06-22T06:07:42.000Z
|
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
try:
from collections.abc import Callable # Python 3.6
except ImportError , e:
from collections import Callable
from io import BytesIO
from StringIO import StringIO
from lxml import etree
from bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
XMLProcessingInstruction,
)
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
# Shared feature-name constant; both builder classes below list it in
# `features` and LXMLTreeBuilder uses it as NAME.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """XML tree builder driven by lxml's target-parser interface.

    Instances serve as the lxml parser `target`: lxml invokes the start/end/
    pi/data/doctype/comment callbacks below, which forward into self.soup.
    NOTE(review): this copy uses Python 2 syntax (`except ..., e`, `unicode`).
    """
    DEFAULT_PARSER_CLASS = etree.XMLParser
    is_xml = True
    processing_instruction_class = XMLProcessingInstruction
    NAME = "lxml-xml"
    ALTERNATE_NAMES = ["xml"]
    # Well, it's permissive by XML parser standards.
    features = [NAME, LXML, XML, FAST, PERMISSIVE]
    # Markup is fed to lxml in chunks of this many bytes/characters.
    CHUNK_SIZE = 512
    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
    def default_parser(self, encoding):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)
    def parser_for(self, encoding):
        # Use the default parser.
        parser = self.default_parser(encoding)
        if isinstance(parser, Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser
    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        # Stack of inverted namespace maps (URL -> prefix); see start()/end().
        self.nsmaps = [self.DEFAULT_NSMAPS]
    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)
    def prepare_markup(self, markup, user_specified_encoding=None,
                       exclude_encodings=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)
        Each 4-tuple represents a strategy for parsing the document.
        """
        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        if is_html:
            self.processing_instruction_class = ProcessingInstruction
        else:
            self.processing_instruction_class = XMLProcessingInstruction
        if isinstance(markup, unicode):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False
        if isinstance(markup, unicode):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(
            markup, try_encodings, is_html, exclude_encodings)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)
    def feed(self, markup):
        # Normalize the input to a file-like object so it can be chunked.
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)
        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))
    def close(self):
        # Reset the namespace stack for the next document.
        self.nsmaps = [self.DEFAULT_NSMAPS]
    def start(self, name, attrs, nsmap={}):
        # NOTE(review): the mutable default nsmap={} is only ever read here,
        # never mutated, so the shared-default pitfall does not bite.
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace
        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn then into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs
        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)
    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None
    def end(self, name):
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            # Walk the stack innermost-first to find the active prefix.
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()
    def pi(self, target, data):
        # Processing instruction: stored as "<target> <data>" text.
        self.soup.endData()
        self.soup.handle_data(target + ' ' + data)
        self.soup.endData(self.processing_instruction_class)
    def data(self, content):
        self.soup.handle_data(content)
    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)
    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """lxml-backed HTML builder; inherits the XML builder's target callbacks
    but swaps in etree.HTMLParser and HTML feature flags."""
    NAME = LXML
    ALTERNATE_NAMES = ["lxml-html"]
    features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
    is_xml = False
    processing_instruction_class = ProcessingInstruction
    def default_parser(self, encoding):
        # Returned as a class; parser_for() instantiates it with defaults.
        return etree.HTMLParser
    def feed(self, markup):
        # Unlike the XML builder, HTML markup is fed in a single call.
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| 36.479087
| 82
| 0.627892
|
4a0f7f2642dc7cfec063cc2310c1fb6f439f087b
| 3,915
|
py
|
Python
|
thumt/modules/quantization.py
|
THUNLP-MT/Transformer-DMB
|
14dedbedf2e369f9b8abf53d8d47c7862a951e39
|
[
"BSD-3-Clause"
] | null | null | null |
thumt/modules/quantization.py
|
THUNLP-MT/Transformer-DMB
|
14dedbedf2e369f9b8abf53d8d47c7862a951e39
|
[
"BSD-3-Clause"
] | null | null | null |
thumt/modules/quantization.py
|
THUNLP-MT/Transformer-DMB
|
14dedbedf2e369f9b8abf53d8d47c7862a951e39
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
# Author: Zhixing Tan
# Contact: playinf@stu.xmu.edu.cn
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numbers
def _check_type_and_shape(input, min, max):
min_is_number = isinstance(min, numbers.Real)
max_is_number = isinstance(max, numbers.Real)
min_is_tensor = isinstance(min, torch.Tensor)
max_is_tensor = isinstance(max, torch.Tensor)
if min_is_tensor and max_is_tensor:
min_ndim = min.dim()
max_ndim = max.dim()
if min_ndim > 1 or max_ndim > 1:
raise ValueError("Unsupported dimension: min: %d, max: %d" %
(min_ndim, max_ndim))
if min_ndim != max_ndim:
raise ValueError("dim(min) != dim(max): %d vs %d" %
(min_ndim, max_ndim))
if min_ndim == 1:
if input.shape[-1] != min.shape[-1]:
raise ValueError("Unmatched channels: %d vs %d" %
(input.shape[-1], min.shape[-1]))
elif not (max_is_number and min_is_number):
raise ValueError("min and max must both be numbers or Tensors.")
def _choose_quantization_params(min, max):
scale = (max - min) / 254.0
initial_zero_point = 1.0 - min / scale
if isinstance(initial_zero_point, torch.Tensor):
nudged_zero_point = initial_zero_point.clamp_(1.0, 255.0).round_()
else:
if initial_zero_point > 255.0:
nudged_zero_point = 255.0
elif initial_zero_point < 1.0:
nudged_zero_point = 1.0
else:
nudged_zero_point = round(initial_zero_point)
return scale, nudged_zero_point
class FakeQuantWithMinMaxArgs(torch.autograd.Function):
    """Fake-quantize `input` over a scalar [min, max] range.

    Forward clips to [min, max], snaps to the integer grid produced by
    _choose_quantization_params (levels 1..255), and de-quantizes back to
    floats. Backward passes gradients straight through for in-range entries
    and routes the clipped entries' gradients to min/max.
    """
    @staticmethod
    def forward(ctx, input, min, max):
        # Remember which entries fell outside [min, max] for backward.
        mask_min = input < min
        mask_max = input > max
        ctx.save_for_backward(mask_min, mask_max)
        output = input.clone()
        output[mask_min] = min
        output[mask_max] = max
        scale, zero_point = _choose_quantization_params(min, max)
        # Quantize (scale, shift, clamp, round) then de-quantize, in place.
        output.div_(scale).add_(zero_point).clamp_(1.0, 255.0).round_()
        output.sub_(zero_point).mul_(scale)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        mask_min, mask_max = ctx.saved_tensors
        # Straight-through for in-range entries; zero for clipped ones.
        grad_input = grad_output.clone()
        grad_input[mask_min] = 0.0
        grad_input[mask_max] = 0.0
        if ctx.needs_input_grad[1]:
            # Entries clipped at min contribute their gradient to `min`.
            grad_min = grad_output[mask_min].sum()
        else:
            grad_min = None
        if ctx.needs_input_grad[2]:
            # Entries clipped at max contribute their gradient to `max`.
            grad_max = grad_output[mask_max].sum()
        else:
            grad_max = None
        return grad_input, grad_min, grad_max
class FakeQuantWithMinMaxArgs1D(torch.autograd.Function):
    """Per-channel variant of FakeQuantWithMinMaxArgs: min/max may be 1-d
    tensors matching input's last dimension (see _check_type_and_shape)."""
    @staticmethod
    def forward(ctx, input, min, max):
        _check_type_and_shape(input, min, max)
        mask_min = input < min
        mask_max = input > max
        ctx.save_for_backward(mask_min, mask_max)
        # torch.where broadcasts the per-channel bounds across `input`.
        output = torch.where(mask_min, min, input)
        output = torch.where(mask_max, max, output)
        scale, zero_point = _choose_quantization_params(min, max)
        output.div_(scale).add_(zero_point).clamp_(1.0, 255.0).round_()
        output.sub_(zero_point).mul_(scale)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        mask_min, mask_max = ctx.saved_tensors
        zero_tensor = torch.zeros_like(grad_output)
        # Split grad_output: entries clipped at min/max flow to those bounds,
        # the remainder passes straight through to the input.
        grad_hidden = torch.where(mask_min, zero_tensor, grad_output)
        grad_min = grad_output - grad_hidden
        grad_input = torch.where(mask_max, zero_tensor, grad_hidden)
        grad_max = grad_hidden - grad_input
        return grad_input, grad_min, grad_max
# Functional entry points (an autograd.Function must be invoked via .apply).
fake_quant_with_min_max_args = FakeQuantWithMinMaxArgs.apply
fake_quant_with_min_max_args_1d = FakeQuantWithMinMaxArgs1D.apply
| 30.585938
| 74
| 0.642912
|
4a0f7faa7ba5d32f665a9b1a4fed05df6a693903
| 7,181
|
py
|
Python
|
Notebooks/ml_util.py
|
bprasad26/modeling_earthquake_damage
|
cae6ccaadb6c86ba9fd24fecb86eb1677da4224d
|
[
"MIT"
] | 1
|
2021-12-01T15:41:19.000Z
|
2021-12-01T15:41:19.000Z
|
Notebooks/ml_util.py
|
bprasad26/modeling_earthquake_damage
|
cae6ccaadb6c86ba9fd24fecb86eb1677da4224d
|
[
"MIT"
] | null | null | null |
Notebooks/ml_util.py
|
bprasad26/modeling_earthquake_damage
|
cae6ccaadb6c86ba9fd24fecb86eb1677da4224d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import learning_curve
import plotly.graph_objects as go
from sklearn.model_selection import validation_curve
import matplotlib.pyplot as plt
from sklearn import tree
def indices_of_top_k(arr, k):
    """Return the indices of the k largest values in arr, in ascending order."""
    top_k = np.argpartition(np.asarray(arr), -k)[-k:]
    return np.sort(top_k)
class TopFeatureSelector(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer keeping only the k most important features."""
    def __init__(self, feature_importances, k):
        # Importance score per feature and the number of features to keep.
        self.feature_importances = feature_importances
        self.k = k
    def fit(self, X, y=None):
        # Resolve the column indices of the top-k importances once, at fit time.
        self.feature_indices_ = indices_of_top_k(self.feature_importances, self.k)
        return self
    def transform(self, X):
        # Assumes X supports 2-d integer-array indexing (numpy array, not a
        # DataFrame) -- confirm at call sites.
        return X[:, self.feature_indices_]
# plot precision, recall vs threshold
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    """Plot precision and recall against the decision threshold with Plotly.
    NOTE(review): the [:-1] slices and the hard-coded x-range of
    [-50000, 50000] suggest inputs from sklearn's precision_recall_curve
    applied to decision_function scores -- confirm against callers.
    """
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=thresholds,
            y=precisions[:-1],
            name="Precision",
            mode="lines",
            line=dict(color="blue"),
        )
    )
    fig.add_trace(
        go.Scatter(
            x=thresholds,
            y=recalls[:-1],
            name="Recall",
            mode="lines",
            line=dict(color="green"),
        )
    )
    fig.update_yaxes(range=[0, 1])
    fig.update_xaxes(range=[-50000, 50000])
    fig.update_layout(
        title="Precision and recall versus the decision threshold",
        xaxis_title="Threshold",
    )
    fig.show()
def plot_precision_vs_recall(precisions, recalls):
    """Plot the precision/recall trade-off curve (both axes fixed to [0, 1])."""
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=recalls, y=precisions, mode="lines", line=dict(color="green"))
    )
    fig.update_yaxes(range=[0, 1])
    fig.update_xaxes(range=[0, 1])
    fig.update_layout(
        title="Precision vs Recall", xaxis_title="Recall",
    )
    fig.show()
def plot_roc_curve(fpr, tpr, label=None):
    """Plot a ROC curve with Plotly, plus the random-classifier diagonal.

    Args:
        fpr: false positive rates (x-axis).
        tpr: true positive rates (y-axis).
        label: optional trace name; when omitted the legend is hidden.
    """
    # NOTE(review): the parameter was previously misspelled `trp` while the
    # body referenced `tpr`, so every call raised NameError; renamed to match.
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=fpr, y=tpr, mode="lines", line=dict(color="green"), name=label)
    )
    fig.add_trace(
        go.Scatter(
            x=[0, 1],
            y=[0, 1],
            mode="lines",
            line=dict(color="blue"),
            name="random classifier",
        )
    )
    fig.update_yaxes(range=[0, 1])
    fig.update_xaxes(range=[0, 1])
    if label is None:
        fig.update_layout(
            title="The ROC Curve",
            xaxis_title="False Positive Rate (Fall-Out)",
            yaxis_title="True Positive Rate (Recall)",
            showlegend=False,
        )
    else:
        fig.update_layout(
            title="The ROC Curve",
            xaxis_title="False Positive Rate (Fall-Out)",
            yaxis_title="True Positive Rate (Recall)",
        )
    fig.show()
def compare_roc_curve(fpr_clf1, trp_clf1, label1, fpr_clf2, tpr_clf2, label2):
    """Overlay the ROC curves of two classifiers plus the chance diagonal.
    NOTE(review): the first classifier's rate parameter is spelled `trp_clf1`
    (vs `tpr_clf2`). It is used consistently, so behavior is correct, but the
    name looks like a typo for `tpr_clf1`; renaming would break keyword callers.
    """
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=fpr_clf1, y=trp_clf1, mode="lines", line=dict(color="green"), name=label1
        )
    )
    fig.add_trace(
        go.Scatter(
            x=fpr_clf2, y=tpr_clf2, mode="lines", line=dict(color="red"), name=label2
        )
    )
    fig.add_trace(
        go.Scatter(
            x=[0, 1],
            y=[0, 1],
            mode="lines",
            line=dict(color="blue"),
            name="random classifier",
        )
    )
    fig.update_yaxes(range=[0, 1])
    fig.update_xaxes(range=[0, 1])
    fig.update_layout(
        title="The ROC Curve",
        xaxis_title="False Positive Rate (Fall-Out)",
        yaxis_title="True Positive Rate (Recall)",
    )
    fig.show()
from sklearn.base import BaseEstimator, TransformerMixin
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Sklearn-style transformer selecting named columns from X.
    Presumably X is a pandas DataFrame (label-based indexing) -- confirm at
    call sites.
    """
    def __init__(self, attribute_names):
        # Column labels to keep.
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        # Stateless: nothing to learn.
        return self
    def transform(self, X):
        return X[self.attribute_names]
def plot_learning_curves(estimator, X, y, cv):
    """Plot mean train/validation accuracy versus training-set size.
    Uses sklearn's learning_curve over 10 train sizes from 10% to 100% of X,
    with the given cross-validation splitter.
    """
    train_sizes, train_scores, test_scores = learning_curve(
        estimator=estimator,
        X=X,
        y=y,
        train_sizes=np.linspace(0.1, 1.0, 10),
        cv=cv,
        n_jobs=-1,
    )
    # Average the per-fold scores for each training size.
    train_mean = np.mean(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=train_sizes,
            y=train_mean,
            name="Training accuracy",
            mode="lines",
            line=dict(color="blue"),
        )
    )
    fig.add_trace(
        go.Scatter(
            x=train_sizes,
            y=test_mean,
            name="Validation accuracy",
            mode="lines",
            line=dict(color="green"),
        )
    )
    fig.update_layout(
        title="Learning Curves",
        xaxis_title="Number of training examples",
        yaxis_title="Accuracy",
    )
    fig.show()
def plot_validation_curves(estimator, X, y, param_name, param_range, cv):
    """Plot training vs. validation accuracy across a hyper-parameter range.

    Args:
        estimator: scikit-learn estimator to evaluate.
        X, y: feature matrix and target vector.
        param_name: name of the hyper-parameter to sweep.
        param_range: values of the hyper-parameter to try.
        cv: cross-validation splitting strategy.
    """
    train_scores, test_scores = validation_curve(
        estimator=estimator,
        X=X,
        y=y,
        param_name=param_name,
        param_range=param_range,
        cv=cv,
    )
    # Fold-averaged scores: one curve for training, one for validation.
    curves = [
        ("Training Accuracy", "Blue", np.mean(train_scores, axis=1)),
        ("Validation Accuracy", "Green", np.mean(test_scores, axis=1)),
    ]
    fig = go.Figure()
    for trace_name, color, means in curves:
        fig.add_trace(
            go.Scatter(
                x=param_range,
                y=means,
                name=trace_name,
                mode="lines",
                line=dict(color=color),
            )
        )
    fig.update_layout(
        title="Validation Curves", xaxis_title=param_name, yaxis_title="Accuracy"
    )
    fig.show()
def plot_decision_tree(classifier, feature_names, class_names):
    """Render a fitted decision tree with matplotlib.

    Args:
        classifier: a fitted sklearn tree-based classifier.
        feature_names: names of the input features.
        class_names: names of the target classes.
    """
    figure, _ = plt.subplots(nrows=1, ncols=1, figsize=(4, 4))
    tree.plot_tree(
        classifier,
        feature_names=feature_names,
        class_names=class_names,
        rounded=True,
        filled=True,
    )
    figure.show()
def plot_silhouetter_scores(k_range, silhouette_scores):
    """Plot silhouette score against the number of clusters K."""
    score_trace = go.Scatter(
        x=k_range,
        y=silhouette_scores,
        mode="lines+markers",
        marker=dict(color="green"),
    )
    fig = go.Figure()
    fig.add_trace(score_trace)
    fig.update_layout(xaxis_title="K", yaxis_title="Silhouette Score")
    fig.show()
def num_to_cat_list(df, num_col_list, n_unique_val):
    """Select numerical columns that look categorical.

    Takes a pandas DataFrame and a list of numerical columns, and returns the
    columns whose number of distinct values is at most ``n_unique_val`` (i.e.
    the ones worth converting to a categorical dtype).
    """
    candidates = []
    for column in num_col_list:
        distinct = df[column].nunique()
        print(column, distinct)
        if distinct <= n_unique_val:
            candidates.append(column)
    return candidates
| 25.374558
| 87
| 0.590865
|
4a0f80270de0984a6de12a6103cb670ea1d6e23c
| 10,884
|
py
|
Python
|
mvj/urls.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 1
|
2021-01-12T08:14:10.000Z
|
2021-01-12T08:14:10.000Z
|
mvj/urls.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 249
|
2017-04-18T14:00:13.000Z
|
2022-03-30T12:18:03.000Z
|
mvj/urls.py
|
City-of-Helsinki/mvj
|
6f786047805a968317ecc37b38c2262ada2c3805
|
[
"MIT"
] | 7
|
2017-04-18T08:43:54.000Z
|
2021-07-28T07:29:30.000Z
|
import rest_framework.urls
from django.conf import settings
from django.contrib import admin
from django.urls import include, path, re_path
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from credit_integration import urls as credit_integration_urls
from forms.viewsets.form import AnswerViewSet, FormViewSet
from leasing.api_functions import CalculateIncreaseWith360DayCalendar
from leasing.report.viewset import ReportViewSet
from leasing.views import CloudiaProxy, VirreProxy, ktj_proxy
from leasing.viewsets.area_note import AreaNoteViewSet
from leasing.viewsets.auditlog import AuditLogView
from leasing.viewsets.basis_of_rent import BasisOfRentViewSet
from leasing.viewsets.batchrun import (
JobRunLogEntryViewSet,
JobRunViewSet,
JobViewSet,
ScheduledJobViewSet,
)
from leasing.viewsets.comment import CommentTopicViewSet, CommentViewSet
from leasing.viewsets.contact import ContactViewSet
from leasing.viewsets.contact_additional_views import ContactExistsView
from leasing.viewsets.debt_collection import (
CollectionCourtDecisionViewSet,
CollectionLetterTemplateViewSet,
CollectionLetterViewSet,
CollectionNoteViewSet,
)
from leasing.viewsets.decision import DecisionCopyToLeasesView, DecisionViewSet
from leasing.viewsets.email import SendEmailView
from leasing.viewsets.infill_development_compensation import (
InfillDevelopmentCompensationAttachmentViewSet,
InfillDevelopmentCompensationViewSet,
)
from leasing.viewsets.inspection import InspectionAttachmentViewSet
from leasing.viewsets.invoice import (
InvoiceNoteViewSet,
InvoiceRowViewSet,
InvoiceSetViewSet,
InvoiceViewSet,
ReceivableTypeViewSet,
)
from leasing.viewsets.invoice_additional_views import (
InvoiceCalculatePenaltyInterestView,
InvoiceCreditView,
InvoiceExportToLaskeView,
InvoiceRowCreditView,
InvoiceSetCreditView,
)
from leasing.viewsets.land_area import (
LeaseAreaAttachmentViewSet,
PlanUnitListWithIdentifiersViewSet,
PlanUnitViewSet,
PlotMasterIdentifierList,
)
from leasing.viewsets.land_use_agreement import (
LandUseAgreementAttachmentViewSet,
LandUseAgreementInvoiceCreditView,
LandUseAgreementInvoiceExportToLaskeView,
LandUseAgreementInvoiceRowCreditView,
LandUseAgreementInvoiceRowViewSet,
LandUseAgreementInvoiceSetCreditView,
LandUseAgreementInvoiceSetViewSet,
LandUseAgreementInvoiceViewSet,
LandUseAgreementViewSet,
)
from leasing.viewsets.lease import (
DistrictViewSet,
FinancingViewSet,
HitasViewSet,
IntendedUseViewSet,
LeaseTypeViewSet,
LeaseViewSet,
ManagementViewSet,
MunicipalityViewSet,
NoticePeriodViewSet,
RegulationViewSet,
RelatedLeaseViewSet,
ReservationProcedureViewSet,
SpecialProjectViewSet,
StatisticalUseViewSet,
SupportiveHousingViewSet,
)
from leasing.viewsets.lease_additional_views import (
LeaseBillingPeriodsView,
LeaseCopyAreasToContractView,
LeaseCreateChargeViewSet,
LeaseCreateCollectionLetterDocumentViewSet,
LeasePreviewInvoicesForYearView,
LeaseRentForPeriodView,
LeaseSetInvoicingStateView,
LeaseSetRentInfoCompletionStateView,
)
from leasing.viewsets.leasehold_transfer import LeaseholdTransferViewSet
from leasing.viewsets.rent import IndexViewSet
from leasing.viewsets.ui_data import UiDataViewSet
from leasing.viewsets.vat import VatViewSet
from plotsearch.views import PlotSearchSubtypeViewSet, PlotSearchViewSet
from users.views import UsersPermissions
from users.viewsets import UserViewSet
# DRF router exposing every REST resource under the /v1/ prefix below.
router = routers.DefaultRouter()
router.register(r"area_note", AreaNoteViewSet)
router.register(r"basis_of_rent", BasisOfRentViewSet)
router.register(r"collection_court_decision", CollectionCourtDecisionViewSet)
router.register(r"collection_letter", CollectionLetterViewSet)
router.register(r"collection_letter_template", CollectionLetterTemplateViewSet)
router.register(r"collection_note", CollectionNoteViewSet)
router.register(r"comment", CommentViewSet)
router.register(r"comment_topic", CommentTopicViewSet)
router.register(r"contact", ContactViewSet)
router.register(r"decision", DecisionViewSet)
router.register(r"district", DistrictViewSet)
router.register(r"financing", FinancingViewSet)
router.register(r"form", FormViewSet, basename="form")
router.register(r"answer", AnswerViewSet)
router.register(r"hitas", HitasViewSet)
router.register(r"index", IndexViewSet)
router.register(
    r"infill_development_compensation", InfillDevelopmentCompensationViewSet
)
router.register(
    r"infill_development_compensation_attachment",
    InfillDevelopmentCompensationAttachmentViewSet,
)
router.register(r"inspection_attachment", InspectionAttachmentViewSet)
router.register(r"invoice", InvoiceViewSet)
router.register(r"invoice_note", InvoiceNoteViewSet)
router.register(r"invoice_row", InvoiceRowViewSet)
router.register(r"invoice_set", InvoiceSetViewSet)
router.register(r"intended_use", IntendedUseViewSet)
router.register(r"lease", LeaseViewSet, basename="lease")
router.register(r"lease_area_attachment", LeaseAreaAttachmentViewSet)
router.register(
    r"lease_create_charge", LeaseCreateChargeViewSet, basename="lease_create_charge"
)
router.register(
    r"lease_create_collection_letter",
    LeaseCreateCollectionLetterDocumentViewSet,
    basename="lease_create_collection_letter",
)
router.register(r"lease_type", LeaseTypeViewSet)
router.register(r"leasehold_transfer", LeaseholdTransferViewSet)
router.register(r"management", ManagementViewSet)
router.register(r"municipality", MunicipalityViewSet)
router.register(r"notice_period", NoticePeriodViewSet)
router.register(r"plan_unit", PlanUnitViewSet)
router.register(
    r"plan_unit_list_with_identifiers",
    PlanUnitListWithIdentifiersViewSet,
    basename="planunitlistwithidentifiers",
)
router.register(r"plot_master_identifier_list", PlotMasterIdentifierList)
router.register(r"plot_search", PlotSearchViewSet)
router.register(r"plot_search_subtype", PlotSearchSubtypeViewSet)
router.register(r"regulation", RegulationViewSet)
router.register(r"receivable_type", ReceivableTypeViewSet)
router.register(r"related_lease", RelatedLeaseViewSet)
router.register(r"report", ReportViewSet, basename="report")
router.register(r"special_project", SpecialProjectViewSet)
router.register(r"reservation_procedure", ReservationProcedureViewSet)
router.register(r"statistical_use", StatisticalUseViewSet)
router.register(r"supportive_housing", SupportiveHousingViewSet)
router.register(r"ui_data", UiDataViewSet, basename="ui_data")
router.register(r"user", UserViewSet)
router.register(r"vat", VatViewSet)
router.register(r"land_use_agreement", LandUseAgreementViewSet)
router.register(r"land_use_agreement_attachment", LandUseAgreementAttachmentViewSet)
router.register(r"land_use_agreement_invoice", LandUseAgreementInvoiceViewSet)
router.register(r"land_use_agreement_invoice_row", LandUseAgreementInvoiceRowViewSet)
router.register(r"land_use_agreement_invoice_set", LandUseAgreementInvoiceSetViewSet)
# Batchrun
router.register("scheduled_job", ScheduledJobViewSet)
router.register("job", JobViewSet)
router.register("job_run", JobRunViewSet)
router.register("job_run_log_entry", JobRunLogEntryViewSet)
# Non-ViewSet API views; these are appended to the router URLs under /v1/.
additional_api_paths = [
    path("auditlog/", AuditLogView.as_view(), name="auditlog"),
    path("contact_exists/", ContactExistsView.as_view(), name="contact-exists"),
    path(
        "decision_copy_to_leases/",
        DecisionCopyToLeasesView.as_view(),
        name="decision-copy-to-leases",
    ),
    path(
        "invoice_calculate_penalty_interest/",
        InvoiceCalculatePenaltyInterestView.as_view(),
        name="invoice-calculate-penalty-interest",
    ),
    path("invoice_credit/", InvoiceCreditView.as_view(), name="invoice-credit"),
    path(
        "invoice_export_to_laske/",
        InvoiceExportToLaskeView.as_view(),
        name="invoice-export-to-laske",
    ),
    path(
        "invoice_row_credit/", InvoiceRowCreditView.as_view(), name="invoice-row-credit"
    ),
    path(
        "invoice_set_credit/", InvoiceSetCreditView.as_view(), name="invoice-set-credit"
    ),
    path(
        "land_use_agreement_invoice_credit/",
        LandUseAgreementInvoiceCreditView.as_view(),
        name="land_use_agreement_invoice-credit",
    ),
    path(
        "land_use_agreement_invoice_export_to_laske/",
        LandUseAgreementInvoiceExportToLaskeView.as_view(),
        name="land_use_agreement_invoice-export-to-laske",
    ),
    path(
        "land_use_agreement_invoice_row_credit/",
        LandUseAgreementInvoiceRowCreditView.as_view(),
        name="land_use_agreement_invoice-row-credit",
    ),
    path(
        "land_use_agreement_invoice_set_credit/",
        LandUseAgreementInvoiceSetCreditView.as_view(),
        name="land_use_agreement_invoice-set-credit",
    ),
    path(
        "lease_billing_periods/",
        LeaseBillingPeriodsView.as_view(),
        name="lease-billing-periods",
    ),
    path(
        "lease_copy_areas_to_contract/",
        LeaseCopyAreasToContractView.as_view(),
        name="lease-copy-areas-to-contract",
    ),
    path(
        "lease_preview_invoices_for_year/",
        LeasePreviewInvoicesForYearView.as_view(),
        name="lease-preview-invoices-for-year",
    ),
    path(
        "lease_rent_for_period/",
        LeaseRentForPeriodView.as_view(),
        name="lease-rent-for-period",
    ),
    path(
        "lease_set_invoicing_state/",
        LeaseSetInvoicingStateView.as_view(),
        name="lease-set-invoicing-state",
    ),
    path(
        "lease_set_rent_info_completion_state/",
        LeaseSetRentInfoCompletionStateView.as_view(),
        name="lease-set-rent-info-completion-state",
    ),
    path("send_email/", SendEmailView.as_view(), name="send-email"),
    path("users_permissions/", UsersPermissions.as_view(), name="users-permissions"),
    path(
        "functions/calculate_increase_with_360_day_calendar",
        CalculateIncreaseWith360DayCalendar.as_view(),
    ),
]
# Top-level URL configuration: versioned API, proxies, admin, auth and docs.
urlpatterns = [
    path("v1/", include(router.urls + additional_api_paths)),
    path(
        "v1/", include((credit_integration_urls, "credit_integration"), namespace="v1"),
    ),
    re_path(r"(?P<base_type>ktjki[ir])/tuloste/(?P<print_type>[\w/]+)/pdf", ktj_proxy),
    path("contract_file/<contract_id>/", CloudiaProxy.as_view()),
    path("contract_file/<contract_id>/<file_id>/", CloudiaProxy.as_view()),
    path("trade_register/<service>/<business_id>/", VirreProxy.as_view()),
    path("admin/", admin.site.urls),
    path("auth/", include(rest_framework.urls)),
    path("docs/", get_swagger_view(title="MVJ API")),
]
# Mount django-debug-toolbar first so it wins URL resolution in DEBUG runs.
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 38.459364
| 88
| 0.784454
|
4a0f803512d4d86e9de82d09a4fb65cb795fd718
| 824
|
py
|
Python
|
vectorize_data.py
|
tuhoag/text-classification
|
4b70be170f88fa54009b6ec3dbfcda8a316fc589
|
[
"MIT"
] | null | null | null |
vectorize_data.py
|
tuhoag/text-classification
|
4b70be170f88fa54009b6ec3dbfcda8a316fc589
|
[
"MIT"
] | null | null | null |
vectorize_data.py
|
tuhoag/text-classification
|
4b70be170f88fa54009b6ec3dbfcda8a316fc589
|
[
"MIT"
] | null | null | null |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest, f_classif
# n-gram sizes, minimum document frequency, and feature cap for TF-IDF.
NGRAM_RANGE = (1, 2)
MIN_DF = 2
TOP_K = 10000
def ngram_vectorize(train_texts, train_labels, val_texts):
    """TF-IDF vectorize texts and keep at most TOP_K predictive n-grams.

    Args:
        train_texts: iterable of training documents.
        train_labels: labels aligned with train_texts (drive feature selection).
        val_texts: iterable of validation documents.

    Returns:
        Tuple (x_train, x_val) of feature-selected sparse matrices.
    """
    # NOTE(review): dtype is passed as the string 'int32'; TfidfVectorizer
    # normally takes a numpy dtype -- confirm this is intended.
    vectorizer = TfidfVectorizer(
        ngram_range=NGRAM_RANGE,
        analyzer='word',
        strip_accents='unicode',
        decode_error='replace',
        dtype='int32',
        min_df=MIN_DF,
    )
    # Fit the vocabulary/IDF on training data only, then reuse for validation.
    vectorizer.fit(train_texts)
    x_train = vectorizer.transform(train_texts)
    x_val = vectorizer.transform(val_texts)
    # ANOVA F-test selection; never ask for more features than exist.
    selector = SelectKBest(f_classif, k=min(TOP_K, x_train.shape[1]))
    selector.fit(x_train, train_labels)
    return selector.transform(x_train), selector.transform(x_val)
| 25.75
| 69
| 0.691748
|
4a0f807a35e18ed2e66dab7198cbedf1cb30fb05
| 3,146
|
py
|
Python
|
framebuf_.py
|
hlovatt/PyBoardTypeshedGenerator
|
1d133cab16ea5d558b03175e6fa48b4a23b76136
|
[
"MIT"
] | 5
|
2020-07-26T08:48:39.000Z
|
2021-09-13T19:19:37.000Z
|
framebuf_.py
|
hlovatt/PyBoardTypeshedGenerator
|
1d133cab16ea5d558b03175e6fa48b4a23b76136
|
[
"MIT"
] | null | null | null |
framebuf_.py
|
hlovatt/PyBoardTypeshedGenerator
|
1d133cab16ea5d558b03175e6fa48b4a23b76136
|
[
"MIT"
] | 1
|
2020-11-07T22:37:44.000Z
|
2020-11-07T22:37:44.000Z
|
"""
Generate `pyi` from corresponding `rst` docs.
"""
import rst
from rst2pyi import RST2PyI
# Re-export authorship metadata from the shared ``rst`` helper module.
__author__ = rst.__author__
__copyright__ = rst.__copyright__
__license__ = rst.__license__
__version__ = "7.2.0"  # Version set by https://github.com/hlovatt/tag2ver
def framebuf(shed: RST2PyI) -> None:
    """Translate the `framebuf` rst docs into a typeshed via the given shed.

    The calls below are order-dependent: each one consumes a section of the
    rst input, so they must match the order of the MicroPython docs.
    """
    # Module header; post_doc supplies extra imports for the generated pyi.
    shed.module(
        name="framebuf",
        old="Frame buffer manipulation",
        post_doc=f"""
from typing import overload, Final
from uio import AnyWritableBuf
""",
        end="class FrameBuffer",
    )
    shed.consume_minuses_underline_line(and_preceding_lines=True)
    shed.class_(
        name="FrameBuffer", end="Constructors",
    )
    shed.def_(
        old=r".. class:: FrameBuffer(buffer, width, height, format, stride=width, /)",
        new="def __init__(self, buffer: AnyWritableBuf, width: int, height: int, format: int, stride: int = ..., /)",
    )
    shed.def_(
        old=r".. method:: FrameBuffer.fill(c)", new="def fill(self, c: int, /) -> None",
    )
    # pixel() is get-or-set depending on arity, hence two overloads.
    shed.def_(
        old=r".. method:: FrameBuffer.pixel(x, y[, c])",
        new=[
            "def pixel(self, x: int, y: int, /) -> int",
            "def pixel(self, x: int, y: int, c: int, /) -> None",
        ],
    )
    cmd = r".. method:: FrameBuffer."
    rect = r"rect(x, y, w, h, c)"
    # hline/vline/line share one rst description block.
    shed.defs_with_common_description(
        cmd=cmd,
        old2new={
            "hline(x, y, w, c)": "def hline(self, x: int, y: int, w: int, c: int, /) -> None",
            "vline(x, y, h, c)": "def vline(self, x: int, y: int, h: int, c: int, /) -> None",
            "line(x1, y1, x2, y2, c)": "def line(self, x1: int, y1: int, x2: int, y2: int, c: int, /) -> None",
        },
        end=cmd + rect,
    )
    # rect/fill_rect likewise share a description.
    shed.defs_with_common_description(
        cmd=cmd,
        old2new={
            rect: "def rect(self, x: int, y: int, w: int, h: int, c: int, /) -> None",
            "fill_rect(x, y, w, h, c)": "def fill_rect(self, x: int, y: int, w: int, h: int, c: int, /) -> None",
        },
        end="Drawing text",
    )
    shed.def_(
        old=r".. method:: FrameBuffer.text(s, x, y[, c])",
        new="def text(self, s: str, x: int, y: int, c: int = 1, /) -> None",
    )
    shed.def_(
        old=r".. method:: FrameBuffer.scroll(xstep, ystep)",
        new="def scroll(self, xstep: int, ystep: int, /) -> None",
    )
    shed.def_(
        old=r".. method:: FrameBuffer.blit(fbuf, x, y, key=-1, palette=None)",
        new="""
def blit(self, fbuf: FrameBuffer, x: int, y: int, key: int = -1, pallet: FrameBuffer | None = None, /) -> None
""",
    )
    # Pixel-format constants exported at module level.
    shed.vars(
        old=".. data:: framebuf.MONO_VLSB", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.MONO_HLSB", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.MONO_HMSB", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.RGB565", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.GS2_HMSB", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.GS4_HMSB", class_var=None,
    )
    shed.vars(
        old=".. data:: framebuf.GS8", class_var=None, end=None,
    )
    shed.write()
| 32.102041
| 117
| 0.544183
|
4a0f80f64979a746c7bffff4f6e3f9bb9820e012
| 10,863
|
py
|
Python
|
google/appengine/dist/_library.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 26
|
2015-01-20T08:02:38.000Z
|
2020-06-10T04:57:41.000Z
|
google/appengine/dist/_library.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 4
|
2018-03-28T16:49:17.000Z
|
2019-11-02T18:35:02.000Z
|
google/appengine/dist/_library.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 13
|
2016-02-28T00:14:23.000Z
|
2021-05-03T15:47:36.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code to exist off of google.appengine.dist.
Kept in a separate file from the __init__ module for testing purposes.
"""
__all__ = ['use_library']
# distutils may be absent from the runtime; code below falls back to the
# local LooseVersion class when `distutils` is None.
try:
  import distutils.version
except ImportError:
  distutils = None
import os
import sys
# The dev appserver sets SERVER_SOFTWARE starting with 'Dev'; production
# sets it without that prefix. No value at all also means the SDK.
server_software = os.getenv('SERVER_SOFTWARE')
USING_SDK = not server_software or server_software.startswith('Dev')
del server_software
# Versioned django submodule probed by DjangoVersion(); rewritten by
# CheckInstalledVersion() when a different Django version is requested.
_DESIRED_DJANGO_VERSION = 'v0_96'
AUTO_IMPORT_FIXER_FILE = 'auto_import_fixer.py'
def fix_paths(app_path, python_lib_path):
  """Fix the __path__ attr of sys.modules entries.

  Specifically this fixes the path of those sys.modules package entries that
  have __path__ attributes that point to the python library, but where there
  is a similar package in the application's code.

  Args:
    app_path: The root path of the application code.
    python_lib_path: The root path of the python library.
  """
  # If the app ships its own auto-import fixer, leave everything alone.
  if os.path.isfile(os.path.join(app_path, AUTO_IMPORT_FIXER_FILE)):
    return
  for module_name, module in sys.modules.items():
    if getattr(module, '__path__', None) is None:
      continue  # Not a package.
    app_pkg_dir = os.path.join(app_path, *module_name.split('.'))
    if not os.path.isfile(os.path.join(app_pkg_dir, '__init__.py')):
      continue  # The application does not shadow this package.
    paths = module.__path__
    in_python_lib = any(p.startswith(python_lib_path) for p in paths)
    in_app = any(p.startswith(app_path) for p in paths)
    # Only extend packages that currently resolve solely to the python
    # library even though the app provides its own version.
    if in_python_lib and not in_app:
      paths.append(app_pkg_dir)
try:
  import google
except ImportError:
  import google as google
# Derive the python library root from the location of the `google` package.
if not USING_SDK:
  # Production layout: <PYTHON_LIB>/versions/<this_version>/google/...
  this_version = os.path.dirname(os.path.dirname(google.__file__))
  versions = os.path.dirname(this_version)
  PYTHON_LIB = os.path.dirname(versions)
  # The application root is the last sys.path entry in production.
  fix_paths(sys.path[-1], PYTHON_LIB)
  del this_version, versions
else:
  # SDK layout: <PYTHON_LIB>/google/...
  PYTHON_LIB = os.path.dirname(os.path.dirname(google.__file__))
del google
# Map of package name -> (version string, explicitly_requested bool) for
# packages allowed/installed in this process.
installed = {}
def SetAllowedModule(_):
  # No-op hook; presumably replaced by the runtime to whitelist modules --
  # TODO confirm against the sandbox implementation.
  pass
class UnacceptableVersionError(Exception):
  """Raised when a version of a package that is unacceptable is requested."""
class LooseVersion(object):
  """Shallow class compatible with distutils.version.LooseVersion."""

  def __init__(self, version):
    """Create a new instance of LooseVersion.

    Args:
      version: iterable containing the version values.
    """
    self.version = tuple(str(part) for part in version)

  def __repr__(self):
    return '.'.join(self.version)

  # str() and repr() render identically: the dotted version string.
  __str__ = __repr__

  @classmethod
  def parse(cls, string):
    """Parse a version string and create a new LooseVersion instance.

    Args:
      string: dot delimited version string.

    Returns:
      A distutils.version.LooseVersion compatible object.
    """
    return cls(string.split('.'))
def DjangoVersion():
  """Discover the version of Django installed.

  Returns:
    A distutils.version.LooseVersion.
  """
  # Best effort: trigger loading of the desired versioned django submodule;
  # a plain `import django` below still works if it is missing.
  try:
    __import__('django.' + _DESIRED_DJANGO_VERSION)
  except ImportError:
    pass
  import django
  # distutils may be None (guarded import at module top); the attribute
  # access then raises AttributeError and we fall back to LooseVersion.
  try:
    return distutils.version.LooseVersion('.'.join(map(str, django.VERSION)))
  except AttributeError:
    return LooseVersion(django.VERSION)
def PylonsVersion():
  """Discover the version of Pylons installed.

  Returns:
    A distutils.version.LooseVersion.
  """
  import pylons
  return distutils.version.LooseVersion(pylons.__version__)
# Supported packages: name -> (version discovery callable,
# {supported version string: set of (dependency name, version) or None}).
PACKAGES = {
    'django': (DjangoVersion,
               {'0.96': None,
                '1.0': None,
                '1.1': None,
                '1.2': None,
                '1.3': None,
                }),
    # Entries prefixed with '_' exist only for tests.
    '_test': (lambda: distutils.version.LooseVersion('1.0'), {'1.0': None}),
    '_testpkg': (lambda: distutils.version.LooseVersion('1.0'),
                 {'1.0': set([('_test', '1.0')])}),
}
def EqualVersions(version, baseline):
  """Test that a version is acceptable as compared to the baseline.

  Meant to be used to compare version numbers as returned by a package itself
  and not user input.

  Args:
    version: distutils.version.LooseVersion.
      The version that is being checked.
    baseline: distutils.version.LooseVersion.
      The version that one hopes version compares equal to.

  Returns:
    A bool indicating whether the versions are considered equal.
  """
  # Compare only as many components as the baseline specifies, so that
  # e.g. version 1.2.3 matches a baseline of 1.2.
  baseline_tuple = baseline.version
  truncated_tuple = version.version[:len(baseline_tuple)]
  return truncated_tuple == baseline_tuple
def AllowInstalledLibrary(name, desired):
  """Allow the use of a package without performing a version check.
  Needed to clear a package's dependencies in case the dependencies need to be
  imported in order to perform a version check. The version check is skipped on
  the dependencies because the assumption is that the package that triggered
  the call would not be installed without the proper dependencies (which might
  be a different version than what the package explicitly requires).
  Args:
    name: Name of package.
    desired: Desired version.
  Raises:
    UnacceptableVersion Error if the installed version of a package is
      unacceptable.
  """
  CallSetAllowedModule(name, desired)
  # Recursively whitelist every transitive dependency as well.
  dependencies = PACKAGES[name][1][desired]
  if dependencies:
    for dep_name, dep_version in dependencies:
      AllowInstalledLibrary(dep_name, dep_version)
  # Record as installed but not explicitly requested (False).
  installed[name] = desired, False
def CheckInstalledLibrary(name, desired):
  """Check that the library and its dependencies are installed.
  Args:
    name: Name of the library that should be installed.
    desired: The desired version.
  Raises:
    UnacceptableVersionError if the installed version of a package is
      unacceptable.
  """
  # Dependencies are whitelisted without version re-checks; only the
  # requested package itself gets a strict version comparison.
  dependencies = PACKAGES[name][1][desired]
  if dependencies:
    for dep_name, dep_version in dependencies:
      AllowInstalledLibrary(dep_name, dep_version)
  CheckInstalledVersion(name, desired, explicit=True)
def CheckInstalledVersion(name, desired, explicit):
  """Check that the installed version of a package is acceptable.
  Args:
    name: Name of package.
    desired: Desired version string.
    explicit: Explicitly requested by the user or implicitly because of a
      dependency.
  Raises:
    UnacceptableVersionError if the installed version of a package is
      unacceptable.
  """
  CallSetAllowedModule(name, desired)
  find_version = PACKAGES[name][0]
  if name == 'django':
    # Remember the requested version so DjangoVersion() probes the matching
    # versioned submodule (e.g. '1.2' -> 'v1_2').
    global _DESIRED_DJANGO_VERSION
    _DESIRED_DJANGO_VERSION = 'v' + desired.replace('.', '_')
  installed_version = find_version()
  # distutils may be None (guarded import at module top); fall back to the
  # local LooseVersion implementation in that case.
  try:
    desired_version = distutils.version.LooseVersion(desired)
  except AttributeError:
    desired_version = LooseVersion.parse(desired)
  if not EqualVersions(installed_version, desired_version):
    raise UnacceptableVersionError(
        '%s %s was requested, but %s is already in use' %
        (name, desired_version, installed_version))
  installed[name] = desired, explicit
def CallSetAllowedModule(name, desired):
  """Helper to call SetAllowedModule(name), after special-casing Django."""
  if USING_SDK and name == 'django':
    # Strip every bundled django copy from sys.path, then re-insert only the
    # directory for the requested version (if the SDK ships it).
    sys.path[:] = [dirname
                   for dirname in sys.path
                   if not dirname.startswith(os.path.join(
                       PYTHON_LIB, 'lib', 'django'))]
    if desired in ('0.96', '1.2', '1.3'):
      sys.path.insert(1, os.path.join(PYTHON_LIB, 'lib', 'django-' + desired))
  SetAllowedModule(name)
def CreatePath(name, version):
  """Create the path to a package."""
  # Packages live under <PYTHON_LIB>/versions/third_party/<name>-<version>.
  return os.path.join(PYTHON_LIB, 'versions', 'third_party',
                      '%s-%s' % (name, version))
def RemoveLibrary(name):
  """Remove a library that has been installed."""
  installed_version, _ = installed[name]
  path = CreatePath(name, installed_version)
  # The path may already be absent from sys.path; that is not an error.
  if path in sys.path:
    sys.path.remove(path)
  del installed[name]
def AddLibrary(name, version, explicit):
  """Add a library to sys.path and 'installed'."""
  # Insert after index 0 so the application directory keeps top priority.
  sys.path.insert(1, CreatePath(name, version))
  installed[name] = version, explicit
def InstallLibrary(name, version, explicit=True):
  """Install a package.
  If the installation is explicit then the user made the installation request,
  not a package as a dependency. Explicit installation leads to stricter
  version checking.
  Args:
    name: Name of the requested package (already validated as available).
    version: The desired version (already validated as available).
    explicit: Explicitly requested by the user or implicitly because of a
      dependency.
  """
  installed_version, explicitly_installed = installed.get(name, [None] * 2)
  if name in sys.modules:
    # Already imported: too late to change sys.path, so just validate the
    # version when the request is explicit.
    if explicit:
      CheckInstalledVersion(name, version, explicit=True)
    return
  elif installed_version:
    if version == installed_version:
      return
    if explicit:
      # Two conflicting explicit requests are a hard error; an implicit
      # earlier install is simply replaced.
      if explicitly_installed:
        raise ValueError('%s %s requested, but %s already in use' %
                         (name, version, installed_version))
      RemoveLibrary(name)
    else:
      # Implicit installs keep whichever of the two versions is newer.
      version_ob = distutils.version.LooseVersion(version)
      installed_ob = distutils.version.LooseVersion(installed_version)
      if version_ob <= installed_ob:
        return
      else:
        RemoveLibrary(name)
  AddLibrary(name, version, explicit)
  # Install dependencies implicitly (looser version checking).
  dep_details = PACKAGES[name][1][version]
  if not dep_details:
    return
  for dep_name, dep_version in dep_details:
    InstallLibrary(dep_name, dep_version, explicit=False)
def use_library(name, version):
  """Specify a third-party package to use.

  Args:
    name: Name of package to use.
    version: Version of the package to use (string).
  """
  if name not in PACKAGES:
    raise ValueError('%s is not a supported package' % name)
  versions = PACKAGES[name][1].keys()
  if version not in versions:
    raise ValueError('%s is not a supported version for %s; '
                     'supported versions are %s' % (version, name, versions))
  # The SDK only validates the installed version; production actually wires
  # the package onto sys.path (explicit=True is InstallLibrary's default).
  handler = CheckInstalledLibrary if USING_SDK else InstallLibrary
  handler(name, version)
# In production, make Django 0.96 available by default as an implicit
# install; an explicit use_library() call can still override it.
if not USING_SDK:
  InstallLibrary('django', '0.96', explicit=False)
| 23.211538
| 79
| 0.700543
|
4a0f8189eadb27811f16d1cd8c7ec259a00c26a5
| 4,443
|
py
|
Python
|
basemaps/routes/api/v1/layer_router.py
|
Skydipper/Basemaps
|
8d1489919ff363beb3bcd290c7f709c80b548c91
|
[
"MIT"
] | null | null | null |
basemaps/routes/api/v1/layer_router.py
|
Skydipper/Basemaps
|
8d1489919ff363beb3bcd290c7f709c80b548c91
|
[
"MIT"
] | 1
|
2019-12-20T12:36:08.000Z
|
2019-12-20T14:33:43.000Z
|
basemaps/routes/api/v1/layer_router.py
|
Skydipper/Basemaps
|
8d1489919ff363beb3bcd290c7f709c80b548c91
|
[
"MIT"
] | null | null | null |
"""API ROUTER"""
import logging
import json
import urllib
import requests
from flask import jsonify, Blueprint, redirect, request
from basemaps.routes.api import error
from basemaps.middleware import exist_mapid, get_layer, exist_tile
from basemaps.services.redis_service import RedisService
import ee
# Flask blueprint collecting the public tile-serving endpoints.
layer_endpoints = Blueprint('tile_endpoints', __name__)
@layer_endpoints.route('/<layer>/<z>/<x>/<y>', strict_slashes=False, methods=['GET'])
@get_layer
def get_tile(layer, z, x, y, map_object=None, layer_obj=None):
    """Redirect to the tile at z/x/y for the given layer (Carto or GEE).

    map_object and layer_obj are presumably injected by the @get_layer
    middleware decorator -- verify in basemaps.middleware.
    NOTE(review): if the provider is neither 'cartodb' nor 'gee', `url` is
    never bound and the final redirect raises NameError -> confirm upstream
    validation guarantees one of the two.
    """
    #logging.info(f"[Layer Router]: made it to router. {z}/{x}/{y}")
    try:
        layer_config = layer_obj.get('layerConfig')
        layer_type = layer_obj.get('provider')
    except Exception as e:
        logging.error(str(e))
        return error(status=500, detail='Error grabbing layer data: ' + str(e))
    # IF Carto type of layer
    if layer_type == 'cartodb':
        #logging.info(f"[Layer Router] Carto type: {layer_type}")
        tmp_url = get_carto_url(layer_config)
        # Substitute this request's tile coordinates into the URL template.
        url = tmp_url.replace("{z}/{x}/{y}", f"{z}/{x}/{y}")
        #logging.info(f"[Layer Router]: URL.{url}")
    # IF EE type of layer
    if layer_type == 'gee':
        #logging.info(f"[Layer Router] EE type: {layer_type}")
        try:
            # Only build (and cache) a mapid when none was provided.
            if map_object is None:
                logging.info('Generating mapid')
                style_type = layer_config.get('body').get('styleType')
                image = None
                if 'isImageCollection' not in layer_config or not layer_config.get('isImageCollection'):
                    image = ee.Image(layer_config.get('assetId'))
                else:
                    # Image collection: optionally filter by date, then take
                    # the first or last image by system:time_start.
                    position = layer_config.get('position')
                    image_col = ee.ImageCollection(layer_config.get('assetId'))
                    if 'filterDates' in layer_config:
                        dates = layer_config.get('filterDates')
                        image_col = image_col.filterDate(dates[0], dates[1])
                    if position == 'first':
                        logging.info('Obtaining first')
                        image = ee.Image(image_col.sort('system:time_start', True).first())
                    else:
                        logging.info('Obtaining last')
                        image = ee.Image(image_col.sort('system:time_start', False).first())
                if style_type == 'sld':
                    style = layer_config.get('body').get('sldValue')
                    map_object = image.sldStyle(style).getMapId()
                else:
                    map_object = image.getMapId(layer_config.get('body'))
                # Cache mapid/token so later tile requests skip regeneration.
                RedisService.set_layer_mapid(layer, map_object.get('mapid'), map_object.get('token'))
        except Exception as e:
            logging.error(str(e))
            return error(status=500, detail='Error generating tile: ' + str(e))
        try:
            url = ee.data.getTileUrl(map_object, int(x), int(y), int(z))
        except Exception as e:
            logging.error(str(e))
            return error(status=404, detail='Tile Not Found')
    # Return back the url of the individual tile either from EE or Carto
    return redirect(url)
def get_carto_url(layerConfig):
    """Build a Carto map for the given layer config and return its tile URL.

    Substitutes any `sql_config` placeholders into the layer SQL, sends the
    resulting layer template to the Carto maps API, and returns the first
    raster tile URL template, or None when the response cannot be parsed.
    """
    sql_config = layerConfig.get('sql_config', None)
    if sql_config:
        for config in sql_config:
            logging.info(f"[Layer Router] SQL: {config}")
            key = config['key']
            key_params = config['key_params']
            # NOTE(review): key_params is a list (indexed with [0] just
            # below), so key_params['key'] in the required branch raises
            # TypeError if ever reached -- confirm the intended lookup.
            if key_params[0].get('required', False):
                for l in layerConfig["body"]["layers"]:
                    l['options']['sql'] = l['options']['sql'].replace(f'{{{key}}}', '0').format(key_params['key'])
            else:
                for l in layerConfig["body"]["layers"]:
                    l['options']['sql'] = l['options']['sql'].replace(f'{{{key}}}', '0').format('')
    # URL-encode the layer template expected by the Carto maps endpoint.
    _layerTpl = urllib.parse.quote_plus(json.dumps({
        "version": "1.3.0",
        "stat_tag": "API",
        "layers": [{ **l, "options": { **l["options"]}} for l in layerConfig.get("body").get("layers")]
    }))
    apiParams = f"?stat_tag=API&config={_layerTpl}"
    url = f"http://35.233.41.65/user/skydipper/api/v1/map{apiParams}"
    r = requests.get(url, headers={'Content-Type': 'application/json'})
    try:
        tile_url = r.json().get('metadata').get('tilejson').get('raster').get('tiles')[0]
        return tile_url
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; any malformed or missing response still yields None.
        return None
| 45.336735
| 114
| 0.576412
|
4a0f819778f51fa28b3a111bb93076a885c7168f
| 6,934
|
py
|
Python
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/gis/server/admin/parameters.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/gis/server/admin/parameters.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | 9
|
2020-02-03T15:50:10.000Z
|
2022-03-02T07:11:34.000Z
|
CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/gis/server/admin/parameters.py
|
moazzamwaheed2017/carparkapi
|
e52ae1b2aed47321ce9d22ba6cd0b85fa60a417a
|
[
"MIT"
] | null | null | null |
import json
########################################################################
class Extension(object):
    """
    Represents a service extension (for example ``wmsserver``) attached to an
    ArcGIS Server service, including its capabilities, enabled state and
    upload restrictions.
    """
    _typeName = None
    _capabilities = None
    _enabled = None
    _maxUploadFileSize = None
    _allowedUploadFileTypes = None
    _properties = None
    # Extension type names this admin API recognizes.
    _allowedExtensions = ["naserver", "mobileserver",
                          "kmlserver", "wfsserver",
                          "schematicsserver", "featureserver",
                          "wcsserver", "wmsserver"]
    #----------------------------------------------------------------------
    def __init__(self, type_name,
                 capabilities,
                 enabled,
                 max_upload_file_size,
                 allowed_upload_filetype,
                 properties):
        """Constructor

        Args:
            type_name: extension type name (e.g. ``"wmsserver"``).
            capabilities: capability string for the extension.
            enabled: whether the extension is enabled.
            max_upload_file_size: maximum upload size.
            allowed_upload_filetype: allowed upload file types string.
            properties: dict of extension-specific properties.
        """
        self._typeName = type_name
        self._capabilities = capabilities
        self._enabled = enabled
        self._maxUploadFileSize = max_upload_file_size
        self._allowedUploadFileTypes = allowed_upload_filetype
        self._properties = properties
    #----------------------------------------------------------------------
    @property
    def properties(self):
        """gets/sets the extension properties"""
        return self._properties
    #----------------------------------------------------------------------
    @properties.setter
    def properties(self, value):
        """gets/sets the extension properties"""
        # Only dict values are accepted; anything else is silently ignored
        # to preserve the previous properties.
        if isinstance(value, dict):
            self._properties = value
    #----------------------------------------------------------------------
    @property
    def typeName(self):
        """gets the extension type"""
        return self._typeName
    #----------------------------------------------------------------------
    @property
    def capabilities(self):
        """gets/sets the capabilities"""
        return self._capabilities
    #----------------------------------------------------------------------
    @capabilities.setter
    def capabilities(self, value):
        """gets/sets the capabilities"""
        if self._capabilities != value:
            self._capabilities = value
    #----------------------------------------------------------------------
    @property
    def enabled(self):
        """gets/sets whether the extension is enabled"""
        return self._enabled
    #----------------------------------------------------------------------
    @enabled.setter
    def enabled(self, value):
        """gets/sets whether the extension is enabled"""
        # Only real booleans are accepted.
        if isinstance(value, bool):
            self._enabled = value
    #----------------------------------------------------------------------
    @property
    def max_upload_file_size(self):
        """sets/gets the maxUploadFileSize"""
        return self._maxUploadFileSize
    #----------------------------------------------------------------------
    @max_upload_file_size.setter
    def max_upload_file_size(self, value):
        """sets/gets the maxUploadFileSize"""
        if isinstance(value, int):
            self._maxUploadFileSize = value
    #----------------------------------------------------------------------
    @property
    def allowed_upload_filetypes(self):
        """gets/sets the allowedUploadFileTypes"""
        return self._allowedUploadFileTypes
    #----------------------------------------------------------------------
    @allowed_upload_filetypes.setter
    def allowed_upload_filetypes(self, value):
        """gets/sets the allowedUploadFileTypes"""
        self._allowedUploadFileTypes = value
    #----------------------------------------------------------------------
    def __str__(self):
        """returns the object as JSON"""
        return json.dumps({
            "typeName": self._typeName,
            "capabilities": self._capabilities,
            "enabled": self._enabled,
            "maxUploadFileSize": self._maxUploadFileSize,
            "allowedUploadFileTypes": self._allowedUploadFileTypes,
            "properties": self._properties
        })
    #----------------------------------------------------------------------
    @property
    def value(self):
        """returns the object as a dictionary"""
        return json.loads(str(self))
    #----------------------------------------------------------------------
    @staticmethod
    def fromJSON(value):
        """Builds an Extension from a JSON string or a parsed dictionary.

        Args:
            value: JSON string or dict with at least ``typeName``,
                ``capabilities``, ``enabled``, ``maxUploadFileSize`` and
                ``properties`` keys.

        Raises:
            AttributeError: if ``value`` is neither a str nor a dict.
        """
        if isinstance(value, str):
            value = json.loads(value)
        elif not isinstance(value, dict):
            # Original code had a no-op ``elif isinstance(value, dict)``
            # branch; only non-str, non-dict inputs are invalid.
            raise AttributeError("Invalid input")
        if 'allowedUploadFileTypes' not in value:
            value['allowedUploadFileTypes'] = ""
        # Accept both a real boolean and the server's string form "true".
        # The previous ``value['enabled'] == "true"`` comparison mapped a
        # genuine boolean True to False.
        enabled = value['enabled']
        if not isinstance(enabled, bool):
            enabled = enabled == "true"
        return Extension(type_name=value['typeName'],
                         capabilities=value['capabilities'] or "",
                         enabled=enabled,
                         max_upload_file_size=value['maxUploadFileSize'],
                         allowed_upload_filetype=value['allowedUploadFileTypes'] or "",
                         properties=value['properties'])
########################################################################
class ClusterProtocol(object):
    """
    Defines the TCP channel that server machines within a cluster use to
    exchange status about the objects they are running, for load balancing
    and fault tolerance.

    Inputs:
       tcpClusterPort - The port to use when configuring a TCP based
       protocol. By default, the server will pick up the next value in the
       assigned ports on all machines.
    """
    _tcpClusterPort = None
    #----------------------------------------------------------------------
    def __init__(self, tcpClusterPort):
        """Store the cluster port, coercing it to an integer."""
        self._tcpClusterPort = int(tcpClusterPort)
    #----------------------------------------------------------------------
    @property
    def tcpClusterPort(self):
        """
        The port used when configuring a TCP based protocol. By default,
        the server picks up the next value in the assigned ports on all
        machines.
        """
        return self._tcpClusterPort
    #----------------------------------------------------------------------
    def __str__(self):
        """JSON representation of the protocol settings."""
        return json.dumps(self.value)
    #----------------------------------------------------------------------
    @property
    def value(self):
        """Dictionary representation of the protocol settings."""
        return {"tcpClusterPort": self._tcpClusterPort}
| 39.397727
| 87
| 0.474041
|
4a0f821a78003d1f119fc07580e5f3a5a172b9b5
| 351
|
py
|
Python
|
examples/apps/GetImage/GetImage.py
|
zhengqun/SungemSDK-Python
|
fcd9789721d96b7197543523b65a25a7351944f5
|
[
"Apache-2.0"
] | 1
|
2019-01-02T07:13:53.000Z
|
2019-01-02T07:13:53.000Z
|
examples/apps/GetImage/GetImage.py
|
zhengqun/SungemSDK-Python
|
fcd9789721d96b7197543523b65a25a7351944f5
|
[
"Apache-2.0"
] | null | null | null |
examples/apps/GetImage/GetImage.py
|
zhengqun/SungemSDK-Python
|
fcd9789721d96b7197543523b65a25a7351944f5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright(c) 2018 Senscape Corporation.
# License: Apache 2.0
# Import libs
import cv2, sys, numpy as np
sys.path.append('../../../')
import hsapi as hs
# Acquire the first available Horned Sungem device and open a handle to it.
device = hs.GetDevice()
device.OpenDevice()
try:
    # Stream frames forever: GetImage(False) fetches the raw camera frame
    # (no inference result), which is displayed in an OpenCV window.
    # waitKey(1) pumps the GUI event loop so the window refreshes.
    while(1):
        image = device.GetImage(False)
        cv2.imshow('image',image)
        cv2.waitKey(1)
finally:
    # Always release the device handle, even on Ctrl-C or an error.
    device.CloseDevice()
| 18.473684
| 41
| 0.646724
|
4a0f821d451f924a6af4a83173ff337105fcf217
| 7,233
|
py
|
Python
|
research/object_detection/utils/metrics.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 82,518
|
2016-02-05T12:07:23.000Z
|
2022-03-31T23:09:47.000Z
|
research/object_detection/utils/metrics.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 9,021
|
2016-03-08T01:02:05.000Z
|
2022-03-31T08:06:35.000Z
|
research/object_detection/utils/metrics.py
|
873040/Abhishek
|
2ddd716e66bc5cc6e6f0787508dd07da0e02e75a
|
[
"Apache-2.0"
] | 54,341
|
2016-02-06T17:19:55.000Z
|
2022-03-31T10:27:44.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
def compute_precision_recall(scores, labels, num_gt):
  """Compute precision and recall.

  Args:
    scores: A float numpy array representing detection score
    labels: A float numpy array representing weighted true/false positive
      labels
    num_gt: Number of ground truth instances

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    precision: Fraction of positive instances over detected ones. This value is
      None if no ground truth labels are present.
    recall: Fraction of detected positive instance over all positive instances.
      This value is None if no ground truth labels are present.

  """
  if not isinstance(labels, np.ndarray) or len(labels.shape) != 1:
    raise ValueError("labels must be single dimension numpy array")

  # Compare against builtin float/bool: the np.float and np.bool aliases
  # were removed in NumPy 1.24, so referencing them raises AttributeError
  # on modern NumPy. The builtin names match the same dtypes.
  if labels.dtype != float and labels.dtype != bool:
    raise ValueError("labels type must be either bool or float")

  if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
    raise ValueError("scores must be single dimension numpy array")

  if num_gt < np.sum(labels):
    raise ValueError("Number of true positives must be smaller than num_gt.")

  if len(scores) != len(labels):
    raise ValueError("scores and labels must be of the same size.")

  if num_gt == 0:
    return None, None

  # Rank detections by descending score, then accumulate TP/FP counts so
  # precision[i]/recall[i] describe the top i+1 detections.
  sorted_indices = np.argsort(scores)
  sorted_indices = sorted_indices[::-1]
  true_positive_labels = labels[sorted_indices]
  false_positive_labels = (true_positive_labels <= 0).astype(float)
  cum_true_positives = np.cumsum(true_positive_labels)
  cum_false_positives = np.cumsum(false_positive_labels)
  precision = cum_true_positives.astype(float) / (
      cum_true_positives + cum_false_positives)
  recall = cum_true_positives.astype(float) / num_gt
  return precision, recall
def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decrease.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precison: The area under the precision recall curve. NaN if
      precision and recall are None.

  """
  if precision is None:
    if recall is not None:
      raise ValueError("If precision is None, recall must also be None")
    # np.nan instead of np.NAN: the upper-case alias was removed in NumPy 2.0.
    return np.nan

  if not isinstance(precision, np.ndarray) or not isinstance(
      recall, np.ndarray):
    raise ValueError("precision and recall must be numpy array")
  # Compare against builtin float: the np.float alias was removed in
  # NumPy 1.24 and referencing it raises AttributeError there.
  if precision.dtype != float or recall.dtype != float:
    raise ValueError("input must be float numpy array.")
  if len(precision) != len(recall):
    raise ValueError("precision and recall must be of the same size.")
  if not precision.size:
    return 0.0
  if np.amin(precision) < 0 or np.amax(precision) > 1:
    raise ValueError("Precision must be in the range of [0, 1].")
  if np.amin(recall) < 0 or np.amax(recall) > 1:
    raise ValueError("recall must be in the range of [0, 1].")
  if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
    raise ValueError("recall must be a non-decreasing array")

  # Pad with sentinels so the interpolation below covers [0, 1].
  recall = np.concatenate([[0], recall, [1]])
  precision = np.concatenate([[0], precision, [0]])

  # Preprocess precision to be a non-decreasing array (scanning from the
  # right, as in the VOC evaluation protocol).
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = np.maximum(precision[i], precision[i + 1])

  # Sum rectangle areas at the points where recall actually changes.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  average_precision = np.sum(
      (recall[indices] - recall[indices - 1]) * precision[indices])
  return average_precision
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
  """Compute CorLoc according to the definition in the following paper.

  https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

  Returns nans if there are no ground truth images for a class.

  Args:
    num_gt_imgs_per_class: 1D array, representing number of images containing
        at least one object instance of a particular class
    num_images_correctly_detected_per_class: 1D array, representing number of
        images that are correctly detected at least one object instance of a
        particular class

  Returns:
    corloc_per_class: A float numpy array represents the corloc score of each
      class
  """
  gt_counts = num_gt_imgs_per_class
  detected_counts = num_images_correctly_detected_per_class
  # Classes with no ground-truth images get NaN instead of a ratio.
  per_class_ratio = detected_counts / gt_counts
  return np.where(gt_counts == 0, np.nan, per_class_ratio)
def compute_median_rank_at_k(tp_fp_list, k):
  """Computes MedianRank@k, where k is the top-scoring labels.

  Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to the all
        detection on a single image, where the detections are sorted by score in
        descending order. Further, each numpy array element can have boolean or
        float values. True positive elements have either value >0.0 or True;
        any other value is considered false positive.
    k: number of top-scoring proposals to take.

  Returns:
    median_rank: median rank of all true positive proposals among top k by
      score.
  """
  # For each image, collect the positions (ranks) of true positives among
  # its top-k detections, then take the median over all images.
  ranks = [
      np.where(tp_fp[0:min(k, tp_fp.shape[0])] > 0)[0]
      for tp_fp in tp_fp_list
  ]
  return np.median(np.concatenate(ranks))
def compute_recall_at_k(tp_fp_list, num_gt, k):
  """Computes Recall@k, MedianRank@k, where k is the top-scoring labels.

  Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to the all
        detection on a single image, where the detections are sorted by score in
        descending order. Further, each numpy array element can have boolean or
        float values. True positive elements have either value >0.0 or True;
        any other value is considered false positive.
    num_gt: number of groundtruth anotations.
    k: number of top-scoring proposals to take.

  Returns:
    recall: recall evaluated on the top k by score detections.
  """
  # Truncate each image's detections to its top-k, then count true
  # positives over the whole collection.
  truncated = [tp_fp[0:min(k, tp_fp.shape[0])] for tp_fp in tp_fp_list]
  return np.sum(np.concatenate(truncated)) / num_gt
| 37.283505
| 80
| 0.716439
|
4a0f827887fc9c6e56eacc706e4fca1a69eaa9aa
| 14,785
|
py
|
Python
|
src/test/tests/rendering/legends.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/tests/rendering/legends.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | null | null | null |
src/test/tests/rendering/legends.py
|
eddieTest/visit
|
ae7bf6f5f16b01cf6b672d34e2d293fa7170616b
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: legends.py
#
# Tests: mesh - 3D unstructured, single domain
# plots - subset, boundary, filledboundary
# operators - none
# selection - material
#
# Defect ID: VisIt00002740, VisIt00002737
#
# Programmer: Kathleen Bonnell
# Date: December 2, 2002
#
# Modifications:
# Brad Whitlock, Thu Dec 12 09:50:31 PDT 2002
# I fixed the test so it uses the new interface for the SubsetAttributes.
#
# Kathleen Bonnell, Fri Jul 18 14:04:19 PDT 2003
# I added tests for Boundary, FilledBoundary.
#
# Eric Brugger, Mon Jul 21 12:14:52 PDT 2003
# I added legend sizing and positioning tests.
#
# Kathleen Bonnell, Thu Aug 28 14:34:57 PDT 2003
# Remove compound var name from Subset, Boundary and Filled Boundary plots.
#
# Kathleen Bonnell, Fri Oct 28 10:00:30 PDT 2005
# Add tests for curve plots (legends_09 ... legends_11).
#
# Kathleen Bonnell, Fri Oct 28 15:54:37 PDT 2005
# Add more tests for curve plots, for testing reading of TIME
# (legends_12 & legends_13).
#
# Brad Whitlock, Tue Nov 21 10:54:18 PDT 2006
# I made it use line style enum values instead of ints so the intent
# is more clear.
#
# Brad Whitlock, Mon Mar 26 08:54:40 PDT 2007
# Organized different parts of the test into functions and added a new
# function that sets the properties for a legend.
#
# Hank Childs, Sun Jan 25 15:07:31 PST 2009
# Turn off minmaxLabels as well.
#
# Kathleen Bonnell, Wed Sep 23 10:13:13 PDT 2009
# Add TestLegendProperties2, to test new capability of modifiying num tics,
# and setting numeric values and text labels for tics in liu of the
# automatically generated ones.
#
# Kathleen Bonnell, Tue Oct 6 11:36:41 PDT 2009
# Added test for constant variable legend.
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
#
# Brad Whitlock, Mon Jan 25 15:34:23 PST 2010
# I fixed a bug that made small baselines. I also increased the legend size
# in some tests so it's more prominent.
#
# Kathleen Biagas, Mon Dec 19 15:45:38 PST 2016
# Use FilledBoundary plot for materials instead of Subset, and Subset for
# domains instead of FilledBoundary.
#
# ----------------------------------------------------------------------------
# Test the Filled Boundary plot with some subsets turned off, and
# single-color on.
# This test ensures that correct labels are applied to the legend.
def TestLevelsLegend(a):
    """Exercise levels-style legends: a FilledBoundary plot with some subsets
    turned off in single-color mode (correct labels must still appear in the
    legend), then multi-color FilledBoundary and color-table Boundary plots.
    """
    TestSection("Test levels legend")
    OpenDatabase(silo_data_path("globe.silo"))
    AddPlot("FilledBoundary", "mat1")
    # Turn off two materials so the legend must drop their labels.
    TurnMaterialsOff(("2", "4"))
    fbAtts = FilledBoundaryAttributes()
    fbAtts.colorType = fbAtts.ColorBySingleColor
    fbAtts.singleColor = (0, 255, 255, 255)
    SetPlotOptions(fbAtts)
    DrawPlots()
    Test("legends_01")
    DeleteAllPlots()
    # Test the FilledBoundary and Boundary plots, to ensure that setting
    # their atts works.
    AddPlot("FilledBoundary", "mat1")
    fba = FilledBoundaryAttributes()
    fba.colorType = fba.ColorByMultipleColors
    SetPlotOptions(fba)
    DrawPlots()
    Test("legends_02")
    DeleteAllPlots()
    AddPlot("Boundary", "mat1")
    ba = BoundaryAttributes()
    ba.colorType = ba.ColorByColorTable
    ba.colorTableName = "rainbow"
    SetPlotOptions(ba)
    DrawPlots()
    Test("legends_03")
    DeleteAllPlots()
#
# Test legend sizing and positioning.
#
def TestSizeAndPosition(a):
    """Check default legend sizing/positioning when several plot types (and
    thus several legends) share one window, in both 2D and 3D.
    """
    TestSection("Test legend default sizing and positioning")
    OpenDatabase(silo_data_path("curv2d.silo"))
    # Four 2D plots at once: their legends must stack without overlap.
    AddPlot("Boundary", "mat1")
    bndAtts = BoundaryAttributes()
    bndAtts.colorType = bndAtts.ColorBySingleColor
    bndAtts.singleColor = (0, 0, 0, 255)
    SetPlotOptions(bndAtts)
    AddPlot("Contour", "p")
    AddPlot("Mesh", "curvmesh2d")
    AddPlot("FilledBoundary", "mat1")
    DrawPlots()
    Test("legends_04")
    DeleteAllPlots()
    AddPlot("Pseudocolor", "d")
    AddPlot("Vector", "vel")
    AddPlot("FilledBoundary", "mat1")
    DrawPlots()
    Test("legends_05")
    DeleteAllPlots()
    # Two elevated (3D) Pseudocolor plots side by side.
    AddPlot("Pseudocolor", "d")
    AddOperator("Elevate")
    AddPlot("Pseudocolor", "p")
    AddOperator("Elevate")
    elevate_atts = ElevateAttributes()
    elevate_atts.useXYLimits = elevate_atts.Never
    SetOperatorOptions(elevate_atts)
    DrawPlots()
    Test("legends_06")
    DeleteAllPlots()
    # Volume plot legend.
    OpenDatabase(silo_data_path("globe.silo"))
    AddPlot("Volume", "u")
    DrawPlots()
    Test("legends_07")
    DeleteAllPlots()
    # Contour (many levels) plus a Subset legend from a multi-domain file.
    OpenDatabase(silo_data_path("multi_ucd3d.silo"))
    AddPlot("Contour", "d")
    contourAtts = ContourAttributes()
    contourAtts.contourNLevels = 15
    SetPlotOptions(contourAtts)
    AddPlot("Subset", "domains")
    DrawPlots()
    Test("legends_08")
    DeleteAllPlots()
#
# TEST LEGEND FOR CURVE PLOTS
#
def TestCurveLegend(a):
    """Exercise Curve plot legends: toggling the legend on/off, multiple
    curves in one window, and legends with database info / TIME read from
    .curve and .ultra files.
    """
    TestSection("Test Curve plot legend")
    OpenDatabase(data_path("curve_test_data/c033.curve"))
    # Test legend on
    AddPlot("Curve", "parabolic")
    curveAtts = CurveAttributes()
    curveAtts.color = (255, 0, 0, 255)
    curveAtts.lineWidth = 1
    SetPlotOptions(curveAtts)
    DrawPlots()
    Test("legends_09")
    # Test legend off
    curveAtts.showLegend = 0
    SetPlotOptions(curveAtts)
    Test("legends_10")
    curveAtts.showLegend = 1
    SetPlotOptions(curveAtts)
    # Tests multiple plots
    OpenDatabase(data_path("curve_test_data/c044.curve"))
    AddPlot("Curve", "parabolic")
    curveAtts.color = (0, 255, 0, 255)
    curveAtts.lineWidth = 5
    SetPlotOptions(curveAtts)
    DrawPlots()
    OpenDatabase(data_path("curve_test_data/c055.curve"))
    AddPlot("Curve", "parabolic")
    curveAtts.color = (0, 0, 255, 255)
    curveAtts.lineWidth = 2
    SetPlotOptions(curveAtts)
    DrawPlots()
    Test("legends_11")
    # Add DatabaseInfo
    a.databaseInfoFlag = 1
    SetAnnotationAttributes(a)
    Test("legends_12")
    DeleteAllPlots()
    OpenDatabase(data_path("curve_test_data/distribution.ultra"))
    AddPlot("Curve", "Laplace Distribution")
    DrawPlots()
    Test("legends_13")
    DeleteAllPlots()
    # Remove DatabaseInfo
    a.databaseInfoFlag = 0
    SetAnnotationAttributes(a)
#
# Test setting legend properties. Note that we currently just test the
# avtVariableLegend but others work pretty much the same way.
#
def TestLegendProperties(a):
    """Set every user-visible property on an avtVariableLegend annotation
    object (scale, bounding box, position, colors, number format, fonts,
    label/title visibility) and verify the legend is removed from the
    annotation-object list when its plot is deleted.
    """
    TestSection("Test setting legend properties")
    OpenDatabase(silo_data_path("noise.silo"))
    AddPlot("Pseudocolor", "hardyglobal")
    DrawPlots()
    # Fixed 3D view so images compare stably against baselines.
    v0 = View3DAttributes()
    v0.viewNormal = (-0.778207, 0.3577, 0.516183)
    v0.focus = (0, 0, 0)
    v0.viewUp = (0.283417, 0.933512, -0.219613)
    v0.viewAngle = 30
    v0.parallelScale = 17.3205
    v0.nearPlane = -34.641
    v0.farPlane = 34.641
    v0.imagePan = (0.0768749, 0.057219)
    v0.imageZoom = 0.863307
    v0.perspective = 1
    v0.eyeAngle = 2
    v0.centerOfRotationSet = 0
    v0.centerOfRotation = (0, 0, 0)
    SetView3D(v0)
    Test("legends_14")
    # Get the plot's legend (annotation object named after the plot).
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    # See if we can scale the legend.
    legend.xScale = 3.
    Test("legends_15")
    legend.yScale = 3.
    Test("legends_16")
    # Test the bounding box.
    legend.drawBoundingBox = 1
    Test("legends_17")
    legend.boundingBoxColor = (180,180,180,230)
    Test("legends_18")
    # Test moving the legend
    legend.managePosition = 0
    legend.position = (0.55,0.9)
    Test("legends_19")
    # Test text color
    InvertBackgroundColor()
    Test("legends_20")
    InvertBackgroundColor()
    legend.useForegroundForTextColor = 0
    legend.textColor = (255, 0, 0, 255)
    Test("legends_21")
    # Test number format
    legend.numberFormat = "%1.4e"
    Test("legends_22")
    # Test the font.
    legend.fontFamily = legend.Courier
    Test("legends_23")
    legend.fontFamily = legend.Times
    Test("legends_24")
    legend.fontFamily = legend.Arial
    legend.fontBold = 1
    Test("legends_25")
    legend.fontBold = 0
    legend.fontItalic = 1
    Test("legends_26")
    # Test turning off the labels.
    legend.fontItalic = 0
    legend.drawLabels = 0
    legend.drawMinMax = 0
    Test("legends_27")
    # Test turning off the title.
    legend.drawTitle = 0
    Test("legends_28")
    # Add a plot and then delete plot 0 to see that the legend disappears
    # in the list of annotation objects. Note that plot names are created
    # using an increasing integer. If this test is executed out of the order
    # from when it was baselined then the number will change and the test
    # will need to be rebaselined.
    text = "Before: " + str(GetAnnotationObjectNames()) + "\n"
    AddPlot("Mesh", "Mesh")
    DrawPlots()
    SetActivePlots(0)
    DeleteActivePlots()
    text = text + "After: " + str(GetAnnotationObjectNames()) + "\n"
    TestText("legends_29", text)
    DeleteAllPlots()
#
# Test how legends get copied to new windows.
#
def TestLegendCopying(a):
    """Verify legends and other annotation objects are copied to new windows
    correctly, via CloneWindow, clone-on-first-reference, and explicit
    CopyAnnotationsToWindow.
    """
    TestSection("Test legend copying")
    OpenDatabase(silo_data_path("noise.silo"))
    AddPlot("Pseudocolor", "hardyglobal")
    DrawPlots()
    # Customize the legend.
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend.xScale = 3.
    legend.yScale = 3.
    legend.drawBoundingBox = 1
    legend.boundingBoxColor = (50,50,100,255)
    # Create another annotation object.
    text2d = CreateAnnotationObject("Text2D", "text_obj")
    text2d.position = (0.45, 0.5)
    text2d.height = 0.05
    text2d.textColor = (255, 0, 0, 255)
    text2d.useForegroundForTextColor = 0
    text2d.text = "Text annotation"
    Test("legends_30")
    # Clone the window and make sure that it has the right annotation objects
    # and that their properties have been inherited from window 1.
    CloneWindow()
    SetActiveWindow(2)
    DrawPlots()
    Test("legends_31")
    DeleteWindow()
    # Test clone on first reference.
    SetCloneWindowOnFirstRef(1)
    AddWindow()
    DrawPlots()
    Test("legends_32")
    TestText("legends_33", str(GetAnnotationObjectNames()))
    # Test it clone on first reference again via SetActiveWindow
    DeleteWindow()
    AddWindow()
    SetActiveWindow(2)
    DrawPlots()
    Test("legends_34")
    TestText("legends_35", str(GetAnnotationObjectNames()))
    # Now that we're in window 2, delete the text object.
    w2text = GetAnnotationObject("text_obj")
    w2text.Delete()
    # Customize the legend in window 2 so we'll know if copying window 1's
    # attributes over to window 2 messed it up.
    legend2 = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend2.boundingBoxColor = (200,0,0,255)
    Test("legends_36")
    CopyAnnotationsToWindow(1, 2)
    RedrawWindow()
    Test("legends_37")
    # Clean up
    DeleteAllPlots()
    DeleteWindow()
    text2d.Delete()
    GetAnnotationObject("text_obj").Delete()
    DeleteAllPlots()
def TestLegendTics():
    """Exercise legend tick-mark control: tick counts, min/max inclusion,
    user-supplied tick values/labels, orientation, and labels on 'levels'
    (FilledBoundary/Contour) and constant-variable legends.
    """
    TestSection("Test setting legend tics")
    OpenDatabase(silo_data_path("curv2d.silo"))
    AddPlot("Pseudocolor", "d")
    DrawPlots()
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend.xScale = 3.
    legend.yScale = 3.
    # change number of ticks
    legend.numTicks = 3
    Test("legends_38")
    # turn off use of min and max as tick values
    legend.minMaxInclusive = 0
    Test("legends_39")
    legend.numTicks = 1
    Test("legends_40")
    legend.minMaxInclusive = 1
    Test("legends_41")
    legend.numTicks = 2
    Test("legends_42")
    legend.minMaxInclusive = 0
    Test("legends_43")
    legend.minMaxInclusive = 1
    # turn off automatic control of ticks so labels can be added
    legend.controlTicks = 0
    # default values should be what was calculated
    Test("legends_44")
    # supply some labels
    legend.suppliedLabels = ("", "second", "", "fourth", "")
    # Turn on drawing of text labels
    legend.drawLabels = legend.Both
    Test("legends_45")
    # only labels, no values
    legend.drawLabels = legend.Labels
    Test("legends_46")
    # supply different values -- don't need to be in order
    # show that values out-of-range won't be used
    legend.suppliedValues = (2.2, 4.5, 3.8, 1.0, 5.7)
    legend.suppliedLabels = ("this", "that", "the other", "noshow1", "noshow2")
    legend.drawLabels = legend.Values
    Test("legends_47")
    legend.drawLabels = legend.Both
    Test("legends_48")
    legend.drawLabels = legend.Labels
    Test("legends_49")
    # All three legend orientations.
    legend.orientation = legend.HorizontalTop
    Test("legends_50")
    legend.orientation = legend.HorizontalBottom
    Test("legends_51")
    legend.orientation = legend.VerticalLeft
    Test("legends_52")
    DeleteAllPlots()
    # demonstrate adding labels to 'levels' type legends
    AddPlot("FilledBoundary", "mat1")
    DrawPlots()
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend.xScale = 3.
    legend.yScale = 3.
    legend.controlTicks = 0
    Test("legends_53")
    legend.drawLabels = legend.Both
    legend.suppliedLabels = ("red", "green", "blue");
    Test("legends_54")
    legend.drawLabels = legend.Labels
    Test("legends_55")
    DeleteAllPlots()
    # Contour legend with six levels.
    AddPlot("Contour", "p")
    contourAtts = ContourAttributes()
    contourAtts.contourNLevels = 6
    SetPlotOptions(contourAtts)
    DrawPlots()
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend.xScale = 3.
    legend.yScale = 3.
    Test("legends_56")
    # Custom printf-style number format, then restore the original.
    nf = legend.numberFormat
    legend.numberFormat = "%# -0.2e"
    Test("legends_57")
    legend.numberFormat = nf
    legend.controlTicks = 0
    legend.drawLabels = legend.Both
    legend.suppliedLabels = ("one", "", "two", "", "three")
    Test("legends_58")
    legend.drawLabels = legend.Labels
    Test("legends_59")
    DeleteAllPlots()
    # test constant legend
    DefineScalarExpression("one", "cell_constant(<curvmesh2d>, 1)")
    AddPlot("Pseudocolor", "one")
    DrawPlots()
    legend = GetAnnotationObject(GetPlotList().GetPlots(0).plotName)
    legend.xScale = 3.
    legend.yScale = 3.
    Test("legends_60")
    #clean up
    DeleteAllPlots()
def main():
    """Run all legend test sections with every annotation except the legend
    disabled, then restore the annotation state for subsequent tests.
    """
    # Turn off all annotation except the legend.
    a = GetAnnotationAttributes()
    TurnOffAllAnnotations(a)
    a.legendInfoFlag = 1
    SetAnnotationAttributes(a)
    TestLevelsLegend(a)
    TestSizeAndPosition(a)
    TestCurveLegend(a)
    TestLegendProperties(a)
    TestLegendCopying(a)
    TestLegendTics()
    # reset DatabaseInfo for future tests.
    a.databaseInfoFlag = 0
    SetAnnotationAttributes(a)

# Script entry: run everything, then exit the VisIt test harness.
main()
Exit()
| 28.055028
| 79
| 0.666689
|
4a0f82bb2e8179b646094b1e3aadfb66894ae418
| 2,360
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySecurityGroupPolicyRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySecurityGroupPolicyRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySecurityGroupPolicyRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifySecurityGroupPolicyRequest(RpcRequest):
	"""RPC request wrapper for the ECS ``ModifySecurityGroupPolicy`` API
	(product ``Ecs``, version 2014-05-26); exposes a getter/setter pair
	for each query parameter, following the SDK's generated-code style."""

	def __init__(self):
		RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifySecurityGroupPolicy','ecs')

	def get_ResourceOwnerId(self):
		"""Return the ResourceOwnerId query parameter."""
		params = self.get_query_params()
		return params.get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		"""Set the ResourceOwnerId query parameter."""
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_ClientToken(self):
		"""Return the idempotency ClientToken query parameter."""
		params = self.get_query_params()
		return params.get('ClientToken')

	def set_ClientToken(self,ClientToken):
		"""Set the idempotency ClientToken query parameter."""
		self.add_query_param('ClientToken',ClientToken)

	def get_ResourceOwnerAccount(self):
		"""Return the ResourceOwnerAccount query parameter."""
		params = self.get_query_params()
		return params.get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		"""Set the ResourceOwnerAccount query parameter."""
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_OwnerAccount(self):
		"""Return the OwnerAccount query parameter."""
		params = self.get_query_params()
		return params.get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		"""Set the OwnerAccount query parameter."""
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_SecurityGroupId(self):
		"""Return the target SecurityGroupId query parameter."""
		params = self.get_query_params()
		return params.get('SecurityGroupId')

	def set_SecurityGroupId(self,SecurityGroupId):
		"""Set the target SecurityGroupId query parameter."""
		self.add_query_param('SecurityGroupId',SecurityGroupId)

	def get_OwnerId(self):
		"""Return the OwnerId query parameter."""
		params = self.get_query_params()
		return params.get('OwnerId')

	def set_OwnerId(self,OwnerId):
		"""Set the OwnerId query parameter."""
		self.add_query_param('OwnerId',OwnerId)

	def get_InnerAccessPolicy(self):
		"""Return the InnerAccessPolicy query parameter."""
		params = self.get_query_params()
		return params.get('InnerAccessPolicy')

	def set_InnerAccessPolicy(self,InnerAccessPolicy):
		"""Set the InnerAccessPolicy query parameter."""
		self.add_query_param('InnerAccessPolicy',InnerAccessPolicy)
| 35.757576
| 84
| 0.780508
|
4a0f83971132d5f38cb2abd37e8b0864b5647149
| 1,496
|
py
|
Python
|
preprocess/change_bg_color_NEGATIVE_resize.py
|
TalalWasim/scene_text_segmentation
|
ef687c3eea429e7e6cf7c8485111b08d4eea37d4
|
[
"MIT"
] | 3
|
2019-10-09T06:31:01.000Z
|
2021-06-15T15:41:32.000Z
|
preprocess/change_bg_color_NEGATIVE_resize.py
|
TalalWasim/scene_text_segmentation
|
ef687c3eea429e7e6cf7c8485111b08d4eea37d4
|
[
"MIT"
] | 3
|
2019-10-09T06:32:44.000Z
|
2021-11-30T14:55:48.000Z
|
preprocess/change_bg_color_NEGATIVE_resize.py
|
TalalWasim/scene_text_segmentation
|
ef687c3eea429e7e6cf7c8485111b08d4eea37d4
|
[
"MIT"
] | 2
|
2020-01-22T03:30:19.000Z
|
2021-03-08T05:58:15.000Z
|
import cv2
import numpy as np
from PIL import Image
from numpy import *
import glob
# Source directory of ground-truth color maps, and the two output
# directories for the 256x256 negative and negative+lime-background maps.
gt_img_dir = '/path/to/datasets/TextSegmentation/ICDAR2013_KAIST/GT_color/'
#gt_img_dir = '/path/to/datasets/TextSegmentation/ICDAR2013_KAIST/resized_256/GT_color/'
#gt_lime_dir = '/path/to/datasets/TextSegmentation/ICDAR2013_KAIST/GT_color_LimeBG/'
gt_neg_dir = '/path/to/datasets/TextSegmentation/ICDAR2013_KAIST/resized_256/GT_color_negative/'
gt_lime_dir = '/path/to/datasets/TextSegmentation/ICDAR2013_KAIST/resized_256/GT_color_negative_limeBG/'
img_files = sorted(glob.glob(gt_img_dir+'*.png'))
#for img in img_files:
for img_idx in range(0,len(img_files)):
#for img_idx in range(0,5):
    # Basename without extension, used for the output filenames.
    img_name = img_files[img_idx].split('/')[-1].split('.')[0]
    print("image is {}".format(img_name))
    image_img = Image.open(img_files[img_idx])
    #change the color of NEGATIVE gts
    # Per-channel inversion; zero-valued pixels are left at zero.
    image_img_neg = image_img.point(lambda p: 255-p if p>0 else 0 ) # invert
    image_img_neg_resize = image_img_neg.resize((256,256), Image.ANTIALIAS)
    image_img_neg_resize.save(gt_neg_dir+img_name+'.png')
    #image = array(Image.open(img_files[img_idx]))
    image = array(image_img_neg)
    ### convert black pixels to lime
    # Recolor pure-black pixels of the inverted map to lime (0,255,0) so
    # the background is distinguishable in the saved GT image.
    image[np.where((image==[0,0,0]).all(axis=2))] = [0,255,0]
    #image_lime = Image.fromarray(converted_image)
    image_lime = Image.fromarray(image)
    image_lime = image_lime.resize((256,256), Image.ANTIALIAS)
    image_lime.save(gt_lime_dir+img_name+'.png')
print("done!")
| 34.790698
| 104
| 0.738636
|
4a0f83a002ae358824bb426e9f99f22c7fb6abd1
| 1,846
|
py
|
Python
|
Companyname/appgestion/views.py
|
friedrrich/DesarrolloWebTest
|
43425c7c5c9edf06aabb3c6e499dd72df89ee369
|
[
"Apache-2.0"
] | null | null | null |
Companyname/appgestion/views.py
|
friedrrich/DesarrolloWebTest
|
43425c7c5c9edf06aabb3c6e499dd72df89ee369
|
[
"Apache-2.0"
] | null | null | null |
Companyname/appgestion/views.py
|
friedrrich/DesarrolloWebTest
|
43425c7c5c9edf06aabb3c6e499dd72df89ee369
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from appgestion.models import Articulo
def buscar(request):
    """Search Articulo rows whose nombre contains the 'txt_producto' GET value.

    Renders Metraje.html with the matches, or returns a plain error response
    when the search field arrives empty.
    """
    producto_recibido = request.GET["txt_producto"]
    if not producto_recibido:
        # Empty search term: report instead of querying.
        return HttpResponse("Debe ingresar un producto a buscar")
    # Substring match, same queryset the shell session used.
    articulos = Articulo.objects.filter(nombre__contains=producto_recibido)
    contexto = {"articulos": articulos, "producto_consultado": producto_recibido}
    return render(request, "Metraje.html", contexto)
#listo
def ingresar_producto(request):
    """Create an Articulo from the txt_nombre/txt_categoria/txt_precio GET fields.

    Saves the row only when all three fields are non-empty; always returns a
    plain-text outcome message.
    """
    nombre = request.GET["txt_nombre"]
    categoria = request.GET["txt_categoria"]
    precio = request.GET["txt_precio"]
    if nombre and categoria and precio:
        Articulo(nombre=nombre, categoria=categoria, precio=precio).save()
        mensaje = "Articulo ingresado"
    else:
        mensaje = "Aticulo No ingresado. Faltan datos por ingresar..."
    return HttpResponse(mensaje)
def eliminar_producto(request):
    """Delete the Articulo whose id arrives in the 'txt_id' GET parameter.

    Returns a plain-text HttpResponse describing the outcome; never raises for
    a missing id value or an unknown product.
    """
    if request.GET["txt_id"]:  # truthy only when the field carries a value
        id_recibido = request.GET["txt_id"]
        # Single query via .first() instead of the original filter() check
        # followed by a redundant second .get() for the same row.
        pro = Articulo.objects.filter(id=id_recibido).first()
        if pro:
            pro.delete()
            mensaje = "Producto eliminado"
        else:
            mensaje = "Producto No eliminado. No existe producto con ese id"
    else:
        mensaje = "Debe ingresar un id"
    return HttpResponse(mensaje)
# Several forms share the same template page below.
def busqueda_productos(request):
    """Render the page carrying the search form."""
    plantilla = "Metraje.html"
    return render(request, plantilla)
def formulario_ingreso(request):
    """Render the page carrying the product-creation form."""
    plantilla = "Metraje.html"
    return render(request, plantilla)
def formulario_eliminar(request):
    """Render the page carrying the product-deletion form."""
    plantilla = "Metraje.html"
    return render(request, plantilla)
| 34.830189
| 109
| 0.69935
|
4a0f84346a2b99dd314fc01257c5eb076e04caf4
| 3,426
|
py
|
Python
|
yolo/yolov3/yolov3_common.py
|
pengfeinie/object-detection
|
188882c941ceb027ee52232dfae6767c2b4feaca
|
[
"CECILL-B"
] | null | null | null |
yolo/yolov3/yolov3_common.py
|
pengfeinie/object-detection
|
188882c941ceb027ee52232dfae6767c2b4feaca
|
[
"CECILL-B"
] | null | null | null |
yolo/yolov3/yolov3_common.py
|
pengfeinie/object-detection
|
188882c941ceb027ee52232dfae6767c2b4feaca
|
[
"CECILL-B"
] | null | null | null |
import numpy as np
import cv2
def getLabels():
    """Load COCO class labels, per-class colours and the YOLO v3 network.

    Reads cfg/coco.names, cfg/yolov3.cfg and cfg/yolov3.weights from disk.
    Returns (output_layer_names, network, colours, labels).
    """
    with open('cfg/coco.names') as names_file:
        labels = [line.strip() for line in names_file]
    # One random colour per class, used later when drawing boxes.
    colours = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Reads a network model stored in Darknet model files.
    network = cv2.dnn.readNetFromDarknet('cfg/yolov3.cfg', 'cfg/yolov3.weights')
    # Keep only the names of the unconnected (output) layers.
    all_names = network.getLayerNames()
    ln = [all_names[i - 1] for i in network.getUnconnectedOutLayers()]
    print(ln)
    return ln, network, colours, labels
def performForward(network, blob, ln, p_min, h, w):
    """Run one forward pass and collect detections scoring above p_min.

    Each network output row is laid out [cx, cy, bw, bh, objectness, scores...]
    in coordinates normalised to the image; h and w rescale them to pixels.
    Returns (bounding_boxes, confidences, class_numbers) where every box is
    [x_min, y_min, width, height].
    """
    network.setInput(blob)
    output_from_network = network.forward(ln)

    bounding_boxes = []
    confidences = []
    class_numbers = []
    scale = np.array([w, h, w, h])

    for layer_output in output_from_network:
        for detection in layer_output:
            scores = detection[5:]
            best_class = np.argmax(scores)
            best_score = scores[best_class]
            if best_score > p_min:
                # Rescale the normalised centre-based box to pixel units.
                cx, cy, bw, bh = detection[0:4] * scale
                # Convert the centre coordinates to a top-left corner.
                x_min = int(cx - (bw / 2))
                y_min = int(cy - (bh / 2))
                bounding_boxes.append([x_min, y_min, int(bw), int(bh)])
                confidences.append(float(best_score))
                class_numbers.append(best_class)

    return bounding_boxes, confidences, class_numbers
def nonMaximumSuppression(bounding_boxes, confidences, p_min, threshold, colours, class_numbers, labels, image):
    """Apply OpenCV NMS and draw the surviving boxes plus captions onto image.

    Mutates image in place; returns nothing.
    """
    keep = cv2.dnn.NMSBoxes(bounding_boxes, confidences, p_min, threshold)
    # Nothing survived suppression: leave the image untouched.
    if len(keep) == 0:
        return
    for idx in keep.flatten():
        x_min, y_min = bounding_boxes[idx][0], bounding_boxes[idx][1]
        box_w, box_h = bounding_boxes[idx][2], bounding_boxes[idx][3]
        colour = colours[class_numbers[idx]].tolist()
        cv2.rectangle(image, (x_min, y_min),
                      (x_min + box_w, y_min + box_h),
                      colour, 2)
        caption = '{}: {:.4f}'.format(labels[int(class_numbers[idx])],
                                      confidences[idx])
        cv2.putText(image, caption, (x_min, y_min - 5),
                    cv2.FONT_HERSHEY_COMPLEX, 0.7, colour, 2)
| 45.078947
| 112
| 0.627262
|
4a0f848bbac4475cbc414a313d61b204fa8a0f4f
| 3,712
|
py
|
Python
|
netmiko/scp_functions.py
|
Vnictros240/netmiko
|
384944f230071cc16566f5c9719561a437372d27
|
[
"MIT"
] | 1
|
2019-09-16T05:52:41.000Z
|
2019-09-16T05:52:41.000Z
|
netmiko/scp_functions.py
|
swetha1922/netmiko
|
896042c259702f092e56620050c1e6287bccfb2a
|
[
"MIT"
] | null | null | null |
netmiko/scp_functions.py
|
swetha1922/netmiko
|
896042c259702f092e56620050c1e6287bccfb2a
|
[
"MIT"
] | null | null | null |
"""
Netmiko SCP operations.
Supports file get and file put operations.
SCP requires a separate SSH connection for a control channel.
Currently only supports Cisco IOS and Cisco ASA.
"""
from netmiko import FileTransfer, InLineTransfer
def verifyspace_and_transferfile(scp_transfer):
    """Transfer the file once the remote side confirms it has room for it.

    Raises ValueError when the device reports insufficient free space.
    """
    if scp_transfer.verify_space_available():
        scp_transfer.transfer_file()
    else:
        raise ValueError("Insufficient space available on remote device")
def file_transfer(
    ssh_conn,
    source_file,
    dest_file,
    file_system=None,
    direction="put",
    disable_md5=False,
    inline_transfer=False,
    overwrite_file=False,
):
    """Use Secure Copy or Inline (IOS-only) to transfer files to/from network devices.

    inline_transfer ONLY SUPPORTS TEXT FILES and will not support binary file transfers.

    return {
        'file_exists': boolean,
        'file_transferred': boolean,
        'file_verified': boolean,
    }
    """
    # The three possible outcome dictionaries handed back to the caller.
    transferred_and_verified = {
        "file_exists": True,
        "file_transferred": True,
        "file_verified": True,
    }
    transferred_and_notverified = {
        "file_exists": True,
        "file_transferred": True,
        "file_verified": False,
    }
    nottransferred_but_verified = {
        "file_exists": True,
        "file_transferred": False,
        "file_verified": True,
    }

    # Inline transfer relies on IOS CLI behaviour, so it is gated to IOS/IOS-XE.
    if "cisco_ios" in ssh_conn.device_type or "cisco_xe" in ssh_conn.device_type:
        cisco_ios = True
    else:
        cisco_ios = False
    if not cisco_ios and inline_transfer:
        raise ValueError("Inline Transfer only supported for Cisco IOS/Cisco IOS-XE")

    scp_args = {
        "ssh_conn": ssh_conn,
        "source_file": source_file,
        "dest_file": dest_file,
        "direction": direction,
    }
    if file_system is not None:
        scp_args["file_system"] = file_system

    TransferClass = InLineTransfer if inline_transfer else FileTransfer

    with TransferClass(**scp_args) as scp_transfer:
        if scp_transfer.check_file_exists():
            if overwrite_file:
                if not disable_md5:
                    if scp_transfer.compare_md5():
                        # Destination already matches the source: nothing to do.
                        return nottransferred_but_verified
                    else:
                        # File exists, you can overwrite it, MD5 is wrong (transfer file)
                        verifyspace_and_transferfile(scp_transfer)
                        if scp_transfer.compare_md5():
                            return transferred_and_verified
                        else:
                            raise ValueError(
                                "MD5 failure between source and destination files"
                            )
                else:
                    # File exists, you can overwrite it, but MD5 not allowed (transfer file)
                    verifyspace_and_transferfile(scp_transfer)
                    return transferred_and_notverified
            else:
                # File exists, but you can't overwrite it.
                if not disable_md5:
                    if scp_transfer.compare_md5():
                        # Identical content already present: treat as success.
                        return nottransferred_but_verified
                msg = "File already exists and overwrite_file is disabled"
                raise ValueError(msg)
        else:
            verifyspace_and_transferfile(scp_transfer)
            # File doesn't exist
            if not disable_md5:
                if scp_transfer.compare_md5():
                    return transferred_and_verified
                else:
                    raise ValueError("MD5 failure between source and destination files")
            else:
                return transferred_and_notverified
| 33.745455
| 92
| 0.599946
|
4a0f84e73b10fe41481a7d429e4e854c1c6222e5
| 13,617
|
py
|
Python
|
sockjs/session.py
|
iTraceur/sockjs-channels
|
eaff5affaaf22f696a17697d4b323cb9dc6bb07d
|
[
"MIT"
] | 1
|
2022-03-24T16:13:04.000Z
|
2022-03-24T16:13:04.000Z
|
sockjs/session.py
|
iTraceur/sockjs-channels
|
eaff5affaaf22f696a17697d4b323cb9dc6bb07d
|
[
"MIT"
] | null | null | null |
sockjs/session.py
|
iTraceur/sockjs-channels
|
eaff5affaaf22f696a17697d4b323cb9dc6bb07d
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import warnings
from collections import deque
from datetime import datetime
from .constants import DEFAULT_SESSION_TIMEOUT, DEFAULT_HEARTBEAT_INTERVAL, DEFAULT_GC_INTERVAL
from .exceptions import SessionIsAcquired, SessionIsClosed
from .protocol import FRAME_MESSAGE, FRAME_MESSAGE_BLOB, FRAME_HEARTBEAT
from .protocol import FRAME_OPEN, FRAME_CLOSE
from .protocol import MSG_CLOSE, MSG_MESSAGE
from .protocol import STATE_NEW, STATE_OPEN, STATE_CLOSING, STATE_CLOSED
from .protocol import SockjsMessage, OpenMessage, ClosedMessage
from .protocol import close_frame, message_frame, messages_frame
logger = logging.getLogger("sockjs")
class Session(object):
    """ SockJS session object

    ``state``: Session state
    ``manager``: Session manager that hold this session
    ``acquired``: Acquired state, indicates that consumer is using session
    ``timeout``: Session timeout
    """

    # Class-level defaults; instances overwrite these as the lifecycle advances.
    scope = None
    manager = None
    acquired = False
    state = STATE_NEW
    interrupted = False
    exception = None

    _heartbeat_timer = None  # heartbeat event loop timer
    _heartbeat_future_task = None  # heartbeat task
    _heartbeat_consumed = True  # set back to True when the client reads the heartbeat frame

    def __init__(self, sid, handler, scope, *, timeout=DEFAULT_SESSION_TIMEOUT,
                 heartbeat_interval=DEFAULT_HEARTBEAT_INTERVAL, debug=False):
        # sid: unique session id; handler: awaitable called with
        # (SockjsMessage, session) for every lifecycle event and message.
        self.id = sid
        self.handler = handler
        self.scope = scope
        self.expired = False
        self.timeout = timeout
        self.heartbeat_interval = heartbeat_interval
        self.expires = datetime.now() + timeout
        self._hits = 0  # how many times the session was acquired
        self._heartbeats = 0  # how many heartbeat frames were queued
        self._heartbeat_consumer = False
        self._debug = debug
        self._waiter = None  # future a wait() caller blocks on until data arrives
        self._queue = deque()  # pending (frame, payload) tuples for the client

    def __str__(self):
        # Human-readable summary used by logging.
        result = ["id=%r" % (self.id,)]

        if self.state == STATE_OPEN:
            result.append("connected")
        elif self.state == STATE_CLOSED:
            result.append("closed")
        else:
            result.append("disconnected")

        if self.acquired:
            result.append("acquired")

        if self.message_length:
            result.append("queue[%s]" % self.message_length)
        if self._hits:
            result.append("hits=%s" % self._hits)
        if self._heartbeats:
            result.append("heartbeats=%s" % self._heartbeats)

        return " ".join(result)

    @property
    def message_length(self):
        # Number of frames waiting to be delivered to the client.
        return len(self._queue)

    def _tick(self, timeout=None):
        # Push the expiry deadline forward; called on any session activity.
        if timeout is None:
            self.expires = datetime.now() + self.timeout
        else:
            self.expires = datetime.now() + timeout

    async def acquire(self, manager, heartbeat=True):
        """Attach the session to *manager* and, on first use, open it."""
        self.acquired = True
        self.manager = manager
        self._heartbeat_consumer = heartbeat
        self._hits += 1

        if self.state == STATE_NEW:
            logger.debug("open session: %s", self.id)
            self.state = STATE_OPEN
            self._feed(FRAME_OPEN, FRAME_OPEN)
            try:
                await self.handler(OpenMessage, self)
                self.start_heartbeat()
            except asyncio.CancelledError:
                raise
            except Exception as exc:
                # Application open handler failed: close with an error frame.
                self.state = STATE_CLOSING
                self.exception = exc
                self.interrupted = True
                self._feed(FRAME_CLOSE, (3000, "Internal error"))
                logger.exception("Exception in open session handling.")

    def release(self):
        # Detach from the manager; the session object itself stays alive.
        self.acquired = False
        self.scope = None
        self.manager = None

    def start_heartbeat(self):
        # Schedule the first heartbeat only when a consumer asked for them.
        if self._heartbeat_consumer and not self._heartbeat_timer:
            loop = asyncio.get_event_loop()
            self._heartbeat_timer = loop.call_later(self.heartbeat_interval, self._heartbeat)

    def stop_heartbeat(self):
        if self._heartbeat_timer is not None:
            self._heartbeat_timer.cancel()
            self._heartbeat_timer = None

    def _heartbeat(self):
        # If the last heartbeat was not consumed, the client was closed.
        if not self._heartbeat_consumed:
            asyncio.ensure_future(self.remote_closed())
            return

        if self.state != STATE_OPEN:
            self.stop_heartbeat()
            return

        self._heartbeats += 1
        self._feed(FRAME_HEARTBEAT, FRAME_HEARTBEAT)
        self._heartbeat_consumed = False
        # Re-arm the timer for the next interval.
        loop = asyncio.get_event_loop()
        self._heartbeat_timer = loop.call_later(self.heartbeat_interval, self._heartbeat)

    def _feed(self, frame, data):
        # Queue an outgoing frame; consecutive MESSAGE frames are batched
        # into one list so they can be sent as a single messages frame.
        if frame == FRAME_MESSAGE:
            if self._queue and self._queue[-1][0] == FRAME_MESSAGE:
                self._queue[-1][1].append(data)
            else:
                self._queue.append((frame, [data]))
        else:
            self._queue.append((frame, data))

        # notify waiter
        self.notify_waiter()

    async def wait(self, pack=True):
        """Block until a frame is available and return (frame, payload).

        With pack=True, CLOSE/MESSAGE payloads are serialized to wire format.
        Raises SessionIsClosed once the session is closed and drained.
        """
        if not self._queue and self.state != STATE_CLOSED:
            assert not self._waiter
            loop = asyncio.get_event_loop()
            self._waiter = loop.create_future()
            await self._waiter

        if self._queue:
            frame, message = self._queue.popleft()
            if frame == FRAME_HEARTBEAT:
                # The client picked up the heartbeat; keep the session alive.
                self._heartbeat_consumed = True
            else:
                self._tick()
            if pack:
                if frame == FRAME_CLOSE:
                    return FRAME_CLOSE, close_frame(*message)
                elif frame == FRAME_MESSAGE:
                    return FRAME_MESSAGE, messages_frame(message)
            return frame, message
        else:
            raise SessionIsClosed()

    def notify_waiter(self):
        # Wake up a pending wait() call, if any.
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)

    def send(self, message):
        """send message to client."""
        assert isinstance(message, str), "String is required"

        if self._debug:
            logger.info("outgoing message: %s, %s", self.id, str(message)[:200])

        if self.state != STATE_OPEN:
            return

        self._feed(FRAME_MESSAGE, message)

    def send_frame(self, frame):
        """send message frame to client."""
        if self._debug:
            logger.info("outgoing message: %s, %s", self.id, frame[:200])

        if self.state != STATE_OPEN:
            return

        self._feed(FRAME_MESSAGE_BLOB, frame)

    def expire(self):
        """Manually expire a session."""
        self.expired = True
        self.stop_heartbeat()

    async def remote_message(self, message):
        # A single message arrived from the client; dispatch to the handler.
        logger.debug("incoming message: %s, %s", self.id, message[:200])
        self._tick()
        try:
            await self.handler(SockjsMessage(MSG_MESSAGE, message), self)
        except Exception as exc:
            logger.exception("Exception in message handler, %s." % str(exc))

    async def remote_messages(self, messages):
        # A batch of messages arrived; dispatch each one in order.
        self._tick()
        for message in messages:
            logger.debug("incoming message: %s, %s", self.id, message[:200])
            try:
                await self.handler(SockjsMessage(MSG_MESSAGE, message), self)
            except Exception as exc:
                logger.exception("Exception in message handler, %s." % str(exc))

    async def remote_close(self, exc=None):
        """close session from remote."""
        if self.state in (STATE_CLOSING, STATE_CLOSED):
            return

        logger.info("close session: %s", self.id)
        self.state = STATE_CLOSING
        if exc is not None:
            self.exception = exc
            self.interrupted = True
        try:
            await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
        except Exception as exc:
            logger.exception("Exception in close handler, %s." % str(exc))
        self.stop_heartbeat()

    async def remote_closed(self):
        # Final transition: the transport is gone, mark closed and drain waiters.
        if self.state == STATE_CLOSED:
            return

        logger.info("session closed: %s", self.id)
        self.state = STATE_CLOSED
        self.expire()
        try:
            await self.handler(ClosedMessage, self)
        except Exception as exc:
            logger.exception("Exception in closed handler, %s." % str(exc))

        # notify waiter
        self.notify_waiter()

    def close(self, code=3000, reason="Go away!"):
        """close session"""
        if self.state in (STATE_CLOSING, STATE_CLOSED):
            return

        if self._debug:
            logger.debug("close session: %s", self.id)

        self.state = STATE_CLOSING
        self._feed(FRAME_CLOSE, (code, reason))
        self.stop_heartbeat()
# Sentinel distinguishing "no default supplied" from an explicit None default.
empty = object()
class SessionManager(dict):
    """A basic session manager.

    Maps session id -> Session and periodically garbage-collects
    expired sessions via an event-loop timer.
    """

    _gc_timer = None  # gc event loop timer
    _gc_future_task = None  # gc task

    def __init__(self,
                 name,
                 handler,
                 heartbeat_interval=DEFAULT_HEARTBEAT_INTERVAL,
                 session_timeout=DEFAULT_SESSION_TIMEOUT,
                 gc_interval=DEFAULT_GC_INTERVAL,
                 debug=False):
        super().__init__()
        self.name = name
        self.route_name = "sockjs-url-%s" % name
        self.handler = handler
        self.factory = Session  # class used to build new sessions in get()
        self.gc_interval = gc_interval
        self.heartbeat_interval = heartbeat_interval
        self.session_timeout = session_timeout
        self.debug = debug
        self._acquired_map = {}  # session id -> True while a consumer holds it
        self._sessions = []  # insertion-ordered list scanned by the gc

    def __str__(self):
        return "SessionManager<%s>" % self.route_name

    @property
    def started(self):
        # The manager counts as started once the gc timer is armed.
        return self._gc_timer is not None

    def start(self):
        # Arm the periodic garbage-collection timer (idempotent).
        if not self._gc_timer:
            loop = asyncio.get_event_loop()
            self._gc_timer = loop.call_later(self.gc_interval, self._gc)

    def stop(self):
        # Cancel the gc timer and any in-flight gc task.
        if self._gc_timer is not None:
            self._gc_timer.cancel()
            self._gc_timer = None
        if self._gc_future_task is not None:
            self._gc_future_task.cancel()
            self._gc_future_task = None

    def _gc(self):
        # Timer callback: launch the async gc pass unless one is running.
        if self._gc_future_task is None:
            self._gc_future_task = asyncio.ensure_future(self._gc_task())

    async def _gc_task(self):
        # Close and drop every expired session, then re-arm the timer.
        if self._sessions:
            now = datetime.now()

            idx = 0
            while idx < len(self._sessions):
                session = self._sessions[idx]
                if session.expires < now or session.expired:
                    session._feed(FRAME_CLOSE, (3000, "Session timeout!"))
                    # Session is to be GC'd immediately
                    if session.state == STATE_OPEN:
                        await session.remote_close()
                    if session.state == STATE_CLOSING:
                        await session.remote_closed()
                    if session.id in self._acquired_map:
                        await self.release(session)
                    del self[session.id]
                    del self._sessions[idx]
                    continue
                idx += 1

        self._gc_future_task = None
        loop = asyncio.get_event_loop()
        self._gc_timer = loop.call_later(self.gc_interval, self._gc)

    def _add(self, session):
        if session.expired:
            raise ValueError("Can not add expired session.")

        session.manager = self
        self[session.id] = session
        self._sessions.append(session)
        return session

    def get(self, sid, create=False, scope=None, default=empty):
        """Return the session for *sid*.

        With create=True a missing session is built via self.factory;
        otherwise *default* is returned when given, else KeyError is raised.
        """
        session = super().get(sid, None)
        if session is None:
            if create:
                session = self._add(
                    self.factory(sid, self.handler, scope, timeout=self.session_timeout,
                                 heartbeat_interval=self.heartbeat_interval, debug=self.debug)
                )
            else:
                if default is not empty:
                    return default
                raise KeyError(sid)
        else:
            # Refresh the scope on every lookup of an existing session.
            session.scope = scope
        return session

    async def acquire(self, session):
        # Hand the session to a consumer; only one consumer at a time.
        sid = session.id

        if sid in self._acquired_map:
            raise SessionIsAcquired("Another connection still open")
        if sid not in self:
            raise KeyError("Unknown session")

        await session.acquire(self)
        self._acquired_map[sid] = True
        return session

    def is_acquired(self, session):
        return session.id in self._acquired_map

    async def release(self, session):
        if session.id in self._acquired_map:
            session.release()
            del self._acquired_map[session.id]

    def active_sessions(self):
        # Iterate over sessions that have not expired yet.
        for session in list(self.values()):
            if not session.expired:
                yield session

    async def clear(self):
        """Manually expire all _sessions in the pool."""
        for session in list(self.values()):
            if session.state != STATE_CLOSED:
                await session.remote_closed()
        self._sessions.clear()
        super().clear()

    def broadcast(self, message):
        # Serialize once, then fan the frame out to every live session.
        blob = message_frame(message)

        for session in list(self.values()):
            if not session.expired:
                session.send_frame(blob)

    def __del__(self):
        if len(self._sessions):
            warnings.warn(
                "Unclosed _sessions! "
                "Please call `await SessionManager.clear()` before del",
                RuntimeWarning,
            )
        self.stop()
| 30.807692
| 95
| 0.586693
|
4a0f854d899cd4ce87b00d8d3092ad66df14485a
| 119
|
py
|
Python
|
data/yfinance.py
|
lulosol/tradekit
|
02d24c0849192fc53d27fdf763e60041396a2171
|
[
"Apache-2.0"
] | null | null | null |
data/yfinance.py
|
lulosol/tradekit
|
02d24c0849192fc53d27fdf763e60041396a2171
|
[
"Apache-2.0"
] | null | null | null |
data/yfinance.py
|
lulosol/tradekit
|
02d24c0849192fc53d27fdf763e60041396a2171
|
[
"Apache-2.0"
] | null | null | null |
import yfinance
def get_agg_daily_bars(symbol, start, end):
    """Download daily aggregate bars for *symbol* between *start* and *end*.

    Thin wrapper around ``yfinance.download``; *start* and *end* are passed
    through unchanged. (Fixes the original's missing ':' after the def header,
    which made the module unimportable.)
    """
    return yfinance.download(symbol, start=start, end=end)
| 23.8
| 58
| 0.781513
|
4a0f87d73010825e5545cbbc60ace84c40d436bc
| 6,449
|
py
|
Python
|
v1/firmware/uploader/uploader.py
|
amirgeva/z80pc
|
2daaa319ad7b313abdf0c73fc3faee8d6c36ed3e
|
[
"BSD-2-Clause"
] | null | null | null |
v1/firmware/uploader/uploader.py
|
amirgeva/z80pc
|
2daaa319ad7b313abdf0c73fc3faee8d6c36ed3e
|
[
"BSD-2-Clause"
] | null | null | null |
v1/firmware/uploader/uploader.py
|
amirgeva/z80pc
|
2daaa319ad7b313abdf0c73fc3faee8d6c36ed3e
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
'''
Memory programmer. Upload using serial port, a ROM image into the
target memory. Upload in 54 bytes chunks and verify by reading back
Usage: uploader.py <COM> <FILE>
Example: uploader.py COM5 os.bin
'''
import sys
import serial
import time
def calculate_crc16(buffer):
    '''
    Calculate the CRC16 value of a buffer. Should match the firmware version
    :param buffer: byte array or list of 8 bit integers
    :return: a 16 bit unsigned integer crc result
    '''
    crc = 0
    for byte in buffer:
        # Mix the next byte into the low half, then fold it across the word.
        low = (int(byte) ^ crc) & 0xFF
        low ^= (low << 4) & 0xFF
        crc = (((low << 8) | (crc >> 8)) ^ (low >> 4) ^ (low << 3)) & 0xFFFF
    return crc
# Header prefix for messages
MAGIC = [0x12, 0x34, 0x56, 0x78]


def create_empty_message(address, length):
    '''
    Create an empty message with a header filled, but without command or data
    :param address: 16 bit unsigned memory address
    :param length: length of data (0-54)
    :return: a message list
    '''
    msg = [0] * 64
    msg[0:4] = MAGIC
    # Address is stored little-endian in bytes 5-6; byte 4 is the command slot.
    msg[5] = address & 0xFF
    msg[6] = (address >> 8) & 0xFF
    msg[7] = length & 0xFF
    return msg
def place_crc(msg):
    '''
    Calculate the CRC value of a message, and place it in the right spot
    :param msg: Message to be processed. Modified in place
    :return:
    '''
    # CRC covers the payload region (bytes 4..61); stored little-endian
    # in the final two bytes.
    checksum = calculate_crc16(msg[4:62])
    msg[62] = checksum & 0xFF
    msg[63] = (checksum >> 8) & 0xFF
def create_reset_message():
    '''
    Create a reset message (command 3) with zero address, length and data.
    :return: Message ready to be sent
    '''
    msg = create_empty_message(0, 0)
    msg[4] = 3  # command: reset target
    place_crc(msg)
    return msg
def create_query_message(address, length):
    '''
    Create a memory read message (command 2)
    :param address: 16 bit unsigned memory address
    :param length: length of data
    :return: Message ready to be sent
    '''
    msg = create_empty_message(address, length)
    msg[4] = 2  # command: read back memory for verification
    place_crc(msg)
    return msg
def create_programming_message(address, data):
    '''
    Create a memory write message (command 1)
    :param address: 16 bit unsigned memory address
    :param data: list of data (up to 54 elements)
    :return: Message ready to be sent
    '''
    payload_len = min(54, len(data))
    msg = create_empty_message(address, payload_len)
    msg[4] = 1  # command: write memory
    # Payload starts at byte 8; anything beyond 54 bytes is truncated.
    msg[8:8 + payload_len] = list(data[:payload_len])
    place_crc(msg)
    return msg
def verify_header(msg, n):
    '''
    Verify the header of the incoming message has a valid MAGIC number
    :param msg: Message buffer
    :param n: How many bytes received so far (1-4)
    :return: True if message header is ok so far
    '''
    received = min(4, n)
    return all(msg[i] == MAGIC[i] for i in range(received))
def verify_crc(msg):
    '''
    Check incoming CRC is valid
    :param msg: Incoming message
    :return: True if message is ok
    '''
    expected = calculate_crc16(msg[4:62])
    lo, hi = expected & 0xFF, (expected >> 8) & 0xFF
    return msg[62] == lo and msg[63] == hi
def verify_ack(msg, data):
    '''
    Compare incoming data with the expected values
    :param msg: Incoming message
    :param data: Expected data
    :return: True if they match
    '''
    for offset, expected in enumerate(data):
        if msg[8 + offset] != expected:
            # Dump both sides to help diagnose the mismatch.
            print(f"Sent: {data}")
            print(f"Recv: {msg[8:62]}")
            return False
    return True
def wait_for_result(ser, data):
    '''
    Wait for an incoming message
    Timeout if nothing arrives in a 1 second interval
    Then verify and compare to expected data
    :param ser: Serial port
    :param data: Expected data
    :return: True only if received data matches
    '''
    msg = [0] * 64
    pos = 0
    while True:
        # Read one byte at a time; an empty read means the port timed out.
        b = ser.read(1)
        if len(b) < 1:
            print(f"Timeout pos={pos}")
            return False
        msg[pos] = int(b[0])
        pos = pos + 1
        if not verify_header(msg, pos):
            # NOTE(review): all bytes collected so far are discarded on a
            # header mismatch, so a MAGIC byte inside the garbage cannot
            # restart a partial match — resync relies on retransmission.
            print("Skipping until header")
            pos = 0
        if pos == 64:
            # Full frame received: validate checksum, then compare payload.
            if not verify_crc(msg):
                print("CRC Failed")
                return False
            return verify_ack(msg, data)
def main():
    # CLI entry point: uploader.py <COMPORT> <FILE>
    if len(sys.argv) != 3:
        print("Usage: uploader.py <COMPORT> <FILE>")
    else:
        try:
            data = open(sys.argv[2], 'rb').read()
            #ser = serial.serialwin32.Serial(sys.argv[1], baudrate=115200, timeout=1.0)
            ser = serial.Serial(sys.argv[1], baudrate=115200, timeout=5.0)
            # Give the target time to settle after the port opens.
            time.sleep(5)
            # Split up data into 54 byte chunks
            n = (len(data) + 53) // 54
            queue = []
            start = 0
            for i in range(n):
                stop = min(start + 54, len(data))
                queue.append((start, stop))
                start = stop
            trial = 0
            total_trials = 4
            fail_count = 0
            # Keep retrying only the chunks that failed verification.
            while len(queue) > 0 and trial < total_trials:
                trial = trial + 1
                print(f"Trial {trial}, Sending {len(queue)} packets")
                print("Write")
                for item in queue:
                    sys.stdout.write(f'{item[0]}\r')
                    sys.stdout.flush()
                    sub = data[item[0]:item[1]]
                    msg = create_programming_message(item[0], sub)
                    ser.write(bytes(msg))
                leftover = []
                # Let the firmware finish committing the writes before reading back.
                time.sleep(4)
                print("Verify")
                for item in queue:
                    sys.stdout.write(f'{item[0]}\r')
                    sys.stdout.flush()
                    sub = data[item[0]:item[1]]
                    msg = create_query_message(item[0], len(sub))
                    ser.write(bytes(msg))
                    if not wait_for_result(ser, sub):
                        fail_count = fail_count + 1
                        print(f"Failed address: {item[0]}")
                        leftover.append(item)
                        # NOTE(review): inner abort threshold (100) differs from
                        # the outer one (10) below — confirm this is intentional.
                        if fail_count > 100:
                            break
                queue = leftover
                if fail_count > 10:
                    break
            if len(queue) > 0:
                print(f"Could not send {len(queue)} packets after {total_trials} trials")
            else:
                # Everything verified: command the target to reset and boot.
                msg = create_reset_message()
                ser.write(bytes(msg))
        except FileNotFoundError:
            print(f"File not found: {sys.argv[2]}")


if __name__ == '__main__':
    main()
| 28.790179
| 89
| 0.540859
|
4a0f881cfd419f8733fd94732171302690e8c615
| 3,806
|
py
|
Python
|
chiadoge/ssl/create_ssl.py
|
sengexyz/chiadoge-blockchain
|
d2b6963876ad81d5f058a6d33e26c884a0d0b201
|
[
"Apache-2.0"
] | 2
|
2021-07-05T14:34:35.000Z
|
2022-01-01T21:27:52.000Z
|
chiadoge/ssl/create_ssl.py
|
sengexyz/chiadoge-blockchain
|
d2b6963876ad81d5f058a6d33e26c884a0d0b201
|
[
"Apache-2.0"
] | null | null | null |
chiadoge/ssl/create_ssl.py
|
sengexyz/chiadoge-blockchain
|
d2b6963876ad81d5f058a6d33e26c884a0d0b201
|
[
"Apache-2.0"
] | 1
|
2021-07-07T11:08:36.000Z
|
2021-07-07T11:08:36.000Z
|
import datetime
from pathlib import Path
from typing import Any, Tuple
import pkg_resources
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.x509.oid import NameOID
def get_chiadoge_ca_crt_key() -> Tuple[Any, Any]:
    """Return the packaged chiadoge CA certificate and private key as bytes."""
    return (
        pkg_resources.resource_string(__name__, "chiadoge_ca.crt"),
        pkg_resources.resource_string(__name__, "chiadoge_ca.key"),
    )
def get_mozzila_ca_crt() -> str:
    """Return the absolute path of the bundled Mozilla CA certificate file."""
    repo_root = Path(__file__).parent.parent.parent.absolute()
    return str(repo_root / "mozilla-ca/cacert.pem")
def generate_ca_signed_cert(ca_crt: bytes, ca_key: bytes, cert_out: Path, key_out: Path):
    """Issue a leaf certificate signed by the given CA and write both PEM files.

    :param ca_crt: CA certificate as PEM bytes
    :param ca_key: CA private key as PEM bytes (unencrypted)
    :param cert_out: path receiving the new certificate (PEM)
    :param key_out: path receiving the new private key (PEM, unencrypted)
    """
    one_day = datetime.timedelta(1, 0, 0)
    root_cert = x509.load_pem_x509_certificate(ca_crt, default_backend())
    root_key = load_pem_private_key(ca_key, None, default_backend())
    # Fresh 2048-bit RSA key pair for the leaf certificate.
    cert_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    new_subject = x509.Name(
        [
            x509.NameAttribute(NameOID.COMMON_NAME, "Chiadoge"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Chiadoge"),
            x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Organic Farming Division"),
        ]
    )
    cert = (
        x509.CertificateBuilder()
        .subject_name(new_subject)
        .issuer_name(root_cert.issuer)
        .public_key(cert_key.public_key())
        .serial_number(x509.random_serial_number())
        # Backdated one day so minor clock skew does not invalidate it.
        .not_valid_before(datetime.datetime.today() - one_day)
        .not_valid_after(datetime.datetime(2100, 8, 2))
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName("chiadoge.co")]),
            critical=False,
        )
        .sign(root_key, hashes.SHA256(), default_backend())
    )
    cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
    # Private key is written unencrypted in traditional OpenSSL PEM format.
    key_pem = cert_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    cert_out.write_bytes(cert_pem)
    key_out.write_bytes(key_pem)
def make_ca_cert(cert_path: Path, key_path: Path):
    """Generate a self-signed CA certificate and write cert + key as PEM files.

    :param cert_path: path receiving the CA certificate (PEM)
    :param key_path: path receiving the CA private key (PEM, unencrypted)
    """
    root_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    # Self-signed: subject and issuer are the same name.
    subject = issuer = x509.Name(
        [
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Chiadoge"),
            x509.NameAttribute(NameOID.COMMON_NAME, "Chiadoge CA"),
            x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Organic Farming Division"),
        ]
    )
    root_cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(issuer)
        .public_key(root_key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        # Valid for ten years from creation.
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
        .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
        .sign(root_key, hashes.SHA256(), default_backend())
    )
    cert_path.write_bytes(
        root_cert.public_bytes(
            encoding=serialization.Encoding.PEM,
        )
    )
    key_path.write_bytes(
        root_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        )
    )
def main():
    # Write a fresh self-signed CA into the current working directory.
    return make_ca_cert(Path("./chiadoge_ca.crt"), Path("./chiadoge_ca.key"))


if __name__ == "__main__":
    main()
| 35.570093
| 104
| 0.704151
|
4a0f88b353e71c833e579a5f7261af38220df8e4
| 8,303
|
py
|
Python
|
apps/logs/migrations/0054_fill_importbatch_date.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | 1
|
2019-12-12T15:38:42.000Z
|
2019-12-12T15:38:42.000Z
|
apps/logs/migrations/0054_fill_importbatch_date.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | null | null | null |
apps/logs/migrations/0054_fill_importbatch_date.py
|
techlib/czechelib-stats
|
ca132e326af0924740a525710474870b1fb5fd37
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-02-15 11:11
import sys
from django.db import migrations
from django.db.models import Count, Min, Max, F
def fix_extra_month_in_jr2_data(apps, schema_editor):
    """
    Pycounter contained an error which caused data from JR2 reports to create an extra month
    beyond the one that was present
    (https://github.com/pitthsls/pycounter/commit/7c24ab91460c25a8b9d905b0484f375d27841ac2)

    This code find and, if `fix` is True, fixes the problem.
    Because it works on AccessLog level, it has to deal with Clickhouse
    """
    # Historical models as they existed at this migration's point in time.
    ReportType = apps.get_model('logs', 'ReportType')
    AccessLog = apps.get_model('logs', 'AccessLog')
    SushiFetchAttempt = apps.get_model('sushi', 'SushiFetchAttempt')
    try:
        jr2 = ReportType.objects.get(short_name='JR2')
    except ReportType.DoesNotExist:
        # No JR2 report type in this installation: nothing to repair.
        return
    # Import batches whose JR2 access logs span more than one month.
    import_batches = (
        AccessLog.objects.filter(report_type=jr2)
        .values('import_batch_id')
        .annotate(month_count=Count('date', distinct=True))
        .filter(month_count__gt=1)
    )
    count = 0

    from django.conf import settings

    for rec in import_batches:
        ib_id = rec['import_batch_id']
        try:
            fa = SushiFetchAttempt.objects.get(import_batch_id=ib_id)
        except SushiFetchAttempt.DoesNotExist:
            # we are only interest in import batches with fetch attempt, because there we can check
            # the expected date
            continue
        # delete access logs for which the date does not match the fetch attempt
        AccessLog.objects.filter(import_batch_id=ib_id).exclude(date=fa.start_date).delete()
        if settings.CLICKHOUSE_SYNC_ACTIVE:
            # Mirror the deletion in the Clickhouse copy of the logs.
            from logs.cubes import ch_backend, AccessLogCube

            ch_backend.delete_records(
                AccessLogCube.query().filter(import_batch_id=ib_id, date__not_in=[fa.start_date])
            )
        count += 1
    print(f'Fixed {count} broken JR2 import batches')
def fix_unrequested_data_in_import_batches(apps, schema_editor):
    """
    Remove usage data that a SUSHI server returned for months that were never requested.

    In FLVC we got 2 import batches where the sushi server gave us not only data for the
    period we asked for, but also for other periods :/
    Here we find and fix such cases. We need the fetch attempt for that, because we need to know
    the requested date.
    Because it works on AccessLog level, it has to deal with Clickhouse as well.
    """
    AccessLog = apps.get_model('logs', 'AccessLog')
    ImportBatch = apps.get_model('logs', 'ImportBatch')
    # import batches whose access logs span more than one distinct month
    import_batches = (
        AccessLog.objects.values('import_batch_id')
        .annotate(month_count=Count('date', distinct=True))
        .filter(month_count__gt=1)
        .values('import_batch_id')
    )
    count = 0
    from django.conf import settings
    # only batches with an attached fetch attempt can be fixed - the attempt carries the
    # month that was actually requested
    for ib in ImportBatch.objects.filter(
        pk__in=import_batches, sushifetchattempt__isnull=False
    ).select_related('sushifetchattempt'):
        ib.date = ib.sushifetchattempt.start_date
        ib.save()
        # delete access logs for which the date does not match the fetch attempt
        AccessLog.objects.filter(import_batch_id=ib.pk).exclude(date=ib.date).delete()
        if settings.CLICKHOUSE_SYNC_ACTIVE:
            # mirror the deletion into Clickhouse so both stores stay in sync
            from logs.cubes import ch_backend, AccessLogCube
            ch_backend.delete_records(
                AccessLogCube.query().filter(import_batch_id=ib.pk, date__not_in=[ib.date])
            )
        count += 1
    print(f'Fixed {count} import batches with extra data')
def add_date_to_importbatch_from_fetchattempts(apps, schema_editor):
    """
    Assign date to import batch based on the fetch attempt - this is much faster than using
    accesslogs, because the batches can be updated in bulk, one month at a time.
    """
    ImportBatch = apps.get_model('logs', 'ImportBatch')
    SushiFetchAttempt = apps.get_model('sushi', 'SushiFetchAttempt')
    # a sanity check first - after the fixes above, every batch must cover a single month,
    # otherwise a single `date` value would be ambiguous
    multi_month = (
        ImportBatch.objects.annotate(start=Min('accesslog__date'), end=Max('accesslog__date'))
        .exclude(end=F('start'))
        .count()
    )
    if multi_month:
        raise ValueError(
            f'Multi-month import batches exist ({multi_month}), migration cannot proceed!'
        )
    total = 0
    # it is not possible to use F() with join in updates, so we cannot update all importbatches
    # with the date extracted from FetchAttempt. Therefor we do it at least in batches by month
    # which is much more effective than doing it import batch by import batch
    print('Import batches without date:', ImportBatch.objects.filter(date__isnull=True).count())
    # we need the import_batch__isnull=False because the import_batch__date__isnull may be caused
    # by import_batch being null (a left outer join is created), so the filter would not work as
    # expected
    for month in (
        SushiFetchAttempt.objects.filter(
            import_batch__date__isnull=True, import_batch__isnull=False
        )
        .values_list('start_date', flat=True)
        .distinct()
    ):
        # bulk-update all dateless batches whose fetch attempt requested this month
        updated = ImportBatch.objects.filter(
            date__isnull=True,
            pk__in=SushiFetchAttempt.objects.filter(start_date=month)
            .values('import_batch_id')
            .distinct(),
        ).update(date=month)
        total += updated
        print(f'Updated import batches for {month}: {updated}')
    print(f'Updated import batches - total: {total}')
    print(
        f'Left import batches without date:', ImportBatch.objects.filter(date__isnull=True).count()
    )
def add_date_to_importbatch_from_accesslogs(apps, schema_editor):
    """
    Assign date to the remaining dateless import batches from their access logs.

    Handles batches that have no fetch attempt and thus were not covered by
    ``add_date_to_importbatch_from_fetchattempts``.
    """
    ImportBatch = apps.get_model('logs', 'ImportBatch')
    # it is not possible to use F() with joint in updates, so we cannot update all import batches
    # with the date extracted from AccessLogs. Here we use a naive approach which goes by
    # individual import batches, because we can assume only a small number of objects here
    # because most will be resolved above using fetch attempts
    total = 0
    print('Import batches without date:', ImportBatch.objects.filter(date__isnull=True).count())
    for ib in (
        ImportBatch.objects.filter(date__isnull=True)
        .annotate(al_date=Min('accesslog__date'))
        .filter(al_date__isnull=False)
    ):
        # batches are single-month (checked earlier), so Min() gives the batch's month
        ib.date = ib.al_date
        ib.save()
        total += 1
        # simple progress indicator - one dot per updated batch
        print('.', end='')
        sys.stdout.flush()
    print(f'Updated import batches - total: {total}')
    print(
        f'Left import batches without date:', ImportBatch.objects.filter(date__isnull=True).count()
    )
def remove_orphan_import_batches_without_date(apps, schema_editor):
    """
    If any import batches still exist which do not have date, they should not have a fetch attempt
    and have empty usage - and thus not have much value.
    Here we remove those that do not have an MDU (manual data upload).
    """
    ImportBatch = apps.get_model('logs', 'ImportBatch')
    # delete() returns (total rows deleted, per-model breakdown); we only report the total
    removed, stats = (
        ImportBatch.objects.filter(
            date__isnull=True, sushifetchattempt__isnull=True, mdu__isnull=True
        )
        .annotate(al_count=Count('accesslog'))
        .filter(al_count=0)
        .delete()
    )
    print(f'Removed {removed} import batches without MDU and with zero usage')
    without_date = ImportBatch.objects.filter(date__isnull=True).count()
    print(f'Left import batches without date: {without_date} - these are from empty MDUs')
class Migration(migrations.Migration):
    """Data migration that backfills ``ImportBatch.date`` and cleans up broken batches."""

    dependencies = [
        ('logs', '0053_clickhouse_add_import_batch_idx'),
        ('sushi', '0048_discard_credentials_broken_state'),
        ('scheduler', '0014_finalizing_import_batch'),
    ]

    operations = [
        # first repair batches that span more than one month, so a single `date` is meaningful
        migrations.RunPython(fix_extra_month_in_jr2_data, migrations.RunPython.noop),
        migrations.RunPython(fix_unrequested_data_in_import_batches, migrations.RunPython.noop),
        # we need to use both fetch attempts and accesslogs for adding dates because
        # none of them alone works for all import batches
        migrations.RunPython(add_date_to_importbatch_from_fetchattempts, migrations.RunPython.noop),
        migrations.RunPython(add_date_to_importbatch_from_accesslogs, migrations.RunPython.noop),
        # we finally remove import batches which are empty and completely orphaned
        migrations.RunPython(remove_orphan_import_batches_without_date, migrations.RunPython.noop),
    ]
| 41.723618
| 100
| 0.690353
|
4a0f89b67ebcd64a2e7ff180463259ae3f05cb90
| 21,604
|
py
|
Python
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_05_01_preview/aio/operations/_storage_accounts_operations.py
|
GoWang/azure-sdk-for-python
|
f241e3734a50953c2a37c10d2d84eb4c013b3ba0
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_05_01_preview/aio/operations/_storage_accounts_operations.py
|
GoWang/azure-sdk-for-python
|
f241e3734a50953c2a37c10d2d84eb4c013b3ba0
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_05_01_preview/aio/operations/_storage_accounts_operations.py
|
GoWang/azure-sdk-for-python
|
f241e3734a50953c2a37c10d2d84eb4c013b3ba0
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Type of the optional `cls` callback each operation accepts: it receives the pipeline
# response, the deserialized result and the response headers, and may transform the result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
    """StorageAccountsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.databoxedge.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_data_box_edge_device(
        self,
        device_name: str,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.StorageAccountList"]:
        """Lists all the storage accounts in a Data Box Edge/Data Box Gateway device.

        Lists all the storage accounts in a Data Box Edge/Data Box Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either StorageAccountList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.databoxedge.models.StorageAccountList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageAccountList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the HTTP request for the first page (from the operation URL template)
            # or for a continuation page (the service-provided `next_link` is used verbatim).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_data_box_edge_device.metadata['url']  # type: ignore
                path_format_arguments = {
                    'deviceName': self._serialize.url("device_name", device_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserializes one page and returns (next page link or None, items of this page).
            deserialized = self._deserialize('StorageAccountList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetches one page; only HTTP 200 is a valid response for this operation.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts'}  # type: ignore

    async def get(
        self,
        device_name: str,
        storage_account_name: str,
        resource_group_name: str,
        **kwargs
    ) -> "_models.StorageAccount":
        """Gets a StorageAccount by name.

        Gets a StorageAccount by name.

        :param device_name: The device name.
        :type device_name: str
        :param storage_account_name: The storage account name.
        :type storage_account_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: StorageAccount, or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.models.StorageAccount
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageAccount"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('StorageAccount', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}'}  # type: ignore

    async def _create_or_update_initial(
        self,
        device_name: str,
        storage_account_name: str,
        resource_group_name: str,
        storage_account: "_models.StorageAccount",
        **kwargs
    ) -> Optional["_models.StorageAccount"]:
        # Initial PUT of the create-or-update long-running operation.
        # Returns the deserialized body on 200; 202 (accepted, still provisioning) has no body.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.StorageAccount"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(storage_account, 'StorageAccount')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('StorageAccount', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}'}  # type: ignore

    async def begin_create_or_update(
        self,
        device_name: str,
        storage_account_name: str,
        resource_group_name: str,
        storage_account: "_models.StorageAccount",
        **kwargs
    ) -> AsyncLROPoller["_models.StorageAccount"]:
        """Creates a new StorageAccount or updates an existing StorageAccount on the device.

        Creates a new StorageAccount or updates an existing StorageAccount on the device.

        :param device_name: The device name.
        :type device_name: str
        :param storage_account_name: The StorageAccount name.
        :type storage_account_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param storage_account: The StorageAccount properties.
        :type storage_account: ~azure.mgmt.databoxedge.models.StorageAccount
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either StorageAccount or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.databoxedge.models.StorageAccount]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageAccount"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # no saved state - issue the initial request; keep the raw pipeline response
            # (cls=lambda ...) so the poller can read the LRO headers from it
            raw_result = await self._create_or_update_initial(
                device_name=device_name,
                storage_account_name=storage_account_name,
                resource_group_name=resource_group_name,
                storage_account=storage_account,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final LRO response into a StorageAccount (or cls result).
            deserialized = self._deserialize('StorageAccount', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }

        # select the polling strategy: default ARM polling, no polling, or a caller-supplied one
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}'}  # type: ignore

    async def _delete_initial(
        self,
        device_name: str,
        storage_account_name: str,
        resource_group_name: str,
        **kwargs
    ) -> None:
        # Initial DELETE of the delete long-running operation; 202/204 are the accepted statuses.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01-preview"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}'}  # type: ignore

    async def begin_delete(
        self,
        device_name: str,
        storage_account_name: str,
        resource_group_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the StorageAccount on the Data Box Edge/Data Box Gateway device.

        :param device_name: The device name.
        :type device_name: str
        :param storage_account_name: The StorageAccount name.
        :type storage_account_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # no saved state - issue the initial request; keep the raw pipeline response
            # (cls=lambda ...) so the poller can read the LRO headers from it
            raw_result = await self._delete_initial(
                device_name=device_name,
                storage_account_name=storage_account_name,
                resource_group_name=resource_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only the optional cls callback produces a value.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'storageAccountName': self._serialize.url("storage_account_name", storage_account_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }

        # select the polling strategy: default ARM polling, no polling, or a caller-supplied one
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccounts/{storageAccountName}'}  # type: ignore
| 49.664368
| 235
| 0.673301
|
4a0f8b6894c11ab143aac741dc32c820161ed05a
| 8,190
|
py
|
Python
|
vendor/packages/Babel/docs/conf.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/Babel/docs/conf.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-13T20:55:07.000Z
|
2021-12-13T20:55:07.000Z
|
vendor/packages/Babel/docs/conf.py
|
DESHRAJ/fjord
|
8899b6286b23347c9b024334e61c33fe133e836d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Babel documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 3 17:53:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Babel'
copyright = u'2013, Edgewall Software'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'babel'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['sidebar-about.html', 'localtoc.html', 'sidebar-links.html',
              'searchbox.html'],
    '**': ['sidebar-logo.html', 'localtoc.html', 'relations.html',
           'searchbox.html']
}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Babeldoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',

    # Needed for unicode symbol conversion.
    'fontpkg': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Babel.tex', u'Babel Documentation',
     u'Edgewall Software', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/logo.png'

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE: the start file must be the master document ('index'); 'index_' does not
# exist and would make the man builder fail.
man_pages = [
    ('index', 'babel', u'Babel Documentation',
     [u'Edgewall Software'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE: as above, the start file must be the master document ('index').
texinfo_documents = [
    ('index', 'Babel', u'Babel Documentation',
     u'Edgewall Software', 'Babel', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

intersphinx_mapping = {
    'http://docs.python.org/2': None,
}
| 31.867704
| 80
| 0.707326
|
4a0f8b94399ffce38e42e79fba2bbcbbbe0db5ef
| 795
|
py
|
Python
|
src/timeshap/wrappers/__init__.py
|
feedzai/timeshap
|
0d56b3b86222d52fdc5a1e96f125513e0ed18e6c
|
[
"Apache-2.0"
] | 49
|
2022-03-25T14:35:52.000Z
|
2022-03-31T18:05:51.000Z
|
src/timeshap/wrappers/__init__.py
|
feedzai/timeshap
|
0d56b3b86222d52fdc5a1e96f125513e0ed18e6c
|
[
"Apache-2.0"
] | 1
|
2022-03-31T12:15:55.000Z
|
2022-03-31T14:59:06.000Z
|
src/timeshap/wrappers/__init__.py
|
feedzai/timeshap
|
0d56b3b86222d52fdc5a1e96f125513e0ed18e6c
|
[
"Apache-2.0"
] | 2
|
2022-03-28T04:32:35.000Z
|
2022-03-28T06:39:24.000Z
|
# Copyright 2022 Feedzai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_wrapper import TimeSHAPWrapper
# Guarding against torch not installed: only expose TorchModelWrapper when
# torch can actually be imported, so the package stays usable without it.
from ..utils.compatibility import is_torch_installed
if is_torch_installed():
    from .torch_wrappers import TorchModelWrapper
| 36.136364
| 75
| 0.769811
|
4a0f8ecdea7b0cdc6d5c4281f0b40957125111c2
| 5,012
|
py
|
Python
|
aardvark-api-windows-x86_64-v5.40/aardvark-api-windows-x86_64-v5.40/python/aai2c_file.py
|
firstsystemsuk/aardvark_i2c
|
2cbf38f76dc144cd0c44f56b1d1f7a09fd720d8d
|
[
"MIT"
] | 1
|
2021-03-19T20:47:16.000Z
|
2021-03-19T20:47:16.000Z
|
aardvark-api-windows-x86_64-v5.40/aardvark-api-windows-x86_64-v5.40/python/aai2c_file.py
|
firstsystemsuk/aardvark_i2c
|
2cbf38f76dc144cd0c44f56b1d1f7a09fd720d8d
|
[
"MIT"
] | null | null | null |
aardvark-api-windows-x86_64-v5.40/aardvark-api-windows-x86_64-v5.40/python/aai2c_file.py
|
firstsystemsuk/aardvark_i2c
|
2cbf38f76dc144cd0c44f56b1d1f7a09fd720d8d
|
[
"MIT"
] | 1
|
2021-12-30T02:19:41.000Z
|
2021-12-30T02:19:41.000Z
|
#!/usr/bin/env python3
#==========================================================================
# (c) 2004-2019 Total Phase, Inc.
#--------------------------------------------------------------------------
# Project : Aardvark Sample Code
# File : aai2c_file.c
#--------------------------------------------------------------------------
# Configure the device as an I2C master and send data.
#--------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==========================================================================
#==========================================================================
# IMPORTS
#==========================================================================
from __future__ import division, with_statement, print_function
import sys
from aardvark_py import *
#==========================================================================
# CONSTANTS
#==========================================================================
BUFFER_SIZE = 2048  # bytes read from the file and sent per I2C transaction
I2C_BITRATE = 400   # requested bus bitrate in kHz (passed to aa_i2c_bitrate)
#==========================================================================
# FUNCTIONS
#==========================================================================
def blast_bytes (handle, slave_addr, filename):
    """Stream the contents of *filename* to an I2C slave in BUFFER_SIZE chunks.

    Each chunk is sent as one I2C write transaction to ``slave_addr`` and
    hex-dumped to stdout.  The transfer aborts on a bus error, a zero-byte
    write (usually a wrong slave address) or a short write.

    handle     -- open Aardvark device handle
    slave_addr -- target I2C slave address
    filename   -- path of the binary file whose bytes are sent
    """
    # Open the file.  Catch only file-system errors (IOError) instead of a
    # bare except that would also hide programming bugs.
    try:
        f = open(filename, 'rb')
    except IOError:
        print("Unable to open file '" + filename + "'")
        return

    trans_num = 0
    # 'with' guarantees the file is closed even if a transfer raises,
    # which the original explicit f.close() did not.
    with f:
        while 1:
            # Read from the file
            filedata = f.read(BUFFER_SIZE)
            if (len(filedata) == 0):
                break

            # Write the data to the bus
            data_out = array('B', filedata)
            count = aa_i2c_write(handle, slave_addr, AA_I2C_NO_FLAGS, data_out)
            if (count < 0):
                print("error: %s" % aa_status_string(count))
                break
            elif (count == 0):
                print("error: no bytes written")
                print("  are you sure you have the right slave address?")
                break
            elif (count != len(data_out)):
                print("error: only a partial number of bytes written")
                print("  (%d) instead of full (%d)" % (count, len(data_out)))
                break

            sys.stdout.write("*** Transaction #%02d\n" % trans_num)

            # Dump the data to the screen
            sys.stdout.write("Data written to device:")
            for i in range(count):
                if ((i&0x0f) == 0):
                    sys.stdout.write("\n%04x:  " % i)
                sys.stdout.write("%02x " % (data_out[i] & 0xff))
                if (((i+1)&0x07) == 0):
                    sys.stdout.write(" ")
            sys.stdout.write("\n\n")

            trans_num = trans_num + 1

            # Sleep a tad to make sure slave has time to process this request
            aa_sleep_ms(10)
#==========================================================================
# MAIN PROGRAM
#==========================================================================
# Parse and validate the command line: PORT SLAVE_ADDR filename.
if (len(sys.argv) < 4):
    print("usage: aai2c_file PORT SLAVE_ADDR filename")
    print("  SLAVE_ADDR is the target slave address")
    print("")
    print("  'filename' should contain data to be sent")
    print("  to the downstream i2c device")
    sys.exit()
port = int(sys.argv[1])
# Base 0 lets the address be given in decimal, 0x.., 0o.. or 0b.. notation.
addr = int(sys.argv[2], 0)
filename = sys.argv[3]
# Open the Aardvark adapter; non-positive handles are error codes.
handle = aa_open(port)
if (handle <= 0):
    print("Unable to open Aardvark device on port %d" % port)
    print("Error code = %d" % handle)
    sys.exit()
# Ensure that the I2C subsystem is enabled
aa_configure(handle, AA_CONFIG_SPI_I2C)
# Enable the I2C bus pullup resistors (2.2k resistors).
# This command is only effective on v2.0 hardware or greater.
# The pullup resistors on the v1.02 hardware are enabled by default.
aa_i2c_pullup(handle, AA_I2C_PULLUP_BOTH)
# Enable the Aardvark adapter's power supply.
# This command is only effective on v2.0 hardware or greater.
# The power pins on the v1.02 hardware are not enabled by default.
aa_target_power(handle, AA_TARGET_POWER_BOTH)
# Set the bitrate
bitrate = aa_i2c_bitrate(handle, I2C_BITRATE)
print("Bitrate set to %d kHz" % bitrate)
# Stream the file to the slave, then release the adapter.
blast_bytes(handle, addr, filename)
# Close the device
aa_close(handle)
| 35.546099
| 75
| 0.52933
|
4a0f8ee6a1d3a145dff4c5257bd5add381cc20b5
| 8,234
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/choropleth/marker/__init__.py
|
potpath/plotly.py
|
46cd47f441d8bda9b14b4ba66a33f02731faf8f0
|
[
"MIT"
] | 1
|
2020-04-06T20:57:36.000Z
|
2020-04-06T20:57:36.000Z
|
packages/python/plotly/plotly/graph_objs/choropleth/marker/__init__.py
|
potpath/plotly.py
|
46cd47f441d8bda9b14b4ba66a33f02731faf8f0
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/choropleth/marker/__init__.py
|
potpath/plotly.py
|
46cd47f441d8bda9b14b4ba66a33f02731faf8f0
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Styling of the line bounding choropleth marker points.

    Exposes the `color`, `colorsrc`, `width` and `widthsrc` properties of
    the `choropleth.marker.line` trace node.  NOTE(review): this module
    appears to be code-generated (plotly graph_objs layout) -- confirm
    before hand-editing; changes are normally made in the generator.
    """

    # color
    # -----
    @property
    def color(self):
        """
        Sets themarker.linecolor. It accepts either a specific color or
        an array of numbers that are mapped to the colorscale relative
        to the max and min values of the array or relative to
        `marker.line.cmin` and `marker.line.cmax` if set.
    
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on plot.ly for  color .
    
        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the width (in px) of the lines bounding the marker points.
    
        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # widthsrc
    # --------
    @property
    def widthsrc(self):
        """
        Sets the source reference on plot.ly for  width .
    
        The 'widthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["widthsrc"]

    @widthsrc.setter
    def widthsrc(self, val):
        self["widthsrc"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node inside the figure hierarchy.
        return "choropleth.marker"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets themarker.linecolor. It accepts either a specific
            color or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        colorsrc
            Sets the source reference on plot.ly for  color .
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on plot.ly for  width .
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
    ):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.choropleth.marker.Line`
        color
            Sets themarker.linecolor. It accepts either a specific
            color or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        colorsrc
            Sets the source reference on plot.ly for  color .
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on plot.ly for  width .

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choropleth.marker.Line 
constructor must be a dict or 
an instance of :class:`plotly.graph_objs.choropleth.marker.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        # When True, unknown/invalid properties are ignored instead of raising.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.choropleth.marker import line as v_line

        # Initialize validators
        # ---------------------
        self._validators["color"] = v_line.ColorValidator()
        self._validators["colorsrc"] = v_line.ColorsrcValidator()
        self._validators["width"] = v_line.WidthValidator()
        self._validators["widthsrc"] = v_line.WidthsrcValidator()

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("color", None)
        self["color"] = color if color is not None else _v
        _v = arg.pop("colorsrc", None)
        self["colorsrc"] = colorsrc if colorsrc is not None else _v
        _v = arg.pop("width", None)
        self["width"] = width if width is not None else _v
        _v = arg.pop("widthsrc", None)
        self["widthsrc"] = widthsrc if widthsrc is not None else _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 34.165975
| 86
| 0.56121
|
4a0f8fc1d9a40cd8c1e83ca1828ac95fba5999ab
| 4,499
|
py
|
Python
|
src/preprocess.py
|
HeosSacer/SSVEP-Brain-Computer-Interface
|
1c4a0c899475d484f4427a94e65cfbd8b71c6904
|
[
"MIT"
] | 4
|
2019-12-09T04:37:55.000Z
|
2021-11-05T13:49:55.000Z
|
src/preprocess.py
|
swxie/SSVEP-Brain-Computer-Interface
|
1c4a0c899475d484f4427a94e65cfbd8b71c6904
|
[
"MIT"
] | null | null | null |
src/preprocess.py
|
swxie/SSVEP-Brain-Computer-Interface
|
1c4a0c899475d484f4427a94e65cfbd8b71c6904
|
[
"MIT"
] | 3
|
2019-11-24T03:07:45.000Z
|
2022-02-26T10:04:00.000Z
|
import mne
import os
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
from mne.time_frequency import psd_welch
# Silence mne's verbose per-call logging.
mne.set_log_level("ERROR")
# Directory holding the raw recording .mat files and the preprocessed output.
recording_dir = os.path.join(os.path.dirname(__file__), "..", "REC")
# Recording parameters: 15 s stimulation windows sampled at 256 Hz.
stimulation_duration = 15.0
sampling_frequency = 256.0
# Frequency band (Hz) over which the PSD features are computed.
fmin = 10.0
fmax = 20.0
# Channel layout of the recordings; all four channels are treated as EEG.
info = mne.create_info(ch_names=["O1", "OZ", "O2", "reference"],
                       ch_types=["eeg", "eeg", "eeg", "eeg"],
                       sfreq=sampling_frequency)
def extract_features(channel_data):
    """Compute Welch power-spectral-density features for one data window.

    *channel_data* is an (n_channels, n_samples) array matching the
    module-level `info` layout.  Returns the (psds, frequencies) pair
    produced by `psd_welch` over the [fmin, fmax] band.
    """
    raw = mne.io.RawArray(data=channel_data, info=info)
    return psd_welch(raw, fmin=fmin, fmax=fmax)
def preprocess_recordings(output_file=os.path.join(recording_dir, "preprocessed_data")):
    """
    Preprocess all the data from the recording.mat files and saves it with the following
    structure:
    {
        'frequencies': [f1, f2, ...], # psd frequencies
        'labels' : [l1, l2, ...], # labels for feature vectors
        'features' : [
            [psd_of_f1_c1, psd_of_f2_c1, ..., psd_of_f1_c2, ...], # of channels for feature 1
            [psd_of_f1, psd_of_f2, ...] # of channels for feature 2
        ],
        'file_ids' : [0, 0, ..., 1, 1, ...] # unique id for the file the data comes from
    }
    """
    frequencies = []
    features = []
    labels = []
    file_ids = []
    # Each stimulation window is cut into n_splits equal sub-windows;
    # `offset` is the sub-window length in samples.
    n_splits = 15
    offset = int(sampling_frequency * stimulation_duration / n_splits)
    # Running per-stimulation-frequency PSD sums and counts, used below to
    # plot the mean PSD per stimulation frequency.
    frequency_to_pds_mean = dict()
    normalisation_factor_of = dict()
    for file_id, recording_mat_file in enumerate(os.listdir(recording_dir)):
        if not recording_mat_file.endswith(".mat"):
            continue
        recording_mat_file = os.path.join(recording_dir, recording_mat_file)
        recording_mat = sio.loadmat(recording_mat_file)
        Y = recording_mat["Y"][0, :]
        channel_data = recording_mat["X"]
        time_stamp_indexes = recording_mat["trial"][0, :]
        # Every second trial marker is a stimulation start; the markers in
        # between are breaks (see the label comment below).
        for i, stimulation_begin in np.ndenumerate(time_stamp_indexes[::2]):
            file_ids.append(file_id)
            # np.ndenumerate yields index *tuples*; unwrap the single axis.
            i = i[0]
            stimulation_end = stimulation_begin + int(sampling_frequency * stimulation_duration)
            for j in range(n_splits):
                current_channel_data = channel_data[stimulation_begin : stimulation_begin + offset]
                stimulation_begin += offset
                psds, frequencies = extract_features(current_channel_data.T)
                # only the even indexed labels contain labels with stimulation frequencies
                # uneven indexed labels are breaks and therefor always zero
                labels.append(Y[i * 2])
                features.append(psds.flatten())
                if Y[i * 2] in frequency_to_pds_mean:
                    frequency_to_pds_mean[Y[i * 2]] += psds
                    normalisation_factor_of[Y[i * 2]] += 1
                else:
                    frequency_to_pds_mean[Y[i * 2]] = psds
                    normalisation_factor_of[Y[i * 2]] = 1
    for frequency in frequency_to_pds_mean.keys():
        # Average the accumulated PSDs, then plot one figure per frequency.
        # NOTE(review): these plot labels put "Reference" at channel index 0,
        # but `info` above lists channels as O1, OZ, O2, reference -- confirm
        # which ordering is correct.
        frequency_to_pds_mean[frequency] /= normalisation_factor_of[frequency]
        plt.plot(frequencies, frequency_to_pds_mean[frequency][0], label="Reference")
        plt.plot(frequencies, frequency_to_pds_mean[frequency][1], label="O1")
        plt.plot(frequencies, frequency_to_pds_mean[frequency][2], label="OZ")
        plt.plot(frequencies, frequency_to_pds_mean[frequency][3], label="O2")
        plt.legend()
        plt.title("Stimulation Frequency " + str(frequency) + " (Hz)")
        plt.ylabel("Relative Amplitude")
        plt.xlabel("Frequency (Hz)")
        # plt.savefig("pds_" + str(frequency) + ".png")
        # plt.show()
        plt.clf()
    # np.save appends ".npy", matching load_preprocessed_data's default path.
    np.save(output_file, {
        "features" : features,
        "labels" : labels,
        "file_ids" : file_ids,
        "frequencies" : frequencies
    })
def load_preprocessed_data(input_file=None):
    """Load the dict written by preprocess_recordings.

    :param input_file: path to the .npy file; defaults to
        ``REC/preprocessed_data.npy``.  The default is resolved lazily at
        call time (the old eager default evaluated the path at import time).
    :return: (features, labels, file_ids, frequencies) tuple.
    """
    if input_file is None:
        input_file = os.path.join(recording_dir, "preprocessed_data.npy")
    # The file holds a 0-d object array wrapping a dict; NumPy >= 1.16.3
    # refuses to load object arrays unless allow_pickle is set explicitly.
    file_content = np.load(input_file, allow_pickle=True).item()
    features = file_content["features"]
    labels = file_content["labels"]
    frequencies = file_content["frequencies"]
    file_ids = file_content["file_ids"]
    return features, labels, file_ids, frequencies
if __name__ == '__main__':
    # Build REC/preprocessed_data.npy from the raw .mat recordings.
    preprocess_recordings()
    # features, labels, file_ids, frequencies = load_preprocessed_data()
| 35.425197
| 99
| 0.619693
|
4a0f9156cb6e5286f0be0f3db37afae3085be0c5
| 2,786
|
py
|
Python
|
pychron/processing/permutator/view.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 1
|
2019-02-27T21:57:44.000Z
|
2019-02-27T21:57:44.000Z
|
pychron/processing/permutator/view.py
|
ael-noblegas/pychron
|
6ebbbb1f66a614972b62b7a9be4c784ae61b5d62
|
[
"Apache-2.0"
] | 80
|
2018-07-17T20:10:20.000Z
|
2021-08-17T15:38:24.000Z
|
pychron/processing/permutator/view.py
|
AGESLDEO/pychron
|
1a81e05d9fba43b797f335ceff6837c016633bcf
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Instance, List, Property
from traitsui.api import View, UItem, TabularEditor
# ============= standard library imports ========================
from numpy import array
from uncertainties import nominal_value
# ============= local library imports ==========================
from traitsui.tabular_adapter import TabularAdapter
from pychron.core.helpers.formatting import floatfmt
from pychron.pipeline.plot.editors.graph_editor import GraphEditor
class ResultsAdapter(TabularAdapter):
    """Tabular adapter that formats ResultRecord rows for the results table."""
    columns = [('Identifier', 'identifier'),
               ('Min (Ma)', 'mi'),
               ('Max (Ma)', 'ma'),
               ('Spread (Ma)', 'spread'),
               ('Std.', 'std')]
    # Computed text columns: the numeric fields rendered through floatfmt.
    mi_text = Property
    ma_text = Property
    spread_text = Property
    std_text = Property

    def _get_mi_text(self):
        return floatfmt(self.item.mi)

    def _get_ma_text(self):
        return floatfmt(self.item.ma)

    def _get_spread_text(self):
        return floatfmt(self.item.spread)

    def _get_std_text(self):
        return floatfmt(self.item.std)
class ResultRecord(object):
    """Summary statistics (min/max/spread/std of age) for a group of records.

    Built from analysis records exposing `.age` (an uncertainties value)
    and `.identifier`; the identifier of the first record labels the group.
    """
    ma = 0
    mi = 0
    spread = 0
    std = 0
    identifier = ''

    def __init__(self, records):
        # Nominal (central) age value of every record in the group.
        age_values = array([nominal_value(rec.age) for rec in records])
        self.mi, self.ma = min(age_values), max(age_values)
        self.spread = self.ma - self.mi
        self.std = age_values.std()
        self.identifier = records[0].identifier
class PermutatorResultsView(HasTraits):
    """View pairing a graph editor with a table of per-group result rows."""
    editor = Instance(GraphEditor)
    results = List

    def append_results(self, records):
        # Collapse the records into one ResultRecord summary row.
        self.results.append(ResultRecord(records))

    def traits_view(self):
        """Default traits UI: the graph on top, the results table below."""
        v = View(UItem('editor', style='custom'),
                 UItem('results', editor=TabularEditor(adapter=ResultsAdapter())),
                 width=700,
                 height=600)
        return v
# ============= EOF =============================================
| 31.303371
| 82
| 0.589375
|
4a0f916a159d2351088d1681139523f08cd5f3a6
| 8,472
|
py
|
Python
|
Python/autoencoders_keras/variational_autoencoder.py
|
fyumoto/AE_Keras
|
e923b41aa5782fe6c7b1ce8ac3c7031e2395867f
|
[
"Apache-2.0"
] | 1
|
2020-03-22T14:11:38.000Z
|
2020-03-22T14:11:38.000Z
|
Python/autoencoders_keras/variational_autoencoder.py
|
fyumoto/AE_Keras
|
e923b41aa5782fe6c7b1ce8ac3c7031e2395867f
|
[
"Apache-2.0"
] | null | null | null |
Python/autoencoders_keras/variational_autoencoder.py
|
fyumoto/AE_Keras
|
e923b41aa5782fe6c7b1ce8ac3c7031e2395867f
|
[
"Apache-2.0"
] | null | null | null |
# License
# Copyright 2018 Hamaad Musharaf Shah
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import math
import inspect
from sklearn.base import BaseEstimator, TransformerMixin
import keras
from keras.layers import Input, Dense, BatchNormalization, Dropout, Lambda, add
from keras.models import Model, Sequential
import tensorflow
from autoencoders_keras.loss_history import LossHistory
class VariationalAutoencoder(BaseEstimator,
                             TransformerMixin):
    """Variational autoencoder as a scikit-learn compatible transformer.

    `fit` trains an encoder/decoder pair with the VAE loss (squared-error
    reconstruction + KL divergence); `transform` returns the latent means
    (`mu`) of the fitted encoder.  Decoder layers are kept in a dict so the
    same trained layers can be rewired into a standalone `generator` model.
    """

    def __init__(self,
                 n_feat=None,
                 n_epoch=None,
                 batch_size=None,
                 encoder_layers=None,
                 decoder_layers=None,
                 n_hidden_units=None,
                 encoding_dim=None,
                 denoising=None):
        # Store every constructor argument as an attribute of the same name
        # (scikit-learn estimator convention, done via introspection).
        args, _, _, values = inspect.getargvalues(inspect.currentframe())
        values.pop("self")
        for arg, val in values.items():
            setattr(self, arg, val)

        loss_history = LossHistory()
        early_stop = keras.callbacks.EarlyStopping(monitor="val_loss",
                                                   patience=10)
        reduce_learn_rate = keras.callbacks.ReduceLROnPlateau(monitor="val_loss",
                                                              factor=0.1,
                                                              patience=20)
        self.callbacks_list = [loss_history, early_stop, reduce_learn_rate]

        # Encoder: BatchNorm -> Dense(elu) -> Dropout stacks; the final layer
        # omits Dropout.  NOTE(review): assumes encoder_layers >= 2 so that
        # both the first and the last branch run -- confirm with callers.
        for i in range(self.encoder_layers):
            if i == 0:
                self.input_data = Input(shape=(self.n_feat,))
                self.encoded = BatchNormalization()(self.input_data)
                self.encoded = Dense(units=self.n_hidden_units, activation="elu")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i > 0 and i < self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = Dense(units=self.n_hidden_units, activation="elu")(self.encoded)
                self.encoded = Dropout(rate=0.5)(self.encoded)
            elif i == self.encoder_layers - 1:
                self.encoded = BatchNormalization()(self.encoded)
                self.encoded = Dense(units=self.n_hidden_units, activation="elu")(self.encoded)

        # Latent Gaussian parameters and the reparameterization-trick sample.
        self.mu = Dense(units=self.encoding_dim, activation="linear")(self.encoded)
        self.log_sigma = Dense(units=self.encoding_dim, activation="linear")(self.encoded)
        z = Lambda(self.sample_z, output_shape=(self.encoding_dim,))([self.mu, self.log_sigma])

        # Decoder layers are stored in decoded_layers_dict (keyed by creation
        # order) so fit() can reuse the *trained* layers for the generator.
        self.decoded_layers_dict = {}
        decoder_counter = 0
        for i in range(self.decoder_layers):
            if i == 0:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization()
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(units=self.n_hidden_units, activation="elu")
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dropout(rate=0.5)
                self.decoded = self.decoded_layers_dict[decoder_counter - 2](z)
                self.decoded = self.decoded_layers_dict[decoder_counter - 1](self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](self.decoded)
                decoder_counter += 1
            elif i > 0 and i < self.decoder_layers - 1:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization()
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(units=self.n_hidden_units, activation="elu")
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dropout(rate=0.5)
                self.decoded = self.decoded_layers_dict[decoder_counter - 2](self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter - 1](self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](self.decoded)
                decoder_counter += 1
            elif i == self.decoder_layers - 1:
                self.decoded_layers_dict[decoder_counter] = BatchNormalization()
                decoder_counter += 1
                self.decoded_layers_dict[decoder_counter] = Dense(units=self.n_hidden_units, activation="elu")
                self.decoded = self.decoded_layers_dict[decoder_counter - 1](self.decoded)
                self.decoded = self.decoded_layers_dict[decoder_counter](self.decoded)
                decoder_counter += 1

        # Output would have shape: (batch_size, n_feat).
        self.decoded_layers_dict[decoder_counter] = Dense(units=self.n_feat, activation="sigmoid")
        self.decoded = self.decoded_layers_dict[decoder_counter](self.decoded)

        self.autoencoder = Model(self.input_data, self.decoded)
        self.autoencoder.compile(optimizer=keras.optimizers.Adam(),
                                 loss=self.vae_loss)

    def fit(self,
            X,
            y=None):
        """Train the autoencoder on X and build the encoder/generator models.

        When `denoising` is set the network input is X + denoising while the
        reconstruction target stays X (presumably additive noise -- TODO
        confirm what callers pass).  Returns self.
        """
        self.autoencoder.fit(X if self.denoising is None else X + self.denoising, X,
                             validation_split=0.3,
                             epochs=self.n_epoch,
                             batch_size=self.batch_size,
                             shuffle=True,
                             callbacks=self.callbacks_list,
                             verbose=1)

        # Encoder maps inputs to the latent means (used by transform()).
        self.encoder = Model(self.input_data, self.mu)

        # Rebuild the decoder as a standalone generator from latent samples,
        # reusing the already-trained layers in decoded_layers_dict.
        self.generator_input = Input(shape=(self.encoding_dim,))
        self.generator_output = None
        decoder_counter = 0
        for i in range(self.decoder_layers):
            if i == 0:
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_input)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
            elif i > 0 and i < self.decoder_layers - 1:
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
            elif i == self.decoder_layers - 1:
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)
                decoder_counter += 1
                self.generator_output = self.decoded_layers_dict[decoder_counter](self.generator_output)

        self.generator = Model(self.generator_input, self.generator_output)

        return self

    def transform(self,
                  X):
        """Return the latent mean vectors (mu) for X."""
        return self.encoder.predict(X)

    def sample_z(self,
                 args):
        """Reparameterization trick: sample z = mu + sigma * eps, eps~N(0,1)."""
        mu_, log_sigma_ = args
        eps = keras.backend.random_normal(shape=(keras.backend.shape(mu_)[0], self.encoding_dim),
                                          mean=0.0,
                                          stddev=1.0)
        out = mu_ + keras.backend.exp(log_sigma_ / 2) * eps
        return out

    def vae_loss(self,
                 y_true,
                 y_pred):
        """Sum of squared reconstruction error and the Gaussian KL term."""
        recon = keras.backend.sum(x=keras.backend.square(y_pred - y_true))
        kl = -0.5 * keras.backend.sum(x=1.0 + self.log_sigma - keras.backend.exp(self.log_sigma) - keras.backend.square(self.mu))
        return recon + kl
| 48.411429
| 307
| 0.601629
|
4a0f91d1793e90d7b8ec86a43cbcd8b0fbf0985f
| 143,125
|
py
|
Python
|
lbrynet/extras/daemon/Daemon.py
|
preethamvishy/lbry
|
5a1f42ee5491ea6f1b49f8ddd89e4ee37a7598ec
|
[
"MIT"
] | null | null | null |
lbrynet/extras/daemon/Daemon.py
|
preethamvishy/lbry
|
5a1f42ee5491ea6f1b49f8ddd89e4ee37a7598ec
|
[
"MIT"
] | null | null | null |
lbrynet/extras/daemon/Daemon.py
|
preethamvishy/lbry
|
5a1f42ee5491ea6f1b49f8ddd89e4ee37a7598ec
|
[
"MIT"
] | null | null | null |
import logging.handlers
import mimetypes
import os
import requests
import urllib
import json
import textwrap
from typing import Callable, Optional, List
from operator import itemgetter
from binascii import hexlify, unhexlify
from copy import deepcopy
from twisted.internet import defer, reactor
from twisted.internet.task import LoopingCall
from twisted.python.failure import Failure
from torba.client.baseaccount import SingleKey, HierarchicalDeterministic
from lbrynet import conf, utils, __version__
from lbrynet.dht.error import TimeoutError
from lbrynet.blob.blob_file import is_valid_blobhash
from lbrynet.extras import system_info
from lbrynet.extras.reflector import reupload
from lbrynet.extras.daemon.Components import d2f, f2d
from lbrynet.extras.daemon.Components import WALLET_COMPONENT, DATABASE_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT
from lbrynet.extras.daemon.Components import FILE_MANAGER_COMPONENT, RATE_LIMITER_COMPONENT
from lbrynet.extras.daemon.Components import EXCHANGE_RATE_MANAGER_COMPONENT, PAYMENT_RATE_COMPONENT, UPNP_COMPONENT
from lbrynet.extras.daemon.ComponentManager import RequiredCondition
from lbrynet.extras.daemon.Downloader import GetStream
from lbrynet.extras.daemon.Publisher import Publisher
from lbrynet.extras.daemon.auth.server import AuthJSONRPCServer
from lbrynet.extras.wallet import LbryWalletManager
from lbrynet.extras.wallet.account import Account as LBCAccount
from lbrynet.extras.wallet.dewies import dewies_to_lbc, lbc_to_dewies
from lbrynet.p2p.StreamDescriptor import download_sd_blob
from lbrynet.p2p.Error import InsufficientFundsError, UnknownNameError, DownloadDataTimeout, DownloadSDTimeout
from lbrynet.p2p.Error import NullFundsError, NegativeFundsError, ResolveError
from lbrynet.p2p.Peer import Peer
from lbrynet.p2p.SinglePeerDownloader import SinglePeerDownloader
from lbrynet.p2p.client.StandaloneBlobDownloader import StandaloneBlobDownloader
from lbrynet.schema.claim import ClaimDict
from lbrynet.schema.uri import parse_lbry_uri
from lbrynet.schema.error import URIParseError, DecodeError
from lbrynet.schema.validator import validate_claim_id
from lbrynet.schema.address import decode_address
from lbrynet.schema.decode import smart_decode
# Module-level logger.
log = logging.getLogger(__name__)
# Shortcut to the component-requirement decorator used on the API methods below.
requires = AuthJSONRPCServer.requires
# Stream/download status codes reported through the API.
INITIALIZING_CODE = 'initializing'
# TODO: make this consistent with the stages in Downloader.py
DOWNLOAD_METADATA_CODE = 'downloading_metadata'
DOWNLOAD_TIMEOUT_CODE = 'timeout'
DOWNLOAD_RUNNING_CODE = 'running'
DOWNLOAD_STOPPED_CODE = 'stopped'
# (code, human-readable message) pairs; the running message is a %-template.
STREAM_STAGES = [
    (INITIALIZING_CODE, 'Initializing'),
    (DOWNLOAD_METADATA_CODE, 'Downloading metadata'),
    (DOWNLOAD_RUNNING_CODE, 'Started %s, got %s/%s blobs, stream status: %s'),
    (DOWNLOAD_STOPPED_CODE, 'Paused stream'),
    (DOWNLOAD_TIMEOUT_CODE, 'Stream timed out')
]
# Connectivity status codes and their user-facing messages (see jsonrpc_status).
CONNECTION_STATUS_CONNECTED = 'connected'
CONNECTION_STATUS_NETWORK = 'network_connection'
CONNECTION_MESSAGES = {
    CONNECTION_STATUS_CONNECTED: 'No connection problems detected',
    CONNECTION_STATUS_NETWORK: "Your internet connection appears to have been interrupted",
}
# Presumably the length of abbreviated claim ids -- not used in this chunk.
SHORT_ID_LEN = 20
# Presumably an upper bound (LBC) for claim-update fee estimates -- not used in this chunk.
MAX_UPDATE_FEE_ESTIMATE = 0.3
# Sort-direction tokens accepted by list APIs (see _parse_lbry_files_sort).
DIRECTION_ASCENDING = 'asc'
DIRECTION_DESCENDING = 'desc'
DIRECTIONS = DIRECTION_ASCENDING, DIRECTION_DESCENDING
async def maybe_paginate(get_records: Callable, get_record_count: Callable,
                         page: Optional[int], page_size: Optional[int], **constraints):
    """
    Fetch records, optionally paginated.

    If both `page` and `page_size` are given, `offset`/`limit` are added to the
    constraints (for both the record and the count query, as before) and a dict
    with the items, total page count and paging parameters is returned.
    Otherwise the raw result of `get_records` is returned.

    :param get_records: async callable returning records for **constraints
    :param get_record_count: async callable returning the total record count
    :param page: 1-based page number, or None to disable pagination
    :param page_size: records per page, or None to disable pagination
    """
    if None not in (page, page_size):
        constraints.update({
            "offset": page_size * (page-1),
            "limit": page_size
        })
        total_items = await get_record_count(**constraints)
        return {
            "items": await get_records(**constraints),
            # integer ceiling division; the previous int(x / page_size) used
            # float division, which can round incorrectly for large counts
            "total_pages": (total_items + page_size - 1) // page_size,
            "page": page, "page_size": page_size
        }
    return await get_records(**constraints)
class IterableContainer:
    """Mixin exposing every public (non-underscore) attribute as an iterable
    collection, with membership testing against the attribute values."""

    def __iter__(self):
        """Yield the value of each attribute whose name has no leading underscore."""
        public_names = (name for name in dir(self) if not name.startswith("_"))
        for name in public_names:
            yield getattr(self, name)

    def __contains__(self, item):
        """Return True if *item* equals any public attribute value."""
        return any(item == value for value in self)
class Checker:
    """The looping calls the daemon runs"""
    # (looping-call name, interval in seconds) -- unpacked in Daemon.__init__.
    INTERNET_CONNECTION = 'internet_connection_checker', 300
    # CONNECTION_STATUS = 'connection_status_checker'
class _FileID(IterableContainer):
    """The different ways a file can be identified"""
    # Each value doubles as an attribute name on lbry file objects;
    # _get_lbry_file matches search values against lbry_file.__dict__.
    SD_HASH = 'sd_hash'
    FILE_NAME = 'file_name'
    STREAM_HASH = 'stream_hash'
    ROWID = "rowid"
    CLAIM_ID = "claim_id"
    OUTPOINT = "outpoint"
    TXID = "txid"
    NOUT = "nout"
    CHANNEL_CLAIM_ID = "channel_claim_id"
    CLAIM_NAME = "claim_name"
    CHANNEL_NAME = "channel_name"
# Singleton used for `x in FileID` membership tests (see _get_lbry_file).
FileID = _FileID()
# TODO add login credentials in a conf file
# TODO alert if your copy of a lbry file is out of date with the name record
class NoValidSearch(Exception):
    """Raised when a file-search key is not one of the FileID fields."""
    pass
class CheckInternetConnection:
    """Callable run periodically (via a LoopingCall) to refresh the daemon's
    connectivity flag."""
    def __init__(self, daemon):
        self.daemon = daemon
    def __call__(self):
        # utils.check_connection() presumably probes a remote host -- TODO confirm
        self.daemon.connected_to_internet = utils.check_connection()
class AlwaysSend:
    """Wrap a value-producing callable so each call yields a (True, value) pair."""
    def __init__(self, value_generator, *args, **kwargs):
        self.value_generator = value_generator
        self.args = args
        self.kwargs = kwargs
    def __call__(self):
        d = defer.maybeDeferred(self.value_generator, *self.args, **self.kwargs)
        # The True flag presumably means "always send this value" to the
        # consumer of the pair -- the consumer is not visible in this file chunk.
        d.addCallback(lambda v: (True, v))
        return d
def sort_claim_results(claims):
    """Sort claims in place by (height, name, claim_id, txid, nout) and return the list."""
    claims.sort(key=itemgetter('height', 'name', 'claim_id', 'txid', 'nout'))
    return claims
def is_first_run():
    """Return True when no prior lbrynet state exists on disk (no db revision
    file, no sqlite database, no blockchain headers)."""
    def _state_paths():
        # yielded lazily so conf lookups only happen if earlier checks pass,
        # preserving the original short-circuit order
        yield conf.settings.get_db_revision_filename()
        yield os.path.join(conf.settings['data_dir'], 'lbrynet.sqlite')
        yield os.path.join(conf.settings['lbryum_wallet_dir'], 'blockchain_headers')
    return not any(os.path.isfile(path) for path in _state_paths())
# Names registered by the RequiredCondition subclasses below.
DHT_HAS_CONTACTS = "dht_has_contacts"
WALLET_IS_UNLOCKED = "wallet_is_unlocked"
class DHTHasContacts(RequiredCondition):
    """API precondition: the DHT node must know at least one peer."""
    name = DHT_HAS_CONTACTS
    component = DHT_COMPONENT
    message = "your node is not connected to the dht"
    @staticmethod
    def evaluate(component):
        # True when the dht component's routing table has any contacts
        return len(component.contacts) > 0
class WalletIsUnlocked(RequiredCondition):
    """API precondition: the wallet must not be locked."""
    name = WALLET_IS_UNLOCKED
    component = WALLET_COMPONENT
    message = "your wallet is locked"
    @staticmethod
    def evaluate(component):
        return not component.check_locked()
class Daemon(AuthJSONRPCServer):
    """
    LBRYnet daemon, a jsonrpc interface to lbry functions
    """
    # Maps component names to the Daemon attribute each component is exposed
    # as. Presumably consumed by AuthJSONRPCServer / the component manager to
    # populate the attributes set to None in __init__ -- TODO confirm.
    component_attributes = {
        DATABASE_COMPONENT: "storage",
        DHT_COMPONENT: "dht_node",
        WALLET_COMPONENT: "wallet_manager",
        FILE_MANAGER_COMPONENT: "file_manager",
        EXCHANGE_RATE_MANAGER_COMPONENT: "exchange_rate_manager",
        PAYMENT_RATE_COMPONENT: "payment_rate_manager",
        RATE_LIMITER_COMPONENT: "rate_limiter",
        BLOB_COMPONENT: "blob_manager",
        UPNP_COMPONENT: "upnp"
    }
    def __init__(self, analytics_manager=None, component_manager=None):
        to_skip = conf.settings['components_to_skip']
        # skip the reflector server unless it is explicitly configured to run
        if 'reflector' not in to_skip and not conf.settings['run_reflector_server']:
            to_skip.append('reflector')
        # name -> (LoopingCall, interval seconds); started by the base class
        looping_calls = {
            Checker.INTERNET_CONNECTION[0]: (LoopingCall(CheckInternetConnection(self)),
                                             Checker.INTERNET_CONNECTION[1])
        }
        AuthJSONRPCServer.__init__(self, analytics_manager=analytics_manager, component_manager=component_manager,
                                   use_authentication=conf.settings['use_auth_http'],
                                   use_https=conf.settings['use_https'], to_skip=to_skip, looping_calls=looping_calls)
        self.is_first_run = is_first_run()
        # TODO: move this to a component
        # refreshed periodically by CheckInternetConnection
        self.connected_to_internet = True
        self.connection_status_code = None
        # components
        # TODO: delete these, get the components where needed
        self.storage = None
        self.dht_node = None
        self.wallet_manager: LbryWalletManager = None
        self.file_manager = None
        self.exchange_rate_manager = None
        self.payment_rate_manager = None
        self.rate_limiter = None
        self.blob_manager = None
        self.upnp = None
        # TODO: delete this
        # sd_hash -> in-flight GetStream download (see _download_name)
        self.streams = {}
@property
def default_wallet(self):
try:
return self.wallet_manager.default_wallet
except AttributeError:
return None
@property
def default_account(self):
try:
return self.wallet_manager.default_account
except AttributeError:
return None
@property
def ledger(self):
try:
return self.wallet_manager.default_account.ledger
except AttributeError:
return None
    @defer.inlineCallbacks
    def setup(self):
        """Log startup info, then run the base class component setup."""
        log.info("Starting lbrynet-daemon")
        log.info("Platform: %s", json.dumps(system_info.get_platform()))
        yield super().setup()
        log.info("Started lbrynet-daemon")
def _stop_streams(self):
"""stop pending GetStream downloads"""
for sd_hash, stream in self.streams.items():
stream.cancel(reason="daemon shutdown")
    def _shutdown(self):
        """Stop in-flight downloads, then run the base class shutdown."""
        self._stop_streams()
        return super()._shutdown()
    def _download_blob(self, blob_hash, rate_manager=None, timeout=None):
        """
        Download a blob

        :param blob_hash (str): blob hash
        :param rate_manager (PaymentRateManager), optional: the payment rate manager to use,
                                                         defaults to self.payment_rate_manager
        :param timeout (int): blob timeout, defaults to 30 seconds
        :return: BlobFile (deferred)
        """
        if not blob_hash:
            raise Exception("Nothing to download")
        rate_manager = rate_manager or self.payment_rate_manager
        timeout = timeout or 30
        downloader = StandaloneBlobDownloader(
            blob_hash, self.blob_manager, self.component_manager.peer_finder, self.rate_limiter,
            rate_manager, self.wallet_manager, timeout
        )
        return downloader.download()
    @defer.inlineCallbacks
    def _get_stream_analytics_report(self, claim_dict):
        """Collect best-effort analytics about a stream; lookup failures
        degrade to None/0 instead of propagating."""
        sd_hash = claim_dict.source_hash.decode()
        try:
            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
        except Exception:
            stream_hash = None
        report = {
            "sd_hash": sd_hash,
            "stream_hash": stream_hash,
        }
        blobs = {}
        try:
            sd_host = yield self.blob_manager.get_host_downloaded_from(sd_hash)
        except Exception:
            sd_host = None
        report["sd_blob"] = sd_host
        if stream_hash:
            blob_infos = yield self.storage.get_blobs_for_stream(stream_hash)
            report["known_blobs"] = len(blob_infos)
        else:
            blob_infos = []
            report["known_blobs"] = 0
        # for blob_hash, blob_num, iv, length in blob_infos:
        #     try:
        #         host = yield self.session.blob_manager.get_host_downloaded_from(blob_hash)
        #     except Exception:
        #         host = None
        #     if host:
        #         blobs[blob_num] = host
        # report["blobs"] = json.dumps(blobs)
        defer.returnValue(report)
    @defer.inlineCallbacks
    def _download_name(self, name, claim_dict, sd_hash, txid, nout, timeout=None, file_name=None):
        """
        Add a lbry file to the file manager, start the download, and return the new lbry file.
        If it already exists in the file manager, return the existing lbry file
        """
        @defer.inlineCallbacks
        def _download_finished(download_id, name, claim_dict):
            report = yield self._get_stream_analytics_report(claim_dict)
            self.analytics_manager.send_download_finished(download_id, name, report, claim_dict)
            self.analytics_manager.send_new_download_success(download_id, name, claim_dict)
        @defer.inlineCallbacks
        def _download_failed(error, download_id, name, claim_dict):
            report = yield self._get_stream_analytics_report(claim_dict)
            self.analytics_manager.send_download_errored(error, download_id, name, claim_dict,
                                                         report)
            self.analytics_manager.send_new_download_fail(download_id, name, claim_dict, error)
        if sd_hash in self.streams:
            # a download for this stream is already in flight; just wait on it
            downloader = self.streams[sd_hash]
            result = yield downloader.finished_deferred
            defer.returnValue(result)
        else:
            download_id = utils.random_string()
            self.analytics_manager.send_download_started(download_id, name, claim_dict)
            self.analytics_manager.send_new_download_start(download_id, name, claim_dict)
            self.streams[sd_hash] = GetStream(
                self.file_manager.sd_identifier, self.wallet_manager, self.exchange_rate_manager, self.blob_manager,
                self.component_manager.peer_finder, self.rate_limiter, self.payment_rate_manager, self.storage,
                conf.settings['max_key_fee'], conf.settings['disable_max_key_fee'], conf.settings['data_rate'],
                timeout
            )
            try:
                lbry_file, finished_deferred = yield self.streams[sd_hash].start(
                    claim_dict, name, txid, nout, file_name
                )
                # analytics fire when the whole stream finishes or fails
                finished_deferred.addCallbacks(
                    lambda _: _download_finished(download_id, name, claim_dict),
                    lambda e: _download_failed(e, download_id, name, claim_dict)
                )
                result = yield self._get_lbry_file_dict(lbry_file)
            except Exception as err:
                yield _download_failed(err, download_id, name, claim_dict)
                if isinstance(err, (DownloadDataTimeout, DownloadSDTimeout)):
                    log.warning('Failed to get %s (%s)', name, err)
                else:
                    log.error('Failed to get %s (%s)', name, err)
                if self.streams[sd_hash].downloader and self.streams[sd_hash].code != 'running':
                    yield self.streams[sd_hash].downloader.stop(err)
                result = {'error': str(err)}
            finally:
                # started or failed -- either way, stop tracking the in-flight stream
                del self.streams[sd_hash]
            defer.returnValue(result)
    async def _publish_stream(self, account, name, bid, claim_dict, file_path=None, certificate=None,
                              claim_address=None, change_address=None):
        """
        Publish a stream claim. With `file_path`, a new stream is created and
        published; without it, an existing stream (looked up by its source sd
        hash) is re-published.
        """
        publisher = Publisher(
            account, self.blob_manager, self.payment_rate_manager, self.storage,
            self.file_manager, self.wallet_manager, certificate
        )
        # validates the name; raises for an invalid lbry uri
        parse_lbry_uri(name)
        if not file_path:
            stream_hash = await d2f(self.storage.get_stream_hash_for_sd_hash(
                claim_dict['stream']['source']['source']))
            tx = await publisher.publish_stream(name, bid, claim_dict, stream_hash, claim_address)
        else:
            tx = await publisher.create_and_publish_stream(name, bid, claim_dict, file_path, claim_address)
            if conf.settings['reflect_uploads']:
                # fire-and-forget: reflection failures are only logged
                d = reupload.reflect_file(publisher.lbry_file)
                d.addCallbacks(lambda _: log.info("Reflected new publication to lbry://%s", name),
                               log.exception)
        self.analytics_manager.send_claim_action('publish')
        nout = 0
        txo = tx.outputs[nout]
        log.info("Success! Published to lbry://%s txid: %s nout: %d", name, tx.id, nout)
        return {
            "success": True,
            "tx": tx,
            "claim_id": txo.claim_id,
            "claim_address": self.ledger.hash160_to_address(txo.script.values['pubkey_hash']),
            "output": tx.outputs[nout]
        }
    def _get_or_download_sd_blob(self, blob, sd_hash):
        """Return the local blob when `blob` (a completed-blob list) is
        non-empty, otherwise start a network download of the sd blob."""
        if blob:
            return self.blob_manager.get_blob(blob[0])
        return download_sd_blob(
            sd_hash.decode(), self.blob_manager, self.component_manager.peer_finder, self.rate_limiter,
            self.payment_rate_manager, self.wallet_manager, timeout=conf.settings['peer_search_timeout'],
            download_mirrors=conf.settings['download_mirrors']
        )
    def get_or_download_sd_blob(self, sd_hash):
        """Return previously downloaded sd blob if already in the blob
        manager, otherwise download and return it
        """
        # completed_blobs yields the subset of the given hashes already on disk
        d = self.blob_manager.completed_blobs([sd_hash.decode()])
        d.addCallback(self._get_or_download_sd_blob, sd_hash)
        return d
    def get_size_from_sd_blob(self, sd_blob):
        """
        Get total stream size in bytes from a sd blob
        """
        d = self.file_manager.sd_identifier.get_metadata_for_sd_blob(sd_blob)
        d.addCallback(lambda metadata: metadata.validator.info_to_show())
        # info_to_show() yields key/value pairs; 'stream_size' is the byte count
        d.addCallback(lambda info: int(dict(info)['stream_size']))
        return d
def _get_est_cost_from_stream_size(self, size):
"""
Calculate estimated LBC cost for a stream given its size in bytes
"""
if self.payment_rate_manager.generous:
return 0.0
return size / (10 ** 6) * conf.settings['data_rate']
    async def get_est_cost_using_known_size(self, uri, size):
        """
        Calculate estimated LBC cost for a stream given its size in bytes
        """
        cost = self._get_est_cost_from_stream_size(size)
        resolved = await self.wallet_manager.resolve(uri)
        if uri in resolved and 'claim' in resolved[uri]:
            claim = ClaimDict.load_dict(resolved[uri]['claim']['value'])
            final_fee = self._add_key_fee_to_est_data_cost(claim.source_fee, cost)
            return final_fee
        # NOTE(review): implicitly returns None when the uri does not resolve
        # to a claim -- confirm callers handle a None cost.
    def get_est_cost_from_sd_hash(self, sd_hash):
        """
        Get estimated cost from a sd hash
        """
        # fetch (or download) the sd blob, read the stream size, price it
        d = self.get_or_download_sd_blob(sd_hash)
        d.addCallback(self.get_size_from_sd_blob)
        d.addCallback(self._get_est_cost_from_stream_size)
        return d
    def _get_est_cost_from_metadata(self, metadata, name):
        """Estimate total cost (data + key fee) for a claim's stream; if the
        sd blob cannot be fetched, the data cost falls back to 0."""
        d = self.get_est_cost_from_sd_hash(metadata.source_hash)
        def _handle_err(err):
            if isinstance(err, Failure):
                log.warning(
                    "Timeout getting blob for cost est for lbry://%s, using only key fee", name)
                return 0.0
            raise err
        d.addErrback(_handle_err)
        d.addCallback(lambda data_cost: self._add_key_fee_to_est_data_cost(metadata.source_fee,
                                                                           data_cost))
        return d
def _add_key_fee_to_est_data_cost(self, fee, data_cost):
fee_amount = 0.0 if not fee else self.exchange_rate_manager.convert_currency(fee.currency,
"LBC",
fee.amount)
return data_cost + fee_amount
    async def get_est_cost_from_uri(self, uri):
        """
        Resolve a name and return the estimated stream cost
        """
        resolved = await self.wallet_manager.resolve(uri)
        if resolved:
            claim_response = resolved[uri]
        else:
            claim_response = None
        # None is returned when the uri does not resolve to a usable claim
        result = None
        if claim_response and 'claim' in claim_response:
            if 'value' in claim_response['claim'] and claim_response['claim']['value'] is not None:
                claim_value = ClaimDict.load_dict(claim_response['claim']['value'])
                cost = await d2f(self._get_est_cost_from_metadata(claim_value, uri))
                result = round(cost, 5)
            else:
                log.warning("Failed to estimate cost for %s", uri)
        return result
def get_est_cost(self, uri, size=None):
"""Get a cost estimate for a lbry stream, if size is not provided the
sd blob will be downloaded to determine the stream size
"""
if size is not None:
return self.get_est_cost_using_known_size(uri, size)
return self.get_est_cost_from_uri(uri)
@defer.inlineCallbacks
def _get_lbry_file_dict(self, lbry_file):
key = hexlify(lbry_file.key) if lbry_file.key else None
full_path = os.path.join(lbry_file.download_directory, lbry_file.file_name)
mime_type = mimetypes.guess_type(full_path)[0]
if os.path.isfile(full_path):
with open(full_path) as written_file:
written_file.seek(0, os.SEEK_END)
written_bytes = written_file.tell()
else:
written_bytes = 0
size = yield lbry_file.get_total_bytes()
file_status = yield lbry_file.status()
num_completed = file_status.num_completed
num_known = file_status.num_known
status = file_status.running_status
result = {
'completed': lbry_file.completed,
'file_name': lbry_file.file_name,
'download_directory': lbry_file.download_directory,
'points_paid': lbry_file.points_paid,
'stopped': lbry_file.stopped,
'stream_hash': lbry_file.stream_hash,
'stream_name': lbry_file.stream_name,
'suggested_file_name': lbry_file.suggested_file_name,
'sd_hash': lbry_file.sd_hash,
'download_path': full_path,
'mime_type': mime_type,
'key': key,
'total_bytes': size,
'written_bytes': written_bytes,
'blobs_completed': num_completed,
'blobs_in_stream': num_known,
'status': status,
'claim_id': lbry_file.claim_id,
'txid': lbry_file.txid,
'nout': lbry_file.nout,
'outpoint': lbry_file.outpoint,
'metadata': lbry_file.metadata,
'channel_claim_id': lbry_file.channel_claim_id,
'channel_name': lbry_file.channel_name,
'claim_name': lbry_file.claim_name
}
defer.returnValue(result)
@defer.inlineCallbacks
def _get_lbry_file(self, search_by, val, return_json=False):
lbry_file = None
if search_by in FileID:
for l_f in self.file_manager.lbry_files:
if l_f.__dict__.get(search_by) == val:
lbry_file = l_f
break
else:
raise NoValidSearch(f'{search_by} is not a valid search operation')
if return_json and lbry_file:
lbry_file = yield self._get_lbry_file_dict(lbry_file)
defer.returnValue(lbry_file)
    @defer.inlineCallbacks
    def _get_lbry_files(self, return_json=False, **kwargs):
        """List managed lbry files, filtered by any FileID fields given in
        kwargs; optionally converted to status dicts."""
        lbry_files = list(self.file_manager.lbry_files)
        if kwargs:
            # iter_lbry_file_search_values is defined elsewhere in this module
            for search_type, value in iter_lbry_file_search_values(kwargs):
                lbry_files = [l_f for l_f in lbry_files if l_f.__dict__[search_type] == value]
        if return_json:
            file_dicts = []
            for lbry_file in lbry_files:
                lbry_file_dict = yield self._get_lbry_file_dict(lbry_file)
                file_dicts.append(lbry_file_dict)
            lbry_files = file_dicts
        log.debug("Collected %i lbry files", len(lbry_files))
        defer.returnValue(lbry_files)
def _sort_lbry_files(self, lbry_files, sort_by):
for field, direction in sort_by:
is_reverse = direction == DIRECTION_DESCENDING
key_getter = create_key_getter(field) if field else None
lbry_files = sorted(lbry_files, key=key_getter, reverse=is_reverse)
return lbry_files
def _parse_lbry_files_sort(self, sort):
"""
Given a sort string like 'file_name, desc' or 'points_paid',
parse the string into a tuple of (field, direction).
Direction defaults to ascending.
"""
pieces = [p.strip() for p in sort.split(',')]
field = pieces.pop(0)
direction = DIRECTION_ASCENDING
if pieces and pieces[0] in DIRECTIONS:
direction = pieces[0]
return field, direction
    def _get_single_peer_downloader(self):
        """Create a SinglePeerDownloader wired to the wallet manager."""
        downloader = SinglePeerDownloader()
        downloader.setup(self.wallet_manager)
        return downloader
@defer.inlineCallbacks
def _blob_availability(self, blob_hash, search_timeout, blob_timeout, downloader=None):
if not downloader:
downloader = self._get_single_peer_downloader()
result = {}
search_timeout = search_timeout or conf.settings['peer_search_timeout']
blob_timeout = blob_timeout or conf.settings['sd_download_timeout']
is_available = False
reachable_peers = []
unreachable_peers = []
try:
peers = yield self.jsonrpc_peer_list(blob_hash, search_timeout)
peer_infos = [{"peer": Peer(x['host'], x['port']),
"blob_hash": blob_hash,
"timeout": blob_timeout} for x in peers]
dl = []
dl_peers = []
dl_results = []
for peer_info in peer_infos:
d = downloader.download_temp_blob_from_peer(**peer_info)
dl.append(d)
dl_peers.append("%s:%i" % (peer_info['peer'].host, peer_info['peer'].port))
for dl_peer, (success, download_result) in zip(dl_peers,
(yield defer.DeferredList(dl))):
if success:
if download_result:
reachable_peers.append(dl_peer)
else:
unreachable_peers.append(dl_peer)
dl_results.append(download_result)
is_available = any(dl_results)
except Exception as err:
result['error'] = "Failed to get peers for blob: %s" % err
response = {
'is_available': is_available,
'reachable_peers': reachable_peers,
'unreachable_peers': unreachable_peers,
}
defer.returnValue(response)
############################################################################
# #
# JSON-RPC API methods start here #
# #
############################################################################
    @AuthJSONRPCServer.deprecated("stop")
    def jsonrpc_daemon_stop(self):
        # deprecated no-op: the decorator presumably forwards to `stop` -- TODO confirm
        pass
    def jsonrpc_stop(self):
        """
        Stop lbrynet
        Usage:
            stop
        Options:
            None
        Returns:
            (string) Shutdown message
        """
        log.info("Shutting down lbrynet daemon")
        # fire shutdown slightly later, presumably so this RPC response
        # can be delivered first -- TODO confirm
        reactor.callLater(0.1, reactor.fireSystemEvent, "shutdown")
        return "Shutting down"
    @defer.inlineCallbacks
    def jsonrpc_status(self):
        """
        Get daemon status
        Usage:
            status
        Options:
            None
        Returns:
            (dict) lbrynet-daemon status
            {
                'installation_id': (str) installation id - base58,
                'is_running': (bool),
                'is_first_run': bool,
                'skipped_components': (list) [names of skipped components (str)],
                'startup_status': { Does not include components which have been skipped
                    'database': (bool),
                    'wallet': (bool),
                    'session': (bool),
                    'dht': (bool),
                    'hash_announcer': (bool),
                    'stream_identifier': (bool),
                    'file_manager': (bool),
                    'blob_manager': (bool),
                    'blockchain_headers': (bool),
                    'peer_protocol_server': (bool),
                    'reflector': (bool),
                    'upnp': (bool),
                    'exchange_rate_manager': (bool),
                },
                'connection_status': {
                    'code': (str) connection status code,
                    'message': (str) connection status message
                },
                'blockchain_headers': {
                    'downloading_headers': (bool),
                    'download_progress': (float) 0-100.0
                },
                'wallet': {
                    'blocks': (int) local blockchain height,
                    'blocks_behind': (int) remote_height - local_height,
                    'best_blockhash': (str) block hash of most recent block,
                    'is_encrypted': (bool),
                    'is_locked': (bool),
                },
                'dht': {
                    'node_id': (str) lbry dht node id - hex encoded,
                    'peers_in_routing_table': (int) the number of peers in the routing table,
                },
                'blob_manager': {
                    'finished_blobs': (int) number of finished blobs in the blob manager,
                },
                'hash_announcer': {
                    'announce_queue_size': (int) number of blobs currently queued to be announced
                },
                'file_manager': {
                    'managed_files': (int) count of files in the file manager,
                },
                'upnp': {
                    'aioupnp_version': (str),
                    'redirects': {
                        <TCP | UDP>: (int) external_port,
                    },
                    'gateway': (str) manufacturer and model,
                    'dht_redirect_set': (bool),
                    'peer_redirect_set': (bool),
                    'external_ip': (str) external ip address,
                }
            }
        """
        connection_code = CONNECTION_STATUS_CONNECTED if self.connected_to_internet else CONNECTION_STATUS_NETWORK
        response = {
            'installation_id': conf.settings.installation_id,
            'is_running': all(self.component_manager.get_components_status().values()),
            'is_first_run': self.is_first_run,
            'skipped_components': self.component_manager.skip_components,
            'startup_status': self.component_manager.get_components_status(),
            'connection_status': {
                'code': connection_code,
                'message': CONNECTION_MESSAGES[connection_code],
            },
        }
        # merge each running component's own status dict under its name
        for component in self.component_manager.components:
            status = yield defer.maybeDeferred(component.get_status)
            if status:
                response[component.component_name] = status
        defer.returnValue(response)
def jsonrpc_version(self):
"""
Get lbry version information
Usage:
version
Options:
None
Returns:
(dict) Dictionary of lbry version information
{
'build': (str) build type (e.g. "dev", "rc", "release"),
'ip': (str) remote ip, if available,
'lbrynet_version': (str) lbrynet_version,
'lbryum_version': (str) lbryum_version,
'lbryschema_version': (str) lbryschema_version,
'os_release': (str) os release string
'os_system': (str) os name
'platform': (str) platform string
'processor': (str) processor type,
'python_version': (str) python version,
}
"""
platform_info = system_info.get_platform()
log.info("Get version info: " + json.dumps(platform_info))
return self._render_response(platform_info)
    def jsonrpc_report_bug(self, message=None):
        """
        Report a bug to slack
        Usage:
            report_bug (<message> | --message=<message>)
        Options:
            --message=<message> : (str) Description of the bug
        Returns:
            (bool) true if successful
        """
        platform_name = system_info.get_platform()['platform']
        # report_bug_to_slack is defined elsewhere in this module (not in this chunk)
        report_bug_to_slack(
            message,
            conf.settings.installation_id,
            platform_name,
            __version__
        )
        return self._render_response(True)
    def jsonrpc_settings_get(self):
        """
        Get daemon settings
        Usage:
            settings_get
        Options:
            None
        Returns:
            (dict) Dictionary of daemon settings
            See ADJUSTABLE_SETTINGS in lbrynet/conf.py for full list of settings
        """
        return self._render_response(conf.settings.get_adjustable_settings_dict())
    def jsonrpc_settings_set(self, **kwargs):
        """
        Set daemon settings
        Usage:
            settings_set [--download_directory=<download_directory>]
                         [--data_rate=<data_rate>]
                         [--download_timeout=<download_timeout>]
                         [--peer_port=<peer_port>]
                         [--max_key_fee=<max_key_fee>]
                         [--disable_max_key_fee=<disable_max_key_fee>]
                         [--use_upnp=<use_upnp>]
                         [--run_reflector_server=<run_reflector_server>]
                         [--cache_time=<cache_time>]
                         [--reflect_uploads=<reflect_uploads>]
                         [--share_usage_data=<share_usage_data>]
                         [--peer_search_timeout=<peer_search_timeout>]
                         [--sd_download_timeout=<sd_download_timeout>]
                         [--auto_renew_claim_height_delta=<auto_renew_claim_height_delta>]
        Options:
            --download_directory=<download_directory>  : (str) path of download directory
            --data_rate=<data_rate>                    : (float) 0.0001
            --download_timeout=<download_timeout>      : (int) 180
            --peer_port=<peer_port>                    : (int) 3333
            --max_key_fee=<max_key_fee>                : (dict) maximum key fee for downloads,
                                                          in the format:
                                                          {
                                                            'currency': <currency_symbol>,
                                                            'amount': <amount>
                                                          }.
                                                          In the CLI, it must be an escaped JSON string
                                                          Supported currency symbols: LBC, USD, BTC
            --disable_max_key_fee=<disable_max_key_fee> : (bool) False
            --use_upnp=<use_upnp>            : (bool) True
            --run_reflector_server=<run_reflector_server>  : (bool) False
            --cache_time=<cache_time>  : (int) 150
            --reflect_uploads=<reflect_uploads>  : (bool) True
            --share_usage_data=<share_usage_data>  : (bool) True
            --peer_search_timeout=<peer_search_timeout>  : (int) 3
            --sd_download_timeout=<sd_download_timeout>  : (int) 3
            --auto_renew_claim_height_delta=<auto_renew_claim_height_delta> : (int) 0
                claims set to expire within this many blocks will be
                automatically renewed after startup (if set to 0, renews
                will not be made automatically)
        Returns:
            (dict) Updated dictionary of daemon settings
        """
        # TODO: improve upon the current logic, it could be made better
        new_settings = kwargs
        # expected Python type for each adjustable setting
        setting_types = {
            'download_directory': str,
            'data_rate': float,
            'download_timeout': int,
            'peer_port': int,
            'max_key_fee': dict,
            'use_upnp': bool,
            'run_reflector_server': bool,
            'cache_time': int,
            'reflect_uploads': bool,
            'share_usage_data': bool,
            'disable_max_key_fee': bool,
            'peer_search_timeout': int,
            'sd_download_timeout': int,
            'auto_renew_claim_height_delta': int
        }
        for key, setting_type in setting_types.items():
            if key in new_settings:
                if isinstance(new_settings[key], setting_type):
                    # value already has the expected type; store as-is
                    conf.settings.update({key: new_settings[key]},
                                         data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
                elif setting_type is dict and isinstance(new_settings[key], str):
                    # dict settings may arrive as an escaped JSON string (CLI path)
                    decoded = json.loads(str(new_settings[key]))
                    conf.settings.update({key: decoded},
                                         data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
                else:
                    # otherwise coerce to the expected type (may raise on bad input)
                    converted = setting_type(new_settings[key])
                    conf.settings.update({key: converted},
                                         data_types=(conf.TYPE_RUNTIME, conf.TYPE_PERSISTED))
        conf.settings.save_conf_file_settings()
        return self._render_response(conf.settings.get_adjustable_settings_dict())
def jsonrpc_help(self, command=None):
"""
Return a useful message for an API command
Usage:
help [<command> | --command=<command>]
Options:
--command=<command> : (str) command to retrieve documentation for
Returns:
(str) Help message
"""
if command is None:
return self._render_response({
'about': 'This is the LBRY JSON-RPC API',
'command_help': 'Pass a `command` parameter to this method to see ' +
'help for that command (e.g. `help command=resolve_name`)',
'command_list': 'Get a full list of commands using the `commands` method',
'more_info': 'Visit https://lbry.io/api for more info',
})
fn = self.callable_methods.get(command)
if fn is None:
raise Exception(
f"No help available for '{command}'. It is not a valid command."
)
return self._render_response({
'help': textwrap.dedent(fn.__doc__ or '')
})
def jsonrpc_commands(self):
"""
Return a list of available commands
Usage:
commands
Options:
None
Returns:
(list) list of available commands
"""
return self._render_response(sorted([command for command in self.callable_methods.keys()]))
    # Deprecated wallet_* aliases. Each is a no-op body; the decorator
    # presumably forwards the call to the newer command named in its
    # argument -- TODO confirm in AuthJSONRPCServer.deprecated.
    @AuthJSONRPCServer.deprecated("account_balance")
    def jsonrpc_wallet_balance(self, address=None):
        pass
    @AuthJSONRPCServer.deprecated("account_unlock")
    def jsonrpc_wallet_unlock(self, password):
        pass
    @AuthJSONRPCServer.deprecated("account_decrypt")
    def jsonrpc_wallet_decrypt(self):
        pass
    @AuthJSONRPCServer.deprecated("account_encrypt")
    def jsonrpc_wallet_encrypt(self, new_password):
        pass
    @AuthJSONRPCServer.deprecated("address_is_mine")
    def jsonrpc_wallet_is_address_mine(self, address):
        pass
    # deprecated with no replacement named
    @AuthJSONRPCServer.deprecated()
    def jsonrpc_wallet_public_key(self, address):
        pass
    @AuthJSONRPCServer.deprecated("address_list")
    def jsonrpc_wallet_list(self):
        pass
    @AuthJSONRPCServer.deprecated("address_unused")
    def jsonrpc_wallet_new_address(self):
        pass
    @AuthJSONRPCServer.deprecated("address_unused")
    def jsonrpc_wallet_unused_address(self):
        pass
    @requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
    async def jsonrpc_wallet_send(self, amount, address=None, claim_id=None, account_id=None):
        """
        Send credits. If given an address, send credits to it. If given a claim id, send a tip
        to the owner of a claim specified by uri. A tip is a claim support where the recipient
        of the support is the claim address for the claim being supported.
        Usage:
            wallet_send (<amount> | --amount=<amount>)
                        ((<address> | --address=<address>) | (<claim_id> | --claim_id=<claim_id>))
                        [--account_id=<account_id>]
        Options:
            --amount=<amount>          : (decimal) amount of credit to send
            --address=<address>        : (str) address to send credits to
            --claim_id=<claim_id>      : (str) claim_id of the claim to send to tip to
            --account_id=<account_id>  : (str) account to fund the transaction
        Returns:
            If sending to an address:
            (dict) Dictionary containing the transaction information
            {
                "hex": (str) raw transaction,
                "inputs": (list) inputs(dict) used for the transaction,
                "outputs": (list) outputs(dict) for the transaction,
                "total_fee": (int) fee in dewies,
                "total_input": (int) total of inputs in dewies,
                "total_output": (int) total of outputs in dewies(input - fees),
                "txid": (str) txid of the transaction,
            }
            If sending a claim tip:
            (dict) Dictionary containing the result of the support
            {
                txid : (str) txid of resulting support claim
                nout : (int) nout of the resulting support claim
                fee : (float) fee paid for the transaction
            }
        """
        # convert the LBC string to integer dewies (raises on a bad value)
        amount = self.get_dewies_or_error("amount", amount)
        if not amount:
            raise NullFundsError
        elif amount < 0:
            raise NegativeFundsError()
        # exactly one of address / claim_id must be provided
        if address and claim_id:
            raise Exception("Given both an address and a claim id")
        elif not address and not claim_id:
            raise Exception("Not given an address or a claim id")
        if address:
            # raises an error if the address is invalid
            decode_address(address)
            reserved_points = self.wallet_manager.reserve_points(address, amount)
            if reserved_points is None:
                raise InsufficientFundsError()
            account = self.get_account_or_default(account_id)
            result = await self.wallet_manager.send_points_to_address(reserved_points, amount, account)
            self.analytics_manager.send_credits_sent()
        else:
            log.info("This command is deprecated for sending tips, please use the newer claim_tip command")
            result = await self.jsonrpc_claim_tip(claim_id=claim_id, amount=amount, account_id=account_id)
        return result
    @requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
    # @AuthJSONRPCServer.deprecated("account_fund"), API has changed as well, so we forward for now
    # marked as deprecated in changelog and will be removed after subsequent release
    def jsonrpc_wallet_prefill_addresses(self, num_addresses, amount, no_broadcast=False):
        """
        Create new UTXOs, each containing `amount` credits
        Usage:
            wallet_prefill_addresses [--no_broadcast]
                                     (<num_addresses> | --num_addresses=<num_addresses>)
                                     (<amount> | --amount=<amount>)
        Options:
            --no_broadcast                    : (bool) whether to broadcast or not
            --num_addresses=<num_addresses>   : (int) num of addresses to create
            --amount=<amount>                 : (decimal) initial amount in each address
        Returns:
            (dict) the resulting transaction
        """
        broadcast = not no_broadcast
        # NOTE(review): the default account id is passed as both the first and
        # second argument -- confirm against jsonrpc_account_fund's signature
        # (presumably "from" and "to" accounts).
        return self.jsonrpc_account_fund(
            self.default_account.id,
            self.default_account.id,
            amount=amount,
            outputs=num_addresses,
            broadcast=broadcast
        )
    @requires("wallet")
    def jsonrpc_account_list(self, account_id=None, confirmations=6,
                             include_claims=False, show_seed=False):
        """
        List details of all of the accounts or a specific account.
        Usage:
            account_list [<account_id>] [--confirmations=<confirmations>]
                         [--include_claims] [--show_seed]
        Options:
            --account_id=<account_id>       : (str) If provided only the balance for this
                                              account will be given
            --confirmations=<confirmations> : (int) required confirmations (default: 0)
            --include_claims                : (bool) include claims, requires than a
                                              LBC account is specified (default: false)
            --show_seed                     : (bool) show the seed for the account
        Returns:
            (map) balance of account(s)
        """
        # NOTE(review): `include_claims` is accepted but never forwarded in
        # kwargs below -- confirm whether it should be passed through.
        kwargs = {
            'confirmations': confirmations,
            'show_seed': show_seed
        }
        if account_id:
            return self.get_account_or_error(account_id).get_details(**kwargs)
        else:
            return self.wallet_manager.get_detailed_accounts(**kwargs)
@requires("wallet")
async def jsonrpc_account_balance(self, account_id=None, confirmations=0):
"""
Return the balance of an account
Usage:
account_balance [<account_id>] [<address> | --address=<address>]
Options:
--account_id=<account_id> : (str) If provided only the balance for this
account will be given. Otherwise default account.
--confirmations=<confirmations> : (int) Only include transactions with this many
confirmed blocks.
Returns:
(decimal) amount of lbry credits in wallet
"""
account = self.get_account_or_default(account_id)
dewies = await account.get_balance(confirmations=confirmations)
return dewies_to_lbc(dewies)
@requires("wallet")
async def jsonrpc_account_add(
self, account_name, single_key=False, seed=None, private_key=None, public_key=None):
"""
Add a previously created account from a seed, private key or public key (read-only).
Specify --single_key for single address or vanity address accounts.
Usage:
account_add (<account_name> | --account_name=<account_name>)
(--seed=<seed> | --private_key=<private_key> | --public_key=<public_key>)
[--single_key]
Options:
--account_name=<account_name> : (str) name of the account to add
--seed=<seed> : (str) seed to generate new account from
--private_key=<private_key> : (str) private key for new account
--public_key=<public_key> : (str) public key for new account
--single_key : (bool) create single key account, default is multi-key
Returns:
(map) added account details
"""
account = LBCAccount.from_dict(
self.ledger, self.default_wallet, {
'name': account_name,
'seed': seed,
'private_key': private_key,
'public_key': public_key,
'address_generator': {
'name': SingleKey.name if single_key else HierarchicalDeterministic.name
}
}
)
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
self.default_wallet.save()
result = account.to_dict()
result['id'] = account.id
result['status'] = 'added'
result.pop('certificates', None)
result['is_default'] = self.default_wallet.accounts[0] == account
return result
@requires("wallet")
async def jsonrpc_account_create(self, account_name, single_key=False):
"""
Create a new account. Specify --single_key if you want to use
the same address for all transactions (not recommended).
Usage:
account_create (<account_name> | --account_name=<account_name>) [--single_key]
Options:
--account_name=<account_name> : (str) name of the account to create
--single_key : (bool) create single key account, default is multi-key
Returns:
(map) new account details
"""
account = LBCAccount.generate(
self.ledger, self.default_wallet, account_name, {
'name': SingleKey.name if single_key else HierarchicalDeterministic.name
}
)
if self.ledger.network.is_connected:
await self.ledger.subscribe_account(account)
self.default_wallet.save()
result = account.to_dict()
result['id'] = account.id
result['status'] = 'created'
result.pop('certificates', None)
result['is_default'] = self.default_wallet.accounts[0] == account
return result
@requires("wallet")
def jsonrpc_account_remove(self, account_id):
"""
Remove an existing account.
Usage:
account (<account_id> | --account_id=<account_id>)
Options:
--account_id=<account_id> : (str) id of the account to remove
Returns:
(map) details of removed account
"""
account = self.get_account_or_error(account_id)
self.default_wallet.accounts.remove(account)
self.default_wallet.save()
result = account.to_dict()
result['id'] = account.id
result['status'] = 'removed'
result.pop('certificates', None)
return result
@requires("wallet")
def jsonrpc_account_set(
self, account_id, default=False, new_name=None,
change_gap=None, change_max_uses=None, receiving_gap=None, receiving_max_uses=None):
"""
Change various settings on an account.
Usage:
account (<account_id> | --account_id=<account_id>)
[--default] [--new_name=<new_name>]
[--change_gap=<change_gap>] [--change_max_uses=<change_max_uses>]
[--receiving_gap=<receiving_gap>] [--receiving_max_uses=<receiving_max_uses>]
Options:
--account_id=<account_id> : (str) id of the account to change
--default : (bool) make this account the default
--new_name=<new_name> : (str) new name for the account
--receiving_gap=<receiving_gap> : (int) set the gap for receiving addresses
--receiving_max_uses=<receiving_max_uses> : (int) set the maximum number of times to
use a receiving address
--change_gap=<change_gap> : (int) set the gap for change addresses
--change_max_uses=<change_max_uses> : (int) set the maximum number of times to
use a change address
Returns:
(map) updated account details
"""
account = self.get_account_or_error(account_id)
change_made = False
if account.receiving.name == HierarchicalDeterministic.name:
address_changes = {
'change': {'gap': change_gap, 'maximum_uses_per_address': change_max_uses},
'receiving': {'gap': receiving_gap, 'maximum_uses_per_address': receiving_max_uses},
}
for chain_name in address_changes:
chain = getattr(account, chain_name)
for attr, value in address_changes[chain_name].items():
if value is not None:
setattr(chain, attr, value)
change_made = True
if new_name is not None:
account.name = new_name
change_made = True
if default:
self.default_wallet.accounts.remove(account)
self.default_wallet.accounts.insert(0, account)
change_made = True
if change_made:
self.default_wallet.save()
result = account.to_dict()
result['id'] = account.id
result.pop('certificates', None)
result['is_default'] = self.default_wallet.accounts[0] == account
return result
    @requires(WALLET_COMPONENT)
    def jsonrpc_account_unlock(self, password, account_id=None):
        """
        Unlock an encrypted account
        Usage:
            account_unlock (<password> | --password=<password>) [<account_id> | --account_id=<account_id>]
        Options:
            --account_id=<account_id>        : (str) id for the account to unlock
        Returns:
            (bool) true if account is unlocked, otherwise false
        """
        # lbc_only=False: this operation applies to any account type, not just LBC.
        return self.wallet_manager.unlock_account(
            password, self.get_account_or_default(account_id, lbc_only=False)
        )
    @requires(WALLET_COMPONENT)
    def jsonrpc_account_lock(self, account_id=None):
        """
        Lock an unlocked account
        Usage:
            account_lock [<account_id> | --account_id=<account_id>]
        Options:
            --account_id=<account_id>        : (str) id for the account to lock
        Returns:
            (bool) true if account is locked, otherwise false
        """
        # lbc_only=False: this operation applies to any account type, not just LBC.
        return self.wallet_manager.lock_account(self.get_account_or_default(account_id, lbc_only=False))
    @requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
    def jsonrpc_account_decrypt(self, account_id=None):
        """
        Decrypt an encrypted account, this will remove the wallet password
        Usage:
            account_decrypt [<account_id> | --account_id=<account_id>]
        Options:
            --account_id=<account_id>  : (str) id for the account to decrypt
        Returns:
            (bool) true if wallet is decrypted, otherwise false
        """
        # Requires the wallet to already be unlocked (see decorator conditions).
        # lbc_only=False: this operation applies to any account type, not just LBC.
        return self.wallet_manager.decrypt_account(self.get_account_or_default(account_id, lbc_only=False))
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_account_encrypt(self, new_password, account_id=None):
"""
Encrypt an unencrypted account with a password
Usage:
wallet_encrypt (<new_password> | --new_password=<new_password>) [<account_id> | --account_id=<account_id>]
Options:
--account_id=<account_id> : (str) id for the account to encrypt
Returns:
(bool) true if wallet is decrypted, otherwise false
"""
return self.wallet_manager.encrypt_account(
new_password,
self.get_account_or_default(account_id, lbc_only=False)
)
    @requires("wallet")
    def jsonrpc_account_max_address_gap(self, account_id):
        """
        Finds ranges of consecutive addresses that are unused and returns the length
        of the longest such range: for change and receiving address chains. This is
        useful to figure out ideal values to set for 'receiving_gap' and 'change_gap'
        account settings.
        Usage:
            account_max_address_gap (<account_id> | --account_id=<account_id>)
        Options:
            --account_id=<account_id>        : (str) account for which to get max gaps
        Returns:
            (map) maximum gap for change and receiving addresses
        """
        # Raises if the account id is unknown; the gap computation itself lives
        # on the account object.
        return self.get_account_or_error(account_id).get_max_gap()
@requires("wallet")
def jsonrpc_account_fund(self, to_account=None, from_account=None, amount='0.0',
everything=False, outputs=1, broadcast=False):
"""
Transfer some amount (or --everything) to an account from another
account (can be the same account). Amounts are interpreted as LBC.
You can also spread the transfer across a number of --outputs (cannot
be used together with --everything).
Usage:
account_fund [<to_account> | --to_account=<to_account>]
[<from_account> | --from_account=<from_account>]
(<amount> | --amount=<amount> | --everything)
[<outputs> | --outputs=<outputs>]
[--broadcast]
Options:
--to_account=<to_account> : (str) send to this account
--from_account=<from_account> : (str) spend from this account
--amount=<amount> : (str) the amount to transfer lbc
--everything : (bool) transfer everything (excluding claims), default: false.
--outputs=<outputs> : (int) split payment across many outputs, default: 1.
--broadcast : (bool) actually broadcast the transaction, default: false.
Returns:
(map) transaction performing requested action
"""
to_account = self.get_account_or_default(to_account, 'to_account')
from_account = self.get_account_or_default(from_account, 'from_account')
amount = self.get_dewies_or_error('amount', amount) if amount else None
if not isinstance(outputs, int):
raise ValueError("--outputs must be an integer.")
if everything and outputs > 1:
raise ValueError("Using --everything along with --outputs is not supported.")
return from_account.fund(
to_account=to_account, amount=amount, everything=everything,
outputs=outputs, broadcast=broadcast
)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_account_send(self, amount, addresses, account_id=None, broadcast=False):
"""
Send the same number of credits to multiple addresses.
Usage:
account_send <amount> <addresses>... [--account_id=<account_id>] [--broadcast]
Options:
--account_id=<account_id> : (str) account to fund the transaction
--broadcast : (bool) actually broadcast the transaction, default: false.
Returns:
"""
amount = self.get_dewies_or_error("amount", amount)
if not amount:
raise NullFundsError
elif amount < 0:
raise NegativeFundsError()
for address in addresses:
decode_address(address)
account = self.get_account_or_default(account_id)
result = await account.send_to_addresses(amount, addresses, broadcast)
self.analytics_manager.send_credits_sent()
return result
    @requires(WALLET_COMPONENT)
    def jsonrpc_address_is_mine(self, address, account_id=None):
        """
        Checks if an address is associated with the current wallet.
        Usage:
            wallet_is_address_mine (<address> | --address=<address>)
                                   [<account_id> | --account_id=<account_id>]
        Options:
            --address=<address>       : (str) address to check
            --account_id=<account_id> : (str) id of the account to use
        Returns:
            (bool) true, if address is associated with current wallet
        """
        # NOTE(review): Usage names the legacy `wallet_is_address_mine` command while
        # the method is `address_is_mine` — confirm whether this alias is intentional.
        return self.wallet_manager.address_is_mine(
            address, self.get_account_or_default(account_id)
        )
@requires(WALLET_COMPONENT)
def jsonrpc_address_list(self, account_id=None, page=None, page_size=None):
"""
List account addresses
Usage:
address_list [<account_id> | --account_id=<account_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to use
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns:
List of wallet addresses
"""
account = self.get_account_or_default(account_id)
return maybe_paginate(
account.get_addresses,
account.get_address_count,
page, page_size
)
    @requires(WALLET_COMPONENT)
    def jsonrpc_address_unused(self, account_id=None):
        """
        Return an address containing no balance, will create
        a new address if there is none.
        Usage:
            address_unused [--account_id=<account_id>]
        Options:
            --account_id=<account_id> : (str) id of the account to use
        Returns:
            (str) Unused wallet address in base58
        """
        # Delegates to the account's receiving chain, which reuses an unused
        # address when available or generates a fresh one otherwise.
        return self.get_account_or_default(account_id).receiving.get_or_create_usable_address()
    @requires(FILE_MANAGER_COMPONENT)
    @defer.inlineCallbacks
    def jsonrpc_file_list(self, sort=None, **kwargs):
        """
        List files limited by optional filters
        Usage:
            file_list [--sd_hash=<sd_hash>] [--file_name=<file_name>] [--stream_hash=<stream_hash>]
                      [--rowid=<rowid>] [--claim_id=<claim_id>] [--outpoint=<outpoint>] [--txid=<txid>] [--nout=<nout>]
                      [--channel_claim_id=<channel_claim_id>] [--channel_name=<channel_name>]
                      [--claim_name=<claim_name>] [--sort=<sort_method>...]
        Options:
            --sd_hash=<sd_hash>                    : (str) get file with matching sd hash
            --file_name=<file_name>                : (str) get file with matching file name in the
                                                     downloads folder
            --stream_hash=<stream_hash>            : (str) get file with matching stream hash
            --rowid=<rowid>                        : (int) get file with matching row id
            --claim_id=<claim_id>                  : (str) get file with matching claim id
            --outpoint=<outpoint>                  : (str) get file with matching claim outpoint
            --txid=<txid>                          : (str) get file with matching claim txid
            --nout=<nout>                          : (int) get file with matching claim nout
            --channel_claim_id=<channel_claim_id>  : (str) get file with matching channel claim id
            --channel_name=<channel_name>          : (str) get file with matching channel name
            --claim_name=<claim_name>              : (str) get file with matching claim name
            --sort=<sort_method>                   : (str) sort by any property, like 'file_name'
                                                     or 'metadata.author'; to specify direction
                                                     append ',asc' or ',desc'
        Returns:
            (list) List of files
            [
                {
                    'completed': (bool) true if download is completed,
                    'file_name': (str) name of file,
                    'download_directory': (str) download directory,
                    'points_paid': (float) credit paid to download file,
                    'stopped': (bool) true if download is stopped,
                    'stream_hash': (str) stream hash of file,
                    'stream_name': (str) stream name ,
                    'suggested_file_name': (str) suggested file name,
                    'sd_hash': (str) sd hash of file,
                    'download_path': (str) download path of file,
                    'mime_type': (str) mime type of file,
                    'key': (str) key attached to file,
                    'total_bytes': (int) file size in bytes,
                    'written_bytes': (int) written size in bytes,
                    'blobs_completed': (int) number of fully downloaded blobs,
                    'blobs_in_stream': (int) total blobs on stream,
                    'status': (str) downloader status
                    'claim_id': (str) None if claim is not found else the claim id,
                    'outpoint': (str) None if claim is not found else the tx and output,
                    'txid': (str) None if claim is not found else the transaction id,
                    'nout': (int) None if claim is not found else the transaction output index,
                    'metadata': (dict) None if claim is not found else the claim metadata,
                    'channel_claim_id': (str) None if claim is not found or not signed,
                    'channel_name': (str) None if claim is not found or not signed,
                    'claim_name': (str) None if claim is not found else the claim name
                },
            ]
        """
        # Remaining kwargs act as filters and are handled by _get_lbry_files.
        result = yield self._get_lbry_files(return_json=True, **kwargs)
        if sort:
            # Each --sort entry is parsed into a (field, direction) spec.
            sort_by = [self._parse_lbry_files_sort(s) for s in sort]
            result = self._sort_lbry_files(result, sort_by)
        response = yield self._render_response(result)
        defer.returnValue(response)
    @requires(WALLET_COMPONENT)
    async def jsonrpc_resolve_name(self, name, force=False):
        """
        Resolve stream info from a LBRY name
        Usage:
            resolve_name (<name> | --name=<name>) [--force]
        Options:
            --name=<name> : (str) the name to resolve
            --force       : (bool) force refresh and do not check cache
        Returns:
            (dict) Metadata dictionary from name claim, None if the name is not
                    resolvable
        """
        try:
            # Normalize the input to just the claim name portion of the URI.
            name = parse_lbry_uri(name).name
            metadata = await self.wallet_manager.resolve(name, check_cache=not force)
            if name in metadata:
                metadata = metadata[name]
            return metadata
        except UnknownNameError:
            # Implicitly returns None for unresolvable names, per the docstring.
            log.info('Name %s is not known', name)
@requires(WALLET_COMPONENT)
async def jsonrpc_claim_show(self, txid=None, nout=None, claim_id=None):
"""
Resolve claim info from txid/nout or with claim ID
Usage:
claim_show [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
[<claim_id> | --claim_id=<claim_id>]
Options:
--txid=<txid> : (str) look for claim with this txid, nout must
also be specified
--nout=<nout> : (int) look for claim with this nout, txid must
also be specified
--claim_id=<claim_id> : (str) look for claim with this claim id
Returns:
(dict) Dictionary containing claim info as below,
{
'txid': (str) txid of claim
'nout': (int) nout of claim
'amount': (float) amount of claim
'value': (str) value of claim
'height' : (int) height of claim takeover
'claim_id': (str) claim ID of claim
'supports': (list) list of supports associated with claim
}
if claim cannot be resolved, dictionary as below will be returned
{
'error': (str) reason for error
}
"""
if claim_id is not None and txid is None and nout is None:
claim_results = await self.wallet_manager.get_claim_by_claim_id(claim_id)
elif txid is not None and nout is not None and claim_id is None:
claim_results = await self.wallet_manager.get_claim_by_outpoint(txid, int(nout))
else:
raise Exception("Must specify either txid/nout, or claim_id")
return claim_results
@requires(WALLET_COMPONENT)
async def jsonrpc_resolve(self, force=False, uri=None, uris=None):
"""
Resolve given LBRY URIs
Usage:
resolve [--force] (<uri> | --uri=<uri>) [<uris>...]
Options:
--force : (bool) force refresh and ignore cache
--uri=<uri> : (str) uri to resolve
--uris=<uris> : (list) uris to resolve
Returns:
Dictionary of results, keyed by uri
'<uri>': {
If a resolution error occurs:
'error': Error message
If the uri resolves to a channel or a claim in a channel:
'certificate': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'permanent_url': (str) permanent url of the certificate claim,
'supports: (list) list of supports [{'txid': (str) txid,
'nout': (int) nout,
'amount': (float) amount}],
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
If the uri resolves to a channel:
'claims_in_channel': (int) number of claims in the channel,
If the uri resolves to a claim:
'claim': {
'address': (str) claim address,
'amount': (float) claim amount,
'effective_amount': (float) claim amount including supports,
'claim_id': (str) claim id,
'claim_sequence': (int) claim sequence number,
'decoded_claim': (bool) whether or not the claim value was decoded,
'height': (int) claim height,
'depth': (int) claim depth,
'has_signature': (bool) included if decoded_claim
'name': (str) claim name,
'permanent_url': (str) permanent url of the claim,
'channel_name': (str) channel name if claim is in a channel
'supports: (list) list of supports [{'txid': (str) txid,
'nout': (int) nout,
'amount': (float) amount}]
'txid': (str) claim txid,
'nout': (str) claim nout,
'signature_is_valid': (bool), included if has_signature,
'value': ClaimDict if decoded, otherwise hex string
}
}
"""
uris = tuple(uris or [])
if uri is not None:
uris += (uri,)
results = {}
valid_uris = tuple()
for u in uris:
try:
parse_lbry_uri(u)
valid_uris += (u,)
except URIParseError:
results[u] = {"error": "%s is not a valid uri" % u}
resolved = await self.wallet_manager.resolve(*valid_uris, check_cache=not force)
for resolved_uri in resolved:
results[resolved_uri] = resolved[resolved_uri]
return results
    @requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
              RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
              conditions=[WALLET_IS_UNLOCKED])
    async def jsonrpc_get(self, uri, file_name=None, timeout=None):
        """
        Download stream from a LBRY name.
        Usage:
            get <uri> [<file_name> | --file_name=<file_name>] [<timeout> | --timeout=<timeout>]
        Options:
            --uri=<uri>              : (str) uri of the content to download
            --file_name=<file_name>  : (str) specified name for the downloaded file
            --timeout=<timeout>      : (int) download timeout in number of seconds
        Returns:
            (dict) Dictionary containing information about the stream
            {
                'completed': (bool) true if download is completed,
                'file_name': (str) name of file,
                'download_directory': (str) download directory,
                'points_paid': (float) credit paid to download file,
                'stopped': (bool) true if download is stopped,
                'stream_hash': (str) stream hash of file,
                'stream_name': (str) stream name ,
                'suggested_file_name': (str) suggested file name,
                'sd_hash': (str) sd hash of file,
                'download_path': (str) download path of file,
                'mime_type': (str) mime type of file,
                'key': (str) key attached to file,
                'total_bytes': (int) file size in bytes,
                'written_bytes': (int) written size in bytes,
                'blobs_completed': (int) number of fully downloaded blobs,
                'blobs_in_stream': (int) total blobs on stream,
                'status': (str) downloader status,
                'claim_id': (str) claim id,
                'outpoint': (str) claim outpoint string,
                'txid': (str) claim txid,
                'nout': (int) claim nout,
                'metadata': (dict) claim metadata,
                'channel_claim_id': (str) None if claim is not signed
                'channel_name': (str) None if claim is not signed
                'claim_name': (str) claim name
            }
        """
        timeout = timeout if timeout is not None else conf.settings['download_timeout']
        parsed_uri = parse_lbry_uri(uri)
        # A bare channel uri has nothing downloadable; a /path within it is required.
        if parsed_uri.is_channel and not parsed_uri.path:
            raise Exception("cannot download a channel claim, specify a /path")
        resolved = (await self.wallet_manager.resolve(uri)).get(uri, {})
        # Resolution may return the claim directly (has 'value') or nested under 'claim'.
        resolved = resolved if 'value' in resolved else resolved.get('claim')
        if not resolved:
            raise ResolveError(
                "Failed to resolve stream at lbry://{}".format(uri.replace("lbry://", ""))
            )
        if 'error' in resolved:
            raise ResolveError(f"error resolving stream: {resolved['error']}")
        txid, nout, name = resolved['txid'], resolved['nout'], resolved['name']
        claim_dict = ClaimDict.load_dict(resolved['value'])
        sd_hash = claim_dict.source_hash.decode()
        # If a download for this stream is already in flight, wait for it to start
        # instead of kicking off a duplicate.
        if sd_hash in self.streams:
            log.info("Already waiting on lbry://%s to start downloading", name)
            await d2f(self.streams[sd_hash].data_downloading_deferred)
        lbry_file = await d2f(self._get_lbry_file(FileID.SD_HASH, sd_hash, return_json=False))
        if lbry_file:
            # We already track this stream; restart it if the file on disk is gone.
            if not os.path.isfile(os.path.join(lbry_file.download_directory, lbry_file.file_name)):
                log.info("Already have lbry file but missing file in %s, rebuilding it",
                         lbry_file.download_directory)
                await d2f(lbry_file.start())
            else:
                log.info('Already have a file for %s', name)
            result = await d2f(self._get_lbry_file_dict(lbry_file))
        else:
            # First time seeing this stream: start a fresh download.
            result = await d2f(self._download_name(name, claim_dict, sd_hash, txid, nout,
                                                   timeout=timeout, file_name=file_name))
        return result
@requires(FILE_MANAGER_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_file_set_status(self, status, **kwargs):
"""
Start or stop downloading a file
Usage:
file_set_status (<status> | --status=<status>) [--sd_hash=<sd_hash>]
[--file_name=<file_name>] [--stream_hash=<stream_hash>] [--rowid=<rowid>]
Options:
--status=<status> : (str) one of "start" or "stop"
--sd_hash=<sd_hash> : (str) set status of file with matching sd hash
--file_name=<file_name> : (str) set status of file with matching file name in the
downloads folder
--stream_hash=<stream_hash> : (str) set status of file with matching stream hash
--rowid=<rowid> : (int) set status of file with matching row id
Returns:
(str) Confirmation message
"""
if status not in ['start', 'stop']:
raise Exception('Status must be "start" or "stop".')
search_type, value = get_lbry_file_search_value(kwargs)
lbry_file = yield self._get_lbry_file(search_type, value, return_json=False)
if not lbry_file:
raise Exception(f'Unable to find a file for {search_type}:{value}')
if status == 'start' and lbry_file.stopped or status == 'stop' and not lbry_file.stopped:
yield self.file_manager.toggle_lbry_file_running(lbry_file)
msg = "Started downloading file" if status == 'start' else "Stopped downloading file"
else:
msg = (
"File was already being downloaded" if status == 'start'
else "File was already stopped"
)
response = yield self._render_response(msg)
defer.returnValue(response)
    @requires(FILE_MANAGER_COMPONENT)
    @defer.inlineCallbacks
    def jsonrpc_file_delete(self, delete_from_download_dir=False, delete_all=False, **kwargs):
        """
        Delete a LBRY file
        Usage:
            file_delete [--delete_from_download_dir] [--delete_all] [--sd_hash=<sd_hash>] [--file_name=<file_name>]
                        [--stream_hash=<stream_hash>] [--rowid=<rowid>] [--claim_id=<claim_id>] [--txid=<txid>]
                        [--nout=<nout>] [--claim_name=<claim_name>] [--channel_claim_id=<channel_claim_id>]
                        [--channel_name=<channel_name>]
        Options:
            --delete_from_download_dir             : (bool) delete file from download directory,
                                                    instead of just deleting blobs
            --delete_all                           : (bool) if there are multiple matching files,
                                                     allow the deletion of multiple files.
                                                     Otherwise do not delete anything.
            --sd_hash=<sd_hash>                    : (str) delete by file sd hash
            --file_name=<file_name>                 : (str) delete by file name in downloads folder
            --stream_hash=<stream_hash>            : (str) delete by file stream hash
            --rowid=<rowid>                        : (int) delete by file row id
            --claim_id=<claim_id>                  : (str) delete by file claim id
            --txid=<txid>                          : (str) delete by file claim txid
            --nout=<nout>                          : (int) delete by file claim nout
            --claim_name=<claim_name>              : (str) delete by file claim name
            --channel_claim_id=<channel_claim_id>  : (str) delete by file channel claim id
            --channel_name=<channel_name>                 : (str) delete by file channel claim name
        Returns:
            (bool) true if deletion was successful
        """
        # Remaining kwargs are the search filters listed in the docstring.
        lbry_files = yield self._get_lbry_files(return_json=False, **kwargs)
        if len(lbry_files) > 1:
            # Multiple matches: refuse to delete unless --delete_all was given.
            if not delete_all:
                log.warning("There are %i files to delete, use narrower filters to select one",
                            len(lbry_files))
                response = yield self._render_response(False)
                defer.returnValue(response)
            else:
                log.warning("Deleting %i files",
                            len(lbry_files))
        if not lbry_files:
            log.warning("There is no file to delete")
            result = False
        else:
            for lbry_file in lbry_files:
                file_name, stream_hash = lbry_file.file_name, lbry_file.stream_hash
                # Drop any in-flight download tracking for this stream first.
                if lbry_file.sd_hash in self.streams:
                    del self.streams[lbry_file.sd_hash]
                yield self.file_manager.delete_lbry_file(lbry_file,
                                                         delete_file=delete_from_download_dir)
                log.info("Deleted file: %s", file_name)
            result = True
        response = yield self._render_response(result)
        defer.returnValue(response)
    @requires(WALLET_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT, BLOB_COMPONENT,
              DHT_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
              conditions=[WALLET_IS_UNLOCKED])
    def jsonrpc_stream_cost_estimate(self, uri, size=None):
        """
        Get estimated cost for a lbry stream
        Usage:
            stream_cost_estimate (<uri> | --uri=<uri>) [<size> | --size=<size>]
        Options:
            --uri=<uri>    : (str) uri to use
            --size=<size>  : (float) stream size in bytes. if provided an sd blob won't be
                             downloaded.
        Returns:
            (float) Estimated cost in lbry credits, returns None if uri is not
                resolvable
        """
        # Estimation is delegated entirely to the helper; passing a size avoids
        # fetching the sd blob (see Options above).
        return self.get_est_cost(uri, size)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_channel_new(self, channel_name, amount, account_id=None):
"""
Generate a publisher key and create a new '@' prefixed certificate claim
Usage:
channel_new (<channel_name> | --channel_name=<channel_name>)
(<amount> | --amount=<amount>)
[--account_id=<account_id>]
Options:
--channel_name=<channel_name> : (str) name of the channel prefixed with '@'
--amount=<amount> : (decimal) bid amount on the channel
--account_id=<account_id> : (str) id of the account to store channel
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (float) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
"""
try:
parsed = parse_lbry_uri(channel_name)
if not parsed.is_channel:
raise Exception("Cannot make a new channel for a non channel name")
if parsed.path:
raise Exception("Invalid channel uri")
except (TypeError, URIParseError):
raise Exception("Invalid channel name")
amount = self.get_dewies_or_error("amount", amount)
if amount <= 0:
raise Exception("Invalid amount")
tx = await self.wallet_manager.claim_new_channel(
channel_name, amount, self.get_account_or_default(account_id)
)
self.default_wallet.save()
self.analytics_manager.send_new_channel()
nout = 0
txo = tx.outputs[nout]
log.info("Claimed a new channel! lbry://%s txid: %s nout: %d", channel_name, tx.id, nout)
return {
"success": True,
"tx": tx,
"claim_id": txo.claim_id,
"claim_address": txo.get_address(self.ledger),
"output": txo
}
@requires(WALLET_COMPONENT)
def jsonrpc_channel_list(self, account_id=None, page=None, page_size=None):
"""
Get certificate claim infos for channels that can be published to
Usage:
channel_list [<account_id> | --account_id=<account_id>]
[--page=<page>] [--page_size=<page_size>]
Options:
--account_id=<account_id> : (str) id of the account to use
--page=<page> : (int) page to return during paginating
--page_size=<page_size> : (int) number of items on page during pagination
Returns:
(list) ClaimDict, includes 'is_mine' field to indicate if the certificate claim
is in the wallet.
"""
account = self.get_account_or_default(account_id)
return maybe_paginate(
account.get_channels,
account.get_channel_count,
page, page_size
)
    @requires(WALLET_COMPONENT)
    @defer.inlineCallbacks
    def jsonrpc_channel_export(self, claim_id):
        """
        Export serialized channel signing information for a given certificate claim id
        Usage:
            channel_export (<claim_id> | --claim_id=<claim_id>)
        Options:
            --claim_id=<claim_id> : (str) Claim ID to export information about
        Returns:
            (str) Serialized certificate information
        """
        # Serialization format is defined by the wallet manager; this is the
        # counterpart of channel_import below.
        result = yield self.wallet_manager.export_certificate_info(claim_id)
        defer.returnValue(result)
    @requires(WALLET_COMPONENT)
    @defer.inlineCallbacks
    def jsonrpc_channel_import(self, serialized_certificate_info):
        """
        Import serialized channel signing information (to allow signing new claims to the channel)
        Usage:
            channel_import (<serialized_certificate_info> | --serialized_certificate_info=<serialized_certificate_info>)
        Options:
            --serialized_certificate_info=<serialized_certificate_info> : (str) certificate info
        Returns:
            (dict) Result dictionary
        """
        # Accepts the payload produced by channel_export above.
        result = yield self.wallet_manager.import_certificate_info(serialized_certificate_info)
        defer.returnValue(result)
@requires(WALLET_COMPONENT, FILE_MANAGER_COMPONENT, BLOB_COMPONENT, PAYMENT_RATE_COMPONENT, DATABASE_COMPONENT,
conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_publish(
self, name, bid, metadata=None, file_path=None, fee=None, title=None,
description=None, author=None, language=None, license=None,
license_url=None, thumbnail=None, preview=None, nsfw=None, sources=None,
channel_name=None, channel_id=None, channel_account_id=None, account_id=None,
claim_address=None, change_address=None):
"""
Make a new name claim and publish associated data to lbrynet,
update over existing claim if user already has a claim for name.
Fields required in the final Metadata are:
'title'
'description'
'author'
'language'
'license'
'nsfw'
Metadata can be set by either using the metadata argument or by setting individual arguments
fee, title, description, author, language, license, license_url, thumbnail, preview, nsfw,
or sources. Individual arguments will overwrite the fields specified in metadata argument.
Usage:
publish (<name> | --name=<name>) (<bid> | --bid=<bid>) [--metadata=<metadata>]
[--file_path=<file_path>] [--fee=<fee>] [--title=<title>]
[--description=<description>] [--author=<author>] [--language=<language>]
[--license=<license>] [--license_url=<license_url>] [--thumbnail=<thumbnail>]
[--preview=<preview>] [--nsfw=<nsfw>] [--sources=<sources>]
[--channel_name=<channel_name>] [--channel_id=<channel_id>]
[--channel_account_id=<channel_account_id>...] [--account_id=<account_id>]
[--claim_address=<claim_address>] [--change_address=<change_address>]
Options:
--name=<name> : (str) name of the content (can only consist of a-z A-Z 0-9 and -(dash))
--bid=<bid> : (decimal) amount to back the claim
--metadata=<metadata> : (dict) ClaimDict to associate with the claim.
--file_path=<file_path> : (str) path to file to be associated with name. If provided,
a lbry stream of this file will be used in 'sources'.
If no path is given but a sources dict is provided,
it will be used. If neither are provided, an
error is raised.
--fee=<fee> : (dict) Dictionary representing key fee to download content:
{
'currency': currency_symbol,
'amount': decimal,
'address': str, optional
}
supported currencies: LBC, USD, BTC
If an address is not provided a new one will be
automatically generated. Default fee is zero.
--title=<title> : (str) title of the publication
--description=<description> : (str) description of the publication
--author=<author> : (str) author of the publication. The usage for this field is not
the same as for channels. The author field is used to credit an author
who is not the publisher and is not represented by the channel. For
example, a pdf file of 'The Odyssey' has an author of 'Homer' but may
by published to a channel such as '@classics', or to no channel at all
--language=<language> : (str) language of the publication
--license=<license> : (str) publication license
--license_url=<license_url> : (str) publication license url
--thumbnail=<thumbnail> : (str) thumbnail url
--preview=<preview> : (str) preview url
--nsfw=<nsfw> : (bool) whether the content is nsfw
--sources=<sources> : (str) {'lbry_sd_hash': sd_hash} specifies sd hash of file
--channel_name=<channel_name> : (str) name of the publisher channel name in the wallet
--channel_id=<channel_id> : (str) claim id of the publisher channel, does not check
for channel claim being in the wallet. This allows
publishing to a channel where only the certificate
private key is in the wallet.
--channel_account_id=<channel_id>: (str) one or more account ids for accounts to look in
for channel certificates, defaults to all accounts.
--account_id=<account_id> : (str) account to use for funding the transaction
--claim_address=<claim_address> : (str) address where the claim is sent to, if not specified
new address will automatically be created
Returns:
(dict) Dictionary containing result of the claim
{
'tx' : (str) hex encoded transaction
'txid' : (str) txid of resulting claim
'nout' : (int) nout of the resulting claim
'fee' : (decimal) fee paid for the claim transaction
'claim_id' : (str) claim ID of the resulting claim
}
"""
try:
parse_lbry_uri(name)
except (TypeError, URIParseError):
raise Exception("Invalid name given to publish")
amount = self.get_dewies_or_error('bid', bid)
if amount <= 0:
raise ValueError("Bid value must be greater than 0.0")
for address in [claim_address, change_address]:
if address is not None:
# raises an error if the address is invalid
decode_address(address)
account = self.get_account_or_default(account_id)
available = await account.get_balance()
if amount >= available:
existing_claims = await account.get_claims(claim_name=name)
if len(existing_claims) == 1:
available += existing_claims[0].get_estimator(self.ledger).effective_amount
if amount >= available:
raise InsufficientFundsError(
f"Please lower the bid value, the maximum amount "
f"you can specify for this claim is {dewies_to_lbc(available)}."
)
metadata = metadata or {}
if fee is not None:
metadata['fee'] = fee
if title is not None:
metadata['title'] = title
if description is not None:
metadata['description'] = description
if author is not None:
metadata['author'] = author
if language is not None:
metadata['language'] = language
if license is not None:
metadata['license'] = license
if license_url is not None:
metadata['licenseUrl'] = license_url
if thumbnail is not None:
metadata['thumbnail'] = thumbnail
if preview is not None:
metadata['preview'] = preview
if nsfw is not None:
metadata['nsfw'] = bool(nsfw)
metadata['version'] = '_0_1_0'
# check for original deprecated format {'currency':{'address','amount'}}
# add address, version to fee if unspecified
if 'fee' in metadata:
if len(metadata['fee'].keys()) == 1 and isinstance(metadata['fee'].values()[0], dict):
raise Exception('Old format for fee no longer supported. '
'Fee must be specified as {"currency":,"address":,"amount":}')
if 'amount' in metadata['fee'] and 'currency' in metadata['fee']:
if not metadata['fee']['amount']:
log.warning("Stripping empty fee from published metadata")
del metadata['fee']
elif 'address' not in metadata['fee']:
address = await account.receiving.get_or_create_usable_address()
metadata['fee']['address'] = address
if 'fee' in metadata and 'version' not in metadata['fee']:
metadata['fee']['version'] = '_0_0_1'
claim_dict = {
'version': '_0_0_1',
'claimType': 'streamType',
'stream': {
'metadata': metadata,
'version': '_0_0_1'
}
}
# this will be used to verify the format with lbrynet.schema
claim_copy = deepcopy(claim_dict)
if sources is not None:
claim_dict['stream']['source'] = sources
claim_copy['stream']['source'] = sources
elif file_path is not None:
if not os.path.isfile(file_path):
raise Exception("invalid file path to publish")
# since the file hasn't yet been made into a stream, we don't have
# a valid Source for the claim when validating the format, we'll use a fake one
claim_copy['stream']['source'] = {
'version': '_0_0_1',
'sourceType': 'lbry_sd_hash',
'source': '0' * 96,
'contentType': ''
}
else:
# there is no existing source to use, and a file was not provided to make a new one
raise Exception("no source provided to publish")
try:
ClaimDict.load_dict(claim_copy)
# the metadata to use in the claim can be serialized by lbrynet.schema
except DecodeError as err:
# there was a problem with a metadata field, raise an error here rather than
# waiting to find out when we go to publish the claim (after having made the stream)
raise Exception(f"invalid publish metadata: {err}")
certificate = None
if channel_id or channel_name:
certificate = await self.get_channel_or_error(
self.get_accounts_or_all(channel_account_id), channel_id, channel_name
)
log.info("Publish: %s", {
'name': name,
'file_path': file_path,
'bid': dewies_to_lbc(amount),
'claim_address': claim_address,
'change_address': change_address,
'claim_dict': claim_dict,
'channel_id': channel_id,
'channel_name': channel_name
})
return await self._publish_stream(
account, name, amount, claim_dict, file_path,
certificate, claim_address, change_address
)
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_claim_abandon(self, claim_id=None, txid=None, nout=None, account_id=None, blocking=True):
    """
    Abandon a name and reclaim credits from the claim

    Usage:
        claim_abandon [<claim_id> | --claim_id=<claim_id>]
                      [<txid> | --txid=<txid>] [<nout> | --nout=<nout>]
                      [--account_id=<account_id>]
                      [--blocking]

    Options:
        --claim_id=<claim_id>     : (str) claim_id of the claim to abandon
        --txid=<txid>             : (str) txid of the claim to abandon
        --nout=<nout>             : (int) nout of the claim to abandon
        --account_id=<account_id> : (str) id of the account to use
        --blocking                : (bool) wait until abandon is in mempool

    Returns:
        (dict) Dictionary containing result of the claim
        {
            success: (bool) True if txn is successful
            txid : (str) txid of resulting transaction
        }
    """
    account = self.get_account_or_default(account_id)
    # The claim may be identified either by claim_id alone or by the
    # (txid, nout) outpoint pair; txid and nout are only valid together.
    if claim_id is None and txid is None and nout is None:
        raise Exception('Must specify claim_id, or txid and nout')
    if txid is None and nout is not None:
        raise Exception('Must specify txid')
    if nout is None and txid is not None:
        raise Exception('Must specify nout')
    tx = await self.wallet_manager.abandon_claim(claim_id, txid, nout, account)
    self.analytics_manager.send_claim_action('abandon')
    if blocking:
        # block until the abandon transaction reaches the mempool
        await self.ledger.wait(tx)
    return {"success": True, "tx": tx}
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_claim_new_support(self, name, claim_id, amount, account_id=None):
    """
    Support a name claim

    Usage:
        claim_new_support (<name> | --name=<name>) (<claim_id> | --claim_id=<claim_id>)
                          (<amount> | --amount=<amount>) [--account_id=<account_id>]

    Options:
        --name=<name>             : (str) name of the claim to support
        --claim_id=<claim_id>     : (str) claim_id of the claim to support
        --amount=<amount>         : (decimal) amount of support
        --account_id=<account_id> : (str) id of the account to use

    Returns:
        (dict) Dictionary containing the transaction information
        {
            "hex": (str) raw transaction,
            "inputs": (list) inputs(dict) used for the transaction,
            "outputs": (list) outputs(dict) for the transaction,
            "total_fee": (int) fee in dewies,
            "total_input": (int) total of inputs in dewies,
            "total_output": (int) total of outputs in dewies(input - fees),
            "txid": (str) txid of the transaction,
        }
    """
    account = self.get_account_or_default(account_id)
    # convert the LBC string amount to integer dewies, raising on bad input
    amount = self.get_dewies_or_error("amount", amount)
    result = await self.wallet_manager.support_claim(name, claim_id, amount, account)
    self.analytics_manager.send_claim_action('new_support')
    return result
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
async def jsonrpc_claim_tip(self, claim_id, amount, account_id=None):
    """
    Tip the owner of the claim

    Usage:
        claim_tip (<claim_id> | --claim_id=<claim_id>) (<amount> | --amount=<amount>)
                  [--account_id=<account_id>]

    Options:
        --claim_id=<claim_id>     : (str) claim_id of the claim to support
        --amount=<amount>         : (decimal) amount of support
        --account_id=<account_id> : (str) id of the account to use

    Returns:
        (dict) Dictionary containing the transaction information
        {
            "hex": (str) raw transaction,
            "inputs": (list) inputs(dict) used for the transaction,
            "outputs": (list) outputs(dict) for the transaction,
            "total_fee": (int) fee in dewies,
            "total_input": (int) total of inputs in dewies,
            "total_output": (int) total of outputs in dewies(input - fees),
            "txid": (str) txid of the transaction,
        }
    """
    account = self.get_account_or_default(account_id)
    amount = self.get_dewies_or_error("amount", amount)
    # tips require a well-formed claim_id; this raises before spending anything
    validate_claim_id(claim_id)
    result = await self.wallet_manager.tip_claim(amount, claim_id, account)
    # tips are reported under the same analytics event as regular supports
    self.analytics_manager.send_claim_action('new_support')
    return result
@AuthJSONRPCServer.deprecated()
def jsonrpc_claim_renew(self, outpoint=None, height=None):
    """Deprecated no-op kept only for API backward compatibility."""
    pass
@requires(WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_claim_send_to_address(self, claim_id, address, amount=None):
    """
    Send a name claim to an address

    Usage:
        claim_send_to_address (<claim_id> | --claim_id=<claim_id>)
                              (<address> | --address=<address>)
                              [<amount> | --amount=<amount>]

    Options:
        --claim_id=<claim_id>   : (str) claim_id to send
        --address=<address>     : (str) address to send the claim to
        --amount=<amount>       : (int) Amount of credits to claim name for,
                                  defaults to the current amount on the claim

    Returns:
        (dict) Dictionary containing result of the claim
        {
            'tx' : (str) hex encoded transaction
            'txid' : (str) txid of resulting claim
            'nout' : (int) nout of the resulting claim
            'fee' : (float) fee paid for the claim transaction
            'claim_id' : (str) claim ID of the resulting claim
        }
    """
    # fail fast on a malformed destination address before touching the wallet
    decode_address(address)
    return self.wallet_manager.send_claim_to_address(
        # amount is optional; None keeps the claim's current amount
        claim_id, address, self.get_dewies_or_error("amount", amount) if amount else None
    )
@requires(WALLET_COMPONENT)
def jsonrpc_claim_list_mine(self, account_id=None, page=None, page_size=None):
    """
    List my name claims

    Usage:
        claim_list_mine [<account_id> | --account_id=<account_id>]
                        [--page=<page>] [--page_size=<page_size>]

    Options:
        --account_id=<account_id> : (str) id of the account to query
        --page=<page>             : (int) page to return during paginating
        --page_size=<page_size>   : (int) number of items on page during pagination

    Returns:
        (list) List of name claims owned by user
        [
            {
                'address': (str) address that owns the claim
                'amount': (float) amount assigned to the claim
                'blocks_to_expiration': (int) number of blocks until it expires
                'category': (str) "claim", "update" , or "support"
                'claim_id': (str) claim ID of the claim
                'confirmations': (int) number of blocks of confirmations for the claim
                'expiration_height': (int) the block height which the claim will expire
                'expired': (bool) true if expired, false otherwise
                'height': (int) height of the block containing the claim
                'is_spent': (bool) true if claim is abandoned, false otherwise
                'name': (str) name of the claim
                'permanent_url': (str) permanent url of the claim,
                'txid': (str) txid of the claim
                'nout': (int) nout of the claim
                'value': (str) value of the claim
            },
        ]
    """
    account = self.get_account_or_default(account_id)
    # maybe_paginate only paginates when page/page_size are given;
    # otherwise the full claim list is returned
    return maybe_paginate(
        account.get_claims,
        account.get_claim_count,
        page, page_size
    )
@requires(WALLET_COMPONENT)
async def jsonrpc_claim_list(self, name):
    """
    List current claims and information about them for a given name

    Usage:
        claim_list (<name> | --name=<name>)

    Options:
        --name=<name> : (str) name of the claim to list info about

    Returns:
        (dict) State of claims assigned for the name
        {
            'claims': (list) list of claims for the name
            [
                {
                    'amount': (float) amount assigned to the claim
                    'effective_amount': (float) total amount assigned to the claim,
                                        including supports
                    'claim_id': (str) claim ID of the claim
                    'height': (int) height of block containing the claim
                    'txid': (str) txid of the claim
                    'nout': (int) nout of the claim
                    'permanent_url': (str) permanent url of the claim,
                    'supports': (list) a list of supports attached to the claim
                    'value': (str) the value of the claim
                },
            ]
            'supports_without_claims': (list) supports without any claims attached to them
            'last_takeover_height': (int) the height of last takeover for the name
        }
    """
    claims = await self.wallet_manager.get_claims_for_name(name)  # type: dict
    # sort_claim_results sorts the 'claims' list in place
    sort_claim_results(claims['claims'])
    return claims
@requires(WALLET_COMPONENT)
async def jsonrpc_claim_list_by_channel(self, page=0, page_size=10, uri=None, uris=None):
    """
    Get paginated claims in a channel specified by a channel uri

    Usage:
        claim_list_by_channel (<uri> | --uri=<uri>) [<uris>...] [--page=<page>]
                              [--page_size=<page_size>]

    Options:
        --uri=<uri>              : (str) uri of the channel
        --uris=<uris>            : (list) uris of the channel
        --page=<page>            : (int) which page of results to return where page 1 is the first
                                   page, defaults to no pages
        --page_size=<page_size>  : (int) number of results in a page, default of 10

    Returns:
        {
            resolved channel uri: {
                If there was an error:
                'error': (str) error message

                'claims_in_channel': the total number of results for the channel,

                If a page of results was requested:
                'returned_page': page number returned,
                'claims_in_channel': [
                    {
                        'absolute_channel_position': (int) claim index number in sorted list of
                                                     claims which assert to be part of the
                                                     channel
                        'address': (str) claim address,
                        'amount': (float) claim amount,
                        'effective_amount': (float) claim amount including supports,
                        'claim_id': (str) claim id,
                        'claim_sequence': (int) claim sequence number,
                        'decoded_claim': (bool) whether or not the claim value was decoded,
                        'height': (int) claim height,
                        'depth': (int) claim depth,
                        'has_signature': (bool) included if decoded_claim
                        'name': (str) claim name,
                        'supports: (list) list of supports [{'txid': (str) txid,
                                                             'nout': (int) nout,
                                                             'amount': (float) amount}],
                        'txid': (str) claim txid,
                        'nout': (str) claim nout,
                        'signature_is_valid': (bool), included if has_signature,
                        'value': ClaimDict if decoded, otherwise hex string
                    }
                ],
            }
        }
    """
    # `uris` used to default to a shared mutable list ([]), a classic Python
    # pitfall; use None as the sentinel and normalize to a tuple here.
    uris = tuple(uris) if uris else ()
    page = int(page)
    page_size = int(page_size)
    if uri is not None:
        uris += (uri,)
    results = {}
    # collect per-uri validation errors; only well-formed channel uris are resolved
    valid_uris = tuple()
    for chan_uri in uris:
        try:
            parsed = parse_lbry_uri(chan_uri)
            if not parsed.is_channel:
                results[chan_uri] = {"error": "%s is not a channel uri" % parsed.name}
            elif parsed.path:
                results[chan_uri] = {"error": "%s is a claim in a channel" % parsed.path}
            else:
                valid_uris += (chan_uri,)
        except URIParseError:
            results[chan_uri] = {"error": "%s is not a valid uri" % chan_uri}

    resolved = await self.wallet_manager.resolve(*valid_uris, page=page, page_size=page_size)
    for u in resolved:
        if 'error' in resolved[u]:
            results[u] = resolved[u]
        else:
            results[u] = {
                'claims_in_channel': resolved[u]['claims_in_channel']
            }
            if page:
                # page 0 means "no pagination": only include page details
                # when a specific page was requested
                results[u]['returned_page'] = page
                results[u]['claims_in_channel'] = resolved[u].get('claims_in_channel', [])
    return results
@requires(WALLET_COMPONENT)
def jsonrpc_transaction_list(self, account_id=None, page=None, page_size=None):
    """
    List transactions belonging to wallet

    Usage:
        transaction_list [<account_id> | --account_id=<account_id>]
                         [--page=<page>] [--page_size=<page_size>]

    Options:
        --account_id=<account_id> : (str) id of the account to query
        --page=<page>             : (int) page to return during paginating
        --page_size=<page_size>   : (int) number of items on page during pagination

    Returns:
        (list) List of transactions

        {
            "claim_info": (list) claim info if in txn [{
                                                    "address": (str) address of claim,
                                                    "balance_delta": (float) bid amount,
                                                    "amount": (float) claim amount,
                                                    "claim_id": (str) claim id,
                                                    "claim_name": (str) claim name,
                                                    "nout": (int) nout
                                                    }],
            "abandon_info": (list) abandon info if in txn [{
                                                    "address": (str) address of abandoned claim,
                                                    "balance_delta": (float) returned amount,
                                                    "amount": (float) claim amount,
                                                    "claim_id": (str) claim id,
                                                    "claim_name": (str) claim name,
                                                    "nout": (int) nout
                                                    }],
            "confirmations": (int) number of confirmations for the txn,
            "date": (str) date and time of txn,
            "fee": (float) txn fee,
            "support_info": (list) support info if in txn [{
                                                    "address": (str) address of support,
                                                    "balance_delta": (float) support amount,
                                                    "amount": (float) support amount,
                                                    "claim_id": (str) claim id,
                                                    "claim_name": (str) claim name,
                                                    "is_tip": (bool),
                                                    "nout": (int) nout
                                                    }],
            "timestamp": (int) timestamp,
            "txid": (str) txn id,
            "update_info": (list) update info if in txn [{
                                                    "address": (str) address of claim,
                                                    "balance_delta": (float) credited/debited
                                                    "amount": (float) absolute amount,
                                                    "claim_id": (str) claim id,
                                                    "claim_name": (str) claim name,
                                                    "nout": (int) nout
                                                    }],
            "value": (float) value of txn
        }
    """
    account = self.get_account_or_default(account_id)
    # history comes from the wallet manager but the count comes from the
    # ledger db; the account is forwarded as a keyword to both callables
    return maybe_paginate(
        self.wallet_manager.get_history,
        self.ledger.db.get_transaction_count,
        page, page_size, account=account
    )
@requires(WALLET_COMPONENT)
def jsonrpc_transaction_show(self, txid):
    """
    Get a decoded transaction from a txid

    Usage:
        transaction_show (<txid> | --txid=<txid>)

    Options:
        --txid=<txid>  : (str) txid of the transaction

    Returns:
        (dict) JSON formatted transaction
    """
    # thin pass-through to the wallet manager
    return self.wallet_manager.get_transaction(txid)
@requires(WALLET_COMPONENT)
def jsonrpc_utxo_list(self, account_id=None, page=None, page_size=None):
    """
    List unspent transaction outputs

    Usage:
        utxo_list [<account_id> | --account_id=<account_id>]
                  [--page=<page>] [--page_size=<page_size>]

    Options:
        --account_id=<account_id> : (str) id of the account to query
        --page=<page>             : (int) page to return during paginating
        --page_size=<page_size>   : (int) number of items on page during pagination

    Returns:
        (list) List of unspent transaction outputs (UTXOs)
        [
            {
                "address": (str) the output address
                "amount": (float) unspent amount
                "height": (int) block height
                "is_claim": (bool) is the tx a claim
                "is_coinbase": (bool) is the tx a coinbase tx
                "is_support": (bool) is the tx a support
                "is_update": (bool) is the tx an update
                "nout": (int) nout of the output
                "txid": (str) txid of the output
            },
            ...
        ]
    """
    account = self.get_account_or_default(account_id)
    # pagination is optional, same convention as claim_list_mine
    return maybe_paginate(
        account.get_utxos,
        account.get_utxo_count,
        page, page_size
    )
@requires(WALLET_COMPONENT)
def jsonrpc_block_show(self, blockhash=None, height=None):
    """
    Get contents of a block

    Usage:
        block_show (<blockhash> | --blockhash=<blockhash>) | (<height> | --height=<height>)

    Options:
        --blockhash=<blockhash>  : (str) hash of the block to look up
        --height=<height>        : (int) height of the block to look up

    Returns:
        (dict) Requested block
    """
    # exactly one of blockhash/height is expected (per the usage string);
    # the wallet manager resolves whichever one is given
    return self.wallet_manager.get_block(blockhash, height)
@requires(WALLET_COMPONENT, DHT_COMPONENT, BLOB_COMPONENT, RATE_LIMITER_COMPONENT, PAYMENT_RATE_COMPONENT,
          conditions=[WALLET_IS_UNLOCKED])
@defer.inlineCallbacks
def jsonrpc_blob_get(self, blob_hash, timeout=None, encoding=None, payment_rate_manager=None):
    """
    Download and return a blob

    Usage:
        blob_get (<blob_hash> | --blob_hash=<blob_hash>) [--timeout=<timeout>]
                 [--encoding=<encoding>] [--payment_rate_manager=<payment_rate_manager>]

    Options:
    --blob_hash=<blob_hash>                        : (str) blob hash of the blob to get
    --timeout=<timeout>                            : (int) timeout in number of seconds
    --encoding=<encoding>                          : (str) by default no attempt at decoding
                                                     is made, can be set to one of the
                                                     following decoders:
                                                        'json'
    --payment_rate_manager=<payment_rate_manager>  : (str) if not given the default payment rate
                                                     manager will be used.
                                                     supported alternative rate managers:
                                                        'only-free'

    Returns:
        (str) Success/Fail message or (dict) decoded data
    """
    decoders = {
        'json': json.loads
    }
    timeout = timeout or 30
    # NOTE(review): the `payment_rate_manager` argument is accepted but the
    # download below always uses self.payment_rate_manager — the parameter
    # appears to be ignored; confirm whether 'only-free' ever takes effect.
    blob = yield self._download_blob(blob_hash, rate_manager=self.payment_rate_manager, timeout=timeout)
    if encoding and encoding in decoders:
        # decode the blob contents with the requested decoder
        blob_file = blob.open_for_reading()
        result = decoders[encoding](blob_file.read())
        blob_file.close()
    else:
        result = "Downloaded blob %s" % blob_hash

    return result
@requires(BLOB_COMPONENT, DATABASE_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_blob_delete(self, blob_hash):
    """
    Delete a blob

    Usage:
        blob_delete (<blob_hash> | --blob_hash=<blob_hash>)

    Options:
        --blob_hash=<blob_hash>  : (str) blob hash of the blob to delete

    Returns:
        (str) Success/fail message
    """
    if blob_hash not in self.blob_manager.blobs:
        return "Don't have that blob"
    try:
        # if the hash is an sd hash, best-effort delete the whole stream;
        # failures are deliberately ignored (the blob may not be an sd blob)
        stream_hash = yield self.storage.get_stream_hash_for_sd_hash(blob_hash)
        yield self.storage.delete_stream(stream_hash)
    except Exception as err:
        pass
    yield self.blob_manager.delete_blobs([blob_hash])
    return "Deleted %s" % blob_hash
@requires(DHT_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_peer_list(self, blob_hash, timeout=None):
    """
    Get peers for blob hash

    Usage:
        peer_list (<blob_hash> | --blob_hash=<blob_hash>) [<timeout> | --timeout=<timeout>]

    Options:
        --blob_hash=<blob_hash>  : (str) find available peers for this blob hash
        --timeout=<timeout>      : (int) peer search timeout in seconds

    Returns:
        (list) List of contact dictionaries {'host': <peer ip>, 'port': <peer port>, 'node_id': <peer node id>}
    """

    if not is_valid_blobhash(blob_hash):
        # raise and let the caller know if the blob hash is invalid
        raise Exception("invalid blob hash")

    finished_deferred = self.dht_node.iterativeFindValue(unhexlify(blob_hash))

    def trap_timeout(err):
        # a DHT search timeout simply means no peers were found
        err.trap(defer.TimeoutError)
        return []

    finished_deferred.addTimeout(timeout or conf.settings['peer_search_timeout'], self.dht_node.clock)
    finished_deferred.addErrback(trap_timeout)
    peers = yield finished_deferred
    results = [
        {
            "node_id": hexlify(node_id).decode(),
            "host": host,
            "port": port
        }
        for node_id, host, port in peers
    ]
    return results
@requires(DATABASE_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_blob_announce(self, blob_hash=None, stream_hash=None, sd_hash=None):
    """
    Announce blobs to the DHT

    Usage:
        blob_announce [<blob_hash> | --blob_hash=<blob_hash>]
                      [<stream_hash> | --stream_hash=<stream_hash>] | [<sd_hash> | --sd_hash=<sd_hash>]

    Options:
        --blob_hash=<blob_hash>        : (str) announce a blob, specified by blob_hash
        --stream_hash=<stream_hash>    : (str) announce all blobs associated with
                                         stream_hash
        --sd_hash=<sd_hash>            : (str) announce all blobs associated with
                                         sd_hash and the sd_hash itself

    Returns:
        (bool) true if successful
    """
    blob_hashes = []
    if blob_hash:
        blob_hashes.append(blob_hash)
    elif stream_hash or sd_hash:
        # sd_hash and stream_hash identify the same stream two ways;
        # accepting both would be ambiguous
        if sd_hash and stream_hash:
            raise Exception("either the sd hash or the stream hash should be provided, not both")
        if sd_hash:
            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
        blobs = yield self.storage.get_blobs_for_stream(stream_hash, only_completed=True)
        blob_hashes.extend(blob.blob_hash for blob in blobs if blob.blob_hash is not None)
    else:
        raise Exception('single argument must be specified')
    # queue the hashes for immediate (one-off) announcement
    yield self.storage.should_single_announce_blobs(blob_hashes, immediate=True)
    return True
@requires(FILE_MANAGER_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_file_reflect(self, **kwargs):
    """
    Reflect all the blobs in a file matching the filter criteria

    Usage:
        file_reflect [--sd_hash=<sd_hash>] [--file_name=<file_name>]
                     [--stream_hash=<stream_hash>] [--rowid=<rowid>]
                     [--reflector=<reflector>]

    Options:
        --sd_hash=<sd_hash>          : (str) get file with matching sd hash
        --file_name=<file_name>      : (str) get file with matching file name in the
                                       downloads folder
        --stream_hash=<stream_hash>  : (str) get file with matching stream hash
        --rowid=<rowid>              : (int) get file with matching row id
        --reflector=<reflector>      : (str) reflector server, ip address or url
                                       by default choose a server from the config

    Returns:
        (list) list of blobs reflected
    """
    # pop the reflector override; remaining kwargs are file filters
    reflector_server = kwargs.get('reflector', None)
    lbry_files = yield self._get_lbry_files(**kwargs)
    # the filters must match exactly one file
    if len(lbry_files) > 1:
        raise Exception('Too many (%i) files found, need one' % len(lbry_files))
    elif not lbry_files:
        raise Exception('No file found')
    lbry_file = lbry_files[0]

    results = yield reupload.reflect_file(lbry_file, reflector_server=reflector_server)
    return results
@requires(BLOB_COMPONENT, WALLET_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_blob_list(self, uri=None, stream_hash=None, sd_hash=None, needed=None,
                      finished=None, page_size=None, page=None):
    """
    Returns blob hashes. If not given filters, returns all blobs known by the blob manager

    Usage:
        blob_list [--needed] [--finished] [<uri> | --uri=<uri>]
                  [<stream_hash> | --stream_hash=<stream_hash>]
                  [<sd_hash> | --sd_hash=<sd_hash>]
                  [<page_size> | --page_size=<page_size>]
                  [<page> | --page=<page>]

    Options:
        --needed                     : (bool) only return needed blobs
        --finished                   : (bool) only return finished blobs
        --uri=<uri>                  : (str) filter blobs by stream in a uri
        --stream_hash=<stream_hash>  : (str) filter blobs by stream hash
        --sd_hash=<sd_hash>          : (str) filter blobs by sd hash
        --page_size=<page_size>      : (int) results page size
        --page=<page>                : (int) page of results to return

    Returns:
        (list) List of blob hashes
    """
    if uri or stream_hash or sd_hash:
        # normalize whichever identifier was given into (sd_hash, stream_hash)
        if uri:
            metadata = (yield f2d(self.wallet_manager.resolve(uri)))[uri]
            sd_hash = utils.get_sd_hash(metadata)
            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
        elif stream_hash:
            sd_hash = yield self.storage.get_sd_blob_hash_for_stream(stream_hash)
        elif sd_hash:
            stream_hash = yield self.storage.get_stream_hash_for_sd_hash(sd_hash)
            sd_hash = yield self.storage.get_sd_blob_hash_for_stream(stream_hash)
        if stream_hash:
            crypt_blobs = yield self.storage.get_blobs_for_stream(stream_hash)
            blobs = yield defer.gatherResults([
                self.blob_manager.get_blob(crypt_blob.blob_hash, crypt_blob.length)
                for crypt_blob in crypt_blobs if crypt_blob.blob_hash is not None
            ])
        else:
            blobs = []
        # get_blobs_for_stream does not include the sd blob, so we'll add it manually
        if sd_hash in self.blob_manager.blobs:
            blobs = [self.blob_manager.blobs[sd_hash]] + blobs
    else:
        blobs = self.blob_manager.blobs.values()

    if needed:
        blobs = [blob for blob in blobs if not blob.get_is_verified()]
    if finished:
        blobs = [blob for blob in blobs if blob.get_is_verified()]

    blob_hashes = [blob.blob_hash for blob in blobs if blob.blob_hash]
    # NOTE(review): paging is zero-based here (page defaults to 0 and
    # start_index = page * page_size), so passing page=1 skips the first
    # page_size results — confirm this matches callers' expectations.
    page_size = page_size or len(blob_hashes)
    page = page or 0
    start_index = page * page_size
    stop_index = start_index + page_size
    return blob_hashes[start_index:stop_index]
@requires(BLOB_COMPONENT)
def jsonrpc_blob_reflect(self, blob_hashes, reflector_server=None):
    """
    Reflects specified blobs

    Usage:
        blob_reflect (<blob_hashes>...) [--reflector_server=<reflector_server>]

    Options:
        --reflector_server=<reflector_server>          : (str) reflector address

    Returns:
        (list) reflected blob hashes
    """
    # returns a Deferred whose result is rendered as the JSON response
    d = reupload.reflect_blob_hashes(blob_hashes, self.blob_manager, reflector_server)
    d.addCallback(lambda r: self._render_response(r))
    return d
@requires(BLOB_COMPONENT)
def jsonrpc_blob_reflect_all(self):
    """
    Reflects all saved blobs

    Usage:
        blob_reflect_all

    Options:
        None

    Returns:
        (bool) true if successful
    """
    # gather every verified blob, then push them all to the reflector
    d = self.blob_manager.get_all_verified_blobs()
    d.addCallback(reupload.reflect_blob_hashes, self.blob_manager)
    d.addCallback(lambda r: self._render_response(r))
    return d
@requires(DHT_COMPONENT)
@defer.inlineCallbacks
def jsonrpc_peer_ping(self, node_id, address=None, port=None):
    """
    Send a kademlia ping to the specified peer. If address and port are provided the peer is directly pinged,
    if not provided the peer is located first.

    Usage:
        peer_ping (<node_id> | --node_id=<node_id>) [<address> | --address=<address>] [<port> | --port=<port>]

    Options:
        --address=<address>     : (str) ip address of the peer
        --port=<port>           : (int) udp port of the peer

    Returns:
        (str) pong, or {'error': <error message>} if an error is encountered
    """
    contact = None
    if node_id and address and port:
        # try a known contact first; otherwise register a fresh one
        contact = self.dht_node.contact_manager.get_contact(unhexlify(node_id), address, int(port))
        if not contact:
            contact = self.dht_node.contact_manager.make_contact(
                unhexlify(node_id), address, int(port), self.dht_node._protocol
            )
    if not contact:
        # no direct address given (or contact creation failed): locate the
        # peer through a DHT lookup
        try:
            contact = yield self.dht_node.findContact(unhexlify(node_id))
        except TimeoutError:
            return {'error': 'timeout finding peer'}
    if not contact:
        return {'error': 'peer not found'}
    try:
        result = (yield contact.ping()).decode()
    except TimeoutError:
        result = {'error': 'ping timeout'}
    return result
@requires(DHT_COMPONENT)
def jsonrpc_routing_table_get(self):
    """
    Get DHT routing information

    Usage:
        routing_table_get

    Options:
        None

    Returns:
        (dict) dictionary containing routing and contact information
        {
            "buckets": {
                <bucket index>: [
                    {
                        "address": (str) peer address,
                        "port": (int) peer udp port
                        "node_id": (str) peer node id,
                        "blobs": (list) blob hashes announced by peer
                    }
                ]
            },
            "contacts": (list) contact node ids,
            "blob_hashes": (list) all of the blob hashes stored by peers in the list of buckets,
            "node_id": (str) the local dht node id
        }
    """
    result = {}
    data_store = self.dht_node._dataStore
    # invert the data store: map each contact to the blob hashes it announced
    hosts = {}

    for k, v in data_store.items():
        for contact in map(itemgetter(0), v):
            hosts.setdefault(contact, []).append(hexlify(k).decode())

    contact_set = set()
    blob_hashes = set()
    result['buckets'] = {}

    for i in range(len(self.dht_node._routingTable._buckets)):
        result['buckets'][i] = []
        for contact in self.dht_node._routingTable._buckets[i]._contacts:
            # pop so each contact's blobs are reported under one bucket entry
            blobs = list(hosts.pop(contact)) if contact in hosts else []
            blob_hashes.update(blobs)
            host = {
                "address": contact.address,
                "port": contact.port,
                "node_id": hexlify(contact.id).decode(),
                "blobs": blobs,
            }
            result['buckets'][i].append(host)
            contact_set.add(hexlify(contact.id).decode())

    result['contacts'] = list(contact_set)
    result['blob_hashes'] = list(blob_hashes)
    result['node_id'] = hexlify(self.dht_node.node_id).decode()
    return self._render_response(result)
# the single peer downloader needs wallet access
@requires(DHT_COMPONENT, WALLET_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
def jsonrpc_blob_availability(self, blob_hash, search_timeout=None, blob_timeout=None):
    """
    Get blob availability

    Usage:
        blob_availability (<blob_hash>) [<search_timeout> | --search_timeout=<search_timeout>]
                          [<blob_timeout> | --blob_timeout=<blob_timeout>]

    Options:
        --blob_hash=<blob_hash>           : (str) check availability for this blob hash
        --search_timeout=<search_timeout> : (int) how long to search for peers for the blob
                                            in the dht
        --blob_timeout=<blob_timeout>     : (int) how long to try downloading from a peer

    Returns:
        (dict) {
            "is_available": <bool, true if blob is available from a peer from peer list>
            "reachable_peers": ["<ip>:<port>"],
            "unreachable_peers": ["<ip>:<port>"]
        }
    """
    # delegates to the shared availability helper used by stream_availability
    return self._blob_availability(blob_hash, search_timeout, blob_timeout)
@requires(UPNP_COMPONENT, WALLET_COMPONENT, DHT_COMPONENT, conditions=[WALLET_IS_UNLOCKED])
@defer.inlineCallbacks
def jsonrpc_stream_availability(self, uri, search_timeout=None, blob_timeout=None):
    """
    Get stream availability for lbry uri

    Usage:
        stream_availability (<uri> | --uri=<uri>)
                            [<search_timeout> | --search_timeout=<search_timeout>]
                            [<blob_timeout> | --blob_timeout=<blob_timeout>]

    Options:
        --uri=<uri>                       : (str) check availability for this uri
        --search_timeout=<search_timeout> : (int) how long to search for peers for the blob
                                            in the dht
        --blob_timeout=<blob_timeout>     : (int) how long to try downloading from a peer

    Returns:
        (dict) {
            'is_available': <bool>,
            'did_decode': <bool>,
            'did_resolve': <bool>,
            'is_stream': <bool>,
            'num_blobs_in_stream': <int>,
            'sd_hash': <str>,
            'sd_blob_availability': <dict> see `blob_availability`,
            'head_blob_hash': <str>,
            'head_blob_availability': <dict> see `blob_availability`,
            'use_upnp': <bool>,
            'upnp_redirect_is_set': <bool>,
            'error': <None> | <str> error message
        }
    """
    search_timeout = search_timeout or conf.settings['peer_search_timeout']
    blob_timeout = blob_timeout or conf.settings['sd_download_timeout']

    # the response is filled in field by field; on any failure the partial
    # response is returned with 'error' set
    response = {
        'is_available': False,
        'did_decode': False,
        'did_resolve': False,
        'is_stream': False,
        'num_blobs_in_stream': None,
        'sd_hash': None,
        'sd_blob_availability': {},
        'head_blob_hash': None,
        'head_blob_availability': {},
        'use_upnp': conf.settings['use_upnp'],
        'upnp_redirect_is_set': len(self.upnp.upnp_redirects),
        'error': None
    }

    try:
        # resolve() returns {uri: result}; extract this uri's result now
        resolved_result = (yield self.wallet_manager.resolve(uri))[uri]
        response['did_resolve'] = True
    except UnknownNameError:
        response['error'] = "Failed to resolve name"
        defer.returnValue(response)
    except URIParseError:
        response['error'] = "Invalid URI"
        defer.returnValue(response)

    try:
        # BUGFIX: resolved_result was already indexed by uri above;
        # indexing it by uri again raised KeyError on every resolve
        claim_obj = smart_decode(resolved_result['claim']['hex'])
        response['did_decode'] = True
    except DecodeError:
        response['error'] = "Failed to decode claim value"
        defer.returnValue(response)

    response['is_stream'] = claim_obj.is_stream
    if not claim_obj.is_stream:
        response['error'] = "Claim for \"%s\" does not contain a stream" % uri
        defer.returnValue(response)

    sd_hash = claim_obj.source_hash
    response['sd_hash'] = sd_hash
    head_blob_hash = None
    downloader = self._get_single_peer_downloader()
    # remember whether we already held the sd blob so a temporary
    # download for inspection can be cleaned up afterwards
    have_sd_blob = sd_hash in self.blob_manager.blobs
    try:
        sd_blob = yield self.jsonrpc_blob_get(sd_hash, timeout=blob_timeout,
                                              encoding="json")
        if not have_sd_blob:
            yield self.jsonrpc_blob_delete(sd_hash)
        if sd_blob and 'blobs' in sd_blob:
            # last entry in 'blobs' is the stream terminator, not a real blob
            response['num_blobs_in_stream'] = len(sd_blob['blobs']) - 1
            head_blob_hash = sd_blob['blobs'][0]['blob_hash']
            head_blob_availability = yield self._blob_availability(head_blob_hash,
                                                                   search_timeout,
                                                                   blob_timeout,
                                                                   downloader)
            response['head_blob_availability'] = head_blob_availability
    except Exception as err:
        response['error'] = err
    response['head_blob_hash'] = head_blob_hash
    response['sd_blob_availability'] = yield self._blob_availability(sd_hash,
                                                                     search_timeout,
                                                                     blob_timeout,
                                                                     downloader)
    # the stream is available only if both the descriptor and the first
    # content blob can be fetched
    response['is_available'] = response['sd_blob_availability'].get('is_available') and \
                               response['head_blob_availability'].get('is_available')
    defer.returnValue(response)
async def get_channel_or_error(
        self, accounts: List[LBCAccount], channel_id: str = None, channel_name: str = None):
    """Return the channel certificate matching `channel_id` or `channel_name`.

    Looks up certificates whose private keys are held by `accounts`; the first
    match wins. Raises ValueError when neither identifier is given or no
    certificate is found. `channel_id` takes precedence over `channel_name`.
    """
    if channel_id is not None:
        certificates = await self.wallet_manager.get_certificates(
            private_key_accounts=accounts, claim_id=channel_id)
        if not certificates:
            raise ValueError(f"Couldn't find channel with claim_id '{channel_id}'.")
        return certificates[0]
    if channel_name is not None:
        certificates = await self.wallet_manager.get_certificates(
            private_key_accounts=accounts, claim_name=channel_name)
        if not certificates:
            raise ValueError(f"Couldn't find channel with name '{channel_name}'.")
        return certificates[0]
    raise ValueError("Couldn't find channel because a channel name or channel_id was not provided.")
def get_account_or_default(self, account_id: str, argument_name: str = "account", lbc_only=True):
    """Look up an account by id, falling back to the default account when id is None."""
    if account_id is not None:
        return self.get_account_or_error(account_id, argument_name, lbc_only)
    return self.default_account
def get_accounts_or_all(self, account_ids: List[str]):
    """Map each id to its account; with no ids, return every account in the default wallet."""
    if not account_ids:
        return self.default_wallet.accounts
    return [self.get_account_or_error(acct_id) for acct_id in account_ids]
def get_account_or_error(self, account_id: str, argument_name: str = "account", lbc_only=True):
    """Find the account with `account_id` in the default wallet.

    Raises ValueError when no account matches, or when `lbc_only` is set and
    the matching account belongs to a non-LBC ledger.
    """
    found = next(
        (acct for acct in self.default_wallet.accounts if acct.id == account_id), None
    )
    if found is None:
        raise ValueError(f"Couldn't find account: {account_id}.")
    if lbc_only and not isinstance(found, LBCAccount):
        raise ValueError(
            "Found '{}', but it's an {} ledger account. "
            "'{}' requires specifying an LBC ledger account."
            .format(account_id, found.ledger.symbol, argument_name)
        )
    return found
@staticmethod
def get_dewies_or_error(argument: str, lbc: str):
    """Convert an LBC amount string to dewies.

    Re-raises the conversion ValueError with the offending CLI argument name
    prepended, so the user sees which field was invalid.
    """
    try:
        return lbc_to_dewies(lbc)
    except ValueError as e:
        # f-string for consistency with the rest of the file's error messages
        raise ValueError(f"Invalid value for '{argument}': {e.args[0]}")
def loggly_time_string(dt):
    """Return `dt` as a URL-quoted ISO-8601 timestamp with millisecond precision.

    Fixes the previous formula, which multiplied microseconds by 10**-5 and
    appended the float with no separator, yielding garbage like
    '2020-01-02T03:04:051.235Z' instead of '2020-01-02T03:04:05.123Z'.
    """
    formatted_dt = dt.strftime("%Y-%m-%dT%H:%M:%S")
    milliseconds = ".{:03d}".format(dt.microsecond // 1000)
    return urllib.parse.quote(formatted_dt + milliseconds + "Z")
def get_loggly_query_string(installation_id):
    """Build a loggly search URL covering the last 24 hours for this installation."""
    base_loggly_search_url = "https://lbry.loggly.com/search#"
    now = utils.now()
    yesterday = now - utils.timedelta(days=1)
    short_id = installation_id[:SHORT_ID_LEN]
    params = {
        'terms': 'json.installation_id:{}*'.format(short_id),
        'from': loggly_time_string(yesterday),
        'to': loggly_time_string(now)
    }
    return base_loggly_search_url + urllib.parse.urlencode(params)
def report_bug_to_slack(message, installation_id, platform_name, app_version):
    """Post a bug report (with a loggly search link) to the configured Slack webhook."""
    webhook = utils.deobfuscate(conf.settings['SLACK_WEBHOOK'])
    text = "os: %s\n version: %s\n<%s|loggly>\n%s" % (
        platform_name,
        app_version,
        get_loggly_query_string(installation_id),
        message
    )
    requests.post(webhook, json.dumps({"text": text}))
def get_lbry_file_search_value(search_fields):
    """Return the first (searchtype, value) pair set in `search_fields`.

    Delegates to iter_lbry_file_search_values so the two helpers share one
    field-scanning implementation and cannot drift apart. Raises NoValidSearch
    when no known search field has a value.
    """
    for pair in iter_lbry_file_search_values(search_fields):
        return pair
    raise NoValidSearch(f'{search_fields} is missing a valid search type')
def iter_lbry_file_search_values(search_fields):
    """Yield (searchtype, value) for every FileID field that is set in `search_fields`."""
    candidates = (
        (searchtype, search_fields.get(searchtype, None)) for searchtype in FileID
    )
    yield from (
        (searchtype, value) for searchtype, value in candidates if value is not None
    )
def create_key_getter(field):
    """Build a getter that extracts dotted-path `field` from nested mappings.

    Example: create_key_getter("a.b")({"a": {"b": 1}}) -> 1. A missing key
    raises Exception naming both the full field path and the absent key.
    """
    search_path = field.split('.')

    def key_getter(value):
        # walk one path segment at a time into the nested mapping
        for key in search_path:
            try:
                value = value[key]
            except KeyError as e:
                errmsg = "Failed to get '{}', key {} was not found."
                # chain the original KeyError so tracebacks keep the real cause
                raise Exception(errmsg.format(field, str(e))) from e
        return value
    return key_getter
| 42.083211
| 120
| 0.558645
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.