Dataset schema (32 columns; ⌀ marks nullable fields):
hexsha: string (length 40)
size: int64 (2 to 1.02M)
ext: string (10 classes)
lang: string (1 class: Python)
max_stars_repo_path: string (length 4 to 245)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40)
max_stars_repo_licenses: list (1 to 10 items)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 4 to 245)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40)
max_issues_repo_licenses: list (1 to 10 items)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 4 to 245)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40)
max_forks_repo_licenses: list (1 to 10 items)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 2 to 1.02M)
avg_line_length: float64 (1 to 417k)
max_line_length: int64 (1 to 987k)
alphanum_fraction: float64 (0 to 1)
content_no_comment: string (length 0 to 1.01M)
is_comment_constant_removed: bool (1 class)
is_sharp_comment_removed: bool (1 class)
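The columns above describe a The Stack-style source-code dataset export. As a minimal sketch of how records with this schema could be inspected (the Parquet file name below is hypothetical and not part of this dump; pandas with Parquet support is assumed):

```python
# Minimal sketch: inspect records that follow the schema listed above.
# "code_records.parquet" is a hypothetical local file name, not part of the original dump.
import pandas as pd

df = pd.read_parquet("code_records.parquet")

# Confirm the columns match the schema.
print(df.columns.tolist())

# Repository metadata for the first record.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_count"])

# Simple text statistics stored alongside the file content.
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])

# Preview the source text itself.
print(row["content"][:200])
```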
hexsha: 1c44e5497fb4e61b1aa4587e81545dabe660d10a | size: 150 | ext: py | lang: Python
max_stars_repo: sdk/python/feast/pyspark/launchers/standalone/__init__.py | rafalzydowicz/feast @ 0d5cb8df2b2bd45b6631351c5ec8ba96bfd4d709 | licenses: ["Apache-2.0"] | stars: null | event datetimes: null / null
max_issues_repo: sdk/python/feast/pyspark/launchers/standalone/__init__.py | rafalzydowicz/feast @ 0d5cb8df2b2bd45b6631351c5ec8ba96bfd4d709 | licenses: ["Apache-2.0"] | issues: null | event datetimes: null / null
max_forks_repo: sdk/python/feast/pyspark/launchers/standalone/__init__.py | rafalzydowicz/feast @ 0d5cb8df2b2bd45b6631351c5ec8ba96bfd4d709 | licenses: ["Apache-2.0"] | forks: null | event datetimes: null / null
content:
from .local import StandaloneClusterLauncher, StandaloneClusterRetrievalJob
__all__ = ["StandaloneClusterRetrievalJob", "StandaloneClusterLauncher"]
avg_line_length: 37.5 | max_line_length: 75 | alphanum_fraction: 0.866667
content_no_comment:
from .local import StandaloneClusterLauncher, StandaloneClusterRetrievalJob
__all__ = ["StandaloneClusterRetrievalJob", "StandaloneClusterLauncher"]
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e57f91d648a32e072962c884f38cb5c387d3 | size: 1,742 | ext: py | lang: Python
max_stars_repo: aliyun-python-sdk-retailcloud/aliyunsdkretailcloud/request/v20180313/BatchAddServersRequest.py | jia-jerry/aliyun-openapi-python-sdk @ e90f3683a250cfec5b681b5f1d73a68f0dc9970d | licenses: ["Apache-2.0"] | stars: null | event datetimes: null / null
max_issues_repo: aliyun-python-sdk-retailcloud/aliyunsdkretailcloud/request/v20180313/BatchAddServersRequest.py | jia-jerry/aliyun-openapi-python-sdk @ e90f3683a250cfec5b681b5f1d73a68f0dc9970d | licenses: ["Apache-2.0"] | issues: 1 | event datetimes: 2020-05-31T14:51:47.000Z / 2020-05-31T14:51:47.000Z
max_forks_repo: aliyun-python-sdk-retailcloud/aliyunsdkretailcloud/request/v20180313/BatchAddServersRequest.py | jia-jerry/aliyun-openapi-python-sdk @ e90f3683a250cfec5b681b5f1d73a68f0dc9970d | licenses: ["Apache-2.0"] | forks: null | event datetimes: null / null
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkretailcloud.endpoint import endpoint_data
class BatchAddServersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'BatchAddServers','retailcloud')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_Sign(self):
return self.get_query_params().get('Sign')
def set_Sign(self,Sign):
self.add_query_param('Sign',Sign)
avg_line_length: 34.84 | max_line_length: 90 | alphanum_fraction: 0.760046
content_no_comment:
from aliyunsdkcore.request import RpcRequest
from aliyunsdkretailcloud.endpoint import endpoint_data
class BatchAddServersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'BatchAddServers','retailcloud')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_Sign(self):
return self.get_query_params().get('Sign')
def set_Sign(self,Sign):
self.add_query_param('Sign',Sign)
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e59d37dfc9d219bf668ad78d0ddb164f0805 | size: 5,556 | ext: py | lang: Python
max_stars_repo: yfinance/ticker.py | x1011x/yfinance @ 87a6dc2e9be7b013a11f956eb4593a5595798e2e | licenses: ["Apache-2.0"] | stars: null | event datetimes: null / null
max_issues_repo: yfinance/ticker.py | x1011x/yfinance @ 87a6dc2e9be7b013a11f956eb4593a5595798e2e | licenses: ["Apache-2.0"] | issues: null | event datetimes: null / null
max_forks_repo: yfinance/ticker.py | x1011x/yfinance @ 87a6dc2e9be7b013a11f956eb4593a5595798e2e | licenses: ["Apache-2.0"] | forks: null | event datetimes: null / null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
# import numpy as _np
# import json as _json
# import re as _re
from collections import namedtuple as _namedtuple
from .base import TickerBase
class Ticker(TickerBase):
def __repr__(self):
return 'yfinance.Ticker object <%s>' % self.ticker
def _download_options(self, date=None, proxy=None):
if date is None:
url = "{}/v7/finance/options/{}".format(
self._base_url, self.ticker)
else:
url = "{}/v7/finance/options/{}?date={}".format(
self._base_url, self.ticker, date)
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
r = _requests.get(url=url, proxies=proxy).json()
if r['optionChain']['result']:
for exp in r['optionChain']['result'][0]['expirationDates']:
self._expirations[_datetime.datetime.utcfromtimestamp(
exp).strftime('%Y-%m-%d')] = exp
return r['optionChain']['result'][0]['options'][0]
return {}
def _options2df(self, opt, tz=None):
data = _pd.DataFrame(opt).reindex(columns=[
'contractSymbol',
'lastTradeDate',
'strike',
'lastPrice',
'bid',
'ask',
'change',
'percentChange',
'volume',
'openInterest',
'impliedVolatility',
'inTheMoney',
'contractSize',
'currency'])
data['lastTradeDate'] = _pd.to_datetime(
data['lastTradeDate'], unit='s')
if tz is not None:
data['lastTradeDate'] = data['lastTradeDate'].tz_localize(tz)
return data
def option_chain(self, date=None, proxy=None, tz=None):
if date is None:
options = self._download_options(proxy=proxy)
else:
if not self._expirations:
self._download_options()
if date not in self._expirations:
raise ValueError(
"Expiration `%s` cannot be found. "
"Available expiration are: [%s]" % (
date, ', '.join(self._expirations)))
date = self._expirations[date]
options = self._download_options(date, proxy=proxy)
return _namedtuple('Options', ['calls', 'puts'])(**{
"calls": self._options2df(options['calls'], tz=tz),
"puts": self._options2df(options['puts'], tz=tz)
})
# ------------------------
@property
def isin(self):
return self.get_isin()
@property
def major_holders(self):
return self.get_major_holders()
@property
def institutional_holders(self):
return self.get_institutional_holders()
@property
def mutualfund_holders(self):
return self.get_mutualfund_holders()
@property
def dividends(self):
return self.get_dividends()
@property
def splits(self):
return self.get_splits()
@property
def actions(self):
return self.get_actions()
@property
def info(self):
return self.get_info()
@property
def calendar(self):
return self.get_calendar()
@property
def recommendations(self):
return self.get_recommendations()
@property
def earnings(self):
return self.get_earnings()
@property
def quarterly_earnings(self):
return self.get_earnings(freq='quarterly')
@property
def financials(self):
return self.get_financials()
@property
def annualbasiceps(self):
return self.get_annualbasiceps()
@property
def quarterly_financials(self):
return self.get_financials(freq='quarterly')
@property
def balance_sheet(self):
return self.get_balancesheet()
@property
def quarterly_balance_sheet(self):
return self.get_balancesheet(freq='quarterly')
@property
def balancesheet(self):
return self.get_balancesheet()
@property
def quarterly_balancesheet(self):
return self.get_balancesheet(freq='quarterly')
@property
def cashflow(self):
return self.get_cashflow()
@property
def quarterly_cashflow(self):
return self.get_cashflow(freq='quarterly')
@property
def sustainability(self):
return self.get_sustainability()
@property
def options(self):
if not self._expirations:
self._download_options()
return tuple(self._expirations.keys())
avg_line_length: 27.641791 | max_line_length: 74 | alphanum_fraction: 0.608531
content_no_comment:
from __future__ import print_function
import datetime as _datetime
import requests as _requests
import pandas as _pd
from collections import namedtuple as _namedtuple
from .base import TickerBase
class Ticker(TickerBase):
def __repr__(self):
return 'yfinance.Ticker object <%s>' % self.ticker
def _download_options(self, date=None, proxy=None):
if date is None:
url = "{}/v7/finance/options/{}".format(
self._base_url, self.ticker)
else:
url = "{}/v7/finance/options/{}?date={}".format(
self._base_url, self.ticker, date)
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
r = _requests.get(url=url, proxies=proxy).json()
if r['optionChain']['result']:
for exp in r['optionChain']['result'][0]['expirationDates']:
self._expirations[_datetime.datetime.utcfromtimestamp(
exp).strftime('%Y-%m-%d')] = exp
return r['optionChain']['result'][0]['options'][0]
return {}
def _options2df(self, opt, tz=None):
data = _pd.DataFrame(opt).reindex(columns=[
'contractSymbol',
'lastTradeDate',
'strike',
'lastPrice',
'bid',
'ask',
'change',
'percentChange',
'volume',
'openInterest',
'impliedVolatility',
'inTheMoney',
'contractSize',
'currency'])
data['lastTradeDate'] = _pd.to_datetime(
data['lastTradeDate'], unit='s')
if tz is not None:
data['lastTradeDate'] = data['lastTradeDate'].tz_localize(tz)
return data
def option_chain(self, date=None, proxy=None, tz=None):
if date is None:
options = self._download_options(proxy=proxy)
else:
if not self._expirations:
self._download_options()
if date not in self._expirations:
raise ValueError(
"Expiration `%s` cannot be found. "
"Available expiration are: [%s]" % (
date, ', '.join(self._expirations)))
date = self._expirations[date]
options = self._download_options(date, proxy=proxy)
return _namedtuple('Options', ['calls', 'puts'])(**{
"calls": self._options2df(options['calls'], tz=tz),
"puts": self._options2df(options['puts'], tz=tz)
})
@property
def isin(self):
return self.get_isin()
@property
def major_holders(self):
return self.get_major_holders()
@property
def institutional_holders(self):
return self.get_institutional_holders()
@property
def mutualfund_holders(self):
return self.get_mutualfund_holders()
@property
def dividends(self):
return self.get_dividends()
@property
def splits(self):
return self.get_splits()
@property
def actions(self):
return self.get_actions()
@property
def info(self):
return self.get_info()
@property
def calendar(self):
return self.get_calendar()
@property
def recommendations(self):
return self.get_recommendations()
@property
def earnings(self):
return self.get_earnings()
@property
def quarterly_earnings(self):
return self.get_earnings(freq='quarterly')
@property
def financials(self):
return self.get_financials()
@property
def annualbasiceps(self):
return self.get_annualbasiceps()
@property
def quarterly_financials(self):
return self.get_financials(freq='quarterly')
@property
def balance_sheet(self):
return self.get_balancesheet()
@property
def quarterly_balance_sheet(self):
return self.get_balancesheet(freq='quarterly')
@property
def balancesheet(self):
return self.get_balancesheet()
@property
def quarterly_balancesheet(self):
return self.get_balancesheet(freq='quarterly')
@property
def cashflow(self):
return self.get_cashflow()
@property
def quarterly_cashflow(self):
return self.get_cashflow(freq='quarterly')
@property
def sustainability(self):
return self.get_sustainability()
@property
def options(self):
if not self._expirations:
self._download_options()
return tuple(self._expirations.keys())
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e70e26e2aae7a184066da9cd3e62efc063db | size: 102 | ext: py | lang: Python
max_stars_repo: src/question_analysis/__init__.py | collab-uniba/qavmentor-service @ f3c6f8a02bca3eeb0521ca3ac3b6e97542754c2a | licenses: ["MIT"] | stars: 1 | event datetimes: 2018-07-23T14:42:22.000Z / 2018-07-23T14:42:22.000Z
max_issues_repo: src/question_analysis/__init__.py | collab-uniba/qavmentor-service @ f3c6f8a02bca3eeb0521ca3ac3b6e97542754c2a | licenses: ["MIT"] | issues: 56 | event datetimes: 2018-05-24T09:40:03.000Z / 2022-02-11T03:40:09.000Z
max_forks_repo: src/question_analysis/__init__.py | collab-uniba/qavmentor @ 669025a40dd04cd8c9cbd264587918025ef39d20 | licenses: ["MIT"] | forks: 1 | event datetimes: 2018-05-20T09:30:48.000Z / 2018-05-20T09:30:48.000Z
content:
from question_analysis.feature_analysis import FeatureAnalysis
from question_analysis.post import Post
avg_line_length: 51 | max_line_length: 62 | alphanum_fraction: 0.911765
content_no_comment:
from question_analysis.feature_analysis import FeatureAnalysis
from question_analysis.post import Post
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e7acfddcff965ad8a65694d273cf32dc1d48 | size: 513 | ext: py | lang: Python
max_stars_repo: Graphs/Line Graph.py | TausifAnsari/PyHub @ f6c949dc6a3974f57d7d146708443d0ceeb4418f | licenses: ["MIT"] | stars: 1 | event datetimes: 2020-09-30T19:31:20.000Z / 2020-09-30T19:31:20.000Z
max_issues_repo: Graphs/Line Graph.py | TanviSutar/PyHub @ 6281e9f515674fb51f0d0862c26ec18020fa7d83 | licenses: ["MIT"] | issues: null | event datetimes: null / null
max_forks_repo: Graphs/Line Graph.py | TanviSutar/PyHub @ 6281e9f515674fb51f0d0862c26ec18020fa7d83 | licenses: ["MIT"] | forks: null | event datetimes: null / null
content:
# pip install matplotlib
import matplotlib.pyplot as graph
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul"]
scores = [100,130,125,90,20,50,70]
graph.plot(months,scores,color=(0/255,0/255,255/255),marker = "+",markersize = 10,markeredgewidth = 2,
linewidth = 2,linestyle = "dotted", markeredgecolor = (255/255,0,0))
# The colour code is in RGB. Make sure you divide it by 255 (values have to be between 0 and 1)
graph.title("Monthly Analysis")
graph.xlabel("Months")
graph.ylabel("Stocks Sold")
graph.show()
avg_line_length: 39.461538 | max_line_length: 102 | alphanum_fraction: 0.707602
content_no_comment:
import matplotlib.pyplot as graph
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul"]
scores = [100,130,125,90,20,50,70]
graph.plot(months,scores,color=(0/255,0/255,255/255),marker = "+",markersize = 10,markeredgewidth = 2,
linewidth = 2,linestyle = "dotted", markeredgecolor = (255/255,0,0))
graph.title("Monthly Analysis")
graph.xlabel("Months")
graph.ylabel("Stocks Sold")
graph.show()
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e8a498108faa483b7bb06f12f9ac647c973f | size: 6,436 | ext: py | lang: Python
max_stars_repo: ietf/utils/text.py | unofficial-mirror/ietfdb @ ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | licenses: ["BSD-3-Clause"] | stars: null | event datetimes: null / null
max_issues_repo: ietf/utils/text.py | unofficial-mirror/ietfdb @ ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | licenses: ["BSD-3-Clause"] | issues: null | event datetimes: null / null
max_forks_repo: ietf/utils/text.py | unofficial-mirror/ietfdb @ ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81 | licenses: ["BSD-3-Clause"] | forks: null | event datetimes: null / null
content:
# Copyright The IETF Trust 2016-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import re
import six
import textwrap
import unicodedata
from django.utils.functional import keep_lazy
from django.utils.safestring import mark_safe
import debug # pyflakes:ignore
from .texescape import init as texescape_init, tex_escape_map
@keep_lazy(str)
def xslugify(value):
"""
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, slash, or hyphens. Converts to
lowercase. Also strips leading and trailing whitespace.
(I.e., does the same as slugify, but also converts slashes to dashes.)
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s/-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s/]+', '-', value))
def strip_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
else:
return text
def strip_suffix(text, suffix):
if text.endswith(suffix):
return text[:-len(suffix)]
else:
return text
def fill(text, width):
"""Wraps each paragraph in text (a string) so every line
is at most width characters long, and returns a single string
containing the wrapped paragraph.
"""
width = int(width)
paras = text.replace("\r\n","\n").replace("\r","\n").split("\n\n")
wrapped = []
for para in paras:
if para:
lines = para.split("\n")
maxlen = max([len(line) for line in lines])
if maxlen > width:
para = textwrap.fill(para, width, replace_whitespace=False)
wrapped.append(para)
return "\n\n".join(wrapped)
def wordwrap(text, width=80):
"""Wraps long lines without loosing the formatting and indentation
of short lines"""
if not isinstance(text, six.string_types):
return text
def block_separator(s):
"Look for lines of identical symbols, at least three long"
ss = s.strip()
chars = set(ss)
return len(chars) == 1 and len(ss) >= 3 and ss[0] in set('#*+-.=_~')
width = int(width) # ensure we have an int, if this is used as a template filter
text = re.sub(" *\r\n", "\n", text) # get rid of DOS line endings
text = re.sub(" *\r", "\n", text) # get rid of MAC line endings
text = re.sub("( *\n){3,}", "\n\n", text) # get rid of excessive vertical whitespace
lines = text.split("\n")
filled = []
wrapped = False
prev_indent = None
for line in lines:
line = line.expandtabs().rstrip()
indent = " " * (len(line) - len(line.lstrip()))
ind = len(indent)
if wrapped and line.strip() != "" and indent == prev_indent and not block_separator(line):
line = filled[-1] + " " + line.lstrip()
filled = filled[:-1]
else:
wrapped = False
while (len(line) > width) and (" " in line[ind:]):
linelength = len(line)
wrapped = True
breakpoint = line.rfind(" ",ind,width)
if breakpoint == -1:
breakpoint = line.find(" ", ind)
filled += [ line[:breakpoint] ]
line = indent + line[breakpoint+1:]
if len(line) >= linelength:
break
filled += [ line.rstrip() ]
prev_indent = indent
return "\n".join(filled)
# def alternative_wrap(text, width=80):
# # From http://blog.belgoat.com/python-textwrap-wrap-your-text-to-terminal-size/
# textLines = text.split('\n')
# wrapped_lines = []
# # Preserve any indent (after the general indent)
# for line in textLines:
# preservedIndent = ''
# existIndent = re.search(r'^(\W+)', line)
# # Change the existing wrap indent to the original one
# if (existIndent):
# preservedIndent = existIndent.groups()[0]
# wrapped_lines.append(textwrap.fill(line, width=width, subsequent_indent=preservedIndent))
# text = '\n'.join(wrapped_lines)
# return text
def wrap_text_if_unwrapped(text, width=80, max_tolerated_line_length=100):
text = re.sub(" *\r\n", "\n", text) # get rid of DOS line endings
text = re.sub(" *\r", "\n", text) # get rid of MAC line endings
width = int(width) # ensure we have an int, if this is used as a template filter
max_tolerated_line_length = int(max_tolerated_line_length)
contains_long_lines = any(" " in l and len(l) > max_tolerated_line_length
for l in text.split("\n"))
if contains_long_lines:
text = wordwrap(text, width)
return text
def isascii(text):
try:
text.encode('ascii')
return True
except (UnicodeEncodeError, UnicodeDecodeError):
return False
def maybe_split(text, split=True, pos=5000):
if split:
n = text.find("\n", pos)
text = text[:n+1]
return text
def decode(raw):
assert isinstance(raw, six.binary_type)
try:
text = raw.decode('utf-8')
except UnicodeDecodeError:
# if this fails, don't catch the exception here; let it propagate
text = raw.decode('latin-1')
#
return text
def text_to_dict(t):
"Converts text with RFC2822-formatted header fields into a dictionary-like object."
# ensure we're handed a unicode parameter
assert isinstance(t, six.text_type)
d = {}
# Return {} for malformed input
if not len(t.lstrip()) == len(t):
return {}
lines = t.splitlines()
items = []
# unfold folded lines
for l in lines:
if len(l) and l[0].isspace():
if items:
items[-1] += l
else:
return {}
else:
items.append(l)
for i in items:
if re.match('^[A-Za-z0-9-]+: ', i):
k, v = i.split(': ', 1)
d[k] = v
else:
return {}
return d
def dict_to_text(d):
"Convert a dictionary to RFC2822-formatted text"
t = ""
for k, v in d.items():
t += "%s: %s\n" % (k, v)
return t
def texescape(s):
if not tex_escape_map:
texescape_init()
t = s.translate(tex_escape_map)
return t
def unwrap(s):
return s.replace('\n', ' ')
avg_line_length: 32.670051 | max_line_length: 101 | alphanum_fraction: 0.588254
content_no_comment:
from __future__ import absolute_import, print_function, unicode_literals
import re
import six
import textwrap
import unicodedata
from django.utils.functional import keep_lazy
from django.utils.safestring import mark_safe
import debug
from .texescape import init as texescape_init, tex_escape_map
@keep_lazy(str)
def xslugify(value):
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s/-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s/]+', '-', value))
def strip_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
else:
return text
def strip_suffix(text, suffix):
if text.endswith(suffix):
return text[:-len(suffix)]
else:
return text
def fill(text, width):
width = int(width)
paras = text.replace("\r\n","\n").replace("\r","\n").split("\n\n")
wrapped = []
for para in paras:
if para:
lines = para.split("\n")
maxlen = max([len(line) for line in lines])
if maxlen > width:
para = textwrap.fill(para, width, replace_whitespace=False)
wrapped.append(para)
return "\n\n".join(wrapped)
def wordwrap(text, width=80):
if not isinstance(text, six.string_types):
return text
def block_separator(s):
ss = s.strip()
chars = set(ss)
return len(chars) == 1 and len(ss) >= 3 and ss[0] in set('#*+-.=_~')
width = int(width)
text = re.sub(" *\r\n", "\n", text)
text = re.sub(" *\r", "\n", text)
text = re.sub("( *\n){3,}", "\n\n", text)
lines = text.split("\n")
filled = []
wrapped = False
prev_indent = None
for line in lines:
line = line.expandtabs().rstrip()
indent = " " * (len(line) - len(line.lstrip()))
ind = len(indent)
if wrapped and line.strip() != "" and indent == prev_indent and not block_separator(line):
line = filled[-1] + " " + line.lstrip()
filled = filled[:-1]
else:
wrapped = False
while (len(line) > width) and (" " in line[ind:]):
linelength = len(line)
wrapped = True
breakpoint = line.rfind(" ",ind,width)
if breakpoint == -1:
breakpoint = line.find(" ", ind)
filled += [ line[:breakpoint] ]
line = indent + line[breakpoint+1:]
if len(line) >= linelength:
break
filled += [ line.rstrip() ]
prev_indent = indent
return "\n".join(filled)
def wrap_text_if_unwrapped(text, width=80, max_tolerated_line_length=100):
    text = re.sub(" *\r\n", "\n", text)
    text = re.sub(" *\r", "\n", text)
    width = int(width)
    max_tolerated_line_length = int(max_tolerated_line_length)
contains_long_lines = any(" " in l and len(l) > max_tolerated_line_length
for l in text.split("\n"))
if contains_long_lines:
text = wordwrap(text, width)
return text
def isascii(text):
try:
text.encode('ascii')
return True
except (UnicodeEncodeError, UnicodeDecodeError):
return False
def maybe_split(text, split=True, pos=5000):
if split:
n = text.find("\n", pos)
text = text[:n+1]
return text
def decode(raw):
assert isinstance(raw, six.binary_type)
try:
text = raw.decode('utf-8')
except UnicodeDecodeError:
text = raw.decode('latin-1')
#
return text
def text_to_dict(t):
# ensure we're handed a unicode parameter
assert isinstance(t, six.text_type)
d = {}
if not len(t.lstrip()) == len(t):
return {}
lines = t.splitlines()
items = []
for l in lines:
if len(l) and l[0].isspace():
if items:
items[-1] += l
else:
return {}
else:
items.append(l)
for i in items:
if re.match('^[A-Za-z0-9-]+: ', i):
k, v = i.split(': ', 1)
d[k] = v
else:
return {}
return d
def dict_to_text(d):
t = ""
for k, v in d.items():
t += "%s: %s\n" % (k, v)
return t
def texescape(s):
if not tex_escape_map:
texescape_init()
t = s.translate(tex_escape_map)
return t
def unwrap(s):
return s.replace('\n', ' ')
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: 1c44e8a6132356e9861a86f661b14f607195ba2c | size: 54,029 | ext: py | lang: Python
max_stars_repo: src/sage/calculus/desolvers.py | bopopescu/sage-5 @ 9d85b34956ca2edd55af307f99c5d3859acd30bf | licenses: ["BSL-1.0"] | stars: 5 | event datetimes: 2015-01-04T07:15:06.000Z / 2022-03-04T15:15:18.000Z
max_issues_repo: src/sage/calculus/desolvers.py | bopopescu/sage-5 @ 9d85b34956ca2edd55af307f99c5d3859acd30bf | licenses: ["BSL-1.0"] | issues: null | event datetimes: null / null
max_forks_repo: src/sage/calculus/desolvers.py | bopopescu/sage-5 @ 9d85b34956ca2edd55af307f99c5d3859acd30bf | licenses: ["BSL-1.0"] | forks: 10 | event datetimes: 2016-09-28T13:12:40.000Z / 2022-02-12T09:28:34.000Z
content:
r"""
Solving ordinary differential equations
This file contains functions useful for solving differential equations
which occur commonly in a 1st semester differential equations
course. For another numerical solver see :meth:`ode_solver` function
and optional package Octave.
Commands:
- ``desolve`` - Computes the "general solution" to a 1st or 2nd order
ODE via Maxima.
- ``desolve_laplace`` - Solves an ODE using laplace transforms via
Maxima. Initials conditions are optional.
- ``desolve_system`` - Solves any size system of 1st order odes using
Maxima. Initials conditions are optional.
- ``desolve_rk4`` - Solves numerically IVP for one first order
equation, returns list of points or plot
- ``desolve_system_rk4`` - Solves numerically IVP for system of first
order equations, returns list of points
- ``desolve_odeint`` - Solves numerically a system of first-order ordinary
differential equations using ``odeint`` from scipy.integrate module.
- ``eulers_method`` - Approximate solution to a 1st order DE,
presented as a table.
- ``eulers_method_2x2`` - Approximate solution to a 1st order system
of DEs, presented as a table.
- ``eulers_method_2x2_plot`` - Plots the sequence of points obtained
from Euler's method.
AUTHORS:
- David Joyner (3-2006) - Initial version of functions
- Marshall Hampton (7-2007) - Creation of Python module and testing
- Robert Bradshaw (10-2008) - Some interface cleanup.
- Robert Marik (10-2009) - Some bugfixes and enhancements
"""
##########################################################################
# Copyright (C) 2006 David Joyner <wdjoyner@gmail.com>, Marshall Hampton,
# Robert Marik <marik@mendelu.cz>
#
# Distributed under the terms of the GNU General Public License (GPL):
#
# http://www.gnu.org/licenses/
##########################################################################
from sage.interfaces.maxima import Maxima
from sage.plot.all import line
from sage.symbolic.expression import is_SymbolicEquation
from sage.symbolic.ring import is_SymbolicVariable
from sage.calculus.functional import diff
from sage.misc.decorators import rename_keyword
maxima = Maxima()
def desolve(de, dvar, ics=None, ivar=None, show_method=False, contrib_ode=False):
r"""
Solves a 1st or 2nd order linear ODE via maxima. Including IVP and BVP.
*Use* ``desolve? <tab>`` *if the output in truncated in notebook.*
INPUT:
- ``de`` - an expression or equation representing the ODE
- ``dvar`` - the dependent variable (hereafter called ``y``)
- ``ics`` - (optional) the initial or boundary conditions
- for a first-order equation, specify the initial ``x`` and ``y``
- for a second-order equation, specify the initial ``x``, ``y``,
and ``dy/dx``, i.e. write `[x_0, y(x_0), y'(x_0)]`
- for a second-order boundary solution, specify initial and
final ``x`` and ``y`` boundary conditions, i.e. write `[x_0, y(x_0), x_1, y(x_1)]`.
- gives an error if the solution is not SymbolicEquation (as happens for
example for Clairaut equation)
- ``ivar`` - (optional) the independent variable (hereafter called
x), which must be specified if there is more than one
independent variable in the equation.
- ``show_method`` - (optional) if true, then Sage returns pair
``[solution, method]``, where method is the string describing
method which has been used to get solution (Maxima uses the
following order for first order equations: linear, separable,
exact (including exact with integrating factor), homogeneous,
bernoulli, generalized homogeneous) - use carefully in class,
see below for the example of the equation which is separable but
this property is not recognized by Maxima and equation is solved
as exact.
- ``contrib_ode`` - (optional) if true, desolve allows to solve
clairaut, lagrange, riccati and some other equations. May take
a long time and thus turned off by default. Initial conditions
can be used only if the result is one SymbolicEquation (does not
contain singular solution, for example)
OUTPUT:
In most cases returns SymbolicEquation which defines the solution
implicitly. If the result is in the form y(x)=... (happens for
linear eqs.), returns the right-hand side only. The possible
constant solutions of separable ODE's are omitted.
EXAMPLES::
sage: x = var('x')
sage: y = function('y', x)
sage: desolve(diff(y,x) + y - 1, y)
(c + e^x)*e^(-x)
::
sage: f = desolve(diff(y,x) + y - 1, y, ics=[10,2]); f
(e^10 + e^x)*e^(-x)
::
sage: plot(f)
We can also solve second-order differential equations.::
sage: x = var('x')
sage: y = function('y', x)
sage: de = diff(y,x,2) - y == x
sage: desolve(de, y)
k2*e^(-x) + k1*e^x - x
::
sage: f = desolve(de, y, [10,2,1]); f
-x + 7*e^(x - 10) + 5*e^(-x + 10)
::
sage: f(x=10)
2
::
sage: diff(f,x)(x=10)
1
::
sage: de = diff(y,x,2) + y == 0
sage: desolve(de, y)
k2*cos(x) + k1*sin(x)
::
sage: desolve(de, y, [0,1,pi/2,4])
cos(x) + 4*sin(x)
::
sage: desolve(y*diff(y,x)+sin(x)==0,y)
-1/2*y(x)^2 == c - cos(x)
Clairot equation: general and singular solutions::
sage: desolve(diff(y,x)^2+x*diff(y,x)-y==0,y,contrib_ode=True,show_method=True)
[[y(x) == c^2 + c*x, y(x) == -1/4*x^2], 'clairault']
For equations involving more variables we specify independent variable::
sage: a,b,c,n=var('a b c n')
sage: desolve(x^2*diff(y,x)==a+b*x^n+c*x^2*y^2,y,ivar=x,contrib_ode=True)
[[y(x) == 0, (b*x^(n - 2) + a/x^2)*c^2*u == 0]]
::
sage: desolve(x^2*diff(y,x)==a+b*x^n+c*x^2*y^2,y,ivar=x,contrib_ode=True,show_method=True)
[[[y(x) == 0, (b*x^(n - 2) + a/x^2)*c^2*u == 0]], 'riccati']
Higher orded, not involving independent variable::
sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y).expand()
1/6*y(x)^3 + k1*y(x) == k2 + x
::
sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,1,3]).expand()
1/6*y(x)^3 - 5/3*y(x) == x - 3/2
::
sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,1,3],show_method=True)
[1/6*y(x)^3 - 5/3*y(x) == x - 3/2, 'freeofx']
Separable equations - Sage returns solution in implicit form::
sage: desolve(diff(y,x)*sin(y) == cos(x),y)
-cos(y(x)) == c + sin(x)
::
sage: desolve(diff(y,x)*sin(y) == cos(x),y,show_method=True)
[-cos(y(x)) == c + sin(x), 'separable']
::
sage: desolve(diff(y,x)*sin(y) == cos(x),y,[pi/2,1])
-cos(y(x)) == -cos(1) + sin(x) - 1
Linear equation - Sage returns the expression on the right hand side only::
sage: desolve(diff(y,x)+(y) == cos(x),y)
1/2*((cos(x) + sin(x))*e^x + 2*c)*e^(-x)
::
sage: desolve(diff(y,x)+(y) == cos(x),y,show_method=True)
[1/2*((cos(x) + sin(x))*e^x + 2*c)*e^(-x), 'linear']
::
sage: desolve(diff(y,x)+(y) == cos(x),y,[0,1])
1/2*(cos(x)*e^x + e^x*sin(x) + 1)*e^(-x)
This ODE with separated variables is solved as
exact. Explanation - factor does not split `e^{x-y}` in Maxima
into `e^{x}e^{y}`::
sage: desolve(diff(y,x)==exp(x-y),y,show_method=True)
[-e^x + e^y(x) == c, 'exact']
You can solve Bessel equations. You can also use initial
conditions, but you cannot put (sometimes desired) initial
condition at x=0, since this point is singlar point of the
equation. Anyway, if the solution should be bounded at x=0, then
k2=0.::
sage: desolve(x^2*diff(y,x,x)+x*diff(y,x)+(x^2-4)*y==0,y)
k1*bessel_J(2, x) + k2*bessel_Y(2, x)
Difficult ODE produces error::
sage: desolve(sqrt(y)*diff(y,x)+e^(y)+cos(x)-sin(x+y)==0,y) # not tested
Traceback (click to the left for traceback)
...
NotImplementedError, "Maxima was unable to solve this ODE. Consider to set option contrib_ode to True."
Difficult ODE produces error - moreover, takes a long time ::
sage: desolve(sqrt(y)*diff(y,x)+e^(y)+cos(x)-sin(x+y)==0,y,contrib_ode=True) # not tested
Some more types od ODE's::
sage: desolve(x*diff(y,x)^2-(1+x*y)*diff(y,x)+y==0,y,contrib_ode=True,show_method=True)
[[y(x) == c + log(x), y(x) == c*e^x], 'factor']
::
sage: desolve(diff(y,x)==(x+y)^2,y,contrib_ode=True,show_method=True)
[[[x == c - arctan(sqrt(t)), y(x) == -x - sqrt(t)], [x == c + arctan(sqrt(t)), y(x) == -x + sqrt(t)]], 'lagrange']
These two examples produce error (as expected, Maxima 5.18 cannot
solve equations from initial conditions). Current Maxima 5.18
returns false answer in this case!::
sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,2]).expand() # not tested
Traceback (click to the left for traceback)
...
NotImplementedError, "Maxima was unable to solve this ODE. Consider to set option contrib_ode to True."
::
sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,2],show_method=True) # not tested
Traceback (click to the left for traceback)
...
NotImplementedError, "Maxima was unable to solve this ODE. Consider to set option contrib_ode to True."
Second order linear ODE::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y)
(k2*x + k1)*e^(-x) + 1/2*sin(x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,show_method=True)
[(k2*x + k1)*e^(-x) + 1/2*sin(x), 'variationofparameters']
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,1])
1/2*(7*x + 6)*e^(-x) + 1/2*sin(x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,1],show_method=True)
[1/2*(7*x + 6)*e^(-x) + 1/2*sin(x), 'variationofparameters']
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,pi/2,2])
3*(x*(e^(1/2*pi) - 2)/pi + 1)*e^(-x) + 1/2*sin(x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,pi/2,2],show_method=True)
[3*(x*(e^(1/2*pi) - 2)/pi + 1)*e^(-x) + 1/2*sin(x), 'variationofparameters']
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y)
(k2*x + k1)*e^(-x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,show_method=True)
[(k2*x + k1)*e^(-x), 'constcoeff']
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,1])
(4*x + 3)*e^(-x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,1],show_method=True)
[(4*x + 3)*e^(-x), 'constcoeff']
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,pi/2,2])
(2*x*(2*e^(1/2*pi) - 3)/pi + 3)*e^(-x)
::
sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,pi/2,2],show_method=True)
[(2*x*(2*e^(1/2*pi) - 3)/pi + 3)*e^(-x), 'constcoeff']
TESTS:
Trac #9961 fixed (allow assumptions on the dependent variable in desolve)::
sage: y=function('y',x); assume(x>0); assume(y>0)
sage: sage.calculus.calculus.maxima('domain:real') # needed since Maxima 5.26.0 to get the answer as below
real
sage: desolve(x*diff(y,x)-x*sqrt(y^2+x^2)-y == 0, y, contrib_ode=True)
[x - arcsinh(y(x)/x) == c]
Trac #10682 updated Maxima to 5.26, and it started to show a different
solution in the complex domain for the ODE above::
sage: sage.calculus.calculus.maxima('domain:complex') # back to the default complex domain
complex
sage: desolve(x*diff(y,x)-x*sqrt(y^2+x^2)-y == 0, y, contrib_ode=True)
[1/2*(2*x^2*sqrt(x^(-2)) - 2*x*sqrt(x^(-2))*arcsinh(y(x)/sqrt(x^2)) -
2*x*sqrt(x^(-2))*arcsinh(y(x)^2/(x*sqrt(y(x)^2))) +
log(4*(2*x^2*sqrt((x^2*y(x)^2 + y(x)^4)/x^2)*sqrt(x^(-2)) + x^2 +
2*y(x)^2)/x^2))/(x*sqrt(x^(-2))) == c]
Trac #6479 fixed::
sage: x = var('x')
sage: y = function('y', x)
sage: desolve( diff(y,x,x) == 0, y, [0,0,1])
x
::
sage: desolve( diff(y,x,x) == 0, y, [0,1,1])
x + 1
Trac #9835 fixed::
sage: x = var('x')
sage: y = function('y', x)
sage: desolve(diff(y,x,2)+y*(1-y^2)==0,y,[0,-1,1,1])
Traceback (most recent call last):
...
NotImplementedError: Unable to use initial condition for this equation (freeofx).
Trac #8931 fixed::
sage: x=var('x'); f=function('f',x); k=var('k'); assume(k>0)
sage: desolve(diff(f,x,2)/f==k,f,ivar=x)
k1*e^(sqrt(k)*x) + k2*e^(-sqrt(k)*x)
AUTHORS:
- David Joyner (1-2006)
- Robert Bradshaw (10-2008)
- Robert Marik (10-2009)
"""
if is_SymbolicEquation(de):
de = de.lhs() - de.rhs()
if is_SymbolicVariable(dvar):
raise ValueError("You have to declare dependent variable as a function, eg. y=function('y',x)")
# for backwards compatibility
if isinstance(dvar, list):
dvar, ivar = dvar
elif ivar is None:
ivars = de.variables()
ivars = [t for t in ivars if t is not dvar]
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = ivars[0]
def sanitize_var(exprs):
return exprs.replace("'"+dvar_str+"("+ivar_str+")",dvar_str)
de00 = de._maxima_()
P = de00.parent()
dvar_str=P(dvar.operator()).str()
ivar_str=P(ivar).str()
de00 = de00.str()
de0 = sanitize_var(de00)
ode_solver="ode2"
cmd="(TEMP:%s(%s,%s,%s), if TEMP=false then TEMP else substitute(%s=%s(%s),TEMP))"%(ode_solver,de0,dvar_str,ivar_str,dvar_str,dvar_str,ivar_str)
# we produce string like this
# ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y(x),x)
soln = P(cmd)
if str(soln).strip() == 'false':
if contrib_ode:
ode_solver="contrib_ode"
P("load('contrib_ode)")
cmd="(TEMP:%s(%s,%s,%s), if TEMP=false then TEMP else substitute(%s=%s(%s),TEMP))"%(ode_solver,de0,dvar_str,ivar_str,dvar_str,dvar_str,ivar_str)
# we produce string like this
# (TEMP:contrib_ode(x*('diff(y,x,1))^2-(x*y+1)*'diff(y,x,1)+y,y,x), if TEMP=false then TEMP else substitute(y=y(x),TEMP))
soln = P(cmd)
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this ODE.")
else:
raise NotImplementedError("Maxima was unable to solve this ODE. Consider to set option contrib_ode to True.")
if show_method:
maxima_method=P("method")
if (ics is not None):
if not is_SymbolicEquation(soln.sage()):
if not show_method:
maxima_method=P("method")
raise NotImplementedError("Unable to use initial condition for this equation (%s)."%(str(maxima_method).strip()))
if len(ics) == 2:
tempic=(ivar==ics[0])._maxima_().str()
tempic=tempic+","+(dvar==ics[1])._maxima_().str()
cmd="(TEMP:ic1(%s(%s,%s,%s),%s),substitute(%s=%s(%s),TEMP))"%(ode_solver,de00,dvar_str,ivar_str,tempic,dvar_str,dvar_str,ivar_str)
cmd=sanitize_var(cmd)
# we produce string like this
# (TEMP:ic2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,'diff(y,x)=1),substitute(y=y(x),TEMP))
soln=P(cmd)
if len(ics) == 3:
#fixed ic2 command from Maxima - we have to ensure that %k1, %k2 do not depend on variables, should be removed when fixed in Maxima
P("ic2_sage(soln,xa,ya,dya):=block([programmode:true,backsubst:true,singsolve:true,temp,%k2,%k1,TEMP_k], \
noteqn(xa), noteqn(ya), noteqn(dya), boundtest('%k1,%k1), boundtest('%k2,%k2), \
temp: lhs(soln) - rhs(soln), \
TEMP_k:solve([subst([xa,ya],soln), subst([dya,xa], lhs(dya)=-subst(0,lhs(dya),diff(temp,lhs(xa)))/diff(temp,lhs(ya)))],[%k1,%k2]), \
if not freeof(lhs(ya),TEMP_k) or not freeof(lhs(xa),TEMP_k) then return (false), \
temp: maplist(lambda([zz], subst(zz,soln)), TEMP_k), \
if length(temp)=1 then return(first(temp)) else return(temp))")
tempic=P(ivar==ics[0]).str()
tempic=tempic+","+P(dvar==ics[1]).str()
tempic=tempic+",'diff("+dvar_str+","+ivar_str+")="+P(ics[2]).str()
cmd="(TEMP:ic2_sage(%s(%s,%s,%s),%s),substitute(%s=%s(%s),TEMP))"%(ode_solver,de00,dvar_str,ivar_str,tempic,dvar_str,dvar_str,ivar_str)
cmd=sanitize_var(cmd)
# we produce string like this
# (TEMP:ic2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,'diff(y,x)=1),substitute(y=y(x),TEMP))
soln=P(cmd)
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this IVP. Remove the initial condition to get the general solution.")
if len(ics) == 4:
#fixed bc2 command from Maxima - we have to ensure that %k1, %k2 do not depend on variables, should be removed when fixed in Maxima
P("bc2_sage(soln,xa,ya,xb,yb):=block([programmode:true,backsubst:true,singsolve:true,temp,%k1,%k2,TEMP_k], \
noteqn(xa), noteqn(ya), noteqn(xb), noteqn(yb), boundtest('%k1,%k1), boundtest('%k2,%k2), \
TEMP_k:solve([subst([xa,ya],soln), subst([xb,yb],soln)], [%k1,%k2]), \
if not freeof(lhs(ya),TEMP_k) or not freeof(lhs(xa),TEMP_k) then return (false), \
temp: maplist(lambda([zz], subst(zz,soln)),TEMP_k), \
if length(temp)=1 then return(first(temp)) else return(temp))")
cmd="bc2_sage(%s(%s,%s,%s),%s,%s=%s,%s,%s=%s)"%(ode_solver,de00,dvar_str,ivar_str,P(ivar==ics[0]).str(),dvar_str,P(ics[1]).str(),P(ivar==ics[2]).str(),dvar_str,P(ics[3]).str())
cmd="(TEMP:%s,substitute(%s=%s(%s),TEMP))"%(cmd,dvar_str,dvar_str,ivar_str)
cmd=sanitize_var(cmd)
# we produce string like this
# (TEMP:bc2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,x=%pi/2,y=2),substitute(y=y(x),TEMP))
soln=P(cmd)
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this BVP. Remove the initial condition to get the general solution.")
soln=soln.sage()
if is_SymbolicEquation(soln) and soln.lhs() == dvar:
# Remark: Here we do not check that the right hand side does not depend on dvar.
# This probably will not hapen for soutions obtained via ode2, anyway.
soln = soln.rhs()
if show_method:
return [soln,maxima_method.str()]
else:
return soln
#def desolve_laplace2(de,vars,ics=None):
## """
## Solves an ODE using laplace transforms via maxima. Initial conditions
## are optional.
## INPUT:
## de -- a lambda expression representing the ODE
## (eg, de = "diff(f(x),x,2)=diff(f(x),x)+sin(x)")
## vars -- a list of strings representing the variables
## (eg, vars = ["x","f"], if x is the independent
## variable and f is the dependent variable)
## ics -- a list of numbers representing initial conditions,
## with symbols allowed which are represented by strings
## (eg, f(0)=1, f'(0)=2 is ics = [0,1,2])
## EXAMPLES:
## sage: from sage.calculus.desolvers import desolve_laplace
## sage: x = var('x')
## sage: f = function('f', x)
## sage: de = lambda y: diff(y,x,x) - 2*diff(y,x) + y
## sage: desolve_laplace(de(f(x)),[f,x])
## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x
## sage: desolve_laplace(de(f(x)),[f,x],[0,1,2]) ## IC option does not work
## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x
## AUTHOR: David Joyner (1st version 1-2006, 8-2007)
## """
# ######## this method seems reasonable but doesn't work for some reason
# name0 = vars[0]._repr_()[0:(len(vars[0]._repr_())-2-len(str(vars[1])))]
# name1 = str(vars[1])
# #maxima("de:"+de+";")
# if ics!=None:
# ic0 = maxima("ic:"+str(vars[1])+"="+str(ics[0]))
# d = len(ics)
# for i in range(d-1):
# maxima(vars[0](vars[1])).diff(vars[1],i).atvalue(ic0,ics[i+1])
# de0 = de._maxima_()
# #cmd = "desolve("+de+","+vars[1]+"("+vars[0]+"));"
# #return maxima.eval(cmd)
# return de0.desolve(vars[0]).rhs()
def desolve_laplace(de, dvar, ics=None, ivar=None):
"""
Solves an ODE using laplace transforms. Initials conditions are optional.
INPUT:
- ``de`` - a lambda expression representing the ODE (eg, de =
diff(y,x,2) == diff(y,x)+sin(x))
- ``dvar`` - the dependent variable (eg y)
- ``ivar`` - (optional) the independent variable (hereafter called
x), which must be specified if there is more than one
independent variable in the equation.
- ``ics`` - a list of numbers representing initial conditions, (eg,
f(0)=1, f'(0)=2 is ics = [0,1,2])
OUTPUT:
Solution of the ODE as symbolic expression
EXAMPLES::
sage: u=function('u',x)
sage: eq = diff(u,x) - exp(-x) - u == 0
sage: desolve_laplace(eq,u)
1/2*(2*u(0) + 1)*e^x - 1/2*e^(-x)
We can use initial conditions::
sage: desolve_laplace(eq,u,ics=[0,3])
-1/2*e^(-x) + 7/2*e^x
The initial conditions do not persist in the system (as they persisted
in previous versions)::
sage: desolve_laplace(eq,u)
1/2*(2*u(0) + 1)*e^x - 1/2*e^(-x)
::
sage: f=function('f', x)
sage: eq = diff(f,x) + f == 0
sage: desolve_laplace(eq,f,[0,1])
e^(-x)
::
sage: x = var('x')
sage: f = function('f', x)
sage: de = diff(f,x,x) - 2*diff(f,x) + f
sage: desolve_laplace(de,f)
-x*e^x*f(0) + x*e^x*D[0](f)(0) + e^x*f(0)
::
sage: desolve_laplace(de,f,ics=[0,1,2])
x*e^x + e^x
TESTS:
Trac #4839 fixed::
sage: t=var('t')
sage: x=function('x', t)
sage: soln=desolve_laplace(diff(x,t)+x==1, x, ics=[0,2])
sage: soln
e^(-t) + 1
::
sage: soln(t=3)
e^(-3) + 1
AUTHORS:
- David Joyner (1-2006,8-2007)
- Robert Marik (10-2009)
"""
#This is the original code from David Joyner (inputs and outputs strings)
#maxima("de:"+de._repr_()+"=0;")
#if ics!=None:
# d = len(ics)
# for i in range(0,d-1):
# ic = "atvalue(diff("+vars[1]+"("+vars[0]+"),"+str(vars[0])+","+str(i)+"),"+str(vars[0])+"="+str(ics[0])+","+str(ics[1+i])+")"
# maxima(ic)
#
#cmd = "desolve("+de._repr_()+","+vars[1]+"("+vars[0]+"));"
#return maxima(cmd).rhs()._maxima_init_()
## verbatim copy from desolve - begin
if is_SymbolicEquation(de):
de = de.lhs() - de.rhs()
if is_SymbolicVariable(dvar):
raise ValueError("You have to declare dependent variable as a function, eg. y=function('y',x)")
# for backwards compatibility
if isinstance(dvar, list):
dvar, ivar = dvar
elif ivar is None:
ivars = de.variables()
ivars = [t for t in ivars if t != dvar]
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = ivars[0]
## verbatim copy from desolve - end
def sanitize_var(exprs): # 'y(x) -> y(x)
return exprs.replace("'"+str(dvar),str(dvar))
de0=de._maxima_()
P = de0.parent()
cmd = sanitize_var("desolve("+de0.str()+","+str(dvar)+")")
soln=P(cmd).rhs()
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this ODE.")
soln=soln.sage()
if ics!=None:
d = len(ics)
for i in range(0,d-1):
soln=eval('soln.substitute(diff(dvar,ivar,i)('+str(ivar)+'=ics[0])==ics[i+1])')
return soln
def desolve_system(des, vars, ics=None, ivar=None):
"""
Solves any size system of 1st order ODE's. Initials conditions are optional.
Onedimensional systems are passed to :meth:`desolve_laplace`.
INPUT:
- ``des`` - list of ODEs
- ``vars`` - list of dependent variables
- ``ics`` - (optional) list of initial values for ivar and vars
- ``ivar`` - (optional) the independent variable, which must be
specified if there is more than one independent variable in the
equation.
EXAMPLES::
sage: t = var('t')
sage: x = function('x', t)
sage: y = function('y', t)
sage: de1 = diff(x,t) + y - 1 == 0
sage: de2 = diff(y,t) - x + 1 == 0
sage: desolve_system([de1, de2], [x,y])
[x(t) == (x(0) - 1)*cos(t) - (y(0) - 1)*sin(t) + 1,
y(t) == (y(0) - 1)*cos(t) + (x(0) - 1)*sin(t) + 1]
Now we give some initial conditions::
sage: sol = desolve_system([de1, de2], [x,y], ics=[0,1,2]); sol
[x(t) == -sin(t) + 1, y(t) == cos(t) + 1]
::
sage: solnx, solny = sol[0].rhs(), sol[1].rhs()
sage: plot([solnx,solny],(0,1)) # not tested
sage: parametric_plot((solnx,solny),(0,1)) # not tested
TESTS:
Trac #9823 fixed::
sage: t = var('t')
sage: x = function('x', t)
sage: de1 = diff(x,t) + 1 == 0
sage: desolve_system([de1], [x])
-t + x(0)
AUTHORS:
- Robert Bradshaw (10-2008)
"""
if len(des)==1:
return desolve_laplace(des[0], vars[0], ics=ics, ivar=ivar)
ivars = set([])
for i, de in enumerate(des):
if not is_SymbolicEquation(de):
des[i] = de == 0
ivars = ivars.union(set(de.variables()))
if ivar is None:
ivars = ivars - set(vars)
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = list(ivars)[0]
dvars = [v._maxima_() for v in vars]
if ics is not None:
ivar_ic = ics[0]
for dvar, ic in zip(dvars, ics[1:]):
dvar.atvalue(ivar==ivar_ic, ic)
soln = dvars[0].parent().desolve(des, dvars)
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this system.")
soln = list(soln)
for i, sol in enumerate(soln):
soln[i] = sol.sage()
if ics is not None:
ivar_ic = ics[0]
for dvar, ic in zip(dvars, ics[:1]):
dvar.atvalue(ivar==ivar_ic, dvar)
return soln
def desolve_system_strings(des,vars,ics=None):
r"""
Solves any size system of 1st order ODE's. Initials conditions are optional.
This function is obsolete, use desolve_system.
INPUT:
- ``de`` - a list of strings representing the ODEs in maxima
notation (eg, de = "diff(f(x),x,2)=diff(f(x),x)+sin(x)")
- ``vars`` - a list of strings representing the variables (eg,
vars = ["s","x","y"], where s is the independent variable and
x,y the dependent variables)
- ``ics`` - a list of numbers representing initial conditions
(eg, x(0)=1, y(0)=2 is ics = [0,1,2])
WARNING:
The given ics sets the initial values of the dependent vars in
maxima, so subsequent ODEs involving these variables will have
these initial conditions automatically imposed.
EXAMPLES::
sage: from sage.calculus.desolvers import desolve_system_strings
sage: s = var('s')
sage: function('x', s)
x(s)
::
sage: function('y', s)
y(s)
::
sage: de1 = lambda z: diff(z[0],s) + z[1] - 1
sage: de2 = lambda z: diff(z[1],s) - z[0] + 1
sage: des = [de1([x(s),y(s)]),de2([x(s),y(s)])]
sage: vars = ["s","x","y"]
sage: desolve_system_strings(des,vars)
["(1-'y(0))*sin(s)+('x(0)-1)*cos(s)+1", "('x(0)-1)*sin(s)+('y(0)-1)*cos(s)+1"]
::
sage: ics = [0,1,-1]
sage: soln = desolve_system_strings(des,vars,ics); soln
['2*sin(s)+1', '1-2*cos(s)']
::
sage: solnx, solny = map(SR, soln)
sage: RR(solnx(s=3))
1.28224001611973
::
sage: P1 = plot([solnx,solny],(0,1))
sage: P2 = parametric_plot((solnx,solny),(0,1))
Now type show(P1), show(P2) to view these.
AUTHORS:
- David Joyner (3-2006, 8-2007)
"""
d = len(des)
dess = [de._maxima_init_() + "=0" for de in des]
for i in range(d):
cmd="de:" + dess[int(i)] + ";"
maxima.eval(cmd)
desstr = "[" + ",".join(dess) + "]"
d = len(vars)
varss = list("'" + vars[i] + "(" + vars[0] + ")" for i in range(1,d))
varstr = "[" + ",".join(varss) + "]"
if ics is not None:
#d = len(ics) ## must be same as len(des)
for i in range(1,d):
ic = "atvalue('" + vars[i] + "("+vars[0] + ")," + str(vars[0]) + "=" + str(ics[0]) + "," + str(ics[i]) + ")"
maxima.eval(ic)
cmd = "desolve(" + desstr + "," + varstr + ");"
soln = maxima(cmd)
return [f.rhs()._maxima_init_() for f in soln]
@rename_keyword(deprecation=6094, method="algorithm")
def eulers_method(f,x0,y0,h,x1,algorithm="table"):
r"""
This implements Euler's method for finding numerically the
solution of the 1st order ODE ``y' = f(x,y)``, ``y(a)=c``. The "x"
column of the table increments from ``x0`` to ``x1`` by ``h`` (so
``(x1-x0)/h`` must be an integer). In the "y" column, the new
y-value equals the old y-value plus the corresponding entry in the
last column.
*For pedagogical purposes only.*
EXAMPLES::
sage: from sage.calculus.desolvers import eulers_method
sage: x,y = PolynomialRing(QQ,2,"xy").gens()
sage: eulers_method(5*x+y-5,0,1,1/2,1)
x y h*f(x,y)
0 1 -2
1/2 -1 -7/4
1 -11/4 -11/8
::
sage: x,y = PolynomialRing(QQ,2,"xy").gens()
sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm="none")
[[0, 1], [1/2, -1], [1, -11/4], [3/2, -33/8]]
::
sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')
sage: x,y = PolynomialRing(RR,2,"xy").gens()
sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm="None")
[[0, 1], [1/2, -1.0], [1, -2.7], [3/2, -4.0]]
::
sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')
sage: x,y=PolynomialRing(RR,2,"xy").gens()
sage: eulers_method(5*x+y-5,0,1,1/2,1)
x y h*f(x,y)
0 1 -2.0
1/2 -1.0 -1.7
1 -2.7 -1.3
::
sage: x,y=PolynomialRing(QQ,2,"xy").gens()
sage: eulers_method(5*x+y-5,1,1,1/3,2)
x y h*f(x,y)
1 1 1/3
4/3 4/3 1
5/3 7/3 17/9
2 38/9 83/27
::
sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm="none")
[[0, 1], [1/2, -1], [1, -11/4], [3/2, -33/8]]
::
sage: pts = eulers_method(5*x+y-5,0,1,1/2,1,algorithm="none")
sage: P1 = list_plot(pts)
sage: P2 = line(pts)
sage: (P1+P2).show()
AUTHORS:
- David Joyner
"""
if algorithm=="table":
print("%10s %20s %25s"%("x","y","h*f(x,y)"))
n=int((1.0)*(x1-x0)/h)
x00=x0; y00=y0
soln = [[x00,y00]]
for i in range(n+1):
if algorithm=="table":
print("%10r %20r %20r"%(x00,y00,h*f(x00,y00)))
y00 = y00+h*f(x00,y00)
x00=x00+h
soln.append([x00,y00])
if algorithm!="table":
return soln
@rename_keyword(deprecation=6094, method="algorithm")
def eulers_method_2x2(f,g, t0, x0, y0, h, t1,algorithm="table"):
r"""
This implements Euler's method for finding numerically the
solution of the 1st order system of two ODEs
``x' = f(t, x, y), x(t0)=x0.``
``y' = g(t, x, y), y(t0)=y0.``
The "t" column of the table increments from `t_0` to `t_1` by `h`
(so `\\frac{t_1-t_0}{h}` must be an integer). In the "x" column,
the new x-value equals the old x-value plus the corresponding
entry in the next (third) column. In the "y" column, the new
y-value equals the old y-value plus the corresponding entry in the
next (last) column.
*For pedagogical purposes only.*
EXAMPLES::
sage: from sage.calculus.desolvers import eulers_method_2x2
sage: t, x, y = PolynomialRing(QQ,3,"txy").gens()
sage: f = x+y+t; g = x-y
sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1,algorithm="none")
[[0, 0, 0], [1/3, 0, 0], [2/3, 1/9, 0], [1, 10/27, 1/27], [4/3, 68/81, 4/27]]
::
sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1)
t x h*f(t,x,y) y h*g(t,x,y)
0 0 0 0 0
1/3 0 1/9 0 0
2/3 1/9 7/27 0 1/27
1 10/27 38/81 1/27 1/9
::
sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')
sage: t,x,y=PolynomialRing(RR,3,"txy").gens()
sage: f = x+y+t; g = x-y
sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1)
t x h*f(t,x,y) y h*g(t,x,y)
0 0 0.00 0 0.00
1/3 0.00 0.13 0.00 0.00
2/3 0.13 0.29 0.00 0.043
1 0.41 0.57 0.043 0.15
To numerically approximate `y(1)`, where `(1+t^2)y''+y'-y=0`,
`y(0)=1`, `y'(0)=-1`, using 4 steps of Euler's method, first
convert to a system: `y_1' = y_2`, `y_1(0)=1`; `y_2' =
\\frac{y_1-y_2}{1+t^2}`, `y_2(0)=-1`.::
sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')
sage: t, x, y=PolynomialRing(RR,3,"txy").gens()
sage: f = y; g = (x-y)/(1+t^2)
sage: eulers_method_2x2(f,g, 0, 1, -1, 1/4, 1)
t x h*f(t,x,y) y h*g(t,x,y)
0 1 -0.25 -1 0.50
1/4 0.75 -0.12 -0.50 0.29
1/2 0.63 -0.054 -0.21 0.19
3/4 0.63 -0.0078 -0.031 0.11
1 0.63 0.020 0.079 0.071
To numerically approximate y(1), where `y''+ty'+y=0`, `y(0)=1`, `y'(0)=0`::
sage: t,x,y=PolynomialRing(RR,3,"txy").gens()
sage: f = y; g = -x-y*t
sage: eulers_method_2x2(f,g, 0, 1, 0, 1/4, 1)
t x h*f(t,x,y) y h*g(t,x,y)
0 1 0.00 0 -0.25
1/4 1.0 -0.062 -0.25 -0.23
1/2 0.94 -0.11 -0.46 -0.17
3/4 0.88 -0.15 -0.62 -0.10
1 0.75 -0.17 -0.68 -0.015
AUTHORS:
- David Joyner
"""
if algorithm=="table":
print("%10s %20s %25s %20s %20s"%("t", "x","h*f(t,x,y)","y", "h*g(t,x,y)"))
n=int((1.0)*(t1-t0)/h)
t00 = t0; x00 = x0; y00 = y0
soln = [[t00,x00,y00]]
for i in range(n+1):
if algorithm=="table":
print("%10r %20r %25r %20r %20r"%(t00,x00,h*f(t00,x00,y00),y00,h*g(t00,x00,y00)))
x01 = x00 + h*f(t00,x00,y00)
y00 = y00 + h*g(t00,x00,y00)
x00 = x01
t00 = t00 + h
soln.append([t00,x00,y00])
if algorithm!="table":
return soln
def eulers_method_2x2_plot(f,g, t0, x0, y0, h, t1):
r"""
Plots solution of ODE
This plots the soln in the rectangle ``(xrange[0],xrange[1])
x (yrange[0],yrange[1])`` and plots using Euler's method the
numerical solution of the 1st order ODEs `x' = f(t,x,y)`,
`x(a)=x_0`, `y' = g(t,x,y)`, `y(a) = y_0`.
*For pedagogical purposes only.*
EXAMPLES::
sage: from sage.calculus.desolvers import eulers_method_2x2_plot
The following example plots the solution to
`\theta''+\sin(\theta)=0`, `\theta(0)=\frac 34`, `\theta'(0) =
0`. Type ``P[0].show()`` to plot the solution,
``(P[0]+P[1]).show()`` to plot `(t,\theta(t))` and
`(t,\theta'(t))`::
sage: f = lambda z : z[2]; g = lambda z : -sin(z[1])
sage: P = eulers_method_2x2_plot(f,g, 0.0, 0.75, 0.0, 0.1, 1.0)
"""
n=int((1.0)*(t1-t0)/h)
t00 = t0; x00 = x0; y00 = y0
soln = [[t00,x00,y00]]
for i in range(n+1):
x01 = x00 + h*f([t00,x00,y00])
y00 = y00 + h*g([t00,x00,y00])
x00 = x01
t00 = t00 + h
soln.append([t00,x00,y00])
Q1 = line([[x[0],x[1]] for x in soln], rgbcolor=(1/4,1/8,3/4))
Q2 = line([[x[0],x[2]] for x in soln], rgbcolor=(1/2,1/8,1/4))
return [Q1,Q2]
def desolve_rk4_determine_bounds(ics,end_points=None):
"""
Used to determine bounds for numerical integration.
- If end_points is None, the interval for integration is from ics[0]
to ics[0]+10
- If end_points is a or [a], the interval for integration is from min(ics[0],a)
to max(ics[0],a)
- If end_points is [a,b], the interval for integration is from min(ics[0],a)
to max(ics[0],b)
EXAMPLES::
sage: from sage.calculus.desolvers import desolve_rk4_determine_bounds
sage: desolve_rk4_determine_bounds([0,2],1)
(0, 1)
::
sage: desolve_rk4_determine_bounds([0,2])
(0, 10)
::
sage: desolve_rk4_determine_bounds([0,2],[-2])
(-2, 0)
::
sage: desolve_rk4_determine_bounds([0,2],[-2,4])
(-2, 4)
"""
if end_points is None:
return((ics[0],ics[0]+10))
if not isinstance(end_points,list):
end_points=[end_points]
if len(end_points)==1:
return (min(ics[0],end_points[0]),max(ics[0],end_points[0]))
else:
return (min(ics[0],end_points[0]),max(ics[0],end_points[1]))
def desolve_rk4(de, dvar, ics=None, ivar=None, end_points=None, step=0.1, output='list', **kwds):
"""
Solves numerically one first-order ordinary differential
equation. See also ``ode_solver``.
INPUT:
input is similar to ``desolve`` command. The differential equation can be
written in a form close to the plot_slope_field or desolve command
- Variant 1 (function in two variables)
- ``de`` - right hand side, i.e. the function `f(x,y)` from ODE `y'=f(x,y)`
- ``dvar`` - dependent variable (symbolic variable declared by var)
- Variant 2 (symbolic equation)
- ``de`` - equation, including term with ``diff(y,x)``
      - ``dvar`` - dependent variable (declared as function of independent variable)
- Other parameters
- ``ivar`` - should be specified, if there are more variables or if the equation is autonomous
- ``ics`` - initial conditions in the form [x0,y0]
- ``end_points`` - the end points of the interval
- if end_points is a or [a], we integrate on between min(ics[0],a) and max(ics[0],a)
- if end_points is None, we use end_points=ics[0]+10
- if end_points is [a,b] we integrate on between min(ics[0],a) and max(ics[0],b)
- ``step`` - (optional, default:0.1) the length of the step (positive number)
- ``output`` - (optional, default: 'list') one of 'list',
'plot', 'slope_field' (graph of the solution with slope field)
OUTPUT:
Returns a list of points, or plot produced by list_plot,
optionally with slope field.
EXAMPLES::
sage: from sage.calculus.desolvers import desolve_rk4
Variant 2 for input - more common in numerics::
sage: x,y=var('x y')
sage: desolve_rk4(x*y*(2-y),y,ics=[0,1],end_points=1,step=0.5)
[[0, 1], [0.5, 1.12419127425], [1.0, 1.46159016229]]
Variant 1 for input - we can pass ODE in the form used by
    the desolve function. In this example we integrate backwards, since
``end_points < ics[0]``::
sage: y=function('y',x)
sage: desolve_rk4(diff(y,x)+y*(y-1) == x-2,y,ics=[1,1],step=0.5, end_points=0)
[[0.0, 8.90425710896], [0.5, 1.90932794536], [1, 1]]
Here we show how to plot simple pictures. For more advanced
    applications, use list_plot instead. To see the resulting picture
use ``show(P)`` in Sage notebook. ::
sage: x,y=var('x y')
sage: P=desolve_rk4(y*(2-y),y,ics=[0,.1],ivar=x,output='slope_field',end_points=[-4,6],thickness=3)
ALGORITHM:
4th order Runge-Kutta method. Wrapper for command ``rk`` in
Maxima's dynamics package. Perhaps could be faster by using
fast_float instead.
AUTHORS:
- Robert Marik (10-2009)
"""
if ics is None:
raise ValueError("No initial conditions, specify with ics=[x0,y0].")
if ivar is None:
ivars = de.variables()
ivars = [t for t in ivars if t != dvar]
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = ivars[0]
if not is_SymbolicVariable(dvar):
from sage.calculus.var import var
from sage.calculus.all import diff
from sage.symbolic.relation import solve
if is_SymbolicEquation(de):
de = de.lhs() - de.rhs()
dummy_dvar=var('dummy_dvar')
        # consider adding a warning if the solution is not unique
de=solve(de,diff(dvar,ivar),solution_dict=True)
if len(de) != 1:
raise NotImplementedError("Sorry, cannot find explicit formula for right-hand side of the ODE.")
de=de[0][diff(dvar,ivar)].subs(dvar==dummy_dvar)
else:
dummy_dvar=dvar
step=abs(step)
de0=de._maxima_()
maxima("load('dynamics)")
lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)
sol_1, sol_2 = [],[]
if lower_bound<ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),lower_bound,-step)
sol_1=maxima(cmd).sage()
sol_1.pop(0)
sol_1.reverse()
if upper_bound>ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),upper_bound,step)
sol_2=maxima(cmd).sage()
sol_2.pop(0)
sol=sol_1
sol.extend([[ics[0],ics[1]]])
sol.extend(sol_2)
if output=='list':
return sol
from sage.plot.plot import list_plot
from sage.plot.plot_field import plot_slope_field
R = list_plot(sol,plotjoined=True,**kwds)
if output=='plot':
return R
if output=='slope_field':
XMIN=sol[0][0]
YMIN=sol[0][1]
XMAX=XMIN
YMAX=YMIN
for s,t in sol:
if s>XMAX:XMAX=s
if s<XMIN:XMIN=s
if t>YMAX:YMAX=t
if t<YMIN:YMIN=t
return plot_slope_field(de,(ivar,XMIN,XMAX),(dummy_dvar,YMIN,YMAX))+R
raise ValueError("Option output should be 'list', 'plot' or 'slope_field'.")
def desolve_system_rk4(des, vars, ics=None, ivar=None, end_points=None, step=0.1):
r"""
Solves numerically system of first-order ordinary differential
equations using the 4th order Runge-Kutta method. Wrapper for
Maxima command ``rk``. See also ``ode_solver``.
INPUT:
input is similar to desolve_system and desolve_rk4 commands
- ``des`` - right hand sides of the system
- ``vars`` - dependent variables
- ``ivar`` - (optional) should be specified, if there are more variables or
if the equation is autonomous and the independent variable is
missing
- ``ics`` - initial conditions in the form [x0,y01,y02,y03,....]
- ``end_points`` - the end points of the interval
- if end_points is a or [a], we integrate on between min(ics[0],a) and max(ics[0],a)
- if end_points is None, we use end_points=ics[0]+10
- if end_points is [a,b] we integrate on between min(ics[0],a) and max(ics[0],b)
- ``step`` -- (optional, default: 0.1) the length of the step
OUTPUT:
Returns a list of points.
EXAMPLES::
sage: from sage.calculus.desolvers import desolve_system_rk4
Lotka Volterra system::
sage: from sage.calculus.desolvers import desolve_system_rk4
sage: x,y,t=var('x y t')
sage: P=desolve_system_rk4([x*(1-y),-y*(1-x)],[x,y],ics=[0,0.5,2],ivar=t,end_points=20)
sage: Q=[ [i,j] for i,j,k in P]
sage: LP=list_plot(Q)
sage: Q=[ [j,k] for i,j,k in P]
sage: LP=list_plot(Q)
ALGORITHM:
4th order Runge-Kutta method. Wrapper for command ``rk`` in Maxima's
dynamics package. Perhaps could be faster by using ``fast_float``
instead.
AUTHOR:
- Robert Marik (10-2009)
"""
if ics is None:
raise ValueError("No initial conditions, specify with ics=[x0,y01,y02,...].")
ivars = set([])
for de in des:
ivars = ivars.union(set(de.variables()))
if ivar is None:
ivars = ivars - set(vars)
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = list(ivars)[0]
dess = [de._maxima_().str() for de in des]
desstr = "[" + ",".join(dess) + "]"
varss = [varsi._maxima_().str() for varsi in vars]
varstr = "[" + ",".join(varss) + "]"
x0=ics[0]
icss = [ics[i]._maxima_().str() for i in range(1,len(ics))]
icstr = "[" + ",".join(icss) + "]"
step=abs(step)
maxima("load('dynamics)")
lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)
sol_1, sol_2 = [],[]
if lower_bound<ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(desstr,varstr,icstr,str(ivar),str(x0),lower_bound,-step)
sol_1=maxima(cmd).sage()
sol_1.pop(0)
sol_1.reverse()
if upper_bound>ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(desstr,varstr,icstr,str(ivar),str(x0),upper_bound,step)
sol_2=maxima(cmd).sage()
sol_2.pop(0)
sol=sol_1
sol.append(ics)
sol.extend(sol_2)
return sol
def desolve_odeint(des, ics, times, dvars, ivar=None, compute_jac=False, args=()
, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0
, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0):
r"""
Solves numerically a system of first-order ordinary differential equations
using ``odeint`` from scipy.integrate module.
INPUT:
- ``des`` -- right hand sides of the system
- ``ics`` -- initial conditions
- ``times`` -- a sequence of time points in which the solution must be found
- ``dvars`` -- dependent variables. ATTENTION: the order must be the same as
in des, that means: d(dvars[i])/dt=des[i]
- ``ivar`` -- independent variable, optional.
- ``compute_jac`` -- boolean. If True, the Jacobian of des is computed and
used during the integration of Stiff Systems. Default value is False.
Other Parameters (taken from the documentation of odeint function from
scipy.integrate module)
- ``rtol``, ``atol`` : float
The input parameters rtol and atol determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form:
max-norm of (e / ewt) <= 1
where ewt is a vector of positive error weights computed as:
ewt = rtol * abs(y) + atol
rtol and atol can be either vectors the same length as y or scalars.
- ``tcrit`` : array
Vector of critical points (e.g. singularities) where integration
care should be taken.
- ``h0`` : float, (0: solver-determined)
The step size to be attempted on the first step.
- ``hmax`` : float, (0: solver-determined)
The maximum absolute step size allowed.
- ``hmin`` : float, (0: solver-determined)
The minimum absolute step size allowed.
- ``ixpr`` : boolean.
Whether to generate extra printing at method switches.
- ``mxstep`` : integer, (0: solver-determined)
Maximum number of (internally defined) steps allowed for each
integration point in t.
- ``mxhnil`` : integer, (0: solver-determined)
Maximum number of messages printed.
- ``mxordn`` : integer, (0: solver-determined)
Maximum order to be allowed for the nonstiff (Adams) method.
- ``mxords`` : integer, (0: solver-determined)
Maximum order to be allowed for the stiff (BDF) method.
OUTPUT:
Returns a list with the solution of the system at each time in times.
EXAMPLES:
Lotka Volterra Equations::
sage: from sage.calculus.desolvers import desolve_odeint
sage: x,y=var('x,y')
sage: f=[x*(1-y),-y*(1-x)]
sage: sol=desolve_odeint(f,[0.5,2],srange(0,10,0.1),[x,y])
sage: p=line(zip(sol[:,0],sol[:,1]))
sage: p.show()
Lorenz Equations::
sage: x,y,z=var('x,y,z')
sage: # Next we define the parameters
sage: sigma=10
sage: rho=28
sage: beta=8/3
sage: # The Lorenz equations
sage: lorenz=[sigma*(y-x),x*(rho-z)-y,x*y-beta*z]
sage: # Time and initial conditions
sage: times=srange(0,50.05,0.05)
sage: ics=[0,1,1]
sage: sol=desolve_odeint(lorenz,ics,times,[x,y,z],rtol=1e-13,atol=1e-14)
One-dimensional Stiff system::
sage: y= var('y')
sage: epsilon=0.01
sage: f=y^2*(1-y)
sage: ic=epsilon
sage: t=srange(0,2/epsilon,1)
sage: sol=desolve_odeint(f,ic,t,y,rtol=1e-9,atol=1e-10,compute_jac=True)
sage: p=points(zip(t,sol))
sage: p.show()
Another Stiff system with some optional parameters with no
default value::
sage: y1,y2,y3=var('y1,y2,y3')
sage: f1=77.27*(y2+y1*(1-8.375*1e-6*y1-y2))
sage: f2=1/77.27*(y3-(1+y1)*y2)
sage: f3=0.16*(y1-y3)
sage: f=[f1,f2,f3]
sage: ci=[0.2,0.4,0.7]
sage: t=srange(0,10,0.01)
sage: v=[y1,y2,y3]
sage: sol=desolve_odeint(f,ci,t,v,rtol=1e-3,atol=1e-4,h0=0.1,hmax=1,hmin=1e-4,mxstep=1000,mxords=17)
AUTHOR:
- Oriol Castejon (05-2010)
"""
from scipy.integrate import odeint
from sage.ext.fast_eval import fast_float
from sage.calculus.functions import jacobian
if ivar==None:
if len(dvars)==0 or len(dvars)==1:
if len(dvars)==1:
des=des[0]
dvars=dvars[0]
all_vars = set(des.variables())
else:
all_vars = set([])
for de in des:
all_vars.update(set(de.variables()))
if is_SymbolicVariable(dvars):
ivars = all_vars - set([dvars])
else:
ivars = all_vars - set(dvars)
if len(ivars)==1:
ivar = ivars.pop()
elif not ivars:
from sage.symbolic.ring import var
try:
safe_names = [ 't_' + str(dvar) for dvar in dvars ]
except TypeError: # not iterable
safe_names = [ 't_' + str(dvars) ]
ivar = map(var, safe_names)
else:
raise ValueError("Unable to determine independent variable, please specify.")
# one-dimensional systems:
if is_SymbolicVariable(dvars):
func = fast_float(des,dvars,ivar)
if not compute_jac:
Dfun=None
else:
J = diff(des,dvars)
J = fast_float(J,dvars,ivar)
Dfun = lambda y,t: [J(y,t)]
# n-dimensional systems:
else:
desc = []
variabs = dvars[:]
variabs.append(ivar)
for de in des:
desc.append(fast_float(de,*variabs))
def func(y,t):
v = list(y[:])
v.append(t)
return [dec(*v) for dec in desc]
if not compute_jac:
Dfun=None
else:
J = jacobian(des,dvars)
J = [list(v) for v in J]
J = fast_float(J,*variabs)
def Dfun(y,t):
v = list(y[:])
v.append(t)
return [[element(*v) for element in row] for row in J]
sol=odeint(func, ics, times, args=args, Dfun=Dfun, rtol=rtol, atol=atol,
tcrit=tcrit, h0=h0, hmax=hmax, hmin=hmin, ixpr=ixpr, mxstep=mxstep,
mxhnil=mxhnil, mxordn=mxordn, mxords=mxords, printmessg=printmessg)
return sol
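# --- Editor's illustrative sketch (not part of the original Sage source) ---
# desolve_odeint ultimately reduces to a call of scipy.integrate.odeint with
# an optional Jacobian. A minimal, Sage-free version of the 1-D stiff example
# from the docstring might look like this (all names are hypothetical):
def _odeint_sketch():
    import numpy as np
    from scipy.integrate import odeint as _scipy_odeint
    def rhs(y, t):
        return y**2 * (1.0 - y)
    def jac(y, t):
        # d/dy of y^2*(1-y) = 2*y - 3*y^2, returned as a 1x1 Jacobian matrix
        return [[2.0 * y[0] - 3.0 * y[0]**2]]
    t = np.linspace(0.0, 200.0, 201)
    return _scipy_odeint(rhs, [0.01], t, Dfun=jac, rtol=1e-9, atol=1e-10)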
| 34.65619
| 188
| 0.543986
|
f str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this BVP. Remove the initial condition to get the general solution.")
soln=soln.sage()
if is_SymbolicEquation(soln) and soln.lhs() == dvar:
# Remark: Here we do not check that the right hand side does not depend on dvar.
        # This probably will not happen for solutions obtained via ode2, anyway.
soln = soln.rhs()
if show_method:
return [soln,maxima_method.str()]
else:
return soln
#def desolve_laplace2(de,vars,ics=None):
## """
## Solves an ODE using laplace transforms via maxima. Initial conditions
## are optional.
## INPUT:
## de -- a lambda expression representing the ODE
## (eg, de = "diff(f(x),x,2)=diff(f(x),x)+sin(x)")
## vars -- a list of strings representing the variables
## (eg, vars = ["x","f"], if x is the independent
## variable and f is the dependent variable)
## ics -- a list of numbers representing initial conditions,
## with symbols allowed which are represented by strings
## (eg, f(0)=1, f'(0)=2 is ics = [0,1,2])
## EXAMPLES:
## sage: from sage.calculus.desolvers import desolve_laplace
## sage: x = var('x')
## sage: f = function('f', x)
## sage: de = lambda y: diff(y,x,x) - 2*diff(y,x) + y
## sage: desolve_laplace(de(f(x)),[f,x])
## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x
## sage: desolve_laplace(de(f(x)),[f,x],[0,1,2]) ## IC option does not work
## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x
## AUTHOR: David Joyner (1st version 1-2006, 8-2007)
## """
=None):
#This is the original code from David Joyner (inputs and outputs strings)
#maxima("de:"+de._repr_()+"=0;")
#if ics!=None:
# d = len(ics)
# for i in range(0,d-1):
# ic = "atvalue(diff("+vars[1]+"("+vars[0]+"),"+str(vars[0])+","+str(i)+"),"+str(vars[0])+"="+str(ics[0])+","+str(ics[1+i])+")"
# maxima(ic)
#
#cmd = "desolve("+de._repr_()+","+vars[1]+"("+vars[0]+"));"
#return maxima(cmd).rhs()._maxima_init_()
## verbatim copy from desolve - begin
if is_SymbolicEquation(de):
de = de.lhs() - de.rhs()
if is_SymbolicVariable(dvar):
raise ValueError("You have to declare dependent variable as a function, eg. y=function('y',x)")
# for backwards compatibility
if isinstance(dvar, list):
dvar, ivar = dvar
elif ivar is None:
ivars = de.variables()
ivars = [t for t in ivars if t != dvar]
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = ivars[0]
## verbatim copy from desolve - end
    def sanitize_var(exprs):  # strip Maxima's quote prefix: 'y(x) -> y(x)
return exprs.replace("'"+str(dvar),str(dvar))
de0=de._maxima_()
P = de0.parent()
cmd = sanitize_var("desolve("+de0.str()+","+str(dvar)+")")
soln=P(cmd).rhs()
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this ODE.")
soln=soln.sage()
if ics!=None:
d = len(ics)
for i in range(0,d-1):
soln=eval('soln.substitute(diff(dvar,ivar,i)('+str(ivar)+'=ics[0])==ics[i+1])')
return soln
def desolve_system(des, vars, ics=None, ivar=None):
if len(des)==1:
return desolve_laplace(des[0], vars[0], ics=ics, ivar=ivar)
ivars = set([])
for i, de in enumerate(des):
if not is_SymbolicEquation(de):
des[i] = de == 0
ivars = ivars.union(set(de.variables()))
if ivar is None:
ivars = ivars - set(vars)
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = list(ivars)[0]
dvars = [v._maxima_() for v in vars]
if ics is not None:
ivar_ic = ics[0]
for dvar, ic in zip(dvars, ics[1:]):
dvar.atvalue(ivar==ivar_ic, ic)
soln = dvars[0].parent().desolve(des, dvars)
if str(soln).strip() == 'false':
raise NotImplementedError("Maxima was unable to solve this system.")
soln = list(soln)
for i, sol in enumerate(soln):
soln[i] = sol.sage()
if ics is not None:
ivar_ic = ics[0]
for dvar, ic in zip(dvars, ics[:1]):
dvar.atvalue(ivar==ivar_ic, dvar)
return soln
def desolve_system_strings(des,vars,ics=None):
d = len(des)
dess = [de._maxima_init_() + "=0" for de in des]
for i in range(d):
cmd="de:" + dess[int(i)] + ";"
maxima.eval(cmd)
desstr = "[" + ",".join(dess) + "]"
d = len(vars)
varss = list("'" + vars[i] + "(" + vars[0] + ")" for i in range(1,d))
varstr = "[" + ",".join(varss) + "]"
if ics is not None:
ue('" + vars[i] + "("+vars[0] + ")," + str(vars[0]) + "=" + str(ics[0]) + "," + str(ics[i]) + ")"
maxima.eval(ic)
cmd = "desolve(" + desstr + "," + varstr + ");"
soln = maxima(cmd)
return [f.rhs()._maxima_init_() for f in soln]
@rename_keyword(deprecation=6094, method="algorithm")
def eulers_method(f,x0,y0,h,x1,algorithm="table"):
if algorithm=="table":
print("%10s %20s %25s"%("x","y","h*f(x,y)"))
n=int((1.0)*(x1-x0)/h)
x00=x0; y00=y0
soln = [[x00,y00]]
for i in range(n+1):
if algorithm=="table":
print("%10r %20r %20r"%(x00,y00,h*f(x00,y00)))
y00 = y00+h*f(x00,y00)
x00=x00+h
soln.append([x00,y00])
if algorithm!="table":
return soln
@rename_keyword(deprecation=6094, method="algorithm")
def eulers_method_2x2(f,g, t0, x0, y0, h, t1,algorithm="table"):
if algorithm=="table":
print("%10s %20s %25s %20s %20s"%("t", "x","h*f(t,x,y)","y", "h*g(t,x,y)"))
n=int((1.0)*(t1-t0)/h)
t00 = t0; x00 = x0; y00 = y0
soln = [[t00,x00,y00]]
for i in range(n+1):
if algorithm=="table":
print("%10r %20r %25r %20r %20r"%(t00,x00,h*f(t00,x00,y00),y00,h*g(t00,x00,y00)))
x01 = x00 + h*f(t00,x00,y00)
y00 = y00 + h*g(t00,x00,y00)
x00 = x01
t00 = t00 + h
soln.append([t00,x00,y00])
if algorithm!="table":
return soln
def eulers_method_2x2_plot(f,g, t0, x0, y0, h, t1):
n=int((1.0)*(t1-t0)/h)
t00 = t0; x00 = x0; y00 = y0
soln = [[t00,x00,y00]]
for i in range(n+1):
x01 = x00 + h*f([t00,x00,y00])
y00 = y00 + h*g([t00,x00,y00])
x00 = x01
t00 = t00 + h
soln.append([t00,x00,y00])
Q1 = line([[x[0],x[1]] for x in soln], rgbcolor=(1/4,1/8,3/4))
Q2 = line([[x[0],x[2]] for x in soln], rgbcolor=(1/2,1/8,1/4))
return [Q1,Q2]
def desolve_rk4_determine_bounds(ics,end_points=None):
if end_points is None:
return((ics[0],ics[0]+10))
if not isinstance(end_points,list):
end_points=[end_points]
if len(end_points)==1:
return (min(ics[0],end_points[0]),max(ics[0],end_points[0]))
else:
return (min(ics[0],end_points[0]),max(ics[0],end_points[1]))
def desolve_rk4(de, dvar, ics=None, ivar=None, end_points=None, step=0.1, output='list', **kwds):
if ics is None:
raise ValueError("No initial conditions, specify with ics=[x0,y0].")
if ivar is None:
ivars = de.variables()
ivars = [t for t in ivars if t != dvar]
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = ivars[0]
if not is_SymbolicVariable(dvar):
from sage.calculus.var import var
from sage.calculus.all import diff
from sage.symbolic.relation import solve
if is_SymbolicEquation(de):
de = de.lhs() - de.rhs()
dummy_dvar=var('dummy_dvar')
        # consider adding a warning if the solution is not unique
de=solve(de,diff(dvar,ivar),solution_dict=True)
if len(de) != 1:
raise NotImplementedError("Sorry, cannot find explicit formula for right-hand side of the ODE.")
de=de[0][diff(dvar,ivar)].subs(dvar==dummy_dvar)
else:
dummy_dvar=dvar
step=abs(step)
de0=de._maxima_()
maxima("load('dynamics)")
lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)
sol_1, sol_2 = [],[]
if lower_bound<ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),lower_bound,-step)
sol_1=maxima(cmd).sage()
sol_1.pop(0)
sol_1.reverse()
if upper_bound>ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),upper_bound,step)
sol_2=maxima(cmd).sage()
sol_2.pop(0)
sol=sol_1
sol.extend([[ics[0],ics[1]]])
sol.extend(sol_2)
if output=='list':
return sol
from sage.plot.plot import list_plot
from sage.plot.plot_field import plot_slope_field
R = list_plot(sol,plotjoined=True,**kwds)
if output=='plot':
return R
if output=='slope_field':
XMIN=sol[0][0]
YMIN=sol[0][1]
XMAX=XMIN
YMAX=YMIN
for s,t in sol:
if s>XMAX:XMAX=s
if s<XMIN:XMIN=s
if t>YMAX:YMAX=t
if t<YMIN:YMIN=t
return plot_slope_field(de,(ivar,XMIN,XMAX),(dummy_dvar,YMIN,YMAX))+R
raise ValueError("Option output should be 'list', 'plot' or 'slope_field'.")
def desolve_system_rk4(des, vars, ics=None, ivar=None, end_points=None, step=0.1):
if ics is None:
raise ValueError("No initial conditions, specify with ics=[x0,y01,y02,...].")
ivars = set([])
for de in des:
ivars = ivars.union(set(de.variables()))
if ivar is None:
ivars = ivars - set(vars)
if len(ivars) != 1:
raise ValueError("Unable to determine independent variable, please specify.")
ivar = list(ivars)[0]
dess = [de._maxima_().str() for de in des]
desstr = "[" + ",".join(dess) + "]"
varss = [varsi._maxima_().str() for varsi in vars]
varstr = "[" + ",".join(varss) + "]"
x0=ics[0]
icss = [ics[i]._maxima_().str() for i in range(1,len(ics))]
icstr = "[" + ",".join(icss) + "]"
step=abs(step)
maxima("load('dynamics)")
lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)
sol_1, sol_2 = [],[]
if lower_bound<ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(desstr,varstr,icstr,str(ivar),str(x0),lower_bound,-step)
sol_1=maxima(cmd).sage()
sol_1.pop(0)
sol_1.reverse()
if upper_bound>ics[0]:
cmd="rk(%s,%s,%s,[%s,%s,%s,%s])\
"%(desstr,varstr,icstr,str(ivar),str(x0),upper_bound,step)
sol_2=maxima(cmd).sage()
sol_2.pop(0)
sol=sol_1
sol.append(ics)
sol.extend(sol_2)
return sol
def desolve_odeint(des, ics, times, dvars, ivar=None, compute_jac=False, args=()
, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0
, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0):
from scipy.integrate import odeint
from sage.ext.fast_eval import fast_float
from sage.calculus.functions import jacobian
if ivar==None:
if len(dvars)==0 or len(dvars)==1:
if len(dvars)==1:
des=des[0]
dvars=dvars[0]
all_vars = set(des.variables())
else:
all_vars = set([])
for de in des:
all_vars.update(set(de.variables()))
if is_SymbolicVariable(dvars):
ivars = all_vars - set([dvars])
else:
ivars = all_vars - set(dvars)
if len(ivars)==1:
ivar = ivars.pop()
elif not ivars:
from sage.symbolic.ring import var
try:
safe_names = [ 't_' + str(dvar) for dvar in dvars ]
except TypeError: # not iterable
safe_names = [ 't_' + str(dvars) ]
ivar = map(var, safe_names)
else:
raise ValueError("Unable to determine independent variable, please specify.")
# one-dimensional systems:
if is_SymbolicVariable(dvars):
func = fast_float(des,dvars,ivar)
if not compute_jac:
Dfun=None
else:
J = diff(des,dvars)
J = fast_float(J,dvars,ivar)
Dfun = lambda y,t: [J(y,t)]
# n-dimensional systems:
else:
desc = []
variabs = dvars[:]
variabs.append(ivar)
for de in des:
desc.append(fast_float(de,*variabs))
def func(y,t):
v = list(y[:])
v.append(t)
return [dec(*v) for dec in desc]
if not compute_jac:
Dfun=None
else:
J = jacobian(des,dvars)
J = [list(v) for v in J]
J = fast_float(J,*variabs)
def Dfun(y,t):
v = list(y[:])
v.append(t)
return [[element(*v) for element in row] for row in J]
sol=odeint(func, ics, times, args=args, Dfun=Dfun, rtol=rtol, atol=atol,
tcrit=tcrit, h0=h0, hmax=hmax, hmin=hmin, ixpr=ixpr, mxstep=mxstep,
mxhnil=mxhnil, mxordn=mxordn, mxords=mxords, printmessg=printmessg)
return sol
| true
| true
|
1c44e91630531b387cea6e23a3a75d9cc1102f8c
| 1,137
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotlink/models/GprsStatusResp.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotlink/models/GprsStatusResp.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/iotlink/models/GprsStatusResp.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class GprsStatusResp(object):
def __init__(self, iccid=None, msisdn=None, imsi=None, onlinestatus=None):
"""
        :param iccid: (Optional) ICCID of the IoT SIM card
        :param msisdn: (Optional) MSISDN of the IoT SIM card
        :param imsi: (Optional) IMSI of the IoT SIM card
        :param onlinestatus: (Optional) GPRS online status (00: offline; 01: online; 02: the carrier does not support this query; 03: unknown)
"""
self.iccid = iccid
self.msisdn = msisdn
self.imsi = imsi
self.onlinestatus = onlinestatus
| 33.441176
| 80
| 0.702726
|
class GprsStatusResp(object):
def __init__(self, iccid=None, msisdn=None, imsi=None, onlinestatus=None):
self.iccid = iccid
self.msisdn = msisdn
self.imsi = imsi
self.onlinestatus = onlinestatus
| true
| true
|
1c44e9532c723c2f96f75d1ac6d79c88a9c36f09
| 12,528
|
py
|
Python
|
datasets.py
|
hubertjb/dynamic-spatial-filtering
|
4580d60c06cd926b34470b8d05d4d72f8e2fd58c
|
[
"BSD-3-Clause"
] | 16
|
2021-05-28T07:27:57.000Z
|
2022-03-07T09:00:50.000Z
|
datasets.py
|
hubertjb/dynamic-spatial-filtering
|
4580d60c06cd926b34470b8d05d4d72f8e2fd58c
|
[
"BSD-3-Clause"
] | null | null | null |
datasets.py
|
hubertjb/dynamic-spatial-filtering
|
4580d60c06cd926b34470b8d05d4d72f8e2fd58c
|
[
"BSD-3-Clause"
] | 4
|
2021-07-16T15:53:29.000Z
|
2022-03-05T14:30:14.000Z
|
"""Dataset-related functions and classes.
Inspired by `mne.datasets.sleep_physionet`.
"""
import os
import os.path as op
import mne
import wfdb
import numpy as np
import pandas as pd
from mne.datasets.utils import _get_path
from mne.datasets.sleep_physionet._utils import _fetch_one
from braindecode.datasets import BaseDataset, BaseConcatDataset
from braindecode.datautil.preprocess import _preprocess
from joblib import Parallel, delayed
PC18_DIR = op.join(op.dirname(__file__), 'data', 'pc18')
PC18_RECORDS = op.join(PC18_DIR, 'sleep_records.csv')
PC18_INFO = op.join(PC18_DIR, 'age-sex.csv')
PC18_URL = 'https://physionet.org/files/challenge-2018/1.0.0/'
PC18_SHA1_TRAINING = op.join(PC18_DIR, 'training_SHA1SUMS')
PC18_SHA1_TEST = op.join(PC18_DIR, 'test_SHA1SUMS')
def update_pc18_sleep_records(fname=PC18_RECORDS):
"""Create CSV file with information about available PC18 recordings.
"""
# Load and massage the checksums.
sha_train_df = pd.read_csv(PC18_SHA1_TRAINING, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
sha_test_df = pd.read_csv(PC18_SHA1_TEST, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
sha_train_df['Split'] = 'training'
sha_test_df['Split'] = 'test'
sha_df = pd.concat([sha_train_df, sha_test_df], axis=0, ignore_index=True)
select_records = ((sha_df.fname.str.startswith('tr') |
sha_df.fname.str.startswith('te')) &
~sha_df.fname.str.endswith('arousal.mat'))
sha_df = sha_df[select_records]
sha_df['Record'] = sha_df['fname'].str.split('/', expand=True)[0]
sha_df['fname'] = sha_df[['Split', 'fname']].agg('/'.join, axis=1)
# Load and massage the data.
data = pd.read_csv(PC18_INFO)
data = data.reset_index().rename({'index': 'Subject'}, axis=1)
data['Sex'] = data['Sex'].map(
{'F': 'female', 'M': 'male', 'm': 'male'}).astype('category')
data = sha_df.merge(data, on='Record')
data['Record type'] = data['fname'].str.split('.', expand=True)[1].map(
{'hea': 'Header', 'mat': 'PSG', 'arousal': 'Arousal'}).astype(
'category')
data = data[['Subject', 'Record', 'Record type', 'Split', 'Age', 'Sex',
'sha', 'fname']].sort_values(by='Subject')
# Save the data.
data.to_csv(fname, index=False)
def _data_path(path=None, force_update=False, update_path=None, verbose=None):
"""Get path to local copy of PC18 dataset.
"""
key = 'PC18_DATASET_PATH'
name = 'PC18_DATASET_SLEEP'
path = _get_path(path, key, name)
subdirs = os.listdir(path)
if 'training' in subdirs or 'test' in subdirs: # the specified path is
# already at the training and test folders level
return path
else:
return op.join(path, 'pc18-sleep-data')
def fetch_pc18_data(subjects, path=None, force_update=False, update_path=None,
base_url=PC18_URL, verbose=None):
"""Get paths to local copies of PhysioNet Challenge 2018 dataset files.
This will fetch data from the publicly available PhysioNet Computing in
Cardiology Challenge 2018 dataset on sleep arousal detection [1]_ [2]_.
This corresponds to 1983 recordings from individual subjects with
(suspected) sleep apnea. The dataset is separated into a training set with
994 recordings for which arousal annotation are available and a test set
with 989 recordings for which the labels have not been revealed. Across the
entire dataset, mean age is 55 years old and 65% of recordings are from
male subjects.
More information can be found on the
`physionet website <https://physionet.org/content/challenge-2018/1.0.0/>`_.
Parameters
----------
subjects : list of int
The subjects to use. Can be in the range of 0-1982 (inclusive). Test
recordings are 0-988, while training recordings are 989-1982.
path : None | str
Location of where to look for the PC18 data storing location. If None,
the environment variable or config parameter ``PC18_DATASET_PATH``
is used. If it doesn't exist, the "~/mne_data" directory is used. If
the dataset is not found under the given path, the data will be
automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the PC18_DATASET_PATH in mne-python config to the given
path. If None, the user is prompted.
base_url : str
The URL root.
%(verbose)s
Returns
-------
paths : list
List of local data paths of the given type.
References
----------
.. [1] Mohammad M Ghassemi, Benjamin E Moody, Li-wei H Lehman, Christopher
Song, Qiao Li, Haoqi Sun, Roger G Mark, M Brandon Westover, Gari D
Clifford. You Snooze, You Win: the PhysioNet/Computing in Cardiology
Challenge 2018.
.. [2] Goldberger, A., Amaral, L., Glass, L., Hausdorff, J., Ivanov, P. C.,
Mark, R., ... & Stanley, H. E. (2000). PhysioBank, PhysioToolkit, and
PhysioNet: Components of a new research resource for complex physiologic
signals. Circulation [Online]. 101 (23), pp. e215–e220.)
"""
records = pd.read_csv(PC18_RECORDS)
psg_records = records[records['Record type'] == 'PSG']
hea_records = records[records['Record type'] == 'Header']
arousal_records = records[records['Record type'] == 'Arousal']
path = _data_path(path=path, update_path=update_path)
params = [path, force_update, base_url]
fnames = []
for subject in subjects:
for idx in np.where(psg_records['Subject'] == subject)[0]:
psg_fname = _fetch_one(psg_records['fname'].iloc[idx],
psg_records['sha'].iloc[idx], *params)
hea_fname = _fetch_one(hea_records['fname'].iloc[idx],
hea_records['sha'].iloc[idx], *params)
if psg_records['Split'].iloc[idx] == 'training':
train_idx = np.where(
arousal_records['Subject'] == subject)[0][0]
arousal_fname = _fetch_one(
arousal_records['fname'].iloc[train_idx],
arousal_records['sha'].iloc[train_idx], *params)
else:
arousal_fname = None
fnames.append([psg_fname, hea_fname, arousal_fname])
return fnames
def convert_wfdb_anns_to_mne_annotations(annots):
"""Convert wfdb.io.Annotation format to MNE's.
Parameters
----------
annots : wfdb.io.Annotation
Annotation object obtained by e.g. loading an annotation file with
wfdb.rdann().
Returns
-------
mne.Annotations :
MNE Annotations object.
"""
ann_chs = set(annots.chan)
onsets = annots.sample / annots.fs
new_onset, new_duration, new_description = list(), list(), list()
for ch in ann_chs:
mask = annots.chan == ch
ch_onsets = onsets[mask]
ch_descs = np.array(annots.aux_note)[mask]
# Events with beginning and end, defined by '(event' and 'event)'
if all([(i.startswith('(') or i.endswith(')')) for i in ch_descs]):
pass
else: # Sleep stage-like annotations
ch_durations = np.concatenate([np.diff(ch_onsets), [30]])
assert all(ch_durations > 0), 'Negative duration'
new_onset.extend(ch_onsets)
new_duration.extend(ch_durations)
new_description.extend(ch_descs)
mne_annots = mne.Annotations(
new_onset, new_duration, new_description, orig_time=None)
return mne_annots
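# --- Editor's illustrative usage sketch (not part of the original module) ---
# Converting the arousal annotations of a single PC18 recording; record_path
# is hypothetical and must point at files obtained with fetch_pc18_data above.
def _convert_one_record_sketch(record_path):
    annots = wfdb.rdann(record_path, 'arousal', summarize_labels=False)
    return convert_wfdb_anns_to_mne_annotations(annots)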
class PC18(BaseConcatDataset):
"""Physionet Challenge 2018 polysomnography dataset.
Sleep dataset from https://physionet.org/content/challenge-2018/1.0.0/.
Contains overnight recordings from 1983 healthy subjects.
See `fetch_pc18_data` for a more complete description.
Parameters
----------
subject_ids: list(int) | str | None
(list of) int of subject(s) to be loaded. If None, load all available
subjects. If 'training', load all training recordings. If 'test', load
all test recordings.
path : None | str
Location of where to look for the PC18 data storing location. If None,
the environment variable or config parameter ``MNE_DATASETS_PC18_PATH``
is used. If it doesn't exist, the "~/mne_data" directory is used. If
the dataset is not found under the given path, the data will be
automatically downloaded to the specified folder.
load_eeg_only: bool
If True, only load the EEG channels and discard the others (EOG, EMG,
temperature, respiration) to avoid resampling the other signals.
preproc : list(Preprocessor) | None
List of preprocessors to apply to each file individually. This way the
data can e.g., be downsampled (temporally and spatially) to limit the
memory usage of the entire Dataset object. This also enables applying
preprocessing in parallel over the recordings.
windower : callable | None
Function to split the raw data into windows. If provided, windowing is
integrated into the loading process (after preprocessing) such that
        memory usage is minimized while allowing parallelization.
n_jobs : int
Number of parallel processes.
"""
def __init__(self, subject_ids=None, path=None, load_eeg_only=True,
preproc=None, windower=None, n_jobs=1):
if subject_ids is None:
subject_ids = range(1983)
elif subject_ids == 'training':
subject_ids = range(989, 1983)
elif subject_ids == 'test':
subject_ids = range(989)
paths = fetch_pc18_data(subject_ids, path=path)
self.info_df = pd.read_csv(PC18_INFO)
if n_jobs == 1:
all_base_ds = [self._load_raw(
subject_id, p[0], p[2], load_eeg_only=load_eeg_only,
preproc=preproc, windower=windower)
for subject_id, p in zip(subject_ids, paths)]
else:
all_base_ds = Parallel(n_jobs=n_jobs)(delayed(self._load_raw)(
subject_id, p[0], p[2], load_eeg_only=load_eeg_only,
preproc=preproc, windower=windower)
for subject_id, p in zip(subject_ids, paths))
super().__init__(all_base_ds)
def _load_raw(self, subj_nb, raw_fname, arousal_fname, load_eeg_only,
preproc, windower):
channel_types = ['eeg'] * 7
if load_eeg_only:
channels = list(range(7))
else:
channel_types += ['emg', 'misc', 'misc', 'misc', 'misc', 'ecg']
channels = None
# Load raw signals and header
record = wfdb.io.rdrecord(op.splitext(raw_fname)[0], channels=channels)
# Convert to right units for MNE (EEG should be in V)
data = record.p_signal.T
data[np.array(record.units) == 'uV'] /= 1e6
data[np.array(record.units) == 'mV'] /= 1e3
info = mne.create_info(record.sig_name, record.fs, channel_types)
out = mne.io.RawArray(data, info)
# Extract annotations
if arousal_fname is not None:
annots = wfdb.rdann(
op.splitext(raw_fname)[0], 'arousal', sampfrom=0, sampto=None,
shift_samps=False, return_label_elements=['symbol'],
summarize_labels=False)
mne_annots = convert_wfdb_anns_to_mne_annotations(annots)
out.set_annotations(mne_annots)
record_name = op.splitext(op.basename(raw_fname))[0]
record_info = self.info_df[
self.info_df['Record'] == record_name].iloc[0]
if record_info['Record'].startswith('tr'):
split = 'training'
elif record_info['Record'].startswith('te'):
split = 'test'
else:
split = 'unknown'
desc = pd.Series({
'subject': subj_nb,
'record': record_info['Record'],
'split': split,
'age': record_info['Age'],
'sex': record_info['Sex']
}, name='')
if preproc is not None:
_preprocess(out, preproc)
out = BaseDataset(out, desc)
if windower is not None:
out = windower(out)
out.windows.load_data()
return out
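# --- Editor's illustrative usage sketch (not part of the original module) ---
# Loading two hypothetical training recordings while keeping only the EEG
# channels; the data is fetched to the MNE data path on first use.
def _load_pc18_sketch():
    return PC18(subject_ids=[989, 990], load_eeg_only=True)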
| 40.282958
| 79
| 0.632024
|
import os
import os.path as op
import mne
import wfdb
import numpy as np
import pandas as pd
from mne.datasets.utils import _get_path
from mne.datasets.sleep_physionet._utils import _fetch_one
from braindecode.datasets import BaseDataset, BaseConcatDataset
from braindecode.datautil.preprocess import _preprocess
from joblib import Parallel, delayed
PC18_DIR = op.join(op.dirname(__file__), 'data', 'pc18')
PC18_RECORDS = op.join(PC18_DIR, 'sleep_records.csv')
PC18_INFO = op.join(PC18_DIR, 'age-sex.csv')
PC18_URL = 'https://physionet.org/files/challenge-2018/1.0.0/'
PC18_SHA1_TRAINING = op.join(PC18_DIR, 'training_SHA1SUMS')
PC18_SHA1_TEST = op.join(PC18_DIR, 'test_SHA1SUMS')
def update_pc18_sleep_records(fname=PC18_RECORDS):
sha_train_df = pd.read_csv(PC18_SHA1_TRAINING, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
sha_test_df = pd.read_csv(PC18_SHA1_TEST, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
sha_train_df['Split'] = 'training'
sha_test_df['Split'] = 'test'
sha_df = pd.concat([sha_train_df, sha_test_df], axis=0, ignore_index=True)
select_records = ((sha_df.fname.str.startswith('tr') |
sha_df.fname.str.startswith('te')) &
~sha_df.fname.str.endswith('arousal.mat'))
sha_df = sha_df[select_records]
sha_df['Record'] = sha_df['fname'].str.split('/', expand=True)[0]
sha_df['fname'] = sha_df[['Split', 'fname']].agg('/'.join, axis=1)
data = pd.read_csv(PC18_INFO)
data = data.reset_index().rename({'index': 'Subject'}, axis=1)
data['Sex'] = data['Sex'].map(
{'F': 'female', 'M': 'male', 'm': 'male'}).astype('category')
data = sha_df.merge(data, on='Record')
data['Record type'] = data['fname'].str.split('.', expand=True)[1].map(
{'hea': 'Header', 'mat': 'PSG', 'arousal': 'Arousal'}).astype(
'category')
data = data[['Subject', 'Record', 'Record type', 'Split', 'Age', 'Sex',
'sha', 'fname']].sort_values(by='Subject')
data.to_csv(fname, index=False)
def _data_path(path=None, force_update=False, update_path=None, verbose=None):
key = 'PC18_DATASET_PATH'
name = 'PC18_DATASET_SLEEP'
path = _get_path(path, key, name)
subdirs = os.listdir(path)
if 'training' in subdirs or 'test' in subdirs:
return path
else:
return op.join(path, 'pc18-sleep-data')
def fetch_pc18_data(subjects, path=None, force_update=False, update_path=None,
base_url=PC18_URL, verbose=None):
records = pd.read_csv(PC18_RECORDS)
psg_records = records[records['Record type'] == 'PSG']
hea_records = records[records['Record type'] == 'Header']
arousal_records = records[records['Record type'] == 'Arousal']
path = _data_path(path=path, update_path=update_path)
params = [path, force_update, base_url]
fnames = []
for subject in subjects:
for idx in np.where(psg_records['Subject'] == subject)[0]:
psg_fname = _fetch_one(psg_records['fname'].iloc[idx],
psg_records['sha'].iloc[idx], *params)
hea_fname = _fetch_one(hea_records['fname'].iloc[idx],
hea_records['sha'].iloc[idx], *params)
if psg_records['Split'].iloc[idx] == 'training':
train_idx = np.where(
arousal_records['Subject'] == subject)[0][0]
arousal_fname = _fetch_one(
arousal_records['fname'].iloc[train_idx],
arousal_records['sha'].iloc[train_idx], *params)
else:
arousal_fname = None
fnames.append([psg_fname, hea_fname, arousal_fname])
return fnames
def convert_wfdb_anns_to_mne_annotations(annots):
ann_chs = set(annots.chan)
onsets = annots.sample / annots.fs
new_onset, new_duration, new_description = list(), list(), list()
for ch in ann_chs:
mask = annots.chan == ch
ch_onsets = onsets[mask]
ch_descs = np.array(annots.aux_note)[mask]
if all([(i.startswith('(') or i.endswith(')')) for i in ch_descs]):
pass
else:
ch_durations = np.concatenate([np.diff(ch_onsets), [30]])
assert all(ch_durations > 0), 'Negative duration'
new_onset.extend(ch_onsets)
new_duration.extend(ch_durations)
new_description.extend(ch_descs)
mne_annots = mne.Annotations(
new_onset, new_duration, new_description, orig_time=None)
return mne_annots
class PC18(BaseConcatDataset):
def __init__(self, subject_ids=None, path=None, load_eeg_only=True,
preproc=None, windower=None, n_jobs=1):
if subject_ids is None:
subject_ids = range(1983)
elif subject_ids == 'training':
subject_ids = range(989, 1983)
elif subject_ids == 'test':
subject_ids = range(989)
paths = fetch_pc18_data(subject_ids, path=path)
self.info_df = pd.read_csv(PC18_INFO)
if n_jobs == 1:
all_base_ds = [self._load_raw(
subject_id, p[0], p[2], load_eeg_only=load_eeg_only,
preproc=preproc, windower=windower)
for subject_id, p in zip(subject_ids, paths)]
else:
all_base_ds = Parallel(n_jobs=n_jobs)(delayed(self._load_raw)(
subject_id, p[0], p[2], load_eeg_only=load_eeg_only,
preproc=preproc, windower=windower)
for subject_id, p in zip(subject_ids, paths))
super().__init__(all_base_ds)
def _load_raw(self, subj_nb, raw_fname, arousal_fname, load_eeg_only,
preproc, windower):
channel_types = ['eeg'] * 7
if load_eeg_only:
channels = list(range(7))
else:
channel_types += ['emg', 'misc', 'misc', 'misc', 'misc', 'ecg']
channels = None
record = wfdb.io.rdrecord(op.splitext(raw_fname)[0], channels=channels)
data = record.p_signal.T
data[np.array(record.units) == 'uV'] /= 1e6
data[np.array(record.units) == 'mV'] /= 1e3
info = mne.create_info(record.sig_name, record.fs, channel_types)
out = mne.io.RawArray(data, info)
if arousal_fname is not None:
annots = wfdb.rdann(
op.splitext(raw_fname)[0], 'arousal', sampfrom=0, sampto=None,
shift_samps=False, return_label_elements=['symbol'],
summarize_labels=False)
mne_annots = convert_wfdb_anns_to_mne_annotations(annots)
out.set_annotations(mne_annots)
record_name = op.splitext(op.basename(raw_fname))[0]
record_info = self.info_df[
self.info_df['Record'] == record_name].iloc[0]
if record_info['Record'].startswith('tr'):
split = 'training'
elif record_info['Record'].startswith('te'):
split = 'test'
else:
split = 'unknown'
desc = pd.Series({
'subject': subj_nb,
'record': record_info['Record'],
'split': split,
'age': record_info['Age'],
'sex': record_info['Sex']
}, name='')
if preproc is not None:
_preprocess(out, preproc)
out = BaseDataset(out, desc)
if windower is not None:
out = windower(out)
out.windows.load_data()
return out
| true
| true
|
1c44e957950c99df2052672e9ed2657f2a10cc68
| 1,411
|
py
|
Python
|
SoundSourceLocalization/SSL_Settings.py
|
zhaocy14/SmartWalker
|
b025a7b4a2b305838a22fe4e6116ddb951c4d7bf
|
[
"MIT"
] | 2
|
2021-11-13T14:16:06.000Z
|
2022-01-12T06:07:32.000Z
|
SoundSourceLocalization/SSL_Settings.py
|
zhaocy14/SmartWalker
|
b025a7b4a2b305838a22fe4e6116ddb951c4d7bf
|
[
"MIT"
] | null | null | null |
SoundSourceLocalization/SSL_Settings.py
|
zhaocy14/SmartWalker
|
b025a7b4a2b305838a22fe4e6116ddb951c4d7bf
|
[
"MIT"
] | 3
|
2021-08-30T04:40:39.000Z
|
2022-01-09T11:34:04.000Z
|
import os, sys
import pyaudio
# sample audio
RECORD_DEVICE_NAME = "USB Camera-B4.09.24.1"
SAMPLE_RATE = 16000
CHANNELS = 4
RECORD_WIDTH = 2
CHUNK = 1024
CHUNK_SIZE = 16  # samples per 1 ms at 16 kHz; each entry in the audio queue then corresponds to 1 ms of audio
AUDIO_COMMUNICATION_TOPIC = 'audio'
# KeyWord Spotting
MAX_COMMAND_SECONDS = 3
CLIP_MS = 1000
KWS_WINDOW_STRIDE_MS = 200
KWS_COMMUNICATION_TOPIC = 'keyword'
WORD_QUEUE_CLEAR_COMMUNICATION_TOPIC = 'WORD_QUEUE_CLEAR'
# Noise Suppression
RECORD_SECONDS = 1.1 # 1
# SSL
KWS_TIMEOUT_SECONDS = 0.5
SSL_DOA_COMMUNICATION_TOPIC = 'DOA'
SSL_WAIT_COMMUNICATION_TOPIC = 'WAIT'
# When the SSL module receives keywords recognized by KWS, several speech segments may arrive
# within one (possibly) continuous period. These parameters define how long SSL keeps collecting
# keyword audio for a single keyword utterance.
# Reinforcement Learning
GCC_LENG = 366
GCC_BIAS = 6
ACTION_SPACE = 8
FORMAT = pyaudio.paInt16
FORWARD_SECONDS = 3
STEP_SIZE = 1 # 1
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
print(father_path)
sys.path.append(father_path)
# KWS parameters
KWS_WAVE_PATH = father_path + "/resource/stream_tmp"
KWS_MODEL_PATH = father_path + "/resource/Pretrained_models/DNN/follow.pb"
KWS_LABEL_PATH = father_path + "/resource/Pretrained_models/follow_labels.txt"
MODEL_PATH = father_path + "/resource/model/save20.ckpt"
WAV_PATH = father_path + "/resource/wav/online"
ONLINE_MODEL_PATH = father_path + "/resource/model/online.ckpt"
# sliding window size can be seen in KWS detector
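# --- Editor's illustrative sketch (not part of the original settings file) ---
# One way the constants above could feed a capture stream; matching
# RECORD_DEVICE_NAME to a device index is an assumption, since PyAudio only
# addresses devices by index.
def _open_record_stream_sketch():
    pa = pyaudio.PyAudio()
    device_index = None
    for i in range(pa.get_device_count()):
        if RECORD_DEVICE_NAME in pa.get_device_info_by_index(i).get('name', ''):
            device_index = i
            break
    return pa.open(format=FORMAT, channels=CHANNELS, rate=SAMPLE_RATE,
                   input=True, input_device_index=device_index,
                   frames_per_buffer=CHUNK)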
| 26.622642
| 80
| 0.787385
|
import os, sys
import pyaudio
RECORD_DEVICE_NAME = "USB Camera-B4.09.24.1"
SAMPLE_RATE = 16000
CHANNELS = 4
RECORD_WIDTH = 2
CHUNK = 1024
CHUNK_SIZE = 16
AUDIO_COMMUNICATION_TOPIC = 'audio'
MAX_COMMAND_SECONDS = 3
CLIP_MS = 1000
KWS_WINDOW_STRIDE_MS = 200
KWS_COMMUNICATION_TOPIC = 'keyword'
WORD_QUEUE_CLEAR_COMMUNICATION_TOPIC = 'WORD_QUEUE_CLEAR'
RECORD_SECONDS = 1.1
KWS_TIMEOUT_SECONDS = 0.5
SSL_DOA_COMMUNICATION_TOPIC = 'DOA'
SSL_WAIT_COMMUNICATION_TOPIC = 'WAIT'
GCC_LENG = 366
GCC_BIAS = 6
ACTION_SPACE = 8
FORMAT = pyaudio.paInt16
FORWARD_SECONDS = 3
STEP_SIZE = 1
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
print(father_path)
sys.path.append(father_path)
KWS_WAVE_PATH = father_path + "/resource/stream_tmp"
KWS_MODEL_PATH = father_path + "/resource/Pretrained_models/DNN/follow.pb"
KWS_LABEL_PATH = father_path + "/resource/Pretrained_models/follow_labels.txt"
MODEL_PATH = father_path + "/resource/model/save20.ckpt"
WAV_PATH = father_path + "/resource/wav/online"
ONLINE_MODEL_PATH = father_path + "/resource/model/online.ckpt"
| true
| true
|
1c44ea76e3eb171a9eabbd38585f4423f5c5f1e6
| 642
|
py
|
Python
|
tests/display/test_window.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | null | null | null |
tests/display/test_window.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | null | null | null |
tests/display/test_window.py
|
cmarshall108/panda3d-python3
|
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
|
[
"PHP-3.0",
"PHP-3.01"
] | null | null | null |
def test_window_basic(window):
from panda3d.core import WindowProperties
assert window is not None
current_props = window.get_properties()
default_props = WindowProperties.get_default()
# Opening the window changes these from the defaults. Note that we have
# no guarantee that it opens in the foreground or with the requested size.
default_props.set_size(current_props.get_size())
default_props.set_origin(current_props.get_origin())
default_props.set_minimized(False)
default_props.foreground = current_props.foreground
# The rest should be the same
assert current_props == default_props
| 37.764706
| 78
| 0.766355
|
def test_window_basic(window):
from panda3d.core import WindowProperties
assert window is not None
current_props = window.get_properties()
default_props = WindowProperties.get_default()
default_props.set_size(current_props.get_size())
default_props.set_origin(current_props.get_origin())
default_props.set_minimized(False)
default_props.foreground = current_props.foreground
assert current_props == default_props
| true
| true
|
1c44eae3c5cfc0326c5ec644aa8f726f42ae47f1
| 326
|
py
|
Python
|
contest/abc069/C.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
contest/abc069/C.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
contest/abc069/C.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
n = int(input())
a = list(map(int, input().split()))
cnt_odd = 0
cnt_4 = 0
for i in range(n):
if a[i] % 2 == 1:
cnt_odd += 1
elif a[i] % 4 == 0:
cnt_4 += 1
if len(a) % 2 == 1 and (cnt_odd - 1) <= cnt_4:
print('Yes')
elif len(a) % 2 == 0 and cnt_odd <= cnt_4:
print('Yes')
else:
print('No')
| 20.375
| 46
| 0.490798
|
n = int(input())
a = list(map(int, input().split()))
cnt_odd = 0
cnt_4 = 0
for i in range(n):
if a[i] % 2 == 1:
cnt_odd += 1
elif a[i] % 4 == 0:
cnt_4 += 1
if len(a) % 2 == 1 and (cnt_odd - 1) <= cnt_4:
print('Yes')
elif len(a) % 2 == 0 and cnt_odd <= cnt_4:
print('Yes')
else:
print('No')
| true
| true
|
1c44eaef4f320ce8ec78f27d1e567fc01a6906ee
| 1,279
|
py
|
Python
|
app/app.py
|
tahosa/discord-util-bot
|
2f261c5ae06da8a62e72502b53341720437860f5
|
[
"MIT"
] | null | null | null |
app/app.py
|
tahosa/discord-util-bot
|
2f261c5ae06da8a62e72502b53341720437860f5
|
[
"MIT"
] | null | null | null |
app/app.py
|
tahosa/discord-util-bot
|
2f261c5ae06da8a62e72502b53341720437860f5
|
[
"MIT"
] | 1
|
2022-02-09T04:16:54.000Z
|
2022-02-09T04:16:54.000Z
|
import os
import logging
import config
import discord
from discord.ext.commands import Bot
import nest_asyncio
import tasks
nest_asyncio.apply()
_LOG = logging.getLogger('discord-util')
_HANDLER = logging.StreamHandler()
_HANDLER.addFilter(logging.Filter(name = 'discord-util'))
_HANDLER.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(_HANDLER)
try:
env_level = os.getenv('LOG_LEVEL', logging.INFO)
log_level = int(env_level)
_LOG.setLevel(log_level)
except ValueError:
_LOG.setLevel(logging.INFO)
_LOG.error(f'Could not parse log level "{env_level}" from env. Log level must be an int. Defaulting to INFO')
cfg = config.Config('server.cfg')
intents = discord.Intents.default()
intents.members = True
bot = Bot('!', intents = intents)
def start():
if cfg['tasks.uwu.enabled']:
bot.add_cog(tasks.uwu.Uwu())
if cfg['tasks.scoresaber.enabled']:
sb = tasks.scoresaber.Scoresaber(bot, cfg)
sb.run()
bot.add_cog(sb)
if cfg['tasks.mtg.enabled']:
mtg = tasks.mtg.Mtg(bot, cfg)
bot.add_cog(mtg)
@bot.event
async def on_ready():
_LOG.info(f'We have logged in as {bot.user.name}')
start()
bot.run(cfg['bot_token'])
| 23.254545
| 113
| 0.690383
|
import os
import logging
import config
import discord
from discord.ext.commands import Bot
import nest_asyncio
import tasks
nest_asyncio.apply()
_LOG = logging.getLogger('discord-util')
_HANDLER = logging.StreamHandler()
_HANDLER.addFilter(logging.Filter(name = 'discord-util'))
_HANDLER.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.getLogger().addHandler(_HANDLER)
try:
env_level = os.getenv('LOG_LEVEL', logging.INFO)
log_level = int(env_level)
_LOG.setLevel(log_level)
except ValueError:
_LOG.setLevel(logging.INFO)
_LOG.error(f'Could not parse log level "{env_level}" from env. Log level must be an int. Defaulting to INFO')
cfg = config.Config('server.cfg')
intents = discord.Intents.default()
intents.members = True
bot = Bot('!', intents = intents)
def start():
if cfg['tasks.uwu.enabled']:
bot.add_cog(tasks.uwu.Uwu())
if cfg['tasks.scoresaber.enabled']:
sb = tasks.scoresaber.Scoresaber(bot, cfg)
sb.run()
bot.add_cog(sb)
if cfg['tasks.mtg.enabled']:
mtg = tasks.mtg.Mtg(bot, cfg)
bot.add_cog(mtg)
@bot.event
async def on_ready():
_LOG.info(f'We have logged in as {bot.user.name}')
start()
bot.run(cfg['bot_token'])
| true
| true
|
1c44ecf70040a369583ea80ca86ad4befece86ed
| 5,167
|
py
|
Python
|
flexget/plugins/sites/cpasbien.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | 1
|
2018-05-02T21:14:50.000Z
|
2018-05-02T21:14:50.000Z
|
flexget/plugins/sites/cpasbien.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | null | null | null |
flexget/plugins/sites/cpasbien.py
|
tarzasai/Flexget
|
e5822874b2ee088b508390ff02c4eda9785596bc
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote_plus
import logging
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import normalize_unicode
from flexget.utils.tools import parse_filesize
log = logging.getLogger('search_cpasbien')
session = requests.Session()
class SearchCPASBIEN(object):
schema = {
'type': 'object',
'properties':
{
'category': {
'type': 'string',
'enum': ['films', 'series', 'musique', 'films-french',
'720p', 'series-francaise', 'films-dvdrip', 'all',
'films-vostfr', '1080p', 'series-vostfr', 'ebook']
},
},
'required': ['category'],
'additionalProperties': False
}
@plugin.internet(log)
def search(self, task, entry, config):
"""CPASBIEN search plugin
Config example:
tv_search_cpasbien:
discover:
what:
- trakt_list:
username: xxxxxxx
api_key: xxxxxxx
series: watchlist
from:
- cpasbien:
category: "series-vostfr"
interval: 1 day
ignore_estimations: yes
Category is ONE of:
all
films
series
musique
films-french
1080p
720p
series-francaise
films-dvdrip
films-vostfr
series-vostfr
ebook
"""
base_url = 'http://www.cpasbien.io'
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
search_string = search_string.replace(' ', '-').lower()
search_string = search_string.replace('(', '')
search_string = search_string.replace(')', '')
query = normalize_unicode(search_string)
query_url_fragment = quote_plus(query.encode('utf-8'))
# http://www.cpasbien.pe/recherche/ncis.html
if config['category'] == 'all':
str_url = (base_url, 'recherche', query_url_fragment)
url = '/'.join(str_url)
else:
category_url_fragment = '%s' % config['category']
str_url = (base_url, 'recherche', category_url_fragment, query_url_fragment)
url = '/'.join(str_url)
log.debug('search url: %s' % url + '.html')
# GET URL
f = task.requests.get(url + '.html').content
soup = get_soup(f)
if soup.findAll(text=re.compile(' 0 torrents')):
log.debug('search returned no results')
else:
nextpage = 0
while (nextpage >= 0):
if (nextpage > 0):
newurl = url + '/page-' + str(nextpage)
log.debug('-----> NEXT PAGE : %s' % newurl)
f1 = task.requests.get(newurl).content
soup = get_soup(f1)
for result in soup.findAll('div', attrs={'class': re.compile('ligne')}):
entry = Entry()
link = result.find('a', attrs={'href': re.compile('dl-torrent')})
entry['title'] = link.contents[0]
# REWRITE URL
page_link = link.get('href')
link_rewrite = page_link.split('/')
                        # get the last value in the array, strip '.html' and append '.torrent'
endlink = link_rewrite[-1]
str_url = (base_url, '/telechargement/', endlink[:-5], '.torrent')
entry['url'] = ''.join(str_url)
log.debug('Title: %s | DL LINK: %s' % (entry['title'], entry['url']))
entry['torrent_seeds'] = (int(result.find('span', attrs={'class': re.compile('seed')}).text))
entry['torrent_leeches'] = (int(result.find('div', attrs={'class': re.compile('down')}).text))
size = result.find('div', attrs={'class': re.compile('poid')}).text
entry['content_size'] = parse_filesize(size, si=False)
if (entry['torrent_seeds'] > 0):
entries.add(entry)
else:
log.debug('0 SEED, not adding entry')
if soup.find(text=re.compile('Suiv')):
nextpage += 1
else:
nextpage = -1
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchCPASBIEN, 'cpasbien', groups=['search'], api_ver=2)
| 38.274074
| 118
| 0.494484
|
from __future__ import unicode_literals, division, absolute_import
from builtins import *
from future.moves.urllib.parse import quote_plus
import logging
import re
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import normalize_unicode
from flexget.utils.tools import parse_filesize
log = logging.getLogger('search_cpasbien')
session = requests.Session()
class SearchCPASBIEN(object):
schema = {
'type': 'object',
'properties':
{
'category': {
'type': 'string',
'enum': ['films', 'series', 'musique', 'films-french',
'720p', 'series-francaise', 'films-dvdrip', 'all',
'films-vostfr', '1080p', 'series-vostfr', 'ebook']
},
},
'required': ['category'],
'additionalProperties': False
}
@plugin.internet(log)
def search(self, task, entry, config):
base_url = 'http://www.cpasbien.io'
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
search_string = search_string.replace(' ', '-').lower()
search_string = search_string.replace('(', '')
search_string = search_string.replace(')', '')
query = normalize_unicode(search_string)
query_url_fragment = quote_plus(query.encode('utf-8'))
if config['category'] == 'all':
str_url = (base_url, 'recherche', query_url_fragment)
url = '/'.join(str_url)
else:
category_url_fragment = '%s' % config['category']
str_url = (base_url, 'recherche', category_url_fragment, query_url_fragment)
url = '/'.join(str_url)
log.debug('search url: %s' % url + '.html')
f = task.requests.get(url + '.html').content
soup = get_soup(f)
if soup.findAll(text=re.compile(' 0 torrents')):
log.debug('search returned no results')
else:
nextpage = 0
while (nextpage >= 0):
if (nextpage > 0):
newurl = url + '/page-' + str(nextpage)
log.debug('-----> NEXT PAGE : %s' % newurl)
f1 = task.requests.get(newurl).content
soup = get_soup(f1)
for result in soup.findAll('div', attrs={'class': re.compile('ligne')}):
entry = Entry()
link = result.find('a', attrs={'href': re.compile('dl-torrent')})
entry['title'] = link.contents[0]
page_link = link.get('href')
link_rewrite = page_link.split('/')
endlink = link_rewrite[-1]
str_url = (base_url, '/telechargement/', endlink[:-5], '.torrent')
entry['url'] = ''.join(str_url)
log.debug('Title: %s | DL LINK: %s' % (entry['title'], entry['url']))
entry['torrent_seeds'] = (int(result.find('span', attrs={'class': re.compile('seed')}).text))
entry['torrent_leeches'] = (int(result.find('div', attrs={'class': re.compile('down')}).text))
size = result.find('div', attrs={'class': re.compile('poid')}).text
entry['content_size'] = parse_filesize(size, si=False)
if (entry['torrent_seeds'] > 0):
entries.add(entry)
else:
log.debug('0 SEED, not adding entry')
if soup.find(text=re.compile('Suiv')):
nextpage += 1
else:
nextpage = -1
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchCPASBIEN, 'cpasbien', groups=['search'], api_ver=2)
| true
| true
|
1c44ed932e4df18c56a889f9357a0bf15de24d8a
| 13
|
py
|
Python
|
first.py
|
mohammad716e/python_training
|
0654623c603c775ed2cbdc3919dc815891c8fdeb
|
[
"MIT"
] | null | null | null |
first.py
|
mohammad716e/python_training
|
0654623c603c775ed2cbdc3919dc815891c8fdeb
|
[
"MIT"
] | null | null | null |
first.py
|
mohammad716e/python_training
|
0654623c603c775ed2cbdc3919dc815891c8fdeb
|
[
"MIT"
] | null | null | null |
print ( 'hi')
| 13
| 13
| 0.538462
|
print ( 'hi')
| true
| true
|
1c44edc5b8d1e8fbfa19019187f9a6854e4f69e8
| 918
|
py
|
Python
|
examples/ether_transfer.py
|
meetmangukiya/ethereum_kms_signer
|
bc54aa5e4dfc2406417ed1cce15f52fcc5f97043
|
[
"MIT"
] | 6
|
2021-09-29T15:07:44.000Z
|
2022-03-31T22:15:13.000Z
|
examples/ether_transfer.py
|
meetmangukiya/ethereum_kms_signer
|
bc54aa5e4dfc2406417ed1cce15f52fcc5f97043
|
[
"MIT"
] | 2
|
2021-10-30T07:16:02.000Z
|
2021-10-30T08:04:51.000Z
|
examples/ether_transfer.py
|
meetmangukiya/ethereum_kms_signer
|
bc54aa5e4dfc2406417ed1cce15f52fcc5f97043
|
[
"MIT"
] | 1
|
2022-01-25T18:30:17.000Z
|
2022-01-25T18:30:17.000Z
|
import fire
from web3 import Web3
from ethereum_kms_signer.kms import get_eth_address, sign_transaction
def ether_transfer(
web3_provider: str, key_id: str, to_address: str, amount: float
) -> None:
web3 = Web3(Web3.HTTPProvider(web3_provider))
self_address = web3.toChecksumAddress(get_eth_address(key_id).lower())
nonce = web3.eth.get_transaction_count(self_address)
# build a transaction in a dictionary
tx = {
"nonce": nonce,
"to": to_address,
"value": web3.toWei(amount, "ether"),
"gas": 2000000,
"gasPrice": web3.toWei("50", "gwei"),
}
# sign the transaction
signed_tx = sign_transaction(tx, key_id)
# send transaction
tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)
# get transaction hash
print("Transaction Hash:", web3.toHex(tx_hash))
if __name__ == "__main__":
fire.Fire(ether_transfer)
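# Added usage note (hypothetical values, not part of the original example): with
# python-fire the function above can be driven from the command line, e.g.
#   python ether_transfer.py --web3_provider=https://mainnet.example/rpc \
#       --key_id=alias/my-eth-signer \
#       --to_address=0x0123456789abcDEF0123456789AbCdEf01234567 \
#       --amount=0.01
# Gas price and gas limit are hard-coded above (50 gwei / 2,000,000), so the
# account behind the KMS key must hold enough ether to cover value plus fees.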
| 26.228571
| 74
| 0.685185
|
import fire
from web3 import Web3
from ethereum_kms_signer.kms import get_eth_address, sign_transaction
def ether_transfer(
web3_provider: str, key_id: str, to_address: str, amount: float
) -> None:
web3 = Web3(Web3.HTTPProvider(web3_provider))
self_address = web3.toChecksumAddress(get_eth_address(key_id).lower())
nonce = web3.eth.get_transaction_count(self_address)
tx = {
"nonce": nonce,
"to": to_address,
"value": web3.toWei(amount, "ether"),
"gas": 2000000,
"gasPrice": web3.toWei("50", "gwei"),
}
signed_tx = sign_transaction(tx, key_id)
tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)
print("Transaction Hash:", web3.toHex(tx_hash))
if __name__ == "__main__":
fire.Fire(ether_transfer)
| true
| true
|
1c44ee058389f3af01626c5e07bcecdf56660a91
| 26,104
|
py
|
Python
|
uniter_model/train_vcr.py
|
intersun/LightningDOT
|
5f2880f69ba87b8701ab89348d70ebb11432578c
|
[
"MIT"
] | 64
|
2021-03-17T02:01:34.000Z
|
2021-12-31T08:05:57.000Z
|
uniter_model/train_vcr.py
|
intersun/LightningDOT
|
5f2880f69ba87b8701ab89348d70ebb11432578c
|
[
"MIT"
] | 9
|
2021-04-16T07:58:33.000Z
|
2021-11-09T11:09:58.000Z
|
uniter_model/train_vcr.py
|
intersun/LightningDOT
|
5f2880f69ba87b8701ab89348d70ebb11432578c
|
[
"MIT"
] | 5
|
2021-03-18T01:21:44.000Z
|
2022-01-20T13:23:39.000Z
|
# coding=utf-8
# copied from huggingface github
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc.
# team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT pre-training runner."""
import argparse
import json
import os
from os.path import exists, join
import random
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam, Adamax
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
import numpy as np
from tqdm import tqdm
from data import (DistributedTokenBucketSampler,
DetectFeatLmdb, VcrDataset, VcrEvalDataset,
vcr_collate, vcr_eval_collate,
PrefetchLoader)
from model import BertForVisualCommonsenseReasoning
from optim import warmup_linear, noam_schedule, vqa_schedule, AdamW
from torch.utils.data.distributed import DistributedSampler
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config
NUM_SPECIAL_TOKENS = 81
def load_img_feat(dir_list, path2imgdir, opts):
dir_ = dir_list.split(";")
assert len(dir_) <= 2, "More than two img_dirs found"
img_dir_gt, img_dir = None, None
gt_dir_path, dir_path = "", ""
for d in dir_:
if "gt" in d:
gt_dir_path = d
else:
dir_path = d
if gt_dir_path != "":
img_dir_gt = path2imgdir.get(gt_dir_path, None)
if img_dir_gt is None:
img_dir_gt = DetectFeatLmdb(gt_dir_path, -1,
opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
path2imgdir[gt_dir_path] = img_dir_gt
if dir_path != "":
img_dir = path2imgdir.get(dir_path, None)
if img_dir is None:
img_dir = DetectFeatLmdb(dir_path, opts.conf_th,
opts.max_bb, opts.min_bb, opts.num_bb,
opts.compressed_db)
path2imgdir[dir_path] = img_dir
return img_dir, img_dir_gt, path2imgdir
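# Added note (assumption inferred from the parsing above, not part of the original
# file): `dir_list` holds at most two ';'-separated feature directories, e.g.
#     "/img/vcr_gt_train;/img/vcr_train"
# The directory whose path contains "gt" is opened with a fixed confidence
# threshold (-1) and up to 100 boxes, while the other one uses the conf_th /
# num_bb settings from `opts`. Already-opened LMDBs are reused via `path2imgdir`.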
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
random.seed(opts.seed)
np.random.seed(opts.seed)
torch.manual_seed(opts.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(opts.seed)
# train_examples = None
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_dir}")
# load DBs and image dirs
train_txt_dbs = opts.train_txt_db.split(':')
train_img_dirs = opts.train_img_dir.split(':')
path2imgdir = {}
train_datasets = []
for db, dir_list in zip(train_txt_dbs, train_img_dirs):
img_dir, img_dir_gt, path2imgdir = load_img_feat(
dir_list, path2imgdir, opts)
train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,
img_dir,
opts.max_txt_len, task="qa"))
train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,
img_dir,
opts.max_txt_len, task="qar"))
train_dataset = ConcatDataset(train_datasets)
train_lens = [l for dset in train_datasets for l in dset.lens]
val_img_dir, val_img_dir_gt, path2imgdir = load_img_feat(
opts.val_img_dir, path2imgdir, opts)
val_dataset = VcrEvalDataset("val", opts.val_txt_db,
val_img_dir_gt, val_img_dir,
max_txt_len=-1)
val_final_dataset = VcrEvalDataset("test", opts.val_txt_db,
val_img_dir_gt, val_img_dir,
max_txt_len=-1)
# Prepare model
train_txt_db = train_txt_dbs[0]
emb_file = f'{train_txt_db}/embedding.pt'
if opts.checkpoint and opts.checkpoint_from == "pretrain":
if opts.checkpoint == 'google-bert':
checkpoint = None
else:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
bert_model = json.load(open(f'{train_txt_db}/meta.json'))['bert']
if 'bert' not in bert_model:
bert_model = 'bert-large-cased' # quick hack for glove exp
model = BertForVisualCommonsenseReasoning.from_pretrained(
bert_model, img_dim=2048, obj_cls=False,
state_dict=checkpoint)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if opts.checkpoint_from == "vcr":
checkpoint = torch.load(opts.checkpoint)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
print("Unexpected_keys:", list(unexpected_keys))
print("Missing_keys:", list(missing_keys))
model.load_state_dict(matched_state_dict, strict=False)
if opts.cut_bert != -1:
# cut some layers of BERT
model.bert.encoder.layer = torch.nn.ModuleList(
model.bert.encoder.layer[:opts.cut_bert])
if exists(emb_file) and not opts.checkpoint:
glove = torch.load(f'{train_txt_db}/embedding.pt')
vsize = glove.size(0)
hid_size = model.config.hidden_size
model.bert.embeddings.word_embeddings = torch.nn.Embedding(
vsize, hid_size)
mul_ = hid_size // 300 + 1
model.bert.embeddings.word_embeddings.weight.data = glove.repeat(
1, mul_)[:, :hid_size]
LOGGER.info('using GloVe for BERT')
del checkpoint
for name, module in model.named_modules():
# we might want to tune dropout for smaller dataset
if isinstance(module, torch.nn.Dropout):
if module.p != opts.dropout:
module.p = opts.dropout
LOGGER.info(f'{name} set to {opts.dropout}')
model.to(device)
if rank != -1:
# make sure every process has same model parameters in the beginning
broadcast_tensors([p.data for p in model.parameters()], 0)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
train_sampler = DistributedTokenBucketSampler(
n_gpu, rank, train_lens, bucket_size=8192,
batch_size=opts.train_batch_size, droplast=True)
val_sampler = DistributedSampler(
val_dataset, num_replicas=n_gpu, rank=rank)
val_final_sampler = DistributedSampler(
val_final_dataset, num_replicas=n_gpu, rank=rank)
train_dataloader = DataLoader(train_dataset,
batch_sampler=train_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_collate)
train_dataloader = PrefetchLoader(train_dataloader)
val_dataloader = DataLoader(val_dataset,
batch_size=opts.val_batch_size*3,
sampler=val_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_eval_collate)
val_final_dataloader = DataLoader(val_final_dataset,
batch_size=opts.val_batch_size,
sampler=val_final_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_eval_collate)
val_dataloader = PrefetchLoader(val_dataloader)
val_final_dataloader = PrefetchLoader(val_final_dataloader)
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
        os.makedirs(join(opts.output_dir, 'results'))  # store VCR predictions
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_vcr_loss = RunningMeter('vcr_loss')
running_obj_loss = RunningMeter('obj_cls_loss')
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
# quick hack for amp delay_unscale bug
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
*_, targets = batch
n_examples += targets.size(0)
vcr_loss, obj_cls_loss = model(*batch, compute_loss=True)
# loss = loss.mean()
loss = vcr_loss + obj_cls_loss
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
                # gather gradients from all processes
# do this before unscaling to make sure every process uses
# the same gradient scale
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
running_vcr_loss(vcr_loss.item())
running_obj_loss(obj_cls_loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
# learning rate scheduling
if opts.decay == 'linear':
lr_this_step = opts.learning_rate * warmup_linear(
global_step, opts.warmup_steps, opts.num_train_steps)
elif opts.decay == 'invsqrt':
lr_this_step = opts.learning_rate * noam_schedule(
global_step, opts.warmup_steps)
elif opts.decay == 'constant':
lr_this_step = opts.learning_rate
elif opts.decay == 'vqa':
lr_this_step = opts.learning_rate * vqa_schedule(
global_step, opts.warm_int, opts.decay_int,
opts.decay_st, opts.decay_rate)
if lr_this_step < 0:
                        # safeguard for possible miscalculation of train steps
lr_this_step = 1e-8
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
# log loss
losses = all_gather_list(running_loss)
running_loss = RunningMeter(
'loss', sum(l.val for l in losses)/len(losses))
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
vcr_losses = all_gather_list(running_vcr_loss)
running_vcr_loss = RunningMeter(
'vcr_loss', sum(l.val for l in vcr_losses)/len(vcr_losses))
TB_LOGGER.add_scalar('vcr_loss', running_vcr_loss.val,
global_step)
obj_losses = all_gather_list(running_obj_loss)
running_obj_loss = RunningMeter(
'obj_cls_loss',
sum(l.val for l in obj_losses)/len(obj_losses))
TB_LOGGER.add_scalar('obj_cls_loss', running_obj_loss.val,
global_step)
TB_LOGGER.step()
# update model params
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 5 == 0:
torch.cuda.empty_cache()
if global_step % 100 == 0:
# monitor training throughput
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
val_log, results = validate(
model, val_final_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
return n_correct_qa, n_correct_qar, n_correct_joined
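# Added illustration (not part of the original file): with
#   out_qa.argmax(-1)  == [0, 2],  labels_qa  == [0, 1]
#   out_qar.argmax(-1) == [3, 3],  labels_qar == [3, 0]
# the helper above reports n_correct_qa == 1, n_correct_qar == 1 and
# n_correct_joined == 1, since only the first example gets both the Q->A and
# the QA->R prediction right -- the joint VCR metric counts exactly those cases.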
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
val_pbar = tqdm(total=len(val_loader))
else:
val_pbar = NoOp()
LOGGER.info(f"start running evaluation ...")
model.eval()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
qids, *inputs, qa_targets, qar_targets, _ = batch
scores = model(
*inputs, targets=None, compute_loss=False)
scores = scores.view(len(qids), -1)
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_index = [4+answer_ind.item()*4+i for answer_ind in qa_targets
for i in range(4)]
qar_scores = scores[:, qar_index]
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/vcr_qa_loss': val_qa_loss,
f'valid/vcr_qar_loss': val_qar_loss,
f'valid/acc_qa': val_qa_acc,
f'valid/acc_qar': val_qar_acc,
f'valid/acc': val_acc,
f'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--task",
default="qa", type=str,
choices=['qa', 'qar'],
help="VCR tasks: qa or qar")
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_dir",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_dir",
default=None, type=str,
help="The input validation images.")
parser.add_argument('--img_format', default='npz',
choices=['npz', 'lmdb', 'lmdb-compress'],
help='format of image feature')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument("--checkpoint_from",
default='pretrain', type=str,
choices=['pretrain', 'vcr'],
help="which setting is checkpoint from")
parser.add_argument("--cut_bert", default=-1, type=int,
help="reduce BERT layers (-1 for original depth)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# Prepro parameters
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
# training parameters
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument('--mask_prob', default=0.15, type=float,
help='probability to mask in MRC training')
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--decay", default='linear',
choices=['linear', 'invsqrt', 'constant', 'vqa'],
help="learning rate decay method")
parser.add_argument("--decay_int", default=2000, type=int,
help="interval between VQA lr decy")
parser.add_argument("--warm_int", default=2000, type=int,
help="interval for VQA lr warmup")
parser.add_argument("--decay_st", default=20000, type=int,
help="when to start decay")
parser.add_argument("--decay_rate", default=0.2, type=float,
help="ratio of lr decay")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
# device parameters
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
# can use config files
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
    # options safeguard
# TODO
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
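# Added illustration -- a minimal sketch of the linear warmup/decay schedule that
# the `--decay linear` option above relies on. The real implementation is imported
# from `optim` as `warmup_linear`; this stand-alone version only documents the
# assumed shape of the multiplier applied to the base learning rate.
def _warmup_linear_sketch(step, warmup_steps, total_steps):
    """Return a factor in [0, 1]: linear ramp-up, then linear decay to zero."""
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (total_steps - step) / max(1.0, total_steps - warmup_steps))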
| 43.147107
| 79
| 0.572403
|
import argparse
import json
import os
from os.path import exists, join
import random
from time import time
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam, Adamax
from torch.utils.data import DataLoader, ConcatDataset
from apex import amp
from horovod import torch as hvd
import numpy as np
from tqdm import tqdm
from data import (DistributedTokenBucketSampler,
DetectFeatLmdb, VcrDataset, VcrEvalDataset,
vcr_collate, vcr_eval_collate,
PrefetchLoader)
from model import BertForVisualCommonsenseReasoning
from optim import warmup_linear, noam_schedule, vqa_schedule, AdamW
from torch.utils.data.distributed import DistributedSampler
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config
NUM_SPECIAL_TOKENS = 81
def load_img_feat(dir_list, path2imgdir, opts):
dir_ = dir_list.split(";")
assert len(dir_) <= 2, "More than two img_dirs found"
img_dir_gt, img_dir = None, None
gt_dir_path, dir_path = "", ""
for d in dir_:
if "gt" in d:
gt_dir_path = d
else:
dir_path = d
if gt_dir_path != "":
img_dir_gt = path2imgdir.get(gt_dir_path, None)
if img_dir_gt is None:
img_dir_gt = DetectFeatLmdb(gt_dir_path, -1,
opts.max_bb, opts.min_bb, 100,
opts.compressed_db)
path2imgdir[gt_dir_path] = img_dir_gt
if dir_path != "":
img_dir = path2imgdir.get(dir_path, None)
if img_dir is None:
img_dir = DetectFeatLmdb(dir_path, opts.conf_th,
opts.max_bb, opts.min_bb, opts.num_bb,
opts.compressed_db)
path2imgdir[dir_path] = img_dir
return img_dir, img_dir_gt, path2imgdir
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
opts.rank = rank
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if opts.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
"should be >= 1".format(
opts.gradient_accumulation_steps))
random.seed(opts.seed)
np.random.seed(opts.seed)
torch.manual_seed(opts.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(opts.seed)
LOGGER.info(f"Loading Train Dataset {opts.train_txt_db}, "
f"{opts.train_img_dir}")
train_txt_dbs = opts.train_txt_db.split(':')
train_img_dirs = opts.train_img_dir.split(':')
path2imgdir = {}
train_datasets = []
for db, dir_list in zip(train_txt_dbs, train_img_dirs):
img_dir, img_dir_gt, path2imgdir = load_img_feat(
dir_list, path2imgdir, opts)
train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,
img_dir,
opts.max_txt_len, task="qa"))
train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,
img_dir,
opts.max_txt_len, task="qar"))
train_dataset = ConcatDataset(train_datasets)
train_lens = [l for dset in train_datasets for l in dset.lens]
val_img_dir, val_img_dir_gt, path2imgdir = load_img_feat(
opts.val_img_dir, path2imgdir, opts)
val_dataset = VcrEvalDataset("val", opts.val_txt_db,
val_img_dir_gt, val_img_dir,
max_txt_len=-1)
val_final_dataset = VcrEvalDataset("test", opts.val_txt_db,
val_img_dir_gt, val_img_dir,
max_txt_len=-1)
train_txt_db = train_txt_dbs[0]
emb_file = f'{train_txt_db}/embedding.pt'
if opts.checkpoint and opts.checkpoint_from == "pretrain":
if opts.checkpoint == 'google-bert':
checkpoint = None
else:
checkpoint = torch.load(opts.checkpoint)
else:
checkpoint = {}
bert_model = json.load(open(f'{train_txt_db}/meta.json'))['bert']
if 'bert' not in bert_model:
bert_model = 'bert-large-cased'
model = BertForVisualCommonsenseReasoning.from_pretrained(
bert_model, img_dim=2048, obj_cls=False,
state_dict=checkpoint)
model.init_type_embedding()
model.init_word_embedding(NUM_SPECIAL_TOKENS)
if opts.checkpoint_from == "vcr":
checkpoint = torch.load(opts.checkpoint)
state_dict = checkpoint.get('model_state', checkpoint)
matched_state_dict = {}
unexpected_keys = set()
missing_keys = set()
for name, param in model.named_parameters():
missing_keys.add(name)
for key, data in state_dict.items():
if key in missing_keys:
matched_state_dict[key] = data
missing_keys.remove(key)
else:
unexpected_keys.add(key)
print("Unexpected_keys:", list(unexpected_keys))
print("Missing_keys:", list(missing_keys))
model.load_state_dict(matched_state_dict, strict=False)
if opts.cut_bert != -1:
model.bert.encoder.layer = torch.nn.ModuleList(
model.bert.encoder.layer[:opts.cut_bert])
if exists(emb_file) and not opts.checkpoint:
glove = torch.load(f'{train_txt_db}/embedding.pt')
vsize = glove.size(0)
hid_size = model.config.hidden_size
model.bert.embeddings.word_embeddings = torch.nn.Embedding(
vsize, hid_size)
mul_ = hid_size // 300 + 1
model.bert.embeddings.word_embeddings.weight.data = glove.repeat(
1, mul_)[:, :hid_size]
LOGGER.info('using GloVe for BERT')
del checkpoint
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout):
if module.p != opts.dropout:
module.p = opts.dropout
LOGGER.info(f'{name} set to {opts.dropout}')
model.to(device)
if rank != -1:
broadcast_tensors([p.data for p in model.parameters()], 0)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
model, optimizer = amp.initialize(model, optimizer,
enabled=opts.fp16, opt_level='O2')
train_sampler = DistributedTokenBucketSampler(
n_gpu, rank, train_lens, bucket_size=8192,
batch_size=opts.train_batch_size, droplast=True)
val_sampler = DistributedSampler(
val_dataset, num_replicas=n_gpu, rank=rank)
val_final_sampler = DistributedSampler(
val_final_dataset, num_replicas=n_gpu, rank=rank)
train_dataloader = DataLoader(train_dataset,
batch_sampler=train_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_collate)
train_dataloader = PrefetchLoader(train_dataloader)
val_dataloader = DataLoader(val_dataset,
batch_size=opts.val_batch_size*3,
sampler=val_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_eval_collate)
val_final_dataloader = DataLoader(val_final_dataset,
batch_size=opts.val_batch_size,
sampler=val_final_sampler,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vcr_eval_collate)
val_dataloader = PrefetchLoader(val_dataloader)
val_final_dataloader = PrefetchLoader(val_final_dataloader)
global_step = 0
if rank == 0:
save_training_meta(opts)
TB_LOGGER.create(join(opts.output_dir, 'log'))
pbar = tqdm(total=opts.num_train_steps)
model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
os.makedirs(join(opts.output_dir, 'results'))
add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
else:
LOGGER.disabled = True
pbar = NoOp()
model_saver = NoOp()
LOGGER.info(f"***** Running training with {n_gpu} GPUs *****")
LOGGER.info(" Num examples = %d", len(train_dataset))
LOGGER.info(" Batch size = %d", opts.train_batch_size)
LOGGER.info(" Accumulate steps = %d", opts.gradient_accumulation_steps)
LOGGER.info(" Num steps = %d", opts.num_train_steps)
running_vcr_loss = RunningMeter('vcr_loss')
running_obj_loss = RunningMeter('obj_cls_loss')
running_loss = RunningMeter('loss')
model.train()
n_examples = 0
n_epoch = 0
start = time()
optimizer.zero_grad()
optimizer.step()
while True:
for step, batch in enumerate(train_dataloader):
*_, targets = batch
n_examples += targets.size(0)
vcr_loss, obj_cls_loss = model(*batch, compute_loss=True)
loss = vcr_loss + obj_cls_loss
delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0
with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale
) as scaled_loss:
scaled_loss.backward()
if not delay_unscale:
grads = [p.grad.data for p in model.parameters()
if p.requires_grad and p.grad is not None]
all_reduce_and_rescale_tensors(grads, float(1))
running_loss(loss.item())
running_vcr_loss(vcr_loss.item())
running_obj_loss(obj_cls_loss.item())
if (step + 1) % opts.gradient_accumulation_steps == 0:
global_step += 1
if opts.decay == 'linear':
lr_this_step = opts.learning_rate * warmup_linear(
global_step, opts.warmup_steps, opts.num_train_steps)
elif opts.decay == 'invsqrt':
lr_this_step = opts.learning_rate * noam_schedule(
global_step, opts.warmup_steps)
elif opts.decay == 'constant':
lr_this_step = opts.learning_rate
elif opts.decay == 'vqa':
lr_this_step = opts.learning_rate * vqa_schedule(
global_step, opts.warm_int, opts.decay_int,
opts.decay_st, opts.decay_rate)
if lr_this_step < 0:
lr_this_step = 1e-8
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
TB_LOGGER.add_scalar('lr', lr_this_step, global_step)
losses = all_gather_list(running_loss)
running_loss = RunningMeter(
'loss', sum(l.val for l in losses)/len(losses))
TB_LOGGER.add_scalar('loss', running_loss.val, global_step)
vcr_losses = all_gather_list(running_vcr_loss)
running_vcr_loss = RunningMeter(
'vcr_loss', sum(l.val for l in vcr_losses)/len(vcr_losses))
TB_LOGGER.add_scalar('vcr_loss', running_vcr_loss.val,
global_step)
obj_losses = all_gather_list(running_obj_loss)
running_obj_loss = RunningMeter(
'obj_cls_loss',
sum(l.val for l in obj_losses)/len(obj_losses))
TB_LOGGER.add_scalar('obj_cls_loss', running_obj_loss.val,
global_step)
TB_LOGGER.step()
if opts.grad_norm != -1:
grad_norm = clip_grad_norm_(amp.master_params(optimizer),
opts.grad_norm)
TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)
optimizer.step()
optimizer.zero_grad()
pbar.update(1)
if global_step % 5 == 0:
torch.cuda.empty_cache()
if global_step % 100 == 0:
tot_ex = sum(all_gather_list(n_examples))
ex_per_sec = int(tot_ex / (time()-start))
LOGGER.info(f'{tot_ex} examples trained at '
f'{ex_per_sec} ex/s')
TB_LOGGER.add_scalar('perf/ex_per_s',
ex_per_sec, global_step)
if global_step % opts.valid_steps == 0:
val_log, results = validate(
model, val_dataloader)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, global_step)
if global_step >= opts.num_train_steps:
break
if global_step >= opts.num_train_steps:
break
n_epoch += 1
LOGGER.info(f"finished {n_epoch} epochs")
val_log, results = validate(
model, val_final_dataloader)
with open(f'{opts.output_dir}/results/'
f'results_{global_step}_'
f'rank{rank}.json', 'w') as f:
json.dump(results, f)
TB_LOGGER.log_scaler_dict(val_log)
model_saver.save(model, f'{global_step}_final')
def compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):
outputs_qa = out_qa.max(dim=-1)[1]
outputs_qar = out_qar.max(dim=-1)[1]
matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()
matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()
matched_joined = matched_qa & matched_qar
n_correct_qa = matched_qa.sum().item()
n_correct_qar = matched_qar.sum().item()
n_correct_joined = matched_joined.sum().item()
return n_correct_qa, n_correct_qar, n_correct_joined
@torch.no_grad()
def validate(model, val_loader):
if hvd.rank() == 0:
val_pbar = tqdm(total=len(val_loader))
else:
val_pbar = NoOp()
LOGGER.info(f"start running evaluation ...")
model.eval()
val_qa_loss, val_qar_loss = 0, 0
tot_qa_score, tot_qar_score, tot_score = 0, 0, 0
n_ex = 0
st = time()
results = {}
for i, batch in enumerate(val_loader):
qids, *inputs, qa_targets, qar_targets, _ = batch
scores = model(
*inputs, targets=None, compute_loss=False)
scores = scores.view(len(qids), -1)
vcr_qa_loss = F.cross_entropy(
scores[:, :4], qa_targets.squeeze(-1), reduction="sum")
if scores.shape[1] > 8:
qar_index = [4+answer_ind.item()*4+i for answer_ind in qa_targets
for i in range(4)]
qar_scores = scores[:, qar_index]
else:
qar_scores = scores[:, 4:]
vcr_qar_loss = F.cross_entropy(
qar_scores, qar_targets.squeeze(-1), reduction="sum")
val_qa_loss += vcr_qa_loss.item()
val_qar_loss += vcr_qar_loss.item()
curr_qa_score, curr_qar_score, curr_score = compute_accuracies(
scores[:, :4], qa_targets, qar_scores, qar_targets)
tot_qar_score += curr_qar_score
tot_qa_score += curr_qa_score
tot_score += curr_score
for qid, score in zip(qids, scores):
results[qid] = score.cpu().tolist()
n_ex += len(qids)
val_pbar.update(1)
val_qa_loss = sum(all_gather_list(val_qa_loss))
val_qar_loss = sum(all_gather_list(val_qar_loss))
tot_qa_score = sum(all_gather_list(tot_qa_score))
tot_qar_score = sum(all_gather_list(tot_qar_score))
tot_score = sum(all_gather_list(tot_score))
n_ex = sum(all_gather_list(n_ex))
tot_time = time()-st
val_qa_loss /= n_ex
val_qar_loss /= n_ex
val_qa_acc = tot_qa_score / n_ex
val_qar_acc = tot_qar_score / n_ex
val_acc = tot_score / n_ex
val_log = {f'valid/vcr_qa_loss': val_qa_loss,
f'valid/vcr_qar_loss': val_qar_loss,
f'valid/acc_qa': val_qa_acc,
f'valid/acc_qar': val_qar_acc,
f'valid/acc': val_acc,
f'valid/ex_per_s': n_ex/tot_time}
model.train()
LOGGER.info(f"validation finished in {int(tot_time)} seconds, "
f"score_qa: {val_qa_acc*100:.2f} "
f"score_qar: {val_qar_acc*100:.2f} "
f"score: {val_acc*100:.2f} ")
return val_log, results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task",
default="qa", type=str,
choices=['qa', 'qar'],
help="VCR tasks: qa or qar")
parser.add_argument("--train_txt_db",
default=None, type=str,
help="The input train corpus. (LMDB)")
parser.add_argument("--train_img_dir",
default=None, type=str,
help="The input train images.")
parser.add_argument("--val_txt_db",
default=None, type=str,
help="The input validation corpus. (LMDB)")
parser.add_argument("--val_img_dir",
default=None, type=str,
help="The input validation images.")
parser.add_argument('--img_format', default='npz',
choices=['npz', 'lmdb', 'lmdb-compress'],
help='format of image feature')
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model (can take 'google-bert') ")
parser.add_argument("--checkpoint_from",
default='pretrain', type=str,
choices=['pretrain', 'vcr'],
help="which setting is checkpoint from")
parser.add_argument("--cut_bert", default=-1, type=int,
help="reduce BERT layers (-1 for original depth)")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
parser.add_argument('--max_txt_len', type=int, default=60,
help='max number of tokens in text (BERT BPE)')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=36,
help='static number of bounding boxes')
parser.add_argument("--train_batch_size",
default=4096, type=int,
help="Total batch size for training. "
"(batch by tokens)")
parser.add_argument("--val_batch_size",
default=4096, type=int,
help="Total batch size for validation. "
"(batch by tokens)")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=16,
help="Number of updates steps to accumualte before "
"performing a backward/update pass.")
parser.add_argument("--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--valid_steps",
default=1000,
type=int,
help="Run validation every X steps")
parser.add_argument("--num_train_steps",
default=100000,
type=int,
help="Total number of training updates to perform.")
parser.add_argument('--mask_prob', default=0.15, type=float,
help='probability to mask in MRC training')
parser.add_argument("--optim", default='adam',
choices=['adam', 'adamax', 'adamw'],
help="optimizer")
parser.add_argument("--betas", default=[0.9, 0.98], nargs='+',
help="beta for adam optimizer")
parser.add_argument("--decay", default='linear',
choices=['linear', 'invsqrt', 'constant', 'vqa'],
help="learning rate decay method")
parser.add_argument("--decay_int", default=2000, type=int,
help="interval between VQA lr decy")
parser.add_argument("--warm_int", default=2000, type=int,
help="interval for VQA lr warmup")
parser.add_argument("--decay_st", default=20000, type=int,
help="when to start decay")
parser.add_argument("--decay_rate", default=0.2, type=float,
help="ratio of lr decay")
parser.add_argument("--dropout",
default=0.1,
type=float,
help="tune dropout regularization")
parser.add_argument("--weight_decay",
default=0.0,
type=float,
help="weight decay (L2) regularization")
parser.add_argument("--grad_norm",
default=0.25,
type=float,
help="gradient clipping (-1 for no clipping)")
parser.add_argument("--warmup_steps",
default=4000,
type=int,
help="Number of training steps to perform linear "
"learning rate warmup for. (invsqrt decay)")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
parser.add_argument('--config', help='JSON config files')
args = parse_with_config(parser)
if exists(args.output_dir) and os.listdir(args.output_dir):
raise ValueError("Output directory ({}) already exists and is not "
"empty.".format(args.output_dir))
if args.conf_th == -1:
assert args.max_bb + args.max_txt_len + 2 <= 512
else:
assert args.num_bb + args.max_txt_len + 2 <= 512
main(args)
| true
| true
|
1c44ee480649d17d538021caa9f3ca7f0b5ab20e
| 13,669
|
py
|
Python
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr/models/hr.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr/models/hr.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/hr/models/hr.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models
from odoo import tools, _
from odoo.exceptions import ValidationError
from odoo.modules.module import get_module_resource
_logger = logging.getLogger(__name__)
class EmployeeCategory(models.Model):
_name = "hr.employee.category"
_description = "Employee Category"
name = fields.Char(string="Employee Tag", required=True)
color = fields.Integer(string='Color Index')
employee_ids = fields.Many2many('hr.employee', 'employee_category_rel', 'category_id', 'emp_id', string='Employees')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class Job(models.Model):
_name = "hr.job"
_description = "Job Position"
_inherit = ['mail.thread']
name = fields.Char(string='Job Title', required=True, index=True, translate=True)
expected_employees = fields.Integer(compute='_compute_employees', string='Total Forecasted Employees', store=True,
help='Expected number of employees for this job position after new recruitment.')
no_of_employee = fields.Integer(compute='_compute_employees', string="Current Number of Employees", store=True,
help='Number of employees currently occupying this job position.')
no_of_recruitment = fields.Integer(string='Expected New Employees', copy=False,
help='Number of new employees you expect to recruit.', default=1)
no_of_hired_employee = fields.Integer(string='Hired Employees', copy=False,
help='Number of hired employees for this job position during recruitment phase.')
employee_ids = fields.One2many('hr.employee', 'job_id', string='Employees', groups='base.group_user')
description = fields.Text(string='Job Description')
requirements = fields.Text('Requirements')
department_id = fields.Many2one('hr.department', string='Department')
company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id)
state = fields.Selection([
('recruit', 'Recruitment in Progress'),
('open', 'Not Recruiting')
], string='Status', readonly=True, required=True, track_visibility='always', copy=False, default='recruit', help="Set whether the recruitment process is open or closed for this job position.")
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, department_id)', 'The name of the job position must be unique per department in company!'),
]
@api.depends('no_of_recruitment', 'employee_ids.job_id', 'employee_ids.active')
def _compute_employees(self):
employee_data = self.env['hr.employee'].read_group([('job_id', 'in', self.ids)], ['job_id'], ['job_id'])
result = dict((data['job_id'][0], data['job_id_count']) for data in employee_data)
for job in self:
job.no_of_employee = result.get(job.id, 0)
job.expected_employees = result.get(job.id, 0) + job.no_of_recruitment
@api.model
def create(self, values):
""" We don't want the current user to be follower of all created job """
return super(Job, self.with_context(mail_create_nosubscribe=True)).create(values)
@api.multi
def copy(self, default=None):
self.ensure_one()
default = dict(default or {})
if 'name' not in default:
default['name'] = _("%s (copy)") % (self.name)
return super(Job, self).copy(default=default)
@api.multi
def set_recruit(self):
for record in self:
no_of_recruitment = 1 if record.no_of_recruitment == 0 else record.no_of_recruitment
record.write({'state': 'recruit', 'no_of_recruitment': no_of_recruitment})
return True
@api.multi
def set_open(self):
return self.write({
'state': 'open',
'no_of_recruitment': 0,
'no_of_hired_employee': 0
})
class Employee(models.Model):
_name = "hr.employee"
_description = "Employee"
_order = 'name_related'
_inherits = {'resource.resource': "resource_id"}
_inherit = ['mail.thread']
_mail_post_access = 'read'
@api.model
def _default_image(self):
image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
# we need a related field in order to be able to sort the employee by name
name_related = fields.Char(related='resource_id.name', string="Resource Name", readonly=True, store=True)
country_id = fields.Many2one('res.country', string='Nationality (Country)')
birthday = fields.Date('Date of Birth')
ssnid = fields.Char('SSN No', help='Social Security Number')
sinid = fields.Char('SIN No', help='Social Insurance Number')
identification_id = fields.Char(string='Identification No')
gender = fields.Selection([
('male', 'Male'),
('female', 'Female'),
('other', 'Other')
])
marital = fields.Selection([
('single', 'Single'),
('married', 'Married'),
('widower', 'Widower'),
('divorced', 'Divorced')
], string='Marital Status')
department_id = fields.Many2one('hr.department', string='Department')
address_id = fields.Many2one('res.partner', string='Working Address')
address_home_id = fields.Many2one('res.partner', string='Home Address')
bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account Number',
domain="[('partner_id', '=', address_home_id)]", help='Employee bank salary account')
work_phone = fields.Char('Work Phone')
mobile_phone = fields.Char('Work Mobile')
work_email = fields.Char('Work Email')
work_location = fields.Char('Work Location')
notes = fields.Text('Notes')
parent_id = fields.Many2one('hr.employee', string='Manager')
category_ids = fields.Many2many('hr.employee.category', 'employee_category_rel', 'emp_id', 'category_id', string='Tags')
child_ids = fields.One2many('hr.employee', 'parent_id', string='Subordinates')
resource_id = fields.Many2one('resource.resource', string='Resource',
ondelete='cascade', required=True, auto_join=True)
coach_id = fields.Many2one('hr.employee', string='Coach')
job_id = fields.Many2one('hr.job', string='Job Title')
passport_id = fields.Char('Passport No')
color = fields.Integer('Color Index', default=0)
city = fields.Char(related='address_id.city')
login = fields.Char(related='user_id.login', readonly=True)
last_login = fields.Datetime(related='user_id.login_date', string='Latest Connection', readonly=True)
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Photo", default=_default_image, attachment=True,
help="This field holds the image used as photo for the employee, limited to 1024x1024px.")
image_medium = fields.Binary("Medium-sized photo", attachment=True,
help="Medium-sized photo of the employee. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary("Small-sized photo", attachment=True,
help="Small-sized photo of the employee. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.constrains('parent_id')
def _check_parent_id(self):
for employee in self:
if not employee._check_recursion():
raise ValidationError(_('Error! You cannot create recursive hierarchy of Employee(s).'))
@api.onchange('address_id')
def _onchange_address(self):
self.work_phone = self.address_id.phone
self.mobile_phone = self.address_id.mobile
@api.onchange('company_id')
def _onchange_company(self):
address = self.company_id.partner_id.address_get(['default'])
self.address_id = address['default'] if address else False
@api.onchange('department_id')
def _onchange_department(self):
self.parent_id = self.department_id.manager_id
@api.onchange('user_id')
def _onchange_user(self):
self.work_email = self.user_id.email
self.name = self.user_id.name
self.image = self.user_id.image
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(Employee, self).create(vals)
@api.multi
def write(self, vals):
if 'address_home_id' in vals:
account_id = vals.get('bank_account_id') or self.bank_account_id.id
if account_id:
self.env['res.partner.bank'].browse(account_id).partner_id = vals['address_home_id']
tools.image_resize_images(vals)
return super(Employee, self).write(vals)
@api.multi
def unlink(self):
resources = self.mapped('resource_id')
super(Employee, self).unlink()
return resources.unlink()
@api.multi
def action_follow(self):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper.
"""
return self.message_subscribe_users()
@api.multi
def action_unfollow(self):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper.
"""
return self.message_unsubscribe_users()
@api.model
def _message_get_auto_subscribe_fields(self, updated_fields, auto_follow_fields=None):
""" Overwrite of the original method to always follow user_id field,
even when not track_visibility so that a user will follow it's employee
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
@api.multi
def _message_auto_subscribe_notify(self, partner_ids):
        # Do not notify the user that it has been marked as a follower of its employee.
return
class Department(models.Model):
_name = "hr.department"
_description = "Hr Department"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = "name"
name = fields.Char('Department Name', required=True)
active = fields.Boolean('Active', default=True)
company_id = fields.Many2one('res.company', string='Company', index=True, default=lambda self: self.env.user.company_id)
parent_id = fields.Many2one('hr.department', string='Parent Department', index=True)
child_ids = fields.One2many('hr.department', 'parent_id', string='Child Departments')
manager_id = fields.Many2one('hr.employee', string='Manager', track_visibility='onchange')
member_ids = fields.One2many('hr.employee', 'department_id', string='Members', readonly=True)
jobs_ids = fields.One2many('hr.job', 'department_id', string='Jobs')
note = fields.Text('Note')
color = fields.Integer('Color Index')
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('Error! You cannot create recursive departments.'))
@api.multi
def name_get(self):
result = []
for record in self:
name = record.name
if record.parent_id:
name = "%s / %s" % (record.parent_id.name_get()[0][1], name)
result.append((record.id, name))
return result
@api.model
def create(self, vals):
        # TDE note: auto-subscription of the manager is done by hand, because currently
        # the tracking only supports track+subscribe for fields linked to a res.users record.
        # An update of this limited behavior should come, but is not currently done.
department = super(Department, self.with_context(mail_create_nosubscribe=True)).create(vals)
manager = self.env['hr.employee'].browse(vals.get("manager_id"))
if manager.user_id:
department.message_subscribe_users(user_ids=manager.user_id.ids)
return department
@api.multi
def write(self, vals):
""" If updating manager of a department, we need to update all the employees
of department hierarchy, and subscribe the new manager.
"""
        # TDE note: auto-subscription of the manager is done by hand, because currently
        # the tracking only supports track+subscribe for fields linked to a res.users record.
        # An update of this limited behavior should come, but is not currently done.
if 'manager_id' in vals:
manager_id = vals.get("manager_id")
if manager_id:
manager = self.env['hr.employee'].browse(manager_id)
# subscribe the manager user
if manager.user_id:
self.message_subscribe_users(user_ids=manager.user_id.ids)
employees = self.env['hr.employee']
for department in self:
employees = employees | self.env['hr.employee'].search([
('id', '!=', manager_id),
('department_id', '=', department.id),
('parent_id', '=', department.manager_id.id)
])
employees.write({'parent_id': manager_id})
return super(Department, self).write(vals)
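# Added illustration (not part of the original module) -- a plain-Python sketch of
# the display name built by Department.name_get() above: a department's name is
# recursively prefixed with its parent chain, so a "Benelux" department whose
# parent is "Sales" is rendered as "Sales / Benelux".
def _department_display_name_sketch(ancestor_names, name):
    """`ancestor_names` runs from the root department down to the direct parent."""
    return " / ".join(list(ancestor_names) + [name])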
| 44.093548
| 196
| 0.665301
|
import logging
from odoo import api, fields, models
from odoo import tools, _
from odoo.exceptions import ValidationError
from odoo.modules.module import get_module_resource
_logger = logging.getLogger(__name__)
class EmployeeCategory(models.Model):
_name = "hr.employee.category"
_description = "Employee Category"
name = fields.Char(string="Employee Tag", required=True)
color = fields.Integer(string='Color Index')
employee_ids = fields.Many2many('hr.employee', 'employee_category_rel', 'category_id', 'emp_id', string='Employees')
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
class Job(models.Model):
_name = "hr.job"
_description = "Job Position"
_inherit = ['mail.thread']
name = fields.Char(string='Job Title', required=True, index=True, translate=True)
expected_employees = fields.Integer(compute='_compute_employees', string='Total Forecasted Employees', store=True,
help='Expected number of employees for this job position after new recruitment.')
no_of_employee = fields.Integer(compute='_compute_employees', string="Current Number of Employees", store=True,
help='Number of employees currently occupying this job position.')
no_of_recruitment = fields.Integer(string='Expected New Employees', copy=False,
help='Number of new employees you expect to recruit.', default=1)
no_of_hired_employee = fields.Integer(string='Hired Employees', copy=False,
help='Number of hired employees for this job position during recruitment phase.')
employee_ids = fields.One2many('hr.employee', 'job_id', string='Employees', groups='base.group_user')
description = fields.Text(string='Job Description')
requirements = fields.Text('Requirements')
department_id = fields.Many2one('hr.department', string='Department')
company_id = fields.Many2one('res.company', string='Company', default=lambda self: self.env.user.company_id)
state = fields.Selection([
('recruit', 'Recruitment in Progress'),
('open', 'Not Recruiting')
], string='Status', readonly=True, required=True, track_visibility='always', copy=False, default='recruit', help="Set whether the recruitment process is open or closed for this job position.")
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id, department_id)', 'The name of the job position must be unique per department in company!'),
]
@api.depends('no_of_recruitment', 'employee_ids.job_id', 'employee_ids.active')
def _compute_employees(self):
employee_data = self.env['hr.employee'].read_group([('job_id', 'in', self.ids)], ['job_id'], ['job_id'])
result = dict((data['job_id'][0], data['job_id_count']) for data in employee_data)
for job in self:
job.no_of_employee = result.get(job.id, 0)
job.expected_employees = result.get(job.id, 0) + job.no_of_recruitment
@api.model
def create(self, values):
return super(Job, self.with_context(mail_create_nosubscribe=True)).create(values)
@api.multi
def copy(self, default=None):
self.ensure_one()
default = dict(default or {})
if 'name' not in default:
default['name'] = _("%s (copy)") % (self.name)
return super(Job, self).copy(default=default)
@api.multi
def set_recruit(self):
for record in self:
no_of_recruitment = 1 if record.no_of_recruitment == 0 else record.no_of_recruitment
record.write({'state': 'recruit', 'no_of_recruitment': no_of_recruitment})
return True
@api.multi
def set_open(self):
return self.write({
'state': 'open',
'no_of_recruitment': 0,
'no_of_hired_employee': 0
})
class Employee(models.Model):
_name = "hr.employee"
_description = "Employee"
_order = 'name_related'
_inherits = {'resource.resource': "resource_id"}
_inherit = ['mail.thread']
_mail_post_access = 'read'
@api.model
def _default_image(self):
image_path = get_module_resource('hr', 'static/src/img', 'default_image.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
name_related = fields.Char(related='resource_id.name', string="Resource Name", readonly=True, store=True)
country_id = fields.Many2one('res.country', string='Nationality (Country)')
birthday = fields.Date('Date of Birth')
ssnid = fields.Char('SSN No', help='Social Security Number')
sinid = fields.Char('SIN No', help='Social Insurance Number')
identification_id = fields.Char(string='Identification No')
gender = fields.Selection([
('male', 'Male'),
('female', 'Female'),
('other', 'Other')
])
marital = fields.Selection([
('single', 'Single'),
('married', 'Married'),
('widower', 'Widower'),
('divorced', 'Divorced')
], string='Marital Status')
department_id = fields.Many2one('hr.department', string='Department')
address_id = fields.Many2one('res.partner', string='Working Address')
address_home_id = fields.Many2one('res.partner', string='Home Address')
bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account Number',
domain="[('partner_id', '=', address_home_id)]", help='Employee bank salary account')
work_phone = fields.Char('Work Phone')
mobile_phone = fields.Char('Work Mobile')
work_email = fields.Char('Work Email')
work_location = fields.Char('Work Location')
notes = fields.Text('Notes')
parent_id = fields.Many2one('hr.employee', string='Manager')
category_ids = fields.Many2many('hr.employee.category', 'employee_category_rel', 'emp_id', 'category_id', string='Tags')
child_ids = fields.One2many('hr.employee', 'parent_id', string='Subordinates')
resource_id = fields.Many2one('resource.resource', string='Resource',
ondelete='cascade', required=True, auto_join=True)
coach_id = fields.Many2one('hr.employee', string='Coach')
job_id = fields.Many2one('hr.job', string='Job Title')
passport_id = fields.Char('Passport No')
color = fields.Integer('Color Index', default=0)
city = fields.Char(related='address_id.city')
login = fields.Char(related='user_id.login', readonly=True)
last_login = fields.Datetime(related='user_id.login_date', string='Latest Connection', readonly=True)
image = fields.Binary("Photo", default=_default_image, attachment=True,
help="This field holds the image used as photo for the employee, limited to 1024x1024px.")
image_medium = fields.Binary("Medium-sized photo", attachment=True,
help="Medium-sized photo of the employee. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary("Small-sized photo", attachment=True,
help="Small-sized photo of the employee. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
@api.constrains('parent_id')
def _check_parent_id(self):
for employee in self:
if not employee._check_recursion():
raise ValidationError(_('Error! You cannot create recursive hierarchy of Employee(s).'))
@api.onchange('address_id')
def _onchange_address(self):
self.work_phone = self.address_id.phone
self.mobile_phone = self.address_id.mobile
@api.onchange('company_id')
def _onchange_company(self):
address = self.company_id.partner_id.address_get(['default'])
self.address_id = address['default'] if address else False
@api.onchange('department_id')
def _onchange_department(self):
self.parent_id = self.department_id.manager_id
@api.onchange('user_id')
def _onchange_user(self):
self.work_email = self.user_id.email
self.name = self.user_id.name
self.image = self.user_id.image
@api.model
def create(self, vals):
tools.image_resize_images(vals)
return super(Employee, self).create(vals)
@api.multi
def write(self, vals):
if 'address_home_id' in vals:
account_id = vals.get('bank_account_id') or self.bank_account_id.id
if account_id:
self.env['res.partner.bank'].browse(account_id).partner_id = vals['address_home_id']
tools.image_resize_images(vals)
return super(Employee, self).write(vals)
@api.multi
def unlink(self):
resources = self.mapped('resource_id')
super(Employee, self).unlink()
return resources.unlink()
@api.multi
def action_follow(self):
return self.message_subscribe_users()
@api.multi
def action_unfollow(self):
return self.message_unsubscribe_users()
@api.model
def _message_get_auto_subscribe_fields(self, updated_fields, auto_follow_fields=None):
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
@api.multi
def _message_auto_subscribe_notify(self, partner_ids):
return
class Department(models.Model):
_name = "hr.department"
_description = "Hr Department"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_order = "name"
name = fields.Char('Department Name', required=True)
active = fields.Boolean('Active', default=True)
company_id = fields.Many2one('res.company', string='Company', index=True, default=lambda self: self.env.user.company_id)
parent_id = fields.Many2one('hr.department', string='Parent Department', index=True)
child_ids = fields.One2many('hr.department', 'parent_id', string='Child Departments')
manager_id = fields.Many2one('hr.employee', string='Manager', track_visibility='onchange')
member_ids = fields.One2many('hr.employee', 'department_id', string='Members', readonly=True)
jobs_ids = fields.One2many('hr.job', 'department_id', string='Jobs')
note = fields.Text('Note')
color = fields.Integer('Color Index')
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('Error! You cannot create recursive departments.'))
@api.multi
def name_get(self):
result = []
for record in self:
name = record.name
if record.parent_id:
name = "%s / %s" % (record.parent_id.name_get()[0][1], name)
result.append((record.id, name))
return result
@api.model
def create(self, vals):
department = super(Department, self.with_context(mail_create_nosubscribe=True)).create(vals)
manager = self.env['hr.employee'].browse(vals.get("manager_id"))
if manager.user_id:
department.message_subscribe_users(user_ids=manager.user_id.ids)
return department
@api.multi
def write(self, vals):
if 'manager_id' in vals:
manager_id = vals.get("manager_id")
if manager_id:
manager = self.env['hr.employee'].browse(manager_id)
if manager.user_id:
self.message_subscribe_users(user_ids=manager.user_id.ids)
employees = self.env['hr.employee']
for department in self:
employees = employees | self.env['hr.employee'].search([
('id', '!=', manager_id),
('department_id', '=', department.id),
('parent_id', '=', department.manager_id.id)
])
employees.write({'parent_id': manager_id})
return super(Department, self).write(vals)
| true
| true
|
1c44ee6380ee5448632893ca93070185326ad09f
| 11,616
|
py
|
Python
|
ApplicationPerformance/automationinterface/autoInterface.py
|
hsy5332/Blog
|
3c17e097b31dcddfc41896149cc14b69fea1ae14
|
[
"Apache-2.0"
] | null | null | null |
ApplicationPerformance/automationinterface/autoInterface.py
|
hsy5332/Blog
|
3c17e097b31dcddfc41896149cc14b69fea1ae14
|
[
"Apache-2.0"
] | null | null | null |
ApplicationPerformance/automationinterface/autoInterface.py
|
hsy5332/Blog
|
3c17e097b31dcddfc41896149cc14b69fea1ae14
|
[
"Apache-2.0"
] | null | null | null |
import xlrd
import requests
import json
import openpyxl
import time
import ApplicationPerformance.applicationperformance.launchTime as launchTime
from openpyxl.styles import Font, colors, Alignment, borders
# Read the Excel test cases and send the interface requests
def readExcel():
    # create the result Excel workbook
createdcase = openpyxl.Workbook()
    sheetform = createdcase.active  # get the active worksheet
sheetformOnestyle = Font(name='等线', size=12, color=colors.RED, )
sheetform.title = 'Result'
sheetform['A1'] = "用例编号"
sheetform['B1'] = "接口地址"
sheetform['C1'] = "请求参数"
sheetform['D1'] = '请求方式'
sheetform['E1'] = '返回参数'
    sheetform['F1'] = '是否执行'  # write the corresponding value
    sheetform['G1'] = '备注'  # write the corresponding value
sheetform['A1'].font = sheetformOnestyle
sheetform['B1'].font = sheetformOnestyle
sheetform['C1'].font = sheetformOnestyle
sheetform['D1'].font = sheetformOnestyle
sheetform['E1'].font = sheetformOnestyle
sheetform['F1'].font = sheetformOnestyle
sheetform['G1'].font = sheetformOnestyle
    # read the source Excel file
excledata = xlrd.open_workbook("interfacecase.xlsx")
    excledata_sheel = excledata.sheet_by_name('Sheet1')  # get the data of worksheet Sheet1
    exclerows = excledata_sheel.nrows  # number of rows in the Excel sheet
    row_list = []  # list that holds the test case rows
    datakey = []  # keys of the request parameters split from the Excel parameter column
    datavalues = []  # values of the request parameters split from the Excel parameter column
    datadict = {}  # dict used to hold the request parameter fields
returndatalist = []
print("执行用例的总数量为:%s" % (exclerows - 1))
starttime = time.time()
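    # timestamp-based run id shared by every case record saved during this execution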
eventid = time.strftime("%Y%m%d%H%M%S", time.localtime())
for i in range(1, exclerows):
row_data = excledata_sheel.row_values(i)
if "http" in row_data[1]:
            # write columns 0-3 of the Excel row into the result sheet (slice 0:4, end exclusive)
sheetform.append(excledata_sheel.row_values(i)[0:4])
            # write column 4 of the Excel row into column F (the "executed" column)
poststr = ("F%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[poststr] = excledata_sheel.row_values(i)[4]
            # write column 5 of the Excel row into column G (the remarks column)
tfstr = ("G%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[tfstr] = excledata_sheel.row_values(i)[5]
if "Y" in row_data[4]: # 判断Excel中设置的方式是否为执行状态
row_list.append(row_data)
datastr = row_data[2].replace(' ', '')
                datastr = datastr.replace(',', '=')  # normalise the parameter string from row_data[2]
datastr = datastr.split('=')
                datakey = datastr[::2]  # put the keys into datakey
                datavalues = datastr[1::2]  # put the values into datavalues
                datadict = dict(zip(datakey, datavalues))  # build the request parameter dict
url = row_data[1]
                if 'post' == str(row_data[3]):  # check whether the request method is post
try:
returnparameter = interfaceRequest(url, datadict, 'post')
print(int(row_data[0]), "接口返回数据 :", returnparameter)
returndatalist.append(returnparameter)
                        # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
except:
print(int(row_data[0]), "请检查用例 %s 的接口地址以及参数是否有问题 !" % (int(row_data[0])))
returndatalist.append("请检查用例的接口地址以及参数是否有问题 !")
                        # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returnparameter = "返回的参数有问题"
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
                elif 'get' == str(row_data[3]):  # check whether the request method is get
try:
returnparameter = interfaceRequest(url, datadict, 'get')
print(int(row_data[0]), "接口返回数据 :", returnparameter)
returndatalist.append(returnparameter)
                        # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), row_data[4], int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
except:
print(int(row_data[0]), "请检查用例 %s 的接口地址以及参数是否有问题 !" % (int(row_data[0])))
returndatalist.append("请检查用例的接口地址以及参数是否有问题 !")
                        # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returnparameter = "返回的参数有问题"
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), row_data[4], int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
                else:  # the request method is neither get nor post
print(int(row_data[0]), "请检查用例 %s 的请求方式是否填写正确 !" % (int(row_data[0])))
returndatalist.append("请检查用例的请求方式是否填写正确 !")
                    # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = "请求方式有问题"
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
else:
print(int(row_data[0]), "用例编号:%s 设置为不执行。" % (int(row_data[0])))
returndatalist.append("用例未执行")
                # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = " "
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
else:
print("表格中的接口地址错误,请填写正确的接口地址。")
returndatalist.append("表格中的接口地址错误,请填写正确的接口地址。")
            # write the result into the column E cell for this case
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = " "
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
break
endtime = time.time()
print("执行用例的总时间为:", round((endtime - starttime), 2))
    # name the generated Excel report with the current timestamp
excelname = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
createdcase.save('%s.xlsx' % (excelname))
# request the interface URL and return the response data
def interfaceRequest(url, data, method):
if method == "post":
request = requests.post(url=url, data=data)
return json.loads(request.text)
else:
request = requests.get(url=url, data=data)
return json.loads(request.text)
readExcel()
| 61.136842
| 291
| 0.546229
|
import xlrd
import requests
import json
import openpyxl
import time
import ApplicationPerformance.applicationperformance.launchTime as launchTime
from openpyxl.styles import Font, colors, Alignment, borders
def readExcel():
createdcase = openpyxl.Workbook()
sheetform = createdcase.active
sheetformOnestyle = Font(name='等线', size=12, color=colors.RED, )
sheetform.title = 'Result'
sheetform['A1'] = "用例编号"
sheetform['B1'] = "接口地址"
sheetform['C1'] = "请求参数"
sheetform['D1'] = '请求方式'
sheetform['E1'] = '返回参数'
sheetform['F1'] = '是否执行'
sheetform['G1'] = '备注'
sheetform['A1'].font = sheetformOnestyle
sheetform['B1'].font = sheetformOnestyle
sheetform['C1'].font = sheetformOnestyle
sheetform['D1'].font = sheetformOnestyle
sheetform['E1'].font = sheetformOnestyle
sheetform['F1'].font = sheetformOnestyle
sheetform['G1'].font = sheetformOnestyle
excledata = xlrd.open_workbook("interfacecase.xlsx")
excledata_sheel = excledata.sheet_by_name('Sheet1')
exclerows = excledata_sheel.nrows
row_list = []
datakey = []
datavalues = []
datadict = {}
returndatalist = []
print("执行用例的总数量为:%s" % (exclerows - 1))
starttime = time.time()
eventid = time.strftime("%Y%m%d%H%M%S", time.localtime())
for i in range(1, exclerows):
row_data = excledata_sheel.row_values(i)
if "http" in row_data[1]:
sheetform.append(excledata_sheel.row_values(i)[0:4])
poststr = ("F%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[poststr] = excledata_sheel.row_values(i)[4]
tfstr = ("G%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[tfstr] = excledata_sheel.row_values(i)[5]
if "Y" in row_data[4]:
row_list.append(row_data)
datastr = row_data[2].replace(' ', '')
datastr = datastr.replace(',', '=')
datastr = datastr.split('=')
datakey = datastr[::2]
datavalues = datastr[1::2]
datadict = dict(zip(datakey, datavalues))
url = row_data[1]
if 'post' == str(row_data[3]):
try:
returnparameter = interfaceRequest(url, datadict, 'post')
print(int(row_data[0]), "接口返回数据 :", returnparameter)
returndatalist.append(returnparameter)
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
except:
print(int(row_data[0]), "请检查用例 %s 的接口地址以及参数是否有问题 !" % (int(row_data[0])))
returndatalist.append("请检查用例的接口地址以及参数是否有问题 !")
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returnparameter = "返回的参数有问题"
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
elif 'get' == str(row_data[3]):
try:
returnparameter = interfaceRequest(url, datadict, 'get')
print(int(row_data[0]), "接口返回数据 :", returnparameter)
returndatalist.append(returnparameter)
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), row_data[4], int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
except:
print(int(row_data[0]), "请检查用例 %s 的接口地址以及参数是否有问题 !" % (int(row_data[0])))
returndatalist.append("请检查用例的接口地址以及参数是否有问题 !")
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returnparameter = "返回的参数有问题"
returndatalist = []
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), row_data[4], int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
else:
print(int(row_data[0]), "请检查用例 %s 的请求方式是否填写正确 !" % (int(row_data[0])))
returndatalist.append("请检查用例的请求方式是否填写正确 !")
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = "请求方式有问题"
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
else:
print(int(row_data[0]), "用例编号:%s 设置为不执行。" % (int(row_data[0])))
returndatalist.append("用例未执行")
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = " "
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
else:
print("表格中的接口地址错误,请填写正确的接口地址。")
returndatalist.append("表格中的接口地址错误,请填写正确的接口地址。")
returnparm = ("E%s" % (int(excledata_sheel.row_values(i)[0]) + 1))
sheetform[returnparm] = str(returndatalist[0])
sheetform[returnparm].font = sheetformOnestyle
returndatalist = []
returnparameter = " "
savedate = "insert into automationquery_automation_interface (`interfaceurl`,`requestparameter`,`returnparameter`,`requesttype`,`casestatus`,`caseid`,`remark`,`createdtime`,`updatetime`,`eventid`)VALUES(\"%s\",\"%s\",\"%s\",'%s','%s','%s',\"%s\",'%s','%s','%s')" % (
url, datadict, returnparameter, str(row_data[3]), int(row_data[4]), int(row_data[0]),
str(row_data[5]),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), eventid)
launchTime.MysqlConnect().saveDatatoMysql(savedate)
break
endtime = time.time()
print("执行用例的总时间为:", round((endtime - starttime), 2))
excelname = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time()))
createdcase.save('%s.xlsx' % (excelname))
def interfaceRequest(url, data, method):
if method == "post":
request = requests.post(url=url, data=data)
return json.loads(request.text)
else:
request = requests.get(url=url, data=data)
return json.loads(request.text)
readExcel()
| true
| true
|
1c44eec40a52889d5fb14ae7d17436eec46e63e9
| 2,983
|
py
|
Python
|
tests/unit/test_proxy.py
|
doytsujin/localstack
|
46ffd646af553f381cc567e4a7a06f604640c1c7
|
[
"Apache-2.0"
] | 1
|
2022-03-17T07:22:23.000Z
|
2022-03-17T07:22:23.000Z
|
tests/unit/test_proxy.py
|
doytsujin/localstack
|
46ffd646af553f381cc567e4a7a06f604640c1c7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_proxy.py
|
doytsujin/localstack
|
46ffd646af553f381cc567e4a7a06f604640c1c7
|
[
"Apache-2.0"
] | null | null | null |
import gzip
import json
import logging
import unittest
import requests
from localstack import config
from localstack.constants import HEADER_ACCEPT_ENCODING, LOCALHOST_HOSTNAME
from localstack.services.generic_proxy import ProxyListener, start_proxy_server
from localstack.services.infra import start_proxy_for_service
from localstack.utils.common import (
get_free_tcp_port,
is_port_open,
poll_condition,
to_str,
wait_for_port_open,
)
from localstack.utils.server.proxy_server import start_ssl_proxy
LOG = logging.getLogger(__name__)
class TestProxyServer(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.cfg_val = config.FORWARD_EDGE_INMEM
config.FORWARD_EDGE_INMEM = False
@classmethod
def tearDownClass(cls) -> None:
config.FORWARD_EDGE_INMEM = cls.cfg_val
def test_start_and_stop(self):
proxy_port = get_free_tcp_port()
backend_port = get_free_tcp_port()
server = start_proxy_for_service(
"myservice",
proxy_port,
backend_port,
update_listener=None,
quiet=True,
params={"protocol_version": "HTTP/1.0"},
)
self.assertIsNotNone(server)
try:
self.assertTrue(
poll_condition(lambda: is_port_open(proxy_port), timeout=15),
"gave up waiting for port %d" % proxy_port,
)
finally:
print("stopping proxy server")
server.stop()
print("waiting max 15 seconds for server to terminate")
server.join(timeout=15)
self.assertFalse(is_port_open(proxy_port))
def test_ssl_proxy_server():
class MyListener(ProxyListener):
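        # record every forwarded request and reply with a fixed JSON body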
def forward_request(self, *args, **kwargs):
invocations.append((args, kwargs))
return {"foo": "bar"}
invocations = []
    # start SSL-enabled backend proxy server
listener = MyListener()
port = get_free_tcp_port()
server = start_proxy_server(port, update_listener=listener, use_ssl=True)
wait_for_port_open(port)
# start SSL proxy
proxy_port = get_free_tcp_port()
proxy = start_ssl_proxy(proxy_port, port, asynchronous=True, fix_encoding=True)
wait_for_port_open(proxy_port)
# invoke SSL proxy server
url = f"https://{LOCALHOST_HOSTNAME}:{proxy_port}"
num_requests = 3
for i in range(num_requests):
response = requests.get(url, verify=False)
assert response.status_code == 200
# assert backend server has been invoked
assert len(invocations) == num_requests
# invoke SSL proxy server with gzip response
for encoding in ["gzip", "gzip, deflate"]:
headers = {HEADER_ACCEPT_ENCODING: encoding}
response = requests.get(url, headers=headers, verify=False, stream=True)
result = response.raw.read()
assert to_str(gzip.decompress(result)) == json.dumps({"foo": "bar"})
# clean up
proxy.stop()
server.stop()
| 28.961165
| 83
| 0.671472
|
import gzip
import json
import logging
import unittest
import requests
from localstack import config
from localstack.constants import HEADER_ACCEPT_ENCODING, LOCALHOST_HOSTNAME
from localstack.services.generic_proxy import ProxyListener, start_proxy_server
from localstack.services.infra import start_proxy_for_service
from localstack.utils.common import (
get_free_tcp_port,
is_port_open,
poll_condition,
to_str,
wait_for_port_open,
)
from localstack.utils.server.proxy_server import start_ssl_proxy
LOG = logging.getLogger(__name__)
class TestProxyServer(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.cfg_val = config.FORWARD_EDGE_INMEM
config.FORWARD_EDGE_INMEM = False
@classmethod
def tearDownClass(cls) -> None:
config.FORWARD_EDGE_INMEM = cls.cfg_val
def test_start_and_stop(self):
proxy_port = get_free_tcp_port()
backend_port = get_free_tcp_port()
server = start_proxy_for_service(
"myservice",
proxy_port,
backend_port,
update_listener=None,
quiet=True,
params={"protocol_version": "HTTP/1.0"},
)
self.assertIsNotNone(server)
try:
self.assertTrue(
poll_condition(lambda: is_port_open(proxy_port), timeout=15),
"gave up waiting for port %d" % proxy_port,
)
finally:
print("stopping proxy server")
server.stop()
print("waiting max 15 seconds for server to terminate")
server.join(timeout=15)
self.assertFalse(is_port_open(proxy_port))
def test_ssl_proxy_server():
class MyListener(ProxyListener):
def forward_request(self, *args, **kwargs):
invocations.append((args, kwargs))
return {"foo": "bar"}
invocations = []
listener = MyListener()
port = get_free_tcp_port()
server = start_proxy_server(port, update_listener=listener, use_ssl=True)
wait_for_port_open(port)
proxy_port = get_free_tcp_port()
proxy = start_ssl_proxy(proxy_port, port, asynchronous=True, fix_encoding=True)
wait_for_port_open(proxy_port)
url = f"https://{LOCALHOST_HOSTNAME}:{proxy_port}"
num_requests = 3
for i in range(num_requests):
response = requests.get(url, verify=False)
assert response.status_code == 200
assert len(invocations) == num_requests
for encoding in ["gzip", "gzip, deflate"]:
headers = {HEADER_ACCEPT_ENCODING: encoding}
response = requests.get(url, headers=headers, verify=False, stream=True)
result = response.raw.read()
assert to_str(gzip.decompress(result)) == json.dumps({"foo": "bar"})
proxy.stop()
server.stop()
| true
| true
|
1c44eeca9e001981d13a1e1093d34646e8352fa6
| 6,446
|
py
|
Python
|
EvalScript/evalResult.py
|
stanleynguyen/m-hmm
|
5677d7d19f008a19bfa616f2095278e3eadcb85a
|
[
"MIT"
] | null | null | null |
EvalScript/evalResult.py
|
stanleynguyen/m-hmm
|
5677d7d19f008a19bfa616f2095278e3eadcb85a
|
[
"MIT"
] | 1
|
2017-12-06T13:53:10.000Z
|
2017-12-06T13:53:10.000Z
|
EvalScript/evalResult.py
|
stanleynguyen/m-hmm
|
5677d7d19f008a19bfa616f2095278e3eadcb85a
|
[
"MIT"
] | null | null | null |
import sys
import re
from copy import copy
from collections import defaultdict
from optparse import OptionParser
# Read entities from prediction
def get_predicted(predicted, answers=defaultdict(lambda: defaultdict(defaultdict))):
example = 0
word_index = 0
entity = []
last_ne = "O"
last_sent = ""
last_entity = []
answers[example] = []
for line in predicted:
line = line.strip()
if line.startswith("##"):
continue
elif len(line) == 0:
if entity:
answers[example].append(list(entity))
entity = []
example += 1
answers[example] = []
word_index = 0
last_ne = "O"
continue
else:
split_line = line.split(separator)
#word = split_line[0]
value = split_line[outputColumnIndex]
ne = value[0]
sent = value[2:]
last_entity = []
# check if it is start of entity
if ne == 'B' or (ne == 'I' and last_ne == 'O') or (last_ne != 'O' and ne == 'I' and last_sent != sent):
if entity:
last_entity = list(entity)
entity = [sent]
entity.append(word_index)
elif ne == 'I':
entity.append(word_index)
elif ne == 'O':
if last_ne == 'B' or last_ne == 'I':
last_entity = list(entity)
entity = []
if last_entity:
answers[example].append(list(last_entity))
last_entity = []
last_sent = sent
last_ne = ne
word_index += 1
if entity:
answers[example].append(list(entity))
return answers
# Read entities from gold data
def get_observed(observed):
example = 0
word_index = 0
entity = []
last_ne = "O"
last_sent = ""
last_entity = []
observations = defaultdict(defaultdict)
observations[example] = []
for line in observed:
line = line.strip()
if line.startswith("##"):
continue
elif len(line) == 0:
if entity:
observations[example].append(list(entity))
entity = []
example += 1
observations[example] = []
word_index = 0
last_ne = "O"
continue
else:
split_line = line.split(separator)
word = split_line[0]
value = split_line[outputColumnIndex]
ne = value[0]
sent = value[2:]
last_entity = []
            # check if this is the start of an entity; assume there are no malformed tag sequences in the gold data
if ne == 'B' or (ne == 'I' and last_ne == 'O') or (last_ne != 'O' and ne == 'I' and last_sent != sent):
if entity:
last_entity = entity
entity = [sent]
entity.append(word_index)
elif ne == 'I':
entity.append(word_index)
elif ne == 'O':
if last_ne == 'B' or last_ne == 'I':
last_entity = entity
entity = []
if last_entity:
observations[example].append(list(last_entity))
last_entity = []
last_ne = ne
last_sent = sent
word_index += 1
if entity:
observations[example].append(list(entity))
return observations
# Print Results and deal with division by 0
def printResult(evalTarget, num_correct, prec, rec):
if abs(prec + rec) < 1e-6:
f = 0
else:
f = 2 * prec * rec / (prec + rec)
print('#Correct', evalTarget, ':', num_correct)
print(evalTarget, ' precision: %.4f' % (prec))
print(evalTarget, ' recall: %.4f' % (rec))
print(evalTarget, ' F: %.4f' % (f))
return f
# Compare results between gold data and prediction data
def compare_observed_to_predicted(observed, predicted):
correct_sentiment = 0
correct_entity = 0
total_observed = 0.0
total_predicted = 0.0
# For each Instance Index example (example = 0,1,2,3.....)
for example in observed:
observed_instance = observed[example]
predicted_instance = predicted[example]
# Count number of entities in gold data
total_observed += len(observed_instance)
# Count number of entities in prediction data
total_predicted += len(predicted_instance)
# For each entity in prediction
for span in predicted_instance:
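            # each span is [sentiment, first_word_index, ...] covering one predicted entity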
span_begin = span[1]
span_length = len(span) - 1
span_ne = (span_begin, span_length)
span_sent = span[0]
# For each entity in gold data
for observed_span in observed_instance:
begin = observed_span[1]
length = len(observed_span) - 1
ne = (begin, length)
sent = observed_span[0]
# Entity matched
if span_ne == ne:
correct_entity += 1
# Entity & Sentiment both are matched
if span_sent == sent:
correct_sentiment += 1
print()
print('#Entity in gold data: %d' % (total_observed))
print('#Entity in prediction: %d' % (total_predicted))
print()
prec = correct_entity / total_predicted
rec = correct_entity / total_observed
entity_f = printResult('Entity', correct_entity, prec, rec)
print()
prec = correct_sentiment / total_predicted
rec = correct_sentiment / total_observed
sentiment_f = printResult('Sentiment', correct_sentiment, prec, rec)
return entity_f, sentiment_f
##############Main Function##################
if len(sys.argv) < 3:
print ('Please make sure you have installed Python 3.4 or above!')
print ("Usage on Windows: python evalResult.py gold predictions")
print ("Usage on Linux/Mac: python3 evalResult.py gold predictions")
sys.exit()
gold = open(sys.argv[1], "r", encoding='UTF-8')
prediction = open(sys.argv[2], "r", encoding='UTF-8')
# column separator
separator = ' '
# the column index for tags
outputColumnIndex = 1
# Read Gold data
observed = get_observed(gold)
# Read Predction data
predicted = get_predicted(prediction)
# Compare
compare_observed_to_predicted(observed, predicted)
| 26.746888
| 115
| 0.549643
|
import sys
import re
from copy import copy
from collections import defaultdict
from optparse import OptionParser
def get_predicted(predicted, answers=defaultdict(lambda: defaultdict(defaultdict))):
example = 0
word_index = 0
entity = []
last_ne = "O"
last_sent = ""
last_entity = []
answers[example] = []
for line in predicted:
line = line.strip()
if line.startswith("##"):
continue
elif len(line) == 0:
if entity:
answers[example].append(list(entity))
entity = []
example += 1
answers[example] = []
word_index = 0
last_ne = "O"
continue
else:
split_line = line.split(separator)
value = split_line[outputColumnIndex]
ne = value[0]
sent = value[2:]
last_entity = []
if ne == 'B' or (ne == 'I' and last_ne == 'O') or (last_ne != 'O' and ne == 'I' and last_sent != sent):
if entity:
last_entity = list(entity)
entity = [sent]
entity.append(word_index)
elif ne == 'I':
entity.append(word_index)
elif ne == 'O':
if last_ne == 'B' or last_ne == 'I':
last_entity = list(entity)
entity = []
if last_entity:
answers[example].append(list(last_entity))
last_entity = []
last_sent = sent
last_ne = ne
word_index += 1
if entity:
answers[example].append(list(entity))
return answers
def get_observed(observed):
example = 0
word_index = 0
entity = []
last_ne = "O"
last_sent = ""
last_entity = []
observations = defaultdict(defaultdict)
observations[example] = []
for line in observed:
line = line.strip()
if line.startswith("##"):
continue
elif len(line) == 0:
if entity:
observations[example].append(list(entity))
entity = []
example += 1
observations[example] = []
word_index = 0
last_ne = "O"
continue
else:
split_line = line.split(separator)
word = split_line[0]
value = split_line[outputColumnIndex]
ne = value[0]
sent = value[2:]
last_entity = []
if ne == 'B' or (ne == 'I' and last_ne == 'O') or (last_ne != 'O' and ne == 'I' and last_sent != sent):
if entity:
last_entity = entity
entity = [sent]
entity.append(word_index)
elif ne == 'I':
entity.append(word_index)
elif ne == 'O':
if last_ne == 'B' or last_ne == 'I':
last_entity = entity
entity = []
if last_entity:
observations[example].append(list(last_entity))
last_entity = []
last_ne = ne
last_sent = sent
word_index += 1
if entity:
observations[example].append(list(entity))
return observations
def printResult(evalTarget, num_correct, prec, rec):
if abs(prec + rec) < 1e-6:
f = 0
else:
f = 2 * prec * rec / (prec + rec)
print('#Correct', evalTarget, ':', num_correct)
print(evalTarget, ' precision: %.4f' % (prec))
print(evalTarget, ' recall: %.4f' % (rec))
print(evalTarget, ' F: %.4f' % (f))
return f
def compare_observed_to_predicted(observed, predicted):
correct_sentiment = 0
correct_entity = 0
total_observed = 0.0
total_predicted = 0.0
for example in observed:
observed_instance = observed[example]
predicted_instance = predicted[example]
total_observed += len(observed_instance)
total_predicted += len(predicted_instance)
for span in predicted_instance:
span_begin = span[1]
span_length = len(span) - 1
span_ne = (span_begin, span_length)
span_sent = span[0]
for observed_span in observed_instance:
begin = observed_span[1]
length = len(observed_span) - 1
ne = (begin, length)
sent = observed_span[0]
if span_ne == ne:
correct_entity += 1
if span_sent == sent:
correct_sentiment += 1
print()
print('#Entity in gold data: %d' % (total_observed))
print('#Entity in prediction: %d' % (total_predicted))
print()
prec = correct_entity / total_predicted
rec = correct_entity / total_observed
entity_f = printResult('Entity', correct_entity, prec, rec)
print()
prec = correct_sentiment / total_predicted
rec = correct_sentiment / total_observed
sentiment_f = printResult('Sentiment', correct_sentiment, prec, rec)
return entity_f, sentiment_f
| true
| true
|
1c44eed02120243fb429c3e2e94c22b73a0e766c
| 2,588
|
py
|
Python
|
tests/conftest.py
|
red-coracle/pyintacct
|
8064134d3e8cfa0e53ef4da1e9f50afb7b829ea7
|
[
"MIT"
] | 7
|
2019-07-24T01:46:40.000Z
|
2022-03-08T17:51:39.000Z
|
tests/conftest.py
|
red-coracle/pyintacct
|
8064134d3e8cfa0e53ef4da1e9f50afb7b829ea7
|
[
"MIT"
] | 1
|
2021-09-22T23:18:21.000Z
|
2021-09-22T23:18:21.000Z
|
tests/conftest.py
|
red-coracle/pyintacct
|
8064134d3e8cfa0e53ef4da1e9f50afb7b829ea7
|
[
"MIT"
] | 2
|
2021-04-27T15:13:19.000Z
|
2022-03-08T18:02:37.000Z
|
import pytest
from .config import config
from decimal import Decimal
from pyintacct import IntacctAPI
from pyintacct.models.base import Date
from pyintacct.models.company import Contact, MailAddress
from pyintacct.models.purchasing import POTransaction, POTransactionItem, POTransactionItems
@pytest.fixture(scope='session')
def client():
return IntacctAPI(config=config)
@pytest.fixture
def make_contact_record():
def _make_contact_record(name):
address = MailAddress(address1='100 Main Street',
address2='Suite 200',
city='San Francisco',
state='CA',
country='United States')
contact = Contact(contactname=name,
printas='ρyIntacct',
companyname='Foobar Inc.',
firstname='John',
lastname='Smith',
phone1='555-555-5555',
mailaddress=address,
taxid='00-000000')
return contact
return _make_contact_record
@pytest.fixture
def make_podocument():
def _make_podocument(documentno):
potransaction = POTransaction(
transactiontype='Purchase Order',
datecreated=Date(year='2019', month='9', day='1'),
vendorid='20025',
documentno=documentno,
referenceno=documentno,
vendordocno='INV-00001',
datedue=Date(year='2019', month='10', day='21'),
returnto=Contact(contactname='EirGrid Ireland'),
payto=Contact(contactname='EirGrid Ireland'),
basecurr='EUR',
currency='EUR',
exchratetype='Intacct Daily Rate',
potransitems=POTransactionItems(potransitem=[]))
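        # attach two example line items to the purchase order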
potransaction.potransitems.potransitem.append(POTransactionItem(
itemid='340',
itemdesc='Test widget #1',
quantity=Decimal(19),
unit='Each',
price=Decimal('34.40'),
locationid='500',
departmentid='500',
vendorid='20025'))
potransaction.potransitems.potransitem.append(POTransactionItem(
itemid='System Support',
itemdesc='Support for test widget #1',
quantity=Decimal(19),
unit='Each',
price=Decimal('12.40'),
locationid='500',
departmentid='500',
vendorid='20025'))
return potransaction
return _make_podocument
| 35.452055
| 92
| 0.56762
|
import pytest
from .config import config
from decimal import Decimal
from pyintacct import IntacctAPI
from pyintacct.models.base import Date
from pyintacct.models.company import Contact, MailAddress
from pyintacct.models.purchasing import POTransaction, POTransactionItem, POTransactionItems
@pytest.fixture(scope='session')
def client():
return IntacctAPI(config=config)
@pytest.fixture
def make_contact_record():
def _make_contact_record(name):
address = MailAddress(address1='100 Main Street',
address2='Suite 200',
city='San Francisco',
state='CA',
country='United States')
contact = Contact(contactname=name,
printas='ρyIntacct',
companyname='Foobar Inc.',
firstname='John',
lastname='Smith',
phone1='555-555-5555',
mailaddress=address,
taxid='00-000000')
return contact
return _make_contact_record
@pytest.fixture
def make_podocument():
def _make_podocument(documentno):
potransaction = POTransaction(
transactiontype='Purchase Order',
datecreated=Date(year='2019', month='9', day='1'),
vendorid='20025',
documentno=documentno,
referenceno=documentno,
vendordocno='INV-00001',
datedue=Date(year='2019', month='10', day='21'),
returnto=Contact(contactname='EirGrid Ireland'),
payto=Contact(contactname='EirGrid Ireland'),
basecurr='EUR',
currency='EUR',
exchratetype='Intacct Daily Rate',
potransitems=POTransactionItems(potransitem=[]))
potransaction.potransitems.potransitem.append(POTransactionItem(
itemid='340',
itemdesc='Test widget #1',
quantity=Decimal(19),
unit='Each',
price=Decimal('34.40'),
locationid='500',
departmentid='500',
vendorid='20025'))
potransaction.potransitems.potransitem.append(POTransactionItem(
itemid='System Support',
itemdesc='Support for test widget #1',
quantity=Decimal(19),
unit='Each',
price=Decimal('12.40'),
locationid='500',
departmentid='500',
vendorid='20025'))
return potransaction
return _make_podocument
| true
| true
|
1c44efd0b5d5abda9e33514c921d7eb78c13c4bc
| 859
|
py
|
Python
|
main.py
|
barrypp/ChangeImgTime
|
baa56095e7f00651e4ae507892b9594ed0fa5817
|
[
"MIT"
] | null | null | null |
main.py
|
barrypp/ChangeImgTime
|
baa56095e7f00651e4ae507892b9594ed0fa5817
|
[
"MIT"
] | null | null | null |
main.py
|
barrypp/ChangeImgTime
|
baa56095e7f00651e4ae507892b9594ed0fa5817
|
[
"MIT"
] | null | null | null |
import os
import re
import time
from datetime import datetime, timedelta
from pathlib import Path
import piexif
info = re.compile(r'\(v([0-9]+)\) - p([0-9]+)')
#info = re.compile(r' - c([0-9]+).+ - p([0-9]+)')
p = Path('data3')
count = 0
for x in p.rglob('*.*'):
#
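    # the filename encodes an hour offset (vH) and a seconds offset (pN) from 2019-01-01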
i = info.search(x.name)
hour = int(i.group(1))
num = int(i.group(2))
t = datetime(2019,1,1) + timedelta(seconds=num,hours=hour)
#
if x.suffix == '.jpg':
exif_dict = piexif.load(str(x))
exif_dict['Exif'][piexif.ExifIFD.DateTimeDigitized] = t.strftime('%Y:%m:%d %H:%M:%S')
exif_dict['Exif'][piexif.ExifIFD.DateTimeOriginal] = t.strftime('%Y:%m:%d %H:%M:%S')
piexif.insert(piexif.dump(exif_dict), str(x))
#
os.utime(x,(t.timestamp(),t.timestamp()))
#
count += 1
print(count,x.name)
| 27.709677
| 94
| 0.561118
|
import os
import re
import time
from datetime import datetime, timedelta
from pathlib import Path
import piexif
info = re.compile(r'\(v([0-9]+)\) - p([0-9]+)')
p = Path('data3')
count = 0
for x in p.rglob('*.*'):
i = info.search(x.name)
hour = int(i.group(1))
num = int(i.group(2))
t = datetime(2019,1,1) + timedelta(seconds=num,hours=hour)
if x.suffix == '.jpg':
exif_dict = piexif.load(str(x))
exif_dict['Exif'][piexif.ExifIFD.DateTimeDigitized] = t.strftime('%Y:%m:%d %H:%M:%S')
exif_dict['Exif'][piexif.ExifIFD.DateTimeOriginal] = t.strftime('%Y:%m:%d %H:%M:%S')
piexif.insert(piexif.dump(exif_dict), str(x))
os.utime(x,(t.timestamp(),t.timestamp()))
count += 1
print(count,x.name)
| true
| true
|
1c44f0e1ee44d25710b20fb98f024b9b4d0e5068
| 15,168
|
py
|
Python
|
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/custom_text_recognition_evaluator.py
|
Ohtani-y/open_model_zoo
|
280b59fc6c00455889a1949c795558252fdad96f
|
[
"Apache-2.0"
] | 2
|
2019-08-20T15:30:19.000Z
|
2020-09-01T15:16:33.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/custom_text_recognition_evaluator.py
|
Ohtani-y/open_model_zoo
|
280b59fc6c00455889a1949c795558252fdad96f
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/custom_text_recognition_evaluator.py
|
Ohtani-y/open_model_zoo
|
280b59fc6c00455889a1949c795558252fdad96f
|
[
"Apache-2.0"
] | 2
|
2021-06-25T06:18:58.000Z
|
2021-08-04T10:05:32.000Z
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import partial
import numpy as np
from .base_custom_evaluator import BaseCustomEvaluator
from .base_models import BaseDLSDKModel, BaseOpenVINOModel, BaseCascadeModel, create_model
from ...config import ConfigError
from ...utils import contains_all, extract_image_representations, generate_layer_name
from ...representation import CharacterRecognitionPrediction, CharacterRecognitionAnnotation
class TextRecognitionWithAttentionEvaluator(BaseCustomEvaluator):
def __init__(self, dataset_config, launcher, model, lowercase, orig_config):
super().__init__(dataset_config, launcher, orig_config)
self.model = model
self.lowercase = lowercase
@classmethod
def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
lowercase = config.get('lowercase', False)
model_type = config.get('model_type', 'SequentialFormulaRecognitionModel')
if model_type not in MODEL_TYPES.keys():
raise ValueError(f'Model type {model_type} is not supported')
meta = {}
if config.get('custom_label_map'):
meta.update({
'custom_label_map': config['custom_label_map']
})
if config.get('max_seq_len'):
meta.update({
'max_seq_len': config['max_seq_len']
})
model = MODEL_TYPES[model_type](
config.get('network_info', {}), launcher, config.get('_models', []), meta, config.get('_model_is_blob'),
delayed_model_loading=delayed_model_loading
)
return cls(dataset_config, launcher, model, lowercase, orig_config)
def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_data, batch_meta = extract_image_representations(batch_inputs)
temporal_output_callback = None
if output_callback:
temporal_output_callback = partial(output_callback, metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_prediction, batch_raw_prediction = self.model.predict(
batch_identifiers, batch_data, callback=temporal_output_callback
)
if self.lowercase:
batch_prediction = batch_prediction.lower()
batch_annotation = [CharacterRecognitionAnnotation(
label=ann.label.lower(), identifier=ann.identifier) for ann in batch_annotation]
batch_prediction = [CharacterRecognitionPrediction(
label=batch_prediction, identifier=batch_annotation[0].identifier)]
batch_annotation, batch_prediction = self.postprocessor.process_batch(
batch_annotation, batch_prediction, batch_meta
)
metrics_result = self._get_metrics_result(batch_input_ids, batch_annotation, batch_prediction,
calculate_metrics)
if output_callback:
output_callback(batch_raw_prediction, metrics_result=metrics_result,
element_identifiers=batch_identifiers, dataset_indices=batch_input_ids)
self._update_progress(progress_reporter, metric_config, batch_id, len(batch_prediction), csv_file)
def reset(self):
super().reset()
self.model.reset()
def select_dataset(self, dataset_tag):
super().select_dataset(dataset_tag)
if self.model.vocab is None:
self.model.vocab = self.dataset.metadata.get('vocab', {})
class BaseSequentialModel(BaseCascadeModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(network_info, launcher)
parts = ['recognizer_encoder', 'recognizer_decoder']
network_info = self.fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading)
if not contains_all(network_info, parts) and not delayed_model_loading:
raise ConfigError('network_info should contain encoder and decoder fields')
self._recognizer_mapping = {
'dlsdk': RecognizerDLSDKModel,
'openvino': RecognizerOVModel,
}
self.recognizer_encoder = create_model(network_info['recognizer_encoder'], launcher, self._recognizer_mapping,
'encoder', delayed_model_loading=delayed_model_loading)
self.recognizer_decoder = create_model(network_info['recognizer_decoder'], launcher, self._recognizer_mapping,
'decoder', delayed_model_loading=delayed_model_loading)
self.sos_index = 0
self.eos_index = 2
self.max_seq_len = int(meta.get('max_seq_len', 0))
self._part_by_name = {'encoder': self.recognizer_encoder, 'decoder': self.recognizer_decoder}
self.with_prefix = False
def load_model(self, network_list, launcher):
super().load_model(network_list, launcher)
self.update_inputs_outputs_info()
def load_network(self, network_list, launcher):
super().load_network(network_list, launcher)
self.update_inputs_outputs_info()
def update_inputs_outputs_info(self):
with_prefix = next(iter(self.recognizer_encoder.network.input_info)).startswith('encoder')
if with_prefix != self.with_prefix:
for input_k, input_name in self.recognizer_encoder.inputs_mapping.items():
self.recognizer_encoder.inputs_mapping[input_k] = generate_layer_name(input_name, 'encoder_',
with_prefix)
for out_k, out_name in self.recognizer_encoder.outputs_mapping.items():
self.recognizer_encoder.outputs_mapping[out_k] = generate_layer_name(out_name, 'encoder_',
with_prefix)
for input_k, input_name in self.recognizer_decoder.inputs_mapping.items():
self.recognizer_decoder.inputs_mapping[input_k] = generate_layer_name(input_name, 'decoder_',
with_prefix)
for out_k, out_name in self.recognizer_decoder.outputs_mapping.items():
self.recognizer_decoder.outputs_mapping[out_k] = generate_layer_name(out_name, 'decoder_',
with_prefix)
self.with_prefix = with_prefix
def predict(self, identifiers, input_data):
pass
class SequentialTextRecognitionModel(BaseSequentialModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(
network_info, launcher, models_args, meta, is_blob=is_blob,
delayed_model_loading=delayed_model_loading
)
self.vocab = meta.get('custom_label_map')
self.recognizer_encoder.inputs_mapping = {'imgs': 'imgs'}
self.recognizer_encoder.outputs_mapping = {'features': 'features', 'decoder_hidden': 'decoder_hidden'}
self.recognizer_decoder.inputs_mapping = {
'features': 'features', 'hidden': 'hidden', 'decoder_input': 'decoder_input'
}
self.recognizer_decoder.outputs_mapping = {
'decoder_hidden': 'decoder_hidden',
'decoder_output': 'decoder_output'
}
def get_phrase(self, indices):
res = ''.join(self.vocab.get(idx, '?') for idx in indices)
return res
def predict(self, identifiers, input_data, callback=None):
assert len(identifiers) == 1
input_data = np.array(input_data)
input_data = np.transpose(input_data, (0, 3, 1, 2))
enc_res = self.recognizer_encoder.predict(identifiers,
{self.recognizer_encoder.inputs_mapping['imgs']: input_data})
if callback:
callback(enc_res)
features = enc_res[self.recognizer_encoder.outputs_mapping['features']]
dec_state = enc_res[self.recognizer_encoder.outputs_mapping['decoder_hidden']]
tgt = np.array([[self.sos_index]])
logits = []
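        # greedy autoregressive decoding: feed the previous prediction back until EOS or max_seq_len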
for _ in range(self.max_seq_len):
dec_res = self.recognizer_decoder.predict(
identifiers,
{
self.recognizer_decoder.inputs_mapping['features']: features,
self.recognizer_decoder.inputs_mapping['hidden']: dec_state,
self.recognizer_decoder.inputs_mapping['decoder_input']: tgt
})
dec_state = dec_res[self.recognizer_decoder.outputs_mapping['decoder_hidden']]
logit = dec_res[self.recognizer_decoder.outputs_mapping['decoder_output']]
tgt = np.argmax(logit, axis=1)
if self.eos_index == tgt[0]:
break
logits.append(logit)
if callback:
callback(dec_res)
logits = np.array(logits)
logits = logits.squeeze(axis=1)
targets = np.argmax(logits, axis=1)
result_phrase = self.get_phrase(targets)
return result_phrase, dec_res
class SequentialFormulaRecognitionModel(BaseSequentialModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(network_info, launcher, models_args, meta, is_blob,
delayed_model_loading=delayed_model_loading)
self.vocab = meta.get('vocab')
self.recognizer_encoder.inputs_mapping = {
'imgs': 'imgs'
}
self.recognizer_encoder.outputs_mapping = {
'row_enc_out': 'row_enc_out',
'hidden': 'hidden',
'context': 'context',
'init_0': 'init_0'
}
self.recognizer_decoder.inputs_mapping = {
'row_enc_out': 'row_enc_out',
'dec_st_c': 'dec_st_c',
'dec_st_h': 'dec_st_h',
'output_prev': 'output_prev',
'tgt': 'tgt'
}
self.recognizer_decoder.outputs_mapping = {
'dec_st_h_t': 'dec_st_h_t',
'dec_st_c_t': 'dec_st_c_t',
'output': 'output',
'logit': 'logit'
}
def get_phrase(self, indices):
res = ''
for idx in indices:
if idx != self.eos_index:
res += ' ' + str(self.vocab.get(idx, '?'))
else:
return res.strip()
return res.strip()
def predict(self, identifiers, input_data, callback=None):
assert len(identifiers) == 1
input_data = np.array(input_data)
input_data = np.transpose(input_data, (0, 3, 1, 2))
enc_res = self.recognizer_encoder.predict(identifiers,
{self.recognizer_encoder.inputs_mapping['imgs']: input_data})
if callback:
callback(enc_res)
row_enc_out = enc_res[self.recognizer_encoder.outputs_mapping['row_enc_out']]
dec_states_h = enc_res[self.recognizer_encoder.outputs_mapping['hidden']]
dec_states_c = enc_res[self.recognizer_encoder.outputs_mapping['context']]
O_t = enc_res[self.recognizer_encoder.outputs_mapping['init_0']]
tgt = np.array([[self.sos_index]])
logits = []
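# greedy autoregressive decoding with explicit recurrent state: the hidden/context
# states and the previous output O_t are fed back each step until EOS or max_seq_len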
for _ in range(self.max_seq_len):
dec_res = self.recognizer_decoder.predict(
identifiers,
{
self.recognizer_decoder.inputs_mapping['row_enc_out']: row_enc_out,
self.recognizer_decoder.inputs_mapping['dec_st_c']: dec_states_c,
self.recognizer_decoder.inputs_mapping['dec_st_h']: dec_states_h,
self.recognizer_decoder.inputs_mapping['output_prev']: O_t,
self.recognizer_decoder.inputs_mapping['tgt']: tgt
})
if callback:
callback(dec_res)
dec_states_h = dec_res[self.recognizer_decoder.outputs_mapping['dec_st_h_t']]
dec_states_c = dec_res[self.recognizer_decoder.outputs_mapping['dec_st_c_t']]
O_t = dec_res[self.recognizer_decoder.outputs_mapping['output']]
logit = dec_res[self.recognizer_decoder.outputs_mapping['logit']]
logits.append(logit)
tgt = np.array([[np.argmax(np.array(logit), axis=1)]])
if tgt[0][0][0] == self.eos_index:
break
logits = np.array(logits)
logits = logits.squeeze(axis=1)
targets = np.argmax(logits, axis=1)
result_phrase = self.get_phrase(targets)
return result_phrase, dec_res
class RecognizerDLSDKModel(BaseDLSDKModel):
def __init__(self, network_info, launcher, suffix,
delayed_model_loading=False, inputs_mapping=None, outputs_mapping=None):
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.inputs_mapping = inputs_mapping
self.outputs_mapping = outputs_mapping
def predict(self, identifiers, input_data):
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({k: v.shape for k, v in input_data.items()})
return self.exec_network.infer(input_data)
class RecognizerOVModel(BaseOpenVINOModel):
def __init__(self, network_info, launcher, suffix,
delayed_model_loading=False, inputs_mapping=None, outputs_mapping=None):
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.inputs_mapping = inputs_mapping
self.outputs_mapping = outputs_mapping
def predict(self, identifiers, input_data):
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({k: v.shape for k, v in input_data.items()})
return self.infer(input_data)
MODEL_TYPES = {
'SequentialTextRecognitionModel': SequentialTextRecognitionModel,
'SequentialFormulaRecognitionModel': SequentialFormulaRecognitionModel,
}
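# A minimal usage sketch (added illustration, assuming a config dict shaped like the
# evaluator's): MODEL_TYPES maps the `model_type` config value to a model class,
# mirroring TextRecognitionWithAttentionEvaluator.from_configs.
def _example_select_model_class(config):
    model_type = config.get('model_type', 'SequentialFormulaRecognitionModel')
    if model_type not in MODEL_TYPES:
        raise ValueError(f'Model type {model_type} is not supported')
    return MODEL_TYPES[model_type]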
| 48
| 118
| 0.642471
|
from functools import partial
import numpy as np
from .base_custom_evaluator import BaseCustomEvaluator
from .base_models import BaseDLSDKModel, BaseOpenVINOModel, BaseCascadeModel, create_model
from ...config import ConfigError
from ...utils import contains_all, extract_image_representations, generate_layer_name
from ...representation import CharacterRecognitionPrediction, CharacterRecognitionAnnotation
class TextRecognitionWithAttentionEvaluator(BaseCustomEvaluator):
def __init__(self, dataset_config, launcher, model, lowercase, orig_config):
super().__init__(dataset_config, launcher, orig_config)
self.model = model
self.lowercase = lowercase
@classmethod
def from_configs(cls, config, delayed_model_loading=False, orig_config=None):
dataset_config, launcher, _ = cls.get_dataset_and_launcher_info(config)
lowercase = config.get('lowercase', False)
model_type = config.get('model_type', 'SequentialFormulaRecognitionModel')
if model_type not in MODEL_TYPES.keys():
raise ValueError(f'Model type {model_type} is not supported')
meta = {}
if config.get('custom_label_map'):
meta.update({
'custom_label_map': config['custom_label_map']
})
if config.get('max_seq_len'):
meta.update({
'max_seq_len': config['max_seq_len']
})
model = MODEL_TYPES[model_type](
config.get('network_info', {}), launcher, config.get('_models', []), meta, config.get('_model_is_blob'),
delayed_model_loading=delayed_model_loading
)
return cls(dataset_config, launcher, model, lowercase, orig_config)
def _process(self, output_callback, calculate_metrics, progress_reporter, metric_config, csv_file):
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_data, batch_meta = extract_image_representations(batch_inputs)
temporal_output_callback = None
if output_callback:
temporal_output_callback = partial(output_callback, metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_prediction, batch_raw_prediction = self.model.predict(
batch_identifiers, batch_data, callback=temporal_output_callback
)
if self.lowercase:
batch_prediction = batch_prediction.lower()
batch_annotation = [CharacterRecognitionAnnotation(
label=ann.label.lower(), identifier=ann.identifier) for ann in batch_annotation]
batch_prediction = [CharacterRecognitionPrediction(
label=batch_prediction, identifier=batch_annotation[0].identifier)]
batch_annotation, batch_prediction = self.postprocessor.process_batch(
batch_annotation, batch_prediction, batch_meta
)
metrics_result = self._get_metrics_result(batch_input_ids, batch_annotation, batch_prediction,
calculate_metrics)
if output_callback:
output_callback(batch_raw_prediction, metrics_result=metrics_result,
element_identifiers=batch_identifiers, dataset_indices=batch_input_ids)
self._update_progress(progress_reporter, metric_config, batch_id, len(batch_prediction), csv_file)
def reset(self):
super().reset()
self.model.reset()
def select_dataset(self, dataset_tag):
super().select_dataset(dataset_tag)
if self.model.vocab is None:
self.model.vocab = self.dataset.metadata.get('vocab', {})
class BaseSequentialModel(BaseCascadeModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(network_info, launcher)
parts = ['recognizer_encoder', 'recognizer_decoder']
network_info = self.fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading)
if not contains_all(network_info, parts) and not delayed_model_loading:
raise ConfigError('network_info should contain encoder and decoder fields')
self._recognizer_mapping = {
'dlsdk': RecognizerDLSDKModel,
'openvino': RecognizerOVModel,
}
self.recognizer_encoder = create_model(network_info['recognizer_encoder'], launcher, self._recognizer_mapping,
'encoder', delayed_model_loading=delayed_model_loading)
self.recognizer_decoder = create_model(network_info['recognizer_decoder'], launcher, self._recognizer_mapping,
'decoder', delayed_model_loading=delayed_model_loading)
self.sos_index = 0
self.eos_index = 2
self.max_seq_len = int(meta.get('max_seq_len', 0))
self._part_by_name = {'encoder': self.recognizer_encoder, 'decoder': self.recognizer_decoder}
self.with_prefix = False
def load_model(self, network_list, launcher):
super().load_model(network_list, launcher)
self.update_inputs_outputs_info()
def load_network(self, network_list, launcher):
super().load_network(network_list, launcher)
self.update_inputs_outputs_info()
def update_inputs_outputs_info(self):
with_prefix = next(iter(self.recognizer_encoder.network.input_info)).startswith('encoder')
if with_prefix != self.with_prefix:
for input_k, input_name in self.recognizer_encoder.inputs_mapping.items():
self.recognizer_encoder.inputs_mapping[input_k] = generate_layer_name(input_name, 'encoder_',
with_prefix)
for out_k, out_name in self.recognizer_encoder.outputs_mapping.items():
self.recognizer_encoder.outputs_mapping[out_k] = generate_layer_name(out_name, 'encoder_',
with_prefix)
for input_k, input_name in self.recognizer_decoder.inputs_mapping.items():
self.recognizer_decoder.inputs_mapping[input_k] = generate_layer_name(input_name, 'decoder_',
with_prefix)
for out_k, out_name in self.recognizer_decoder.outputs_mapping.items():
self.recognizer_decoder.outputs_mapping[out_k] = generate_layer_name(out_name, 'decoder_',
with_prefix)
self.with_prefix = with_prefix
def predict(self, identifiers, input_data):
pass
class SequentialTextRecognitionModel(BaseSequentialModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(
network_info, launcher, models_args, meta, is_blob=is_blob,
delayed_model_loading=delayed_model_loading
)
self.vocab = meta.get('custom_label_map')
self.recognizer_encoder.inputs_mapping = {'imgs': 'imgs'}
self.recognizer_encoder.outputs_mapping = {'features': 'features', 'decoder_hidden': 'decoder_hidden'}
self.recognizer_decoder.inputs_mapping = {
'features': 'features', 'hidden': 'hidden', 'decoder_input': 'decoder_input'
}
self.recognizer_decoder.outputs_mapping = {
'decoder_hidden': 'decoder_hidden',
'decoder_output': 'decoder_output'
}
def get_phrase(self, indices):
res = ''.join(self.vocab.get(idx, '?') for idx in indices)
return res
def predict(self, identifiers, input_data, callback=None):
assert len(identifiers) == 1
input_data = np.array(input_data)
input_data = np.transpose(input_data, (0, 3, 1, 2))
enc_res = self.recognizer_encoder.predict(identifiers,
{self.recognizer_encoder.inputs_mapping['imgs']: input_data})
if callback:
callback(enc_res)
features = enc_res[self.recognizer_encoder.outputs_mapping['features']]
dec_state = enc_res[self.recognizer_encoder.outputs_mapping['decoder_hidden']]
tgt = np.array([[self.sos_index]])
logits = []
for _ in range(self.max_seq_len):
dec_res = self.recognizer_decoder.predict(
identifiers,
{
self.recognizer_decoder.inputs_mapping['features']: features,
self.recognizer_decoder.inputs_mapping['hidden']: dec_state,
self.recognizer_decoder.inputs_mapping['decoder_input']: tgt
})
dec_state = dec_res[self.recognizer_decoder.outputs_mapping['decoder_hidden']]
logit = dec_res[self.recognizer_decoder.outputs_mapping['decoder_output']]
tgt = np.argmax(logit, axis=1)
if self.eos_index == tgt[0]:
break
logits.append(logit)
if callback:
callback(dec_res)
logits = np.array(logits)
logits = logits.squeeze(axis=1)
targets = np.argmax(logits, axis=1)
result_phrase = self.get_phrase(targets)
return result_phrase, dec_res
class SequentialFormulaRecognitionModel(BaseSequentialModel):
def __init__(self, network_info, launcher, models_args, meta, is_blob=None, delayed_model_loading=False):
super().__init__(network_info, launcher, models_args, meta, is_blob,
delayed_model_loading=delayed_model_loading)
self.vocab = meta.get('vocab')
self.recognizer_encoder.inputs_mapping = {
'imgs': 'imgs'
}
self.recognizer_encoder.outputs_mapping = {
'row_enc_out': 'row_enc_out',
'hidden': 'hidden',
'context': 'context',
'init_0': 'init_0'
}
self.recognizer_decoder.inputs_mapping = {
'row_enc_out': 'row_enc_out',
'dec_st_c': 'dec_st_c',
'dec_st_h': 'dec_st_h',
'output_prev': 'output_prev',
'tgt': 'tgt'
}
self.recognizer_decoder.outputs_mapping = {
'dec_st_h_t': 'dec_st_h_t',
'dec_st_c_t': 'dec_st_c_t',
'output': 'output',
'logit': 'logit'
}
def get_phrase(self, indices):
res = ''
for idx in indices:
if idx != self.eos_index:
res += ' ' + str(self.vocab.get(idx, '?'))
else:
return res.strip()
return res.strip()
def predict(self, identifiers, input_data, callback=None):
assert len(identifiers) == 1
input_data = np.array(input_data)
input_data = np.transpose(input_data, (0, 3, 1, 2))
enc_res = self.recognizer_encoder.predict(identifiers,
{self.recognizer_encoder.inputs_mapping['imgs']: input_data})
if callback:
callback(enc_res)
row_enc_out = enc_res[self.recognizer_encoder.outputs_mapping['row_enc_out']]
dec_states_h = enc_res[self.recognizer_encoder.outputs_mapping['hidden']]
dec_states_c = enc_res[self.recognizer_encoder.outputs_mapping['context']]
O_t = enc_res[self.recognizer_encoder.outputs_mapping['init_0']]
tgt = np.array([[self.sos_index]])
logits = []
for _ in range(self.max_seq_len):
dec_res = self.recognizer_decoder.predict(
identifiers,
{
self.recognizer_decoder.inputs_mapping['row_enc_out']: row_enc_out,
self.recognizer_decoder.inputs_mapping['dec_st_c']: dec_states_c,
self.recognizer_decoder.inputs_mapping['dec_st_h']: dec_states_h,
self.recognizer_decoder.inputs_mapping['output_prev']: O_t,
self.recognizer_decoder.inputs_mapping['tgt']: tgt
})
if callback:
callback(dec_res)
dec_states_h = dec_res[self.recognizer_decoder.outputs_mapping['dec_st_h_t']]
dec_states_c = dec_res[self.recognizer_decoder.outputs_mapping['dec_st_c_t']]
O_t = dec_res[self.recognizer_decoder.outputs_mapping['output']]
logit = dec_res[self.recognizer_decoder.outputs_mapping['logit']]
logits.append(logit)
tgt = np.array([[np.argmax(np.array(logit), axis=1)]])
if tgt[0][0][0] == self.eos_index:
break
logits = np.array(logits)
logits = logits.squeeze(axis=1)
targets = np.argmax(logits, axis=1)
result_phrase = self.get_phrase(targets)
return result_phrase, dec_res
class RecognizerDLSDKModel(BaseDLSDKModel):
def __init__(self, network_info, launcher, suffix,
delayed_model_loading=False, inputs_mapping=None, outputs_mapping=None):
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.inputs_mapping = inputs_mapping
self.outputs_mapping = outputs_mapping
def predict(self, identifiers, input_data):
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({k: v.shape for k, v in input_data.items()})
return self.exec_network.infer(input_data)
class RecognizerOVModel(BaseOpenVINOModel):
def __init__(self, network_info, launcher, suffix,
delayed_model_loading=False, inputs_mapping=None, outputs_mapping=None):
super().__init__(network_info, launcher, suffix, delayed_model_loading)
self.inputs_mapping = inputs_mapping
self.outputs_mapping = outputs_mapping
def predict(self, identifiers, input_data):
if not self.is_dynamic and self.dynamic_inputs:
self._reshape_input({k: v.shape for k, v in input_data.items()})
return self.infer(input_data)
MODEL_TYPES = {
'SequentialTextRecognitionModel': SequentialTextRecognitionModel,
'SequentialFormulaRecognitionModel': SequentialFormulaRecognitionModel,
}
| true
| true
|
1c44f0fd84aad62f085f8065aec4b09e83ec9f76
| 20,360
|
py
|
Python
|
QGrain/algorithms.py
|
erslog/QGrain
|
9644415c73a929bbdd30d7eb4c3fa861401a5ea4
|
[
"MIT"
] | 1
|
2020-12-20T13:24:44.000Z
|
2020-12-20T13:24:44.000Z
|
QGrain/algorithms.py
|
erslog/QGrain
|
9644415c73a929bbdd30d7eb4c3fa861401a5ea4
|
[
"MIT"
] | null | null | null |
QGrain/algorithms.py
|
erslog/QGrain
|
9644415c73a929bbdd30d7eb4c3fa861401a5ea4
|
[
"MIT"
] | null | null | null |
import weakref
from enum import Enum, unique
from threading import Lock
from typing import Callable, Dict, Iterable, List, Tuple
import numpy as np
from scipy.special import gamma
INFINITESIMAL = 1e-100
FRACTION_PARAM_NAME = "f"
NAME_KEY = "Name"
BOUNDS_KEY = "Bounds"
DEFAULT_VALUE_KEY = "Default"
LOCATION_KEY = "Location"
COMPONENT_INDEX_KEY = "ComponentIndex"
PARAM_INDEX_KEY = "ParamIndex"
@unique
class DistributionType(Enum):
Normal = 0
Weibull = 1
GeneralWeibull = 2
def check_component_number(component_number: int):
# Check the validity of `component_number`
if type(component_number) != int:
raise TypeError(component_number)
elif component_number < 1:
raise ValueError(component_number)
def get_param_count(distribution_type: DistributionType) -> int:
if distribution_type == DistributionType.Normal:
return 2
elif distribution_type == DistributionType.Weibull:
return 2
elif distribution_type == DistributionType.GeneralWeibull:
return 3
else:
raise NotImplementedError(distribution_type)
def get_param_names(distribution_type: DistributionType) -> Tuple[str]:
if distribution_type == DistributionType.Normal:
return ("mu", "sigma")
elif distribution_type == DistributionType.Weibull:
return ("beta", "eta")
elif distribution_type == DistributionType.GeneralWeibull:
return ("mu", "beta", "eta")
else:
raise NotImplementedError(distribution_type)
def get_base_func_name(distribution_type: DistributionType) -> str:
if distribution_type == DistributionType.Normal:
return "normal"
elif distribution_type == DistributionType.Weibull:
return "weibull"
elif distribution_type == DistributionType.GeneralWeibull:
return "gen_weibull"
else:
raise NotImplementedError(distribution_type)
def get_param_bounds(distribution_type: DistributionType) -> Tuple[Tuple[float, float]]:
if distribution_type == DistributionType.Normal:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.Weibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.GeneralWeibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None), (INFINITESIMAL, None))
else:
raise NotImplementedError(distribution_type)
# in order to obtain better fitting performance,
# the default params of the components are deliberately made different from each other
def get_param_defaults(distribution_type: DistributionType, component_number: int) -> Tuple[Tuple]:
check_component_number(component_number)
if distribution_type == DistributionType.Normal:
return tuple(((i*10, 2+i) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.Weibull:
return tuple(((10+i, (i+1)*15) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.GeneralWeibull:
return tuple(((0, 2+i, i*10) for i in range(1, component_number+1)))
else:
raise NotImplementedError(distribution_type)
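# e.g. get_param_defaults(DistributionType.Weibull, 2) yields ((11, 30), (12, 45)),
# so every component starts the fit from distinct values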
def get_params(distribution_type: DistributionType, component_number: int) -> List[Dict]:
check_component_number(component_number)
params = []
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
param_bounds = get_param_bounds(distribution_type)
param_defaults = get_param_defaults(distribution_type, component_number)
# generate params for all components
for component_index, component_defaults in enumerate(param_defaults):
for param_index, name, bounds, defalut in zip(range(param_count), param_names, param_bounds, component_defaults):
params.append({NAME_KEY: name+str(component_index+1), BOUNDS_KEY: bounds,
DEFAULT_VALUE_KEY: defalut, COMPONENT_INDEX_KEY: component_index,
PARAM_INDEX_KEY: param_index, LOCATION_KEY: component_index*param_count+param_index})
# generate fraction params for the first n-1 components
for component_index in range(component_number-1):
# the fraction of each distribution
params.append({NAME_KEY: FRACTION_PARAM_NAME+str(component_index+1), BOUNDS_KEY: (0, 1),
DEFAULT_VALUE_KEY: 1/component_number, COMPONENT_INDEX_KEY: component_index,
LOCATION_KEY: component_number*param_count + component_index})
sort_params_by_location_in_place(params)
return params
def sort_params_by_location_in_place(params: List[Dict]):
params.sort(key=lambda element: element[LOCATION_KEY])
def get_bounds(params: List[Dict]) -> Tuple[Tuple]:
bounds = []
for param in params:
bounds.append(param[BOUNDS_KEY])
return tuple(bounds)
def get_constrains(component_number: int) -> Tuple[Dict]:
if component_number == 1:
return ()
elif component_number > 1:
return ({'type': 'ineq', 'fun': lambda args: 1 - np.sum(args[1-component_number:]) + INFINITESIMAL})
else:
raise ValueError(component_number)
def get_defaults(params: List[Dict]) -> Tuple[float]:
defaults = []
for param in params:
defaults.append(param[DEFAULT_VALUE_KEY])
return tuple(defaults)
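# A minimal fitting sketch (added illustration; the real optimiser call lives outside
# this module, and `objective` is an assumed user-supplied callable of the packed
# parameter vector): bounds, defaults and constraints plug directly into
# scipy.optimize.minimize, whose constraint-dict format get_constrains follows.
def _example_fit(objective, distribution_type, component_number):
    from scipy.optimize import minimize
    params = get_params(distribution_type, component_number)
    return minimize(objective, x0=get_defaults(params), bounds=get_bounds(params),
                    constraints=get_constrains(component_number))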
def get_lambda_str(distribution_type: DistributionType, component_number:int) -> str:
base_func_name = get_base_func_name(distribution_type)
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
if component_number == 1:
return "lambda x, {0}: {1}(x, {0})".format(", ".join(param_names), base_func_name)
elif component_number > 1:
parameter_list = ", ".join(["x"] + [name+str(i+1) for i in range(component_number) for name in param_names] + [FRACTION_PARAM_NAME+str(i+1) for i in range(component_number-1)])
# " + " to connect each sub-function
# the previous sub-function str list means the m-1 sub-functions with n params `fj * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
# the last sub-function str which represents `(1-f_1-...-f_j-...-f_m-1) * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`
previous_format_str = "{0}{1}*{2}(x, " + ", ".join(["{"+str(i+3)+"}{1}" for i in range(param_count)]) + ")"
previous_sub_func_strs = [previous_format_str.format(FRACTION_PARAM_NAME, i+1, base_func_name, *param_names) for i in range(component_number-1)]
last_format_str = "({0})*{1}(x, " + ", ".join(["{"+str(i+3)+"}{2}" for i in range(param_count)]) + ")"
last_sub_func_str = last_format_str.format("-".join(["1"]+["f{0}".format(i+1) for i in range(component_number-1)]), base_func_name, component_number, *param_names)
expression = " + ".join(previous_sub_func_strs + [last_sub_func_str])
lambda_string = "lambda {0}: {1}".format(parameter_list, expression)
return lambda_string
else:
raise ValueError(component_number)
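# An added illustration: for a two-component Weibull mixture, get_lambda_str produces
#   "lambda x, beta1, eta1, beta2, eta2, f1: f1*weibull(x, beta1, eta1) + (1-f1)*weibull(x, beta2, eta2)"
# which AlgorithmData later compiles into a callable via exec(). A minimal self-check:
def _example_two_component_weibull_mixture():
    mixed_func = eval(get_lambda_str(DistributionType.Weibull, 2))  # resolves `weibull` from module globals
    x = np.linspace(0.1, 100.0, 5)
    return mixed_func(x, 2.0, 15.0, 3.0, 40.0, 0.4)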
# process the raw params list to make it easy to use
def process_params(distribution_type: DistributionType, component_number: int, fitted_params: Iterable) -> Tuple[Tuple[Tuple, float]]:
param_count = get_param_count(distribution_type)
if component_number == 1:
assert len(fitted_params) == param_count
return ((tuple(fitted_params), 1.0),)
elif component_number > 1:
assert len(fitted_params) == (param_count+1) * component_number - 1
expanded = list(fitted_params) + [1.0-sum(fitted_params[component_number*param_count:])]
return tuple(((tuple(expanded[i*param_count:(i+1)*param_count]), expanded[component_number*param_count+i]) for i in range(component_number)))
else:
raise ValueError(component_number)
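# e.g. for DistributionType.Weibull with two components, the flat vector
# (beta1, eta1, beta2, eta2, f1) is expanded into (((beta1, eta1), f1), ((beta2, eta2), 1-f1)):
# process_params(DistributionType.Weibull, 2, [2.0, 15.0, 3.0, 40.0, 0.4])
# == (((2.0, 15.0), 0.4), ((3.0, 40.0), 0.6)), up to floating-point rounding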
# the pdf function of Normal distribution
def normal(x, mu, sigma):
if sigma <= 0.0:
return np.zeros_like(x, dtype=np.float64)
else:
return 1/(sigma*np.sqrt(2*np.pi))*np.exp(-np.square(x-mu)/(2*np.square(sigma)))
def double_normal(x, mu1, sigma1, mu2, sigma2, f1):
return f1 * normal(x, mu1, sigma1) + (1-f1) * normal(x, mu2, sigma2)
def triple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, f1, f2):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + (1-f1-f2) * normal(x, mu3, sigma3)
def quadruple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, f1, f2, f3):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + f3 * normal(x, mu3, sigma3) + (1-f1-f2-f3) * normal(x, mu4, sigma4)
def normal_mean(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_median(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_mode(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_standard_deviation(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma
def normal_variance(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma**2
def normal_skewness(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
def normal_kurtosis(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
# The pdf function of Weibull distribution
def weibull(x, beta, eta):
results = np.zeros_like(x, dtype=np.float64)
if beta <= 0.0 or eta <= 0.0:
return results
else:
non_zero = np.greater(x, 0.0)
results[non_zero] = (beta/eta) * (x[non_zero]/eta)**(beta-1) * np.exp(-(x[non_zero]/eta)**beta)
return results
# return (beta/eta) * (x/eta)**(beta-1) * np.exp(-(x/eta)**beta)
def double_weibull(x, beta1, eta1, beta2, eta2, f):
return f * weibull(x, beta1, eta1) + (1-f) * weibull(x, beta2, eta2)
def triple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, f1, f2):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + (1-f1-f2) * weibull(x, beta3, eta3)
def quadruple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, beta4, eta4, f1, f2, f3):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + f3 * weibull(x, beta3, eta3) + (1-f1-f2-f3) * weibull(x, beta4, eta4)
def weibull_mean(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*gamma(1/beta+1)
def weibull_median(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*(np.log(2)**(1/beta))
def weibull_mode(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
elif beta <= 1:
return 0.0
else:
return eta*(1-1/beta)**(1/beta)
def weibull_standard_deviation(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*np.sqrt(gamma(2/beta+1) - gamma(1/beta+1)**2)
def weibull_variance(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (eta**2)*(gamma(2/beta+1)-gamma(1/beta+1)**2)
def weibull_skewness(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (2*gamma(1/beta+1)**3 - 3*gamma(2/beta+1)*gamma(1/beta+1) + gamma(3/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**(3/2)
def weibull_kurtosis(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (-3*gamma(1/beta+1)**4 + 6*gamma(2/beta+1)*gamma(1/beta+1)**2 - 4*gamma(3/beta+1)*gamma(1/beta+1) + gamma(4/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**2
def gen_weibull(x, mu, beta, eta):
return weibull(x-mu, beta, eta)
def double_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, f):
return f * gen_weibull(x, mu1, beta1, eta1) + (1-f) * gen_weibull(x, mu2, beta2, eta2)
def triple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, f1, f2):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + (1-f1-f2)*gen_weibull(x, mu3, beta3, eta3)
def quadruple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, mu4, beta4, eta4, f1, f2, f3):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + f3 * gen_weibull(x, mu3, beta3, eta3) + (1-f1-f2-f3) * gen_weibull(x, mu4, beta4, eta4)
def gen_weibull_mean(mu, beta, eta):
return weibull_mean(beta, eta) + mu
def gen_weibull_median(mu, beta, eta):
return weibull_median(beta, eta) + mu
def gen_weibull_mode(mu, beta, eta):
return weibull_mode(beta, eta) + mu
def gen_weibull_standard_deviation(mu, beta, eta):
return weibull_standard_deviation(beta, eta)
def gen_weibull_variance(mu, beta, eta):
return weibull_variance(beta, eta)
def gen_weibull_skewness(mu, beta, eta):
return weibull_skewness(beta, eta)
def gen_weibull_kurtosis(mu, beta, eta):
return weibull_kurtosis(beta, eta)
def get_single_func(distribution_type: DistributionType) -> Callable:
if distribution_type == DistributionType.Normal:
return normal
elif distribution_type == DistributionType.Weibull:
return weibull
elif distribution_type == DistributionType.GeneralWeibull:
return gen_weibull
else:
raise NotImplementedError(distribution_type)
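# get_param_by_mean back-solves the scale-like parameter from the requested component
# means while keeping the other defaults: for Weibull it inverts mean = eta*gamma(1/beta+1),
# and for the general Weibull the offset mu is subtracted from the mean first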
def get_param_by_mean(distribution_type: DistributionType, component_number: int, mean_values: Iterable):
assert len(mean_values) == component_number
param_count = get_param_count(distribution_type)
func_params = get_params(distribution_type, component_number)
param_values = list(get_defaults(func_params))
if distribution_type == DistributionType.Normal:
for i in range(component_number):
# for normal distribution
# only change the location param (first param of each component)
param_values[i*param_count] = mean_values[i]
elif distribution_type == DistributionType.Weibull:
for i in range(component_number):
beta = param_values[i*param_count]
param_values[i*param_count+1] = mean_values[i] / gamma(1/beta+1)
elif distribution_type == DistributionType.GeneralWeibull:
for i in range(component_number):
mu = param_values[i*param_count]
beta = param_values[i*param_count+1]
param_values[i*param_count+2] = (mean_values[i]-mu) / gamma(1/beta+1)
else:
raise NotImplementedError(distribution_type)
return tuple(param_values)
class AlgorithmData:
__cache = weakref.WeakValueDictionary()
__cache_lock = Lock()
def __init__(self, distribution_type: DistributionType, component_number: int):
check_component_number(component_number)
self.__distribution_type = distribution_type
self.__component_number = component_number
self.__param_count = get_param_count(self.distribution_type)
self.__param_names = get_param_names(self.distribution_type)
self.__single_func = get_single_func(distribution_type)
self.__lambda_str = get_lambda_str(distribution_type, component_number)
self.__mixed_func = self.__get_func_by_lambda_str(self.__lambda_str)
self.__func_params = get_params(distribution_type, component_number)
self.__bounds = get_bounds(self.__func_params)
self.__defaults = get_defaults(self.__func_params)
self.__constrains = get_constrains(component_number)
self.__get_statistic_func()
def __get_func_by_lambda_str(self, lambda_str: str) -> Callable:
local_params = {"__tempMixedFunc": None}
exec("__tempMixedFunc=" + lambda_str, None, local_params)
mixed_func = local_params["__tempMixedFunc"]
return mixed_func
def __get_statistic_func(self):
if self.distribution_type == DistributionType.Normal:
self.__mean = normal_mean
self.__median = normal_median
self.__mode = normal_mode
self.__standard_deviation = normal_standard_deviation
self.__variance = normal_variance
self.__skewness = normal_skewness
self.__kurtosis = normal_kurtosis
elif self.distribution_type == DistributionType.Weibull:
self.__mean = weibull_mean
self.__median = weibull_median
self.__mode = weibull_mode
self.__standard_deviation = weibull_standard_deviation
self.__variance = weibull_variance
self.__skewness = weibull_skewness
self.__kurtosis = weibull_kurtosis
elif self.distribution_type == DistributionType.GeneralWeibull:
self.__mean = gen_weibull_mean
self.__median = gen_weibull_median
self.__mode = gen_weibull_mode
self.__standard_deviation = gen_weibull_standard_deviation
self.__variance = gen_weibull_variance
self.__skewness = gen_weibull_skewness
self.__kurtosis = gen_weibull_kurtosis
else:
raise NotImplementedError(self.distribution_type)
@property
def distribution_type(self) -> DistributionType:
return self.__distribution_type
@property
def component_number(self) -> int:
return self.__component_number
@property
def param_count(self) -> int:
return self.__param_count
@property
def param_names(self) -> Tuple[str]:
return self.__param_names
@property
def single_func(self) -> Callable:
return self.__single_func
@property
def mixed_func(self) -> Callable:
return self.__mixed_func
@property
def bounds(self) -> Tuple[Tuple]:
return self.__bounds
@property
def defaults(self) -> Tuple[float]:
return self.__defaults
@property
def constrains(self) -> Tuple[Dict]:
return self.__constrains
@property
def mean(self) -> Callable:
return self.__mean
@property
def median(self) -> Callable:
return self.__median
@property
def mode(self) -> Callable:
return self.__mode
@property
def variance(self) -> Callable:
return self.__variance
@property
def standard_deviation(self) -> Callable:
return self.__standard_deviation
@property
def skewness(self) -> Callable:
return self.__skewness
@property
def kurtosis(self) -> Callable:
return self.__kurtosis
@classmethod
def get_algorithm_data(cls, distribution_type: DistributionType,
component_number: int):
cls.__cache_lock.acquire()
key = (distribution_type, component_number)
if key in cls.__cache:
data = cls.__cache[key]
else:
data = AlgorithmData(distribution_type, component_number)
cls.__cache[key] = data
cls.__cache_lock.release()
return data
def process_params(self, fitted_params: Iterable, x_offset: float) -> Tuple[Tuple[Tuple, float]]:
params_copy = np.array(fitted_params)
param_count = get_param_count(self.distribution_type)
if self.distribution_type == DistributionType.Normal or self.distribution_type == DistributionType.GeneralWeibull:
for i in range(self.component_number):
params_copy[i*param_count] += x_offset
return process_params(self.distribution_type, self.component_number, params_copy)
def get_param_by_mean(self, mean_values: Iterable):
return get_param_by_mean(self.distribution_type, self.component_number, mean_values)
if __name__ == "__main__":
# test the generating speed of algorithm data
import time
import sys
start_uncached = time.time()
data_list_uncached = []
for i in range(10000):
for component_number in range(3, 11):
data = AlgorithmData(DistributionType.GeneralWeibull, component_number)
data_list_uncached.append(data)
end_uncached = time.time()
print("Uncached time spent:", end_uncached-start_uncached, "s")
start_cached = time.time()
data_list_cached = []
for i in range(10000):
for component_number in range(3, 11):
data = AlgorithmData.get_algorithm_data(DistributionType.GeneralWeibull, component_number)
data_list_cached.append(data)
end_cached = time.time()
print("Cached time spent:", end_cached-start_cached, "s")
| 39.078695
| 184
| 0.677701
|
import weakref
from enum import Enum, unique
from threading import Lock
from typing import Callable, Dict, Iterable, List, Tuple
import numpy as np
from scipy.special import gamma
INFINITESIMAL = 1e-100
FRACTION_PARAM_NAME = "f"
NAME_KEY = "Name"
BOUNDS_KEY = "Bounds"
DEFAULT_VALUE_KEY = "Default"
LOCATION_KEY = "Location"
COMPONENT_INDEX_KEY = "ComponentIndex"
PARAM_INDEX_KEY = "ParamIndex"
@unique
class DistributionType(Enum):
Normal = 0
Weibull = 1
GeneralWeibull = 2
def check_component_number(component_number: int):
if type(component_number) != int:
raise TypeError(component_number)
elif component_number < 1:
raise ValueError(component_number)
def get_param_count(distribution_type: DistributionType) -> int:
if distribution_type == DistributionType.Normal:
return 2
elif distribution_type == DistributionType.Weibull:
return 2
elif distribution_type == DistributionType.GeneralWeibull:
return 3
else:
raise NotImplementedError(distribution_type)
def get_param_names(distribution_type: DistributionType) -> Tuple[str]:
if distribution_type == DistributionType.Normal:
return ("mu", "sigma")
elif distribution_type == DistributionType.Weibull:
return ("beta", "eta")
elif distribution_type == DistributionType.GeneralWeibull:
return ("mu", "beta", "eta")
else:
raise NotImplementedError(distribution_type)
def get_base_func_name(distribution_type: DistributionType) -> str:
if distribution_type == DistributionType.Normal:
return "normal"
elif distribution_type == DistributionType.Weibull:
return "weibull"
elif distribution_type == DistributionType.GeneralWeibull:
return "gen_weibull"
else:
raise NotImplementedError(distribution_type)
def get_param_bounds(distribution_type: DistributionType) -> Tuple[Tuple[float, float]]:
if distribution_type == DistributionType.Normal:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.Weibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None))
elif distribution_type == DistributionType.GeneralWeibull:
return ((INFINITESIMAL, None), (INFINITESIMAL, None), (INFINITESIMAL, None))
else:
raise NotImplementedError(distribution_type)
def get_param_defaults(distribution_type: DistributionType, component_number: int) -> Tuple[Tuple]:
check_component_number(component_number)
if distribution_type == DistributionType.Normal:
return tuple(((i*10, 2+i) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.Weibull:
return tuple(((10+i, (i+1)*15) for i in range(1, component_number+1)))
elif distribution_type == DistributionType.GeneralWeibull:
return tuple(((0, 2+i, i*10) for i in range(1, component_number+1)))
else:
raise NotImplementedError(distribution_type)
def get_params(distribution_type: DistributionType, component_number: int) -> List[Dict]:
check_component_number(component_number)
params = []
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
param_bounds = get_param_bounds(distribution_type)
param_defaults = get_param_defaults(distribution_type, component_number)
for component_index, component_defaults in enumerate(param_defaults):
for param_index, name, bounds, defalut in zip(range(param_count), param_names, param_bounds, component_defaults):
params.append({NAME_KEY: name+str(component_index+1), BOUNDS_KEY: bounds,
DEFAULT_VALUE_KEY: defalut, COMPONENT_INDEX_KEY: component_index,
PARAM_INDEX_KEY: param_index, LOCATION_KEY: component_index*param_count+param_index})
for component_index in range(component_number-1):
params.append({NAME_KEY: FRACTION_PARAM_NAME+str(component_index+1), BOUNDS_KEY: (0, 1),
DEFAULT_VALUE_KEY: 1/component_number, COMPONENT_INDEX_KEY: component_index,
LOCATION_KEY: component_number*param_count + component_index})
sort_params_by_location_in_place(params)
return params
def sort_params_by_location_in_place(params: List[Dict]):
params.sort(key=lambda element: element[LOCATION_KEY])
def get_bounds(params: List[Dict]) -> Tuple[Tuple]:
bounds = []
for param in params:
bounds.append(param[BOUNDS_KEY])
return tuple(bounds)
def get_constrains(component_number: int) -> Tuple[Dict]:
if component_number == 1:
return ()
elif component_number > 1:
return ({'type': 'ineq', 'fun': lambda args: 1 - np.sum(args[1-component_number:]) + INFINITESIMAL})
else:
raise ValueError(component_number)
def get_defaults(params: List[Dict]) -> Tuple[float]:
defaults = []
for param in params:
defaults.append(param[DEFAULT_VALUE_KEY])
return tuple(defaults)
def get_lambda_str(distribution_type: DistributionType, component_number:int) -> str:
base_func_name = get_base_func_name(distribution_type)
param_count = get_param_count(distribution_type)
param_names = get_param_names(distribution_type)
if component_number == 1:
return "lambda x, {0}: {1}(x, {0})".format(", ".join(param_names), base_func_name)
elif component_number > 1:
parameter_list = ", ".join(["x"] + [name+str(i+1) for i in range(component_number) for name in param_names] + [FRACTION_PARAM_NAME+str(i+1) for i in range(component_number-1)])
previous_format_str = "{0}{1}*{2}(x, " + ", ".join(["{"+str(i+3)+"}{1}" for i in range(param_count)]) + ")"
previous_sub_func_strs = [previous_format_str.format(FRACTION_PARAM_NAME, i+1, base_func_name, *param_names) for i in range(component_number-1)]
last_format_str = "({0})*{1}(x, " + ", ".join(["{"+str(i+3)+"}{2}" for i in range(param_count)]) + ")"
last_sub_func_str = last_format_str.format("-".join(["1"]+["f{0}".format(i+1) for i in range(component_number-1)]), base_func_name, component_number, *param_names)
expression = " + ".join(previous_sub_func_strs + [last_sub_func_str])
lambda_string = "lambda {0}: {1}".format(parameter_list, expression)
return lambda_string
else:
raise ValueError(component_number)
def process_params(distribution_type: DistributionType, component_number: int, fitted_params: Iterable) -> Tuple[Tuple[Tuple, float]]:
param_count = get_param_count(distribution_type)
if component_number == 1:
assert len(fitted_params) == param_count
return ((tuple(fitted_params), 1.0),)
elif component_number > 1:
assert len(fitted_params) == (param_count+1) * component_number - 1
expanded = list(fitted_params) + [1.0-sum(fitted_params[component_number*param_count:])]
return tuple(((tuple(expanded[i*param_count:(i+1)*param_count]), expanded[component_number*param_count+i]) for i in range(component_number)))
else:
raise ValueError(component_number)
def normal(x, mu, sigma):
if sigma <= 0.0:
return np.zeros_like(x, dtype=np.float64)
else:
return 1/(sigma*np.sqrt(2*np.pi))*np.exp(-np.square(x-mu)/(2*np.square(sigma)))
def double_normal(x, mu1, sigma1, mu2, sigma2, f1):
return f1 * normal(x, mu1, sigma1) + (1-f1) * normal(x, mu2, sigma2)
def triple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, f1, f2):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + (1-f1-f2) * normal(x, mu3, sigma3)
def quadruple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, f1, f2, f3):
return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + f3 * normal(x, mu3, sigma3) + (1-f1-f2-f3) * normal(x, mu4, sigma4)
def normal_mean(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_median(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_mode(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return mu
def normal_standard_deviation(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma
def normal_variance(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return sigma**2
def normal_skewness(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
def normal_kurtosis(mu, sigma):
if sigma <= 0.0:
return np.nan
else:
return 0.0
def weibull(x, beta, eta):
results = np.zeros_like(x, dtype=np.float64)
if beta <= 0.0 or eta <= 0.0:
return results
else:
non_zero = np.greater(x, 0.0)
results[non_zero] = (beta/eta) * (x[non_zero]/eta)**(beta-1) * np.exp(-(x[non_zero]/eta)**beta)
return results
def double_weibull(x, beta1, eta1, beta2, eta2, f):
return f * weibull(x, beta1, eta1) + (1-f) * weibull(x, beta2, eta2)
def triple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, f1, f2):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + (1-f1-f2) * weibull(x, beta3, eta3)
def quadruple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, beta4, eta4, f1, f2, f3):
return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + f3 * weibull(x, beta3, eta3) + (1-f1-f2-f3) * weibull(x, beta4, eta4)
def weibull_mean(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*gamma(1/beta+1)
def weibull_median(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*(np.log(2)**(1/beta))
def weibull_mode(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
elif beta <= 1:
return 0.0
else:
return eta*(1-1/beta)**(1/beta)
def weibull_standard_deviation(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return eta*np.sqrt(gamma(2/beta+1) - gamma(1/beta+1)**2)
def weibull_variance(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (eta**2)*(gamma(2/beta+1)-gamma(1/beta+1)**2)
def weibull_skewness(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (2*gamma(1/beta+1)**3 - 3*gamma(2/beta+1)*gamma(1/beta+1) + gamma(3/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**(3/2)
def weibull_kurtosis(beta, eta):
if beta <= 0.0 or eta <= 0.0:
return np.nan
else:
return (-3*gamma(1/beta+1)**4 + 6*gamma(2/beta+1)*gamma(1/beta+1)**2 - 4*gamma(3/beta+1)*gamma(1/beta+1) + gamma(4/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**2
def gen_weibull(x, mu, beta, eta):
return weibull(x-mu, beta, eta)
def double_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, f):
return f * gen_weibull(x, mu1, beta1, eta1) + (1-f) * gen_weibull(x, mu2, beta2, eta2)
def triple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, f1, f2):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + (1-f1-f2)*gen_weibull(x, mu3, beta3, eta3)
def quadruple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, mu4, beta4, eta4, f1, f2, f3):
return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + f3 * gen_weibull(x, mu3, beta3, eta3) + (1-f1-f2-f3) * gen_weibull(x, mu4, beta4, eta4)
def gen_weibull_mean(mu, beta, eta):
return weibull_mean(beta, eta) + mu
def gen_weibull_median(mu, beta, eta):
return weibull_median(beta, eta) + mu
def gen_weibull_mode(mu, beta, eta):
return weibull_mode(beta, eta) + mu
def gen_weibull_standard_deviation(mu, beta, eta):
return weibull_standard_deviation(beta, eta)
def gen_weibull_variance(mu, beta, eta):
return weibull_variance(beta, eta)
def gen_weibull_skewness(mu, beta, eta):
return weibull_skewness(beta, eta)
def gen_weibull_kurtosis(mu, beta, eta):
return weibull_kurtosis(beta, eta)
def get_single_func(distribution_type: DistributionType) -> Callable:
if distribution_type == DistributionType.Normal:
return normal
elif distribution_type == DistributionType.Weibull:
return weibull
elif distribution_type == DistributionType.GeneralWeibull:
return gen_weibull
else:
raise NotImplementedError(distribution_type)
def get_param_by_mean(distribution_type: DistributionType, component_number: int, mean_values: Iterable):
assert len(mean_values) == component_number
param_count = get_param_count(distribution_type)
func_params = get_params(distribution_type, component_number)
param_values = list(get_defaults(func_params))
if distribution_type == DistributionType.Normal:
for i in range(component_number):
param_values[i*param_count] = mean_values[i]
elif distribution_type == DistributionType.Weibull:
for i in range(component_number):
beta = param_values[i*param_count]
param_values[i*param_count+1] = mean_values[i] / gamma(1/beta+1)
elif distribution_type == DistributionType.GeneralWeibull:
for i in range(component_number):
mu = param_values[i*param_count]
beta = param_values[i*param_count+1]
param_values[i*param_count+2] = (mean_values[i]-mu) / gamma(1/beta+1)
else:
raise NotImplementedError(distribution_type)
return tuple(param_values)
class AlgorithmData:
__cache = weakref.WeakValueDictionary()
__cache_lock = Lock()
def __init__(self, distribution_type: DistributionType, component_number: int):
check_component_number(component_number)
self.__distribution_type = distribution_type
self.__component_number = component_number
self.__param_count = get_param_count(self.distribution_type)
self.__param_names = get_param_names(self.distribution_type)
self.__single_func = get_single_func(distribution_type)
self.__lambda_str = get_lambda_str(distribution_type, component_number)
self.__mixed_func = self.__get_func_by_lambda_str(self.__lambda_str)
self.__func_params = get_params(distribution_type, component_number)
self.__bounds = get_bounds(self.__func_params)
self.__defaults = get_defaults(self.__func_params)
self.__constrains = get_constrains(component_number)
self.__get_statistic_func()
def __get_func_by_lambda_str(self, lambda_str: str) -> Callable:
local_params = {"__tempMixedFunc": None}
exec("__tempMixedFunc=" + lambda_str, None, local_params)
mixed_func = local_params["__tempMixedFunc"]
return mixed_func
def __get_statistic_func(self):
if self.distribution_type == DistributionType.Normal:
self.__mean = normal_mean
self.__median = normal_median
self.__mode = normal_mode
self.__standard_deviation = normal_standard_deviation
self.__variance = normal_variance
self.__skewness = normal_skewness
self.__kurtosis = normal_kurtosis
elif self.distribution_type == DistributionType.Weibull:
self.__mean = weibull_mean
self.__median = weibull_median
self.__mode = weibull_mode
self.__standard_deviation = weibull_standard_deviation
self.__variance = weibull_variance
self.__skewness = weibull_skewness
self.__kurtosis = weibull_kurtosis
elif self.distribution_type == DistributionType.GeneralWeibull:
self.__mean = gen_weibull_mean
self.__median = gen_weibull_median
self.__mode = gen_weibull_mode
self.__standard_deviation = gen_weibull_standard_deviation
self.__variance = gen_weibull_variance
self.__skewness = gen_weibull_skewness
self.__kurtosis = gen_weibull_kurtosis
else:
raise NotImplementedError(self.distribution_type)
@property
def distribution_type(self) -> DistributionType:
return self.__distribution_type
@property
def component_number(self) -> int:
return self.__component_number
@property
def param_count(self) -> int:
return self.__param_count
@property
def param_names(self) -> Tuple[str]:
return self.__param_names
@property
def single_func(self) -> Callable:
return self.__single_func
@property
def mixed_func(self) -> Callable:
return self.__mixed_func
@property
def bounds(self) -> Tuple[Tuple]:
return self.__bounds
@property
def defaults(self) -> Tuple[float]:
return self.__defaults
@property
def constrains(self) -> Tuple[Dict]:
return self.__constrains
@property
def mean(self) -> Callable:
return self.__mean
@property
def median(self) -> Callable:
return self.__median
@property
def mode(self) -> Callable:
return self.__mode
@property
def variance(self) -> Callable:
return self.__variance
@property
def standard_deviation(self) -> Callable:
return self.__standard_deviation
@property
def skewness(self) -> Callable:
return self.__skewness
@property
def kurtosis(self) -> Callable:
return self.__kurtosis
@classmethod
def get_algorithm_data(cls, distribution_type: DistributionType,
component_number: int):
cls.__cache_lock.acquire()
key = (distribution_type, component_number)
if key in cls.__cache:
data = cls.__cache[key]
else:
data = AlgorithmData(distribution_type, component_number)
cls.__cache[key] = data
cls.__cache_lock.release()
return data
def process_params(self, fitted_params: Iterable, x_offset: float) -> Tuple[Tuple[Tuple, float]]:
params_copy = np.array(fitted_params)
param_count = get_param_count(self.distribution_type)
if self.distribution_type == DistributionType.Normal or self.distribution_type == DistributionType.GeneralWeibull:
for i in range(self.component_number):
params_copy[i*param_count] += x_offset
return process_params(self.distribution_type, self.component_number, params_copy)
def get_param_by_mean(self, mean_values: Iterable):
return get_param_by_mean(self.distribution_type, self.component_number, mean_values)
if __name__ == "__main__":
import time
import sys
start_uncached = time.time()
data_list_uncached = []
for i in range(10000):
for component_number in range(3, 11):
data = AlgorithmData(DistributionType.GeneralWeibull, component_number)
data_list_uncached.append(data)
end_uncached = time.time()
print("Uncached time spent:", end_uncached-start_uncached, "s")
start_cached = time.time()
data_list_cached = []
for i in range(10000):
for component_number in range(3, 11):
data = AlgorithmData.get_algorithm_data(DistributionType.GeneralWeibull, component_number)
data_list_cached.append(data)
end_cached = time.time()
print("Cached time spent:", end_cached-start_cached, "s")
| true
| true
|
1c44f126bc35348c8eda7a554188d06765c226be
| 20,944
|
py
|
Python
|
electrum_ltc/gui/kivy/uix/screens.py
|
LedgerHQ/electrum-ltc
|
8307e3978b12ae27fc3f750f47cda7f18d5fafe5
|
[
"MIT"
] | null | null | null |
electrum_ltc/gui/kivy/uix/screens.py
|
LedgerHQ/electrum-ltc
|
8307e3978b12ae27fc3f750f47cda7f18d5fafe5
|
[
"MIT"
] | 1
|
2022-03-06T09:22:26.000Z
|
2022-03-06T09:22:26.000Z
|
electrum_ltc/gui/kivy/uix/screens.py
|
isabella232/electrum-ltc
|
8307e3978b12ae27fc3f750f47cda7f18d5fafe5
|
[
"MIT"
] | 1
|
2022-03-06T09:16:48.000Z
|
2022-03-06T09:16:48.000Z
|
import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum_ltc.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum_ltc.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum_ltc import bitcoin, constants
from electrum_ltc.transaction import Transaction, tx_from_any, PartialTransaction, PartialTxOutput
from electrum_ltc.util import parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice
from electrum_ltc.wallet import InternalAddressCorruption
from electrum_ltc import simple_config
from electrum_ltc.lnaddr import lndecode
from electrum_ltc.lnutil import RECEIVED, SENT, PaymentFailure
from electrum_ltc.logging import Logger
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum_ltc.gui.kivy import KIVY_GUI_PATH
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum_ltc.gui.kivy.main_window import ElectrumWindow
from electrum_ltc.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def on_enter(self):
# FIXME: use a proper event; don't use the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
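# the `status` returned by wallet.get_tx_status() is used directly as an index into
# TX_ICONS (see HistoryScreen.get_card below), so its ordering and length must stay
# in sync with the qt GUI's copy of this list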
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = f'atlas://{KIVY_GUI_PATH}/theming/light/lightning'
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = f'atlas://{KIVY_GUI_PATH}/theming/light/' + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_invoice_status(invoice)
status_str = invoice.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice):
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
if is_lightning:
assert isinstance(item, LNInvoice)
key = item.rhash
address = key
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
key = item.id
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
def save_invoice(self, invoice):
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice.is_lightning():
if self.app.wallet.lnworker:
self.app.protected(_('Pay lightning invoice?'), self._do_pay_lightning, (invoice,))
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
else:
self._do_pay_onchain(invoice)
def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
def pay_thread():
try:
self.app.wallet.lnworker.pay(invoice.invoice, attempts=10)
except Exception as e:
self.app.show_error(repr(e))
self.save_invoice(invoice)
threading.Thread(target=pay_thread).start()
def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
from .dialogs.confirm_tx_dialog import ConfirmTxDialog
d = ConfirmTxDialog(self.app, invoice)
d.open()
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
self.save_invoice(invoice)
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
self.is_max = False # not used for receiving (see app.amount_dialog)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum_ltc.util import create_bip21_uri
amount = self.amount
if amount:
a, u = self.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.address, amount, self.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
if lightning:
key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
if not self.app.wallet.is_deterministic():
addr = self.app.wallet.get_receiving_address()
else:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req: Invoice) -> Dict[str, Any]:
is_lightning = req.is_lightning()
if not is_lightning:
assert isinstance(req, OnchainInvoice)
address = req.get_address()
key = address
else:
assert isinstance(req, LNInvoice)
key = req.rhash
address = req.invoice
amount = req.get_amount_sat()
description = req.message
status = self.app.wallet.get_request_status(key)
status_str = req.get_status_str(status)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description or _('No Description')
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, request):
payments_container = self.ids.requests_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_request_status(key)
status_str = request.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data # needed?
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
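# Illustrative sketch (added for clarity, not part of the original file):
# ReceiveScreen.get_URI() converts the displayed amount string back to
# satoshis before building the BIP21 URI. Assuming the full 'LTC' unit
# (decimal_point() == 8), '1.5 LTC' becomes Decimal('1.5') * 10**8
# == 150000000 sat. The helper below only mirrors that conversion.
def _example_amount_to_sat(amount_str='1.5 LTC', decimal_point=8):
    a, _unit = amount_str.split()
    return Decimal(a) * pow(10, decimal_point)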
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
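# Illustrative sketch (added for clarity, not part of the original file):
# the tab-centring maths in TabbedCarousel.animate_tab_to_center() pins the
# first two tabs to the left edge (scroll_x = 1), the last two to the right
# edge (scroll_x = 0) and places middle tabs proportionally, e.g. with
# n = 5 tabs, idx = 2 gives 1. * (5 - 2 - 1) / (5 - 1) = 0.5.
def _example_scroll_x(idx, n):
    if idx in (0, 1):
        return 1
    if idx in (n - 1, n - 2):
        return 0
    return 1. * (n - idx - 1) / (n - 1)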
| 35.259259
| 130
| 0.605758
|
import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum_ltc.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum_ltc.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum_ltc import bitcoin, constants
from electrum_ltc.transaction import Transaction, tx_from_any, PartialTransaction, PartialTxOutput
from electrum_ltc.util import parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice
from electrum_ltc.wallet import InternalAddressCorruption
from electrum_ltc import simple_config
from electrum_ltc.lnaddr import lndecode
from electrum_ltc.lnutil import RECEIVED, SENT, PaymentFailure
from electrum_ltc.logging import Logger
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum_ltc.gui.kivy import KIVY_GUI_PATH
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum_ltc.gui.kivy.main_window import ElectrumWindow
from electrum_ltc.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app()
def on_enter(self):
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
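# Illustrative sketch (added for clarity, not part of the original module):
# the integer status returned by wallet.get_tx_status() is used directly as
# an index into TX_ICONS, exactly as in HistoryScreen.get_card() below;
# e.g. status 9 maps to the "confirmed" icon.
def _example_tx_icon(wallet, tx_hash, tx_mined_info):
    status, _status_str = wallet.get_tx_status(tx_hash, tx_mined_info)
    return f'atlas://{KIVY_GUI_PATH}/theming/light/' + TX_ICONS[status]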
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status = 0
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = f'atlas://{KIVY_GUI_PATH}/theming/light/lightning'
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = f'atlas://{KIVY_GUI_PATH}/theming/light/' + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff = True)
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_invoice_status(invoice)
status_str = invoice.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice):
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
if is_lightning:
assert isinstance(item, LNInvoice)
key = item.rhash
address = key
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
key = item.id
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
def save_invoice(self, invoice):
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice.is_lightning():
if self.app.wallet.lnworker:
self.app.protected(_('Pay lightning invoice?'), self._do_pay_lightning, (invoice,))
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
else:
self._do_pay_onchain(invoice)
def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
def pay_thread():
try:
self.app.wallet.lnworker.pay(invoice.invoice, attempts=10)
except Exception as e:
self.app.show_error(repr(e))
self.save_invoice(invoice)
threading.Thread(target=pay_thread).start()
def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
from .dialogs.confirm_tx_dialog import ConfirmTxDialog
d = ConfirmTxDialog(self.app, invoice)
d.open()
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
self.save_invoice(invoice)
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
self.is_max = False # not used for receiving (see app.amount_dialog)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum_ltc.util import create_bip21_uri
amount = self.amount
if amount:
a, u = self.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.address, amount, self.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
if lightning:
key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
if not self.app.wallet.is_deterministic():
addr = self.app.wallet.get_receiving_address()
else:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req: Invoice) -> Dict[str, Any]:
is_lightning = req.is_lightning()
if not is_lightning:
assert isinstance(req, OnchainInvoice)
address = req.get_address()
key = address
else:
assert isinstance(req, LNInvoice)
key = req.rhash
address = req.invoice
amount = req.get_amount_sat()
description = req.message
status = self.app.wallet.get_request_status(key)
status_str = req.get_status_str(status)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description or _('No Description')
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, request):
payments_container = self.ids.requests_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_request_status(key)
status_str = request.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data # needed?
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
| true
| true
|
1c44f21e21408f835a37bf18651cf2816383efc0
| 27,534
|
py
|
Python
|
torchreid/models/osnet_ain.py
|
kirillProkofiev/deep-object-reid
|
2abc96ec49bc0005ed556e203925354fdf12165c
|
[
"MIT"
] | null | null | null |
torchreid/models/osnet_ain.py
|
kirillProkofiev/deep-object-reid
|
2abc96ec49bc0005ed556e203925354fdf12165c
|
[
"MIT"
] | null | null | null |
torchreid/models/osnet_ain.py
|
kirillProkofiev/deep-object-reid
|
2abc96ec49bc0005ed556e203925354fdf12165c
|
[
"MIT"
] | null | null | null |
from __future__ import division, absolute_import
import warnings
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchreid.losses import AngleSimpleLinear
from torchreid.ops import Dropout, HSwish, GumbelSigmoid, LocalContrastNormalization
__all__ = ['osnet_ain_x1_0', 'osnet_ain2_x1_0']
pretrained_urls = {
'osnet_ain_x1_0': 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU, use_in=False):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.InstanceNorm2d(out_channels, affine=True) if use_in else nn.BatchNorm2d(out_channels)
self.out_fn = out_fn() if out_fn is not None else None
def forward(self, x):
y = self.conv(x)
y = self.bn(y)
y = self.out_fn(y) if self.out_fn is not None else y
return y
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1, bn=True):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = None
if bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.out_fn = out_fn() if out_fn is not None else None
def forward(self, x):
y = self.conv(x)
y = self.bn(y)
y = self.out_fn(y) if self.out_fn is not None else y
return y
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
return self.relu(x)
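# Illustrative sketch (not part of the original model code): LightConv3x3
# factorises a dense 3x3 convolution into a 1x1 pointwise conv followed by
# a 3x3 depthwise conv, shrinking the weight count roughly from
# 9 * C_in * C_out to C_in * C_out + 9 * C_out.
def _example_light_conv_params(in_channels=64, out_channels=64):
    dense = 9 * in_channels * out_channels          # plain 3x3 conv weights
    light = in_channels * out_channels + 9 * out_channels  # 1x1 + depthwise 3x3
    return dense, light  # (36864, 4672) for 64 -> 64 channels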
class LightConvStream(nn.Module):
"""Lightweight convolution stream."""
def __init__(self, in_channels, out_channels, depth):
super(LightConvStream, self).__init__()
assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(
depth
)
layers = []
layers += [LightConv3x3(in_channels, out_channels)]
for i in range(depth - 1):
layers += [LightConv3x3(out_channels, out_channels)]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
##########
# Attention modules
##########
class ResidualAttention(nn.Module):
def __init__(self, in_channels, gumbel=True, reduction=4.0, residual=True):
super(ResidualAttention, self).__init__()
self.residual = residual
internal_channels = int(in_channels / reduction)
self.spatial_attention = nn.Sequential(
Conv1x1(in_channels, internal_channels, out_fn=None),
HSwish(),
Conv3x3(internal_channels, internal_channels, groups=internal_channels, out_fn=None),
HSwish(),
Conv1x1(internal_channels, 1, out_fn=None),
GumbelSigmoid(scale=5.0) if gumbel else nn.Sigmoid()
)
def forward(self, x, return_mask=False):
soft_mask = self.spatial_attention(x)
out = (1.0 + soft_mask) * x if self.residual else soft_mask * x
if return_mask:
return out, soft_mask
else:
return out
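# Illustrative usage sketch (not part of the original file, assumes only the
# imports already present in this module): the spatial mask has a single
# channel, and residual=True blends it as (1 + mask) * x, so the attention
# boosts features instead of replacing them.
def _example_residual_attention():
    att = ResidualAttention(64, gumbel=False)
    x = torch.randn(2, 64, 16, 8)
    out, mask = att(x, return_mask=True)
    return out.shape, mask.shape  # (2, 64, 16, 8), (2, 1, 16, 8)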
class AttributeAttention(nn.Module):
def __init__(self, main_num_features, attr_num_feature, out_num_features):
super(AttributeAttention, self).__init__()
self.gate = nn.Sequential(
nn.Linear(attr_num_feature, main_num_features),
nn.BatchNorm1d(main_num_features),
nn.Sigmoid()
)
self.fc = nn.Sequential(
nn.Linear(main_num_features, out_num_features),
nn.BatchNorm1d(out_num_features)
)
def forward(self, x, attr):
return self.fc(x * self.gate(attr))
##########
# Building blocks for omni-scale feature learning
##########
class LCTGate(nn.Module):
def __init__(self, channels, groups=16):
super(LCTGate, self).__init__()
assert channels > 0
assert groups > 0
self.gn = nn.GroupNorm(groups, channels, affine=True)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.gate_activation = nn.Sigmoid()
def init_params(self):
nn.init.zeros_(self.gn.weight)
nn.init.ones_(self.gn.bias)
def forward(self, x):
y = self.global_avgpool(x)
y = self.gn(y)
y = self.gate_activation(y)
out = y * x
return out
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU()
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU()
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError("Unknown gate activation: {}".format(gate_activation))
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
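# Illustrative usage sketch (not part of the original file): ChannelGate
# squeezes the spatial dimensions with global average pooling, runs a small
# bottleneck of 1x1 convs and rescales each channel of the input by the
# resulting per-channel gate in [0, 1].
def _example_channel_gate():
    gate = ChannelGate(64, reduction=16)
    x = torch.randn(2, 64, 16, 8)
    y = gate(x)        # same shape as x, channels re-weighted
    return y.shape     # torch.Size([2, 64, 16, 8])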
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):
super(OSBlock, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = channel_gate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.dropout = None
if dropout_cfg is not None:
self.dropout = Dropout(**dropout_cfg)
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(identity)
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
if self.dropout is not None:
x3 = self.dropout(x3, x)
out = x3 + identity
return F.relu(out)
class OSBlockINin(nn.Module):
"""Omni-scale feature learning block with instance normalization."""
def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):
super(OSBlockINin, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = channel_gate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
self.dropout = None
if dropout_cfg is not None:
self.dropout = Dropout(**dropout_cfg)
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(identity)
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
x3 = self.IN(x3) # IN inside residual
if self.dropout is not None:
x3 = self.dropout(x3, x)
out = x3 + identity
return F.relu(out)
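# Illustrative sketch (not part of the original file): both OS blocks fuse
# T parallel LightConvStream branches of depth 1..T through a shared channel
# gate; OSBlockINin additionally applies instance norm to the residual
# branch before the skip connection. A minimal forward pass:
def _example_osblock():
    block = OSBlockINin(64, 256, ChannelGate)
    x = torch.randn(2, 64, 32, 16)
    return block(x).shape  # torch.Size([2, 256, 32, 16])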
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
"""
def __init__(
self,
num_classes,
blocks,
channels,
classification=False,
contrastive=False,
head_attention=False,
attentions=None,
dropout_cfg=None,
feature_dim=256,
loss='softmax',
input_lcn=False,
IN_first=False,
IN_conv1=False,
bn_eval=False,
bn_frozen=False,
attr_names=None,
attr_num_classes=None,
lct_gate=False,
pooling_type='avg',
**kwargs
):
super(OSNet, self).__init__()
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.classification = classification
self.contrastive = contrastive
self.pooling_type = pooling_type
num_blocks = len(blocks)
assert num_blocks == len(channels) - 1
self.loss = loss
self.feature_dim = feature_dim
assert self.feature_dim is not None and self.feature_dim > 0
self.use_attentions = attentions
if self.use_attentions is None:
self.use_attentions = [False] * (num_blocks + 2)
assert len(self.use_attentions) == num_blocks + 2
if not isinstance(num_classes, (list, tuple)):
num_classes = [num_classes]
self.num_classes = num_classes
assert len(self.num_classes) > 0
self.input_lcn = LocalContrastNormalization(3, 5, affine=True) if input_lcn else None
self.input_IN = nn.InstanceNorm2d(3, affine=True) if IN_first else None
channel_gate = LCTGate if lct_gate else ChannelGate
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN_conv1)
self.att1 = self._construct_attention_layer(channels[0], self.use_attentions[0])
self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._construct_layer(blocks[0], channels[0], channels[1], channel_gate, dropout_cfg)
self.att2 = self._construct_attention_layer(channels[1], self.use_attentions[1])
self.pool2 = nn.Sequential(Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2))
self.conv3 = self._construct_layer(blocks[1], channels[1], channels[2], channel_gate, dropout_cfg)
self.att3 = self._construct_attention_layer(channels[2], self.use_attentions[2])
self.pool3 = nn.Sequential(Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2))
self.conv4 = self._construct_layer(blocks[2], channels[2], channels[3], channel_gate, dropout_cfg)
self.att4 = self._construct_attention_layer(channels[3], self.use_attentions[3])
backbone_out_num_channels = channels[3]
self.conv5 = Conv1x1(channels[3], backbone_out_num_channels)
self.att5 = self._construct_attention_layer(backbone_out_num_channels, self.use_attentions[4])
self.head_att = self._construct_head_attention(backbone_out_num_channels, enable=head_attention)
classifier_block = nn.Linear if self.loss not in ['am_softmax'] else AngleSimpleLinear
self.use_attr = attr_names is not None and attr_num_classes is not None
if self.use_attr:
assert len(attr_names) == len(attr_num_classes)
in_feature_dims = [2 * self.feature_dim] * len(self.num_classes)
out_feature_dims = [self.feature_dim] * len(self.num_classes)
self.attr_names = []
self.attr, self.attr_classifier = nn.ModuleDict(), nn.ModuleDict()
attr_feature_dim = self.feature_dim // 4
for attr_name, attr_size in zip(attr_names, attr_num_classes):
if attr_size is None or attr_size <= 0:
continue
self.attr[attr_name] = self._construct_fc_layer(backbone_out_num_channels, attr_feature_dim)
self.attr_classifier[attr_name] = classifier_block(attr_feature_dim, attr_size)
self.attr_names.append(attr_name)
if len(self.attr) > 0:
mixed_hum_features = len(self.attr) * attr_feature_dim
self.attr_att = nn.ModuleList()
for trg_id in range(len(self.num_classes)):
self.attr_att.append(AttributeAttention(
in_feature_dims[trg_id], mixed_hum_features, out_feature_dims[trg_id]
))
else:
self.use_attr = False
if not self.use_attr:
in_feature_dims = [self.feature_dim] * len(self.num_classes)
out_feature_dims = [self.feature_dim] * len(self.num_classes)
self.out_feature_dims = out_feature_dims
self.fc, self.classifier = nn.ModuleList(), nn.ModuleList()
for trg_id, trg_num_classes in enumerate(self.num_classes):
self.fc.append(self._construct_fc_layer(backbone_out_num_channels, in_feature_dims[trg_id]))
if not contrastive and trg_num_classes > 0:
self.classifier.append(classifier_block(out_feature_dims[trg_id], trg_num_classes))
self._init_params()
@staticmethod
def _construct_layer(blocks, in_channels, out_channels, channel_gate, dropout_cfg=None):
layers = []
layers += [blocks[0](in_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]
for i in range(1, len(blocks)):
layers += [blocks[i](out_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]
return nn.Sequential(*layers)
@staticmethod
def _construct_attention_layer(num_channels, enable):
return ResidualAttention(num_channels, gumbel=False, residual=True) if enable else None
@staticmethod
def _construct_head_attention(num_channels, enable, channel_factor=8, gumbel=True, gumbel_scale=5.0):
if not enable:
return None
internal_num_channels = int(float(num_channels) / float(channel_factor))
layers = [
Conv1x1(num_channels, internal_num_channels, out_fn=None),
HSwish(),
Conv3x3(internal_num_channels, internal_num_channels, groups=internal_num_channels, out_fn=None),
HSwish(),
Conv1x1(internal_num_channels, 1, out_fn=None),
GumbelSigmoid(scale=gumbel_scale) if gumbel else nn.Sigmoid()
]
return nn.Sequential(*layers)
@staticmethod
def _construct_fc_layer(input_dim, output_dim, dropout=False):
layers = []
if dropout:
layers.append(Dropout(p=0.2, dist='gaussian'))
layers.extend([
nn.Linear(input_dim, output_dim),
nn.BatchNorm1d(output_dim)
])
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.InstanceNorm1d, nn.InstanceNorm2d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, LocalContrastNormalization):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, LCTGate):
m.init_params()
def _backbone(self, x):
att_maps = []
y = self.input_lcn(x) if self.input_lcn is not None else x
y = self.input_IN(y) if self.input_IN is not None else y
y = self.conv1(y)
if self.att1 is not None:
y, att1 = self.att1(y, return_mask=True)
att_maps.append(att1)
y = self.pool1(y)
y = self.conv2(y)
if self.att2 is not None:
y, att2 = self.att2(y, return_mask=True)
att_maps.append(att2)
y = self.pool2(y)
y = self.conv3(y)
if self.att3 is not None:
y, att3 = self.att3(y, return_mask=True)
att_maps.append(att3)
y = self.pool3(y)
y = self.conv4(y)
if self.att4 is not None:
y, att4 = self.att4(y, return_mask=True)
att_maps.append(att4)
y = self.conv5(y)
if self.att5 is not None:
y, att5 = self.att5(y, return_mask=True)
att_maps.append(att5)
return y, att_maps
@staticmethod
def _glob_feature_vector(x, mode='avg', head_att=None):
att_map = None
if mode == 'head_att':
assert head_att is not None
att_map = head_att(x)
with torch.no_grad():
num_values = torch.sum(att_map, dim=(2, 3), keepdim=True)
scale = num_values.clamp_min(1.0).pow(-1)
y = scale * att_map * x
out = torch.sum(y, dim=(2, 3))
elif mode == 'avg':
out = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
elif mode == 'max':
out = F.adaptive_max_pool2d(x, 1).view(x.size(0), -1)
elif mode == 'avg+max':
avg_pool = F.adaptive_avg_pool2d(x, 1)
max_pool = F.adaptive_max_pool2d(x, 1)
out = (avg_pool + max_pool).view(x.size(0), -1)
else:
raise ValueError(f'Unknown pooling mode: {mode}')
return out, att_map
def forward(self, x, return_featuremaps=False, get_embeddings=False, get_extra_data=False):
feature_maps, feature_att_maps = self._backbone(x)
if return_featuremaps:
return feature_maps
glob_features, head_att_map = self._glob_feature_vector(feature_maps, self.pooling_type, self.head_att)
embeddings = [fc(glob_features) for fc in self.fc]
if self.training and len(self.classifier) == 0:
return embeddings
attr_embeddings = {}
if self.use_attr:
attr_embeddings = {attr_name: attr_fc(glob_features) for attr_name, attr_fc in self.attr.items()}
attr_vector = torch.cat([attr_embeddings[attr_name] for attr_name in self.attr_names], dim=1)
embeddings = [attr_module(e, attr_vector) for e, attr_module in zip(embeddings, self.attr_att)]
if not self.training and not self.classification:
return torch.cat(embeddings, dim=1)
logits = [classifier(embd) for embd, classifier in zip(embeddings, self.classifier)]
if not self.training and self.classification:
return logits
if len(logits) == 1:
logits = logits[0]
if len(embeddings) == 1:
embeddings = embeddings[0]
if get_embeddings:
out_data = [logits, embeddings]
elif self.loss in ['softmax', 'adacos', 'd_softmax', 'am_softmax']:
out_data = [logits]
elif self.loss in ['triplet']:
out_data = [logits, embeddings]
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
if get_extra_data:
extra_out_data = dict()
extra_out_data['att_maps'] = [head_att_map] + feature_att_maps
if self.use_attr:
attr_logits = {attr_name: attr_classifier(attr_embeddings[attr_name])
for attr_name, attr_classifier in self.attr_classifier.items()}
extra_out_data['attr_logits'] = attr_logits
out_data += [extra_out_data]
return tuple(out_data)
def train(self, train_mode=True):
super(OSNet, self).train(train_mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
return self
def load_pretrained_weights(self, pretrained_dict):
model_dict = self.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in pretrained_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
self.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'
)
else:
print('Successfully loaded pretrained weights')
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
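# Illustrative sketch (not part of the original file): the pooling_type
# argument selects how feature maps are collapsed into a vector in
# OSNet._glob_feature_vector ('avg', 'max', 'avg+max', or 'head_att' when a
# head attention module is constructed).
def _example_pooling_modes():
    x = torch.randn(2, 512, 16, 8)
    avg, _ = OSNet._glob_feature_vector(x, mode='avg')
    both, _ = OSNet._glob_feature_vector(x, mode='avg+max')
    return avg.shape, both.shape  # both torch.Size([2, 512])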
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model.load_pretrained_weights(state_dict)
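# Illustrative sketch (not part of the original file): weights can also be
# loaded from a local checkpoint with the same partial-matching behaviour as
# above; the path below is a placeholder, not a file shipped with the repo.
def _example_load_local_weights(model, checkpoint_path='osnet_ain_x1_0_imagenet.pth'):
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    model.load_pretrained_weights(state_dict)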
##########
# Instantiation
##########
def osnet_ain_x1_0(num_classes, pretrained=False, download_weights=False,
IN_first=False, IN_conv1=False, **kwargs):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin],
[OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
channels=[64, 256, 384, 512],
IN_conv1=True,
**kwargs
)
if pretrained and download_weights:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
def osnet_ain2_x1_0(num_classes, pretrained=False, download_weights=False,
enable_attentions=False, IN_first=False, IN_conv1=False,
**kwargs):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin],
[OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
channels=[64, 256, 384, 512],
attentions=[False, True, True, False, False] if enable_attentions else None,
IN_first=True,
IN_conv1=True,
**kwargs
)
if pretrained and download_weights:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
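# Illustrative usage sketch (not part of the original file): build the
# backbone for a single head and run a dummy batch. The class count and the
# 256x128 input resolution are placeholder values.
def _example_osnet_forward():
    model = osnet_ain_x1_0(num_classes=1000, pretrained=False)
    model.eval()
    x = torch.randn(2, 3, 256, 128)
    with torch.no_grad():
        embeddings = model(x)  # concatenated per-head embeddings in eval mode
    return embeddings.shape    # torch.Size([2, 256]) with the default feature_dim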
| 32.392941
| 111
| 0.594138
|
from __future__ import division, absolute_import
import warnings
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchreid.losses import AngleSimpleLinear
from torchreid.ops import Dropout, HSwish, GumbelSigmoid, LocalContrastNormalization
__all__ = ['osnet_ain_x1_0', 'osnet_ain2_x1_0']
pretrained_urls = {
'osnet_ain_x1_0': 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo'
}
class ConvLayer(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu(x)
class Conv1x1(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU, use_in=False):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.InstanceNorm2d(out_channels, affine=True) if use_in else nn.BatchNorm2d(out_channels)
self.out_fn = out_fn() if out_fn is not None else None
def forward(self, x):
y = self.conv(x)
y = self.bn(y)
y = self.out_fn(y) if self.out_fn is not None else y
return y
class Conv1x1Linear(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, bn=True):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = None
if bn:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
return x
class Conv3x3(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.out_fn = out_fn() if out_fn is not None else None
def forward(self, x):
y = self.conv(x)
y = self.bn(y)
y = self.out_fn(y) if self.out_fn is not None else y
return y
class LightConv3x3(nn.Module):
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
return self.relu(x)
class LightConvStream(nn.Module):
def __init__(self, in_channels, out_channels, depth):
super(LightConvStream, self).__init__()
assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(
depth
)
layers = []
layers += [LightConv3x3(in_channels, out_channels)]
for i in range(depth - 1):
layers += [LightConv3x3(out_channels, out_channels)]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ResidualAttention(nn.Module):
    def __init__(self, in_channels, gumbel=True, reduction=4.0, residual=True):
super(ResidualAttention, self).__init__()
self.residual = residual
internal_channels = int(in_channels / reduction)
self.spatial_attention = nn.Sequential(
Conv1x1(in_channels, internal_channels, out_fn=None),
HSwish(),
Conv3x3(internal_channels, internal_channels, groups=internal_channels, out_fn=None),
HSwish(),
Conv1x1(internal_channels, 1, out_fn=None),
GumbelSigmoid(scale=5.0) if gumbel else nn.Sigmoid()
)
def forward(self, x, return_mask=False):
soft_mask = self.spatial_attention(x)
out = (1.0 + soft_mask) * x if self.residual else soft_mask * x
if return_mask:
return out, soft_mask
else:
return out
class AttributeAttention(nn.Module):
def __init__(self, main_num_features, attr_num_feature, out_num_features):
super(AttributeAttention, self).__init__()
self.gate = nn.Sequential(
nn.Linear(attr_num_feature, main_num_features),
nn.BatchNorm1d(main_num_features),
nn.Sigmoid()
)
self.fc = nn.Sequential(
nn.Linear(main_num_features, out_num_features),
nn.BatchNorm1d(out_num_features)
)
def forward(self, x, attr):
return self.fc(x * self.gate(attr))
class LCTGate(nn.Module):
    def __init__(self, channels, groups=16):
        super(LCTGate, self).__init__()
assert channels > 0
assert groups > 0
self.gn = nn.GroupNorm(groups, channels, affine=True)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.gate_activation = nn.Sigmoid()
def init_params(self):
nn.init.zeros_(self.gn.weight)
nn.init.ones_(self.gn.bias)
def forward(self, x):
y = self.global_avgpool(x)
y = self.gn(y)
y = self.gate_activation(y)
out = y * x
return out
class ChannelGate(nn.Module):
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU()
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU()
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError("Unknown gate activation: {}".format(gate_activation))
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
class OSBlock(nn.Module):
def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):
super(OSBlock, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = channel_gate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.dropout = None
if dropout_cfg is not None:
self.dropout = Dropout(**dropout_cfg)
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(identity)
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
if self.dropout is not None:
x3 = self.dropout(x3, x)
out = x3 + identity
return F.relu(out)
class OSBlockINin(nn.Module):
def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):
super(OSBlockINin, self).__init__()
assert T >= 1
assert out_channels >= reduction and out_channels % reduction == 0
mid_channels = out_channels // reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2 = nn.ModuleList()
for t in range(1, T + 1):
self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]
self.gate = channel_gate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
self.dropout = None
if dropout_cfg is not None:
self.dropout = Dropout(**dropout_cfg)
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(identity)
x1 = self.conv1(x)
x2 = 0
for conv2_t in self.conv2:
x2_t = conv2_t(x1)
x2 = x2 + self.gate(x2_t)
x3 = self.conv3(x2)
x3 = self.IN(x3)
if self.dropout is not None:
x3 = self.dropout(x3, x)
out = x3 + identity
return F.relu(out)
class OSNet(nn.Module):
    def __init__(
        self,
        num_classes,
        blocks,
channels,
classification=False,
contrastive=False,
head_attention=False,
attentions=None,
dropout_cfg=None,
feature_dim=256,
loss='softmax',
input_lcn=False,
IN_first=False,
IN_conv1=False,
bn_eval=False,
bn_frozen=False,
attr_names=None,
attr_num_classes=None,
lct_gate=False,
pooling_type='avg',
**kwargs
):
super(OSNet, self).__init__()
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.classification = classification
self.contrastive = contrastive
self.pooling_type = pooling_type
num_blocks = len(blocks)
assert num_blocks == len(channels) - 1
self.loss = loss
self.feature_dim = feature_dim
assert self.feature_dim is not None and self.feature_dim > 0
self.use_attentions = attentions
if self.use_attentions is None:
self.use_attentions = [False] * (num_blocks + 2)
assert len(self.use_attentions) == num_blocks + 2
if not isinstance(num_classes, (list, tuple)):
num_classes = [num_classes]
self.num_classes = num_classes
assert len(self.num_classes) > 0
self.input_lcn = LocalContrastNormalization(3, 5, affine=True) if input_lcn else None
self.input_IN = nn.InstanceNorm2d(3, affine=True) if IN_first else None
channel_gate = LCTGate if lct_gate else ChannelGate
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN_conv1)
self.att1 = self._construct_attention_layer(channels[0], self.use_attentions[0])
self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._construct_layer(blocks[0], channels[0], channels[1], channel_gate, dropout_cfg)
self.att2 = self._construct_attention_layer(channels[1], self.use_attentions[1])
self.pool2 = nn.Sequential(Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2))
self.conv3 = self._construct_layer(blocks[1], channels[1], channels[2], channel_gate, dropout_cfg)
self.att3 = self._construct_attention_layer(channels[2], self.use_attentions[2])
self.pool3 = nn.Sequential(Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2))
self.conv4 = self._construct_layer(blocks[2], channels[2], channels[3], channel_gate, dropout_cfg)
self.att4 = self._construct_attention_layer(channels[3], self.use_attentions[3])
backbone_out_num_channels = channels[3]
self.conv5 = Conv1x1(channels[3], backbone_out_num_channels)
self.att5 = self._construct_attention_layer(backbone_out_num_channels, self.use_attentions[4])
self.head_att = self._construct_head_attention(backbone_out_num_channels, enable=head_attention)
classifier_block = nn.Linear if self.loss not in ['am_softmax'] else AngleSimpleLinear
self.use_attr = attr_names is not None and attr_num_classes is not None
if self.use_attr:
assert len(attr_names) == len(attr_num_classes)
in_feature_dims = [2 * self.feature_dim] * len(self.num_classes)
out_feature_dims = [self.feature_dim] * len(self.num_classes)
self.attr_names = []
self.attr, self.attr_classifier = nn.ModuleDict(), nn.ModuleDict()
attr_feature_dim = self.feature_dim // 4
for attr_name, attr_size in zip(attr_names, attr_num_classes):
if attr_size is None or attr_size <= 0:
continue
self.attr[attr_name] = self._construct_fc_layer(backbone_out_num_channels, attr_feature_dim)
self.attr_classifier[attr_name] = classifier_block(attr_feature_dim, attr_size)
self.attr_names.append(attr_name)
if len(self.attr) > 0:
mixed_hum_features = len(self.attr) * attr_feature_dim
self.attr_att = nn.ModuleList()
for trg_id in range(len(self.num_classes)):
self.attr_att.append(AttributeAttention(
in_feature_dims[trg_id], mixed_hum_features, out_feature_dims[trg_id]
))
else:
self.use_attr = False
if not self.use_attr:
in_feature_dims = [self.feature_dim] * len(self.num_classes)
out_feature_dims = [self.feature_dim] * len(self.num_classes)
self.out_feature_dims = out_feature_dims
self.fc, self.classifier = nn.ModuleList(), nn.ModuleList()
for trg_id, trg_num_classes in enumerate(self.num_classes):
self.fc.append(self._construct_fc_layer(backbone_out_num_channels, in_feature_dims[trg_id]))
if not contrastive and trg_num_classes > 0:
self.classifier.append(classifier_block(out_feature_dims[trg_id], trg_num_classes))
self._init_params()
@staticmethod
def _construct_layer(blocks, in_channels, out_channels, channel_gate, dropout_cfg=None):
layers = []
layers += [blocks[0](in_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]
for i in range(1, len(blocks)):
layers += [blocks[i](out_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]
return nn.Sequential(*layers)
@staticmethod
def _construct_attention_layer(num_channels, enable):
return ResidualAttention(num_channels, gumbel=False, residual=True) if enable else None
@staticmethod
def _construct_head_attention(num_channels, enable, channel_factor=8, gumbel=True, gumbel_scale=5.0):
if not enable:
return None
internal_num_channels = int(float(num_channels) / float(channel_factor))
layers = [
Conv1x1(num_channels, internal_num_channels, out_fn=None),
HSwish(),
Conv3x3(internal_num_channels, internal_num_channels, groups=internal_num_channels, out_fn=None),
HSwish(),
Conv1x1(internal_num_channels, 1, out_fn=None),
GumbelSigmoid(scale=gumbel_scale) if gumbel else nn.Sigmoid()
]
return nn.Sequential(*layers)
@staticmethod
def _construct_fc_layer(input_dim, output_dim, dropout=False):
layers = []
if dropout:
layers.append(Dropout(p=0.2, dist='gaussian'))
layers.extend([
nn.Linear(input_dim, output_dim),
nn.BatchNorm1d(output_dim)
])
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.InstanceNorm1d, nn.InstanceNorm2d)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, LocalContrastNormalization):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, LCTGate):
m.init_params()
def _backbone(self, x):
att_maps = []
y = self.input_lcn(x) if self.input_lcn is not None else x
y = self.input_IN(y) if self.input_IN is not None else y
y = self.conv1(y)
if self.att1 is not None:
y, att1 = self.att1(y, return_mask=True)
att_maps.append(att1)
y = self.pool1(y)
y = self.conv2(y)
if self.att2 is not None:
y, att2 = self.att2(y, return_mask=True)
att_maps.append(att2)
y = self.pool2(y)
y = self.conv3(y)
if self.att3 is not None:
y, att3 = self.att3(y, return_mask=True)
att_maps.append(att3)
y = self.pool3(y)
y = self.conv4(y)
if self.att4 is not None:
y, att4 = self.att4(y, return_mask=True)
att_maps.append(att4)
y = self.conv5(y)
if self.att5 is not None:
y, att5 = self.att5(y, return_mask=True)
att_maps.append(att5)
return y, att_maps
@staticmethod
def _glob_feature_vector(x, mode='avg', head_att=None):
att_map = None
if mode == 'head_att':
assert head_att is not None
att_map = head_att(x)
with torch.no_grad():
num_values = torch.sum(att_map, dim=(2, 3), keepdim=True)
scale = num_values.clamp_min(1.0).pow(-1)
y = scale * att_map * x
out = torch.sum(y, dim=(2, 3))
elif mode == 'avg':
out = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)
elif mode == 'max':
out = F.adaptive_max_pool2d(x, 1).view(x.size(0), -1)
elif mode == 'avg+max':
avg_pool = F.adaptive_avg_pool2d(x, 1)
max_pool = F.adaptive_max_pool2d(x, 1)
out = (avg_pool + max_pool).view(x.size(0), -1)
else:
raise ValueError(f'Unknown pooling mode: {mode}')
return out, att_map
def forward(self, x, return_featuremaps=False, get_embeddings=False, get_extra_data=False):
feature_maps, feature_att_maps = self._backbone(x)
if return_featuremaps:
return feature_maps
glob_features, head_att_map = self._glob_feature_vector(feature_maps, self.pooling_type, self.head_att)
embeddings = [fc(glob_features) for fc in self.fc]
if self.training and len(self.classifier) == 0:
return embeddings
attr_embeddings = {}
if self.use_attr:
attr_embeddings = {attr_name: attr_fc(glob_features) for attr_name, attr_fc in self.attr.items()}
attr_vector = torch.cat([attr_embeddings[attr_name] for attr_name in self.attr_names], dim=1)
embeddings = [attr_module(e, attr_vector) for e, attr_module in zip(embeddings, self.attr_att)]
if not self.training and not self.classification:
return torch.cat(embeddings, dim=1)
logits = [classifier(embd) for embd, classifier in zip(embeddings, self.classifier)]
if not self.training and self.classification:
return logits
if len(logits) == 1:
logits = logits[0]
if len(embeddings) == 1:
embeddings = embeddings[0]
if get_embeddings:
out_data = [logits, embeddings]
elif self.loss in ['softmax', 'adacos', 'd_softmax', 'am_softmax']:
out_data = [logits]
elif self.loss in ['triplet']:
out_data = [logits, embeddings]
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
if get_extra_data:
extra_out_data = dict()
extra_out_data['att_maps'] = [head_att_map] + feature_att_maps
if self.use_attr:
attr_logits = {attr_name: attr_classifier(attr_embeddings[attr_name])
for attr_name, attr_classifier in self.attr_classifier.items()}
extra_out_data['attr_logits'] = attr_logits
out_data += [extra_out_data]
return tuple(out_data)
def train(self, train_mode=True):
super(OSNet, self).train(train_mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
return self
def load_pretrained_weights(self, pretrained_dict):
model_dict = self.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in pretrained_dict.items():
if k.startswith('module.'):
k = k[7:]
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
self.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'
)
else:
print('Successfully loaded pretrained weights')
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
def init_pretrained_weights(model, key=''):
import os
import errno
import gdown
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model.load_pretrained_weights(state_dict)
def osnet_ain_x1_0(num_classes, pretrained=False, download_weights=False,
                   IN_first=False, IN_conv1=False, **kwargs):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin],
[OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
channels=[64, 256, 384, 512],
IN_conv1=True,
**kwargs
)
if pretrained and download_weights:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
def osnet_ain2_x1_0(num_classes, pretrained=False, download_weights=False,
enable_attentions=False, IN_first=False, IN_conv1=False,
**kwargs):
model = OSNet(
num_classes,
blocks=[
[OSBlockINin, OSBlockINin],
[OSBlock, OSBlockINin],
[OSBlockINin, OSBlock]
],
channels=[64, 256, 384, 512],
attentions=[False, True, True, False, False] if enable_attentions else None,
IN_first=True,
IN_conv1=True,
**kwargs
)
if pretrained and download_weights:
init_pretrained_weights(model, key='osnet_ain_x1_0')
return model
| true
| true
|
1c44f2346c4dcf0a488a33568bec5852405a2972
| 754
|
py
|
Python
|
athp_stock/__manifest__.py
|
QuanTranDoanAnh/odoo-athp-addons
|
8a6ce58378b37e96d022ded8d912bb8b88e55b4c
|
[
"MIT"
] | null | null | null |
athp_stock/__manifest__.py
|
QuanTranDoanAnh/odoo-athp-addons
|
8a6ce58378b37e96d022ded8d912bb8b88e55b4c
|
[
"MIT"
] | null | null | null |
athp_stock/__manifest__.py
|
QuanTranDoanAnh/odoo-athp-addons
|
8a6ce58378b37e96d022ded8d912bb8b88e55b4c
|
[
"MIT"
] | null | null | null |
{
'name': "An Toan Hoa Phat Stock Management App",
'summary': "Stock Management App customized for An Toan Hoa Phat",
'description': """
Stock Management App customized for An Toan Hoa Phat
""",
'author': 'Business Link Development Technologies Co., Ltd.',
'website': 'http://www.bld.com.vn',
'license': 'Other proprietary',
'depends': ['base', 'stock'],
'category': 'Stock',
'version': '1.0.0',
'data': [
'security/security.xml',
'security/ir.model.access.csv',
'views/stock_request_views.xml',
'views/product_views.xml',
'views/actions.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
'application': True
}
| 31.416667
| 71
| 0.572944
|
{
'name': "An Toan Hoa Phat Stock Management App",
'summary': "Stock Management App customized for An Toan Hoa Phat",
'description': """
Stock Management App customized for An Toan Hoa Phat
""",
'author': 'Business Link Development Technologies Co., Ltd.',
'website': 'http://www.bld.com.vn',
'license': 'Other proprietary',
'depends': ['base', 'stock'],
'category': 'Stock',
'version': '1.0.0',
'data': [
'security/security.xml',
'security/ir.model.access.csv',
'views/stock_request_views.xml',
'views/product_views.xml',
'views/actions.xml'
],
'demo': [],
'installable': True,
'auto_install': False,
'application': True
}
| true
| true
|
1c44f390c47285189ba516ba8ac76c57279695a4
| 12,651
|
py
|
Python
|
Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-512/utils/iterator_utils.py
|
goswamig/training_results_v0.7
|
4278ce8a0f3d4db6b5e6054277724ca36278d7a3
|
[
"Apache-2.0"
] | 48
|
2020-07-29T18:09:23.000Z
|
2021-10-09T01:53:33.000Z
|
Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-512/utils/iterator_utils.py
|
goswamig/training_results_v0.7
|
4278ce8a0f3d4db6b5e6054277724ca36278d7a3
|
[
"Apache-2.0"
] | 9
|
2021-04-02T02:28:07.000Z
|
2022-03-26T18:23:59.000Z
|
Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-512/utils/iterator_utils.py
|
lablup/training_results_v0.7
|
f5bb59aa0f8b18b602763abe47d1d24d0d54b197
|
[
"Apache-2.0"
] | 42
|
2020-08-01T06:41:24.000Z
|
2022-01-20T10:33:08.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For loading data into NMT models."""
from __future__ import print_function
import tensorflow.compat.v1 as tf
__all__ = ["get_iterator", "get_infer_iterator"]
# pylint: disable=g-long-lambda,line-too-long
def get_iterator(src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size,
global_batch_size,
sos,
eos,
random_seed,
num_buckets,
src_max_len=None,
tgt_max_len=None,
num_parallel_calls=4,
output_buffer_size=None,
skip_count=None,
num_shards=1,
shard_index=0,
reshuffle_each_iteration=True,
filter_oversized_sequences=False,
return_raw=False):
"""Function that returns input dataset."""
# Total number of examples in src_dataset/tgt_dataset
if not output_buffer_size:
output_buffer_size = global_batch_size * 100
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Filter zero length input sequences.
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
# Filter oversized input sequences (542 examples are filtered).
if filter_oversized_sequences:
src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt: tf.logical_and(
tf.size(src) <= src_max_len - 2,
tf.size(tgt) <= tgt_max_len - 1))
if src_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src[:src_max_len - 2], tgt),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
if tgt_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src, tgt[:tgt_max_len]),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.concat(([tgt_sos_id], src, [src_eos_id]), 0),
tf.concat(([tgt_sos_id], tgt), 0),
tf.concat((tgt, [tgt_eos_id]), 0)),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Add in sequence lengths.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
if return_raw:
def map_fn(src, tgt_in, tgt_out, src_len, tgt_len):
"""Pad the dataset and emit the bucket id as key."""
src = tf.pad(
src, [[0, src_max_len - tf.size(src)]], constant_values=src_eos_id)
tgt_in = tf.pad(
tgt_in, [[0, tgt_max_len - tf.size(tgt_in)]],
constant_values=tgt_eos_id)
tgt_out = tf.pad(
tgt_out, [[0, tgt_max_len - tf.size(tgt_out)]],
constant_values=tgt_eos_id)
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
bucket_id = tf.cast(
tf.minimum(
num_buckets,
tf.maximum(src_len // bucket_width, tgt_len // bucket_width)),
tf.int32)
return tf.concat([
src, tgt_in, tgt_out,
tf.reshape(src_len, [1]),
tf.reshape(tgt_len, [1]),
tf.reshape(bucket_id, [1])
], 0)
src_tgt_dataset = src_tgt_dataset.map(
map_fn, num_parallel_calls=num_parallel_calls)
return src_tgt_dataset.batch(1024)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.cache()
# TODO(saeta): investigate shuffle_and_repeat.
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed,
reshuffle_each_iteration).repeat()
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([tgt_max_len]), # tgt_input
tf.TensorShape([tgt_max_len]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.
padding_values=(
src_eos_id, # src
tgt_eos_id, # tgt_input
tgt_eos_id, # tgt_output
0, # src_len -- unused
0),
# For TPU, must set drop_remainder to True or batch size will be None
drop_remainder=True) # tgt_len -- unused
if num_buckets > 1:
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
"""Calculate bucket_width by maximum source sequence length."""
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_bucket-1) * bucket_width) words all go into the last bucket.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
batched_dataset = src_tgt_dataset.apply(
tf.data.experimental.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=global_batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
  # make_one_shot_iterator is not applicable here since we have a lookup table.
  # Instead, return a tf.data.Dataset and let TPUEstimator initialize it and
  # make an iterator out of it.
batched_dataset = batched_dataset.map(
lambda src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
return batched_dataset
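# --- Worked example (not part of the original file) ---
# The bucketing code above assigns a sentence pair to a bucket based on the
# longer of its two lengths. With hypothetical values src_max_len=48 and
# num_buckets=4, bucket_width = (48 + 4 - 1) // 4 = 12, so lengths 0-11 fall
# in bucket 0, lengths 12-23 in bucket 1, and so on, capped at num_buckets.
def _bucket_id_example(src_len, tgt_len, src_max_len=48, num_buckets=4):
  bucket_width = (src_max_len + num_buckets - 1) // num_buckets
  bucket_id = max(src_len // bucket_width, tgt_len // bucket_width)
  return min(num_buckets, bucket_id)

# _bucket_id_example(7, 10) == 0 and _bucket_id_example(25, 13) == 2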
# pylint: disable=g-long-lambda,line-too-long
def get_preprocessed_iterator(dataset_file,
batch_size,
random_seed,
max_seq_len,
num_buckets,
shard_index,
num_shards,
num_parallel_calls=100):
"""Get the dataset iterator from preprocessed data."""
dataset = tf.data.Dataset.list_files(
dataset_file, shuffle=False).shard(num_shards, shard_index)
def fetch_dataset(filename):
dataset = tf.data.FixedLengthRecordDataset(filename,
(max_seq_len * 3 + 3) * 4)
return dataset
# TODO(dehao, jsimsa): Investigate why using dataset.interleave is slower
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
fetch_dataset, cycle_length=num_parallel_calls, sloppy=True))
def _parse(record):
record = tf.decode_raw(record, tf.int32)
r = tf.split(record, [max_seq_len, max_seq_len, max_seq_len, 1, 1, 1])
return tf.cast(tf.reshape(r[5], []), tf.int64), r[0], r[1], r[2], r[3], r[4]
shuffle_buffer_size = batch_size * 50
src_tgt_dataset = dataset.map(
_parse, num_parallel_calls=shuffle_buffer_size).cache()
src_tgt_dataset = src_tgt_dataset.shuffle(shuffle_buffer_size, random_seed,
True).repeat()
if num_buckets > 1:
def key_func(key, unused_1, unused_2, unused_3, unused_src_len,
unused_tgt_len):
return key
def reduce_func(unused_key, windowed_data):
return windowed_data.batch(batch_size, drop_remainder=True)
batched_dataset = src_tgt_dataset.apply(
tf.data.experimental.group_by_window(
key_func=key_func, reduce_func=reduce_func, window_size=batch_size))
else:
batched_dataset = src_tgt_dataset.batch(batch_size, drop_remainder=True)
batched_dataset = batched_dataset.map(
lambda unused_key, src, tgt_in, tgt_out, source_size, tgt_in_size: ({
"source": tf.reshape(src, [batch_size, max_seq_len]),
"target_input": tf.reshape(tgt_in, [batch_size, max_seq_len]),
"target_output": tf.reshape(tgt_out, [batch_size, max_seq_len]),
"source_sequence_length": tf.reshape(source_size, [batch_size]),
"target_sequence_length": tf.reshape(tgt_in_size, [batch_size])
}),
# TODO(dehao): tune the magic prefetch buffer size.
num_parallel_calls=batch_size).prefetch(4)
return batched_dataset
def get_infer_iterator(src_dataset,
src_vocab_table,
batch_size,
eos,
sos,
src_max_len=None):
"""Get dataset for inference."""
  # Total number of examples in src_dataset
# (3003 examples + 69 padding examples).
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_sos_id = tf.cast(src_vocab_table.lookup(tf.constant(sos)), tf.int32)
src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
# Convert the word strings to ids
src_dataset = src_dataset.map(
lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
# Add in the word counts.
src_dataset = src_dataset.map(lambda src: (tf.concat(
([src_sos_id], src, [src_eos_id]), 0), 2 + tf.size(src)))
def batching_func(x):
return x.padded_batch(
batch_size,
# The entry is the source line rows;
# this has unknown-length vectors. The last entry is
# the source row size; this is a scalar.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([])), # src_len
# Pad the source sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.
padding_values=(
src_eos_id, # src
0),
drop_remainder=True) # src_len -- unused
batched_dataset = batching_func(src_dataset)
batched_dataset = batched_dataset.map(
lambda src_ids, src_seq_len: (
{"source": src_ids,
"source_sequence_length": src_seq_len}))
return batched_dataset
| 40.548077
| 86
| 0.648012
|
from __future__ import print_function
import tensorflow.compat.v1 as tf
__all__ = ["get_iterator", "get_infer_iterator"]
def get_iterator(src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size,
global_batch_size,
sos,
eos,
random_seed,
num_buckets,
src_max_len=None,
tgt_max_len=None,
num_parallel_calls=4,
output_buffer_size=None,
skip_count=None,
num_shards=1,
shard_index=0,
reshuffle_each_iteration=True,
filter_oversized_sequences=False,
return_raw=False):
if not output_buffer_size:
output_buffer_size = global_batch_size * 100
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
src_tgt_dataset = src_tgt_dataset.shard(num_shards, shard_index)
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.filter(
lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
if filter_oversized_sequences:
src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt: tf.logical_and(
tf.size(src) <= src_max_len - 2,
tf.size(tgt) <= tgt_max_len - 1))
if src_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src[:src_max_len - 2], tgt),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
if tgt_max_len:
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (src, tgt[:tgt_max_len]),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.cast(src_vocab_table.lookup(src), tf.int32),
tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)),
num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt: (tf.concat(([tgt_sos_id], src, [src_eos_id]), 0),
tf.concat(([tgt_sos_id], tgt), 0),
tf.concat((tgt, [tgt_eos_id]), 0)),
num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
# Add in sequence lengths.
src_tgt_dataset = src_tgt_dataset.map(
lambda src, tgt_in, tgt_out: (
src, tgt_in, tgt_out, tf.size(src), tf.size(tgt_in)),
num_parallel_calls=num_parallel_calls)
if return_raw:
def map_fn(src, tgt_in, tgt_out, src_len, tgt_len):
src = tf.pad(
src, [[0, src_max_len - tf.size(src)]], constant_values=src_eos_id)
tgt_in = tf.pad(
tgt_in, [[0, tgt_max_len - tf.size(tgt_in)]],
constant_values=tgt_eos_id)
tgt_out = tf.pad(
tgt_out, [[0, tgt_max_len - tf.size(tgt_out)]],
constant_values=tgt_eos_id)
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
bucket_id = tf.cast(
tf.minimum(
num_buckets,
tf.maximum(src_len // bucket_width, tgt_len // bucket_width)),
tf.int32)
return tf.concat([
src, tgt_in, tgt_out,
tf.reshape(src_len, [1]),
tf.reshape(tgt_len, [1]),
tf.reshape(bucket_id, [1])
], 0)
src_tgt_dataset = src_tgt_dataset.map(
map_fn, num_parallel_calls=num_parallel_calls)
return src_tgt_dataset.batch(1024)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.cache()
# TODO(saeta): investigate shuffle_and_repeat.
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed,
reshuffle_each_iteration).repeat()
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape([src_max_len]), # src
tf.TensorShape([tgt_max_len]), # tgt_input
tf.TensorShape([tgt_max_len]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
padding_values=(
src_eos_id,
tgt_eos_id,
tgt_eos_id,
0,
0),
drop_remainder=True)
if num_buckets > 1:
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
batched_dataset = src_tgt_dataset.apply(
tf.data.experimental.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=global_batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
batched_dataset = batched_dataset.map(
lambda src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
return batched_dataset
def get_preprocessed_iterator(dataset_file,
batch_size,
random_seed,
max_seq_len,
num_buckets,
shard_index,
num_shards,
num_parallel_calls=100):
dataset = tf.data.Dataset.list_files(
dataset_file, shuffle=False).shard(num_shards, shard_index)
def fetch_dataset(filename):
dataset = tf.data.FixedLengthRecordDataset(filename,
(max_seq_len * 3 + 3) * 4)
return dataset
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
fetch_dataset, cycle_length=num_parallel_calls, sloppy=True))
def _parse(record):
record = tf.decode_raw(record, tf.int32)
r = tf.split(record, [max_seq_len, max_seq_len, max_seq_len, 1, 1, 1])
return tf.cast(tf.reshape(r[5], []), tf.int64), r[0], r[1], r[2], r[3], r[4]
shuffle_buffer_size = batch_size * 50
src_tgt_dataset = dataset.map(
_parse, num_parallel_calls=shuffle_buffer_size).cache()
src_tgt_dataset = src_tgt_dataset.shuffle(shuffle_buffer_size, random_seed,
True).repeat()
if num_buckets > 1:
def key_func(key, unused_1, unused_2, unused_3, unused_src_len,
unused_tgt_len):
return key
def reduce_func(unused_key, windowed_data):
return windowed_data.batch(batch_size, drop_remainder=True)
batched_dataset = src_tgt_dataset.apply(
tf.data.experimental.group_by_window(
key_func=key_func, reduce_func=reduce_func, window_size=batch_size))
else:
batched_dataset = src_tgt_dataset.batch(batch_size, drop_remainder=True)
batched_dataset = batched_dataset.map(
lambda unused_key, src, tgt_in, tgt_out, source_size, tgt_in_size: ({
"source": tf.reshape(src, [batch_size, max_seq_len]),
"target_input": tf.reshape(tgt_in, [batch_size, max_seq_len]),
"target_output": tf.reshape(tgt_out, [batch_size, max_seq_len]),
"source_sequence_length": tf.reshape(source_size, [batch_size]),
"target_sequence_length": tf.reshape(tgt_in_size, [batch_size])
}),
num_parallel_calls=batch_size).prefetch(4)
return batched_dataset
def get_infer_iterator(src_dataset,
src_vocab_table,
batch_size,
eos,
sos,
src_max_len=None):
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_sos_id = tf.cast(src_vocab_table.lookup(tf.constant(sos)), tf.int32)
src_dataset = src_dataset.map(lambda src: tf.string_split([src]).values)
src_dataset = src_dataset.map(
lambda src: tf.cast(src_vocab_table.lookup(src), tf.int32))
src_dataset = src_dataset.map(lambda src: (tf.concat(
([src_sos_id], src, [src_eos_id]), 0), 2 + tf.size(src)))
def batching_func(x):
return x.padded_batch(
batch_size,
padded_shapes=(
tf.TensorShape([src_max_len]),
tf.TensorShape([])),
# later on we will be masking out calculations past the true sequence.
padding_values=(
src_eos_id, # src
0),
drop_remainder=True) # src_len -- unused
batched_dataset = batching_func(src_dataset)
batched_dataset = batched_dataset.map(
lambda src_ids, src_seq_len: (
{"source": src_ids,
"source_sequence_length": src_seq_len}))
return batched_dataset
| true
| true
|
1c44f449a9db601964d2de365f272f867c90bb7d
| 2,790
|
py
|
Python
|
metsim/disaggregate.py
|
jhamman/MetSim
|
538ebb141414355a5db0eddde6c0d4bec2e56390
|
[
"MIT"
] | null | null | null |
metsim/disaggregate.py
|
jhamman/MetSim
|
538ebb141414355a5db0eddde6c0d4bec2e56390
|
[
"MIT"
] | 1
|
2019-01-17T23:12:30.000Z
|
2019-01-17T23:12:30.000Z
|
metsim/disaggregate.py
|
jhamman/MetSim
|
538ebb141414355a5db0eddde6c0d4bec2e56390
|
[
"MIT"
] | 1
|
2019-03-08T15:49:18.000Z
|
2019-03-08T15:49:18.000Z
|
"""
Disaggregates daily data down to hourly data using some heuristics
"""
import numpy as np
import pandas as pd
import metsim
from metsim.defaults import PARAMS as params
from metsim.defaults import CONSTS as consts
tiny_rad_fract = np.zeros(366) #This is updated during the mtclim run
def disaggregate(df_daily):
"""
TODO
"""
dates_hourly = pd.date_range(metsim.start, metsim.stop, freq='H')
df_hourly = pd.DataFrame(index=dates_hourly)
_disagg_shortwave(df_daily, df_hourly)
_disagg_temp( df_daily, df_hourly)
_disagg_precip( df_daily, df_hourly)
_disagg_thermal( df_daily, df_hourly)
_disagg_wind( df_daily, df_hourly)
return df_hourly
def _disagg_temp(df_daily, df_hourly):
"""
TODO
"""
# Calculate times of min/max temps
set_min_max_hour(df_daily, df_hourly)
# Fit hermite polynomial and sample daily
def _disagg_precip(df_daily, df_hourly):
"""
TODO
"""
pass
def _disagg_thermal(df_daily, df_hourly):
"""
TODO
"""
pass
def _disagg_wind(df_daily, df_hourly):
"""
TODO
"""
pass
def _disagg_shortwave(df_daily, df_hourly):
"""
TODO
"""
tiny_step_per_hour = int(3600 / consts['SRADDT'])
tmp_rad = df_daily['s_swrad']
n_days = len(tmp_rad)
hourlyrad = np.zeros(n_days*24+1)
for i in range(n_days):
for j in range(24):
for k in range(tiny_step_per_hour):
tinystep = j*tiny_step_per_hour + k
if tinystep < 0:
tinystep += 24*tiny_step_per_hour
if tinystep > 24*tiny_step_per_hour - 1:
tinystep -= 24*tiny_step_per_hour
hourlyrad[i*24+j] += tiny_rad_fract[df_daily['day_of_year'][i]][tinystep]
#FIXME: This calculation is incorrect
hourlyrad[i*24+j] *= tmp_rad[i]
df_hourly['s_swrad'] = hourlyrad
def set_min_max_hour(df_daily, df_hourly):
"""
TODO
"""
hourly_rad = df_hourly['s_swrad']
n_days = len(df_daily)
t_max = np.zeros(n_days)
t_min = np.zeros(n_days)
for i in range(n_days):
risehour = sethour = -999
for hour in range(12):
if (hourly_rad[i*24+hour] > 0 and
(i*24+hour==0 or hourly_rad[i*24 + hour-1]<= 0)):
risehour = hour
for hour in range(12,24):
if (hourly_rad[i*24+hour] <= 0 and hourly_rad[i*24+hour-1]>0):
sethour = hour
if i == n_days -1 and sethour == -999:
sethour = 23
if risehour >=0 and sethour>=0:
            t_max[i] = 0.67 * (sethour - risehour) + risehour
            t_min[i] = risehour - 1
    df_daily['t_Tmin'] = t_min
    df_daily['t_Tmax'] = t_max
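# --- Illustrative sketch (not part of the original module) ---
# set_min_max_hour scans each day's hourly shortwave series for the sunrise
# hour (first positive value) and the sunset hour (first non-positive value
# after noon), then places the daily Tmax roughly two thirds of the way
# through the daylight period and Tmin just before sunrise. The toy day below
# uses made-up numbers purely to show that arithmetic.
def _min_max_hour_example():
    rad = [0.0] * 6 + [1.0] * 12 + [0.0] * 6   # daylight from hour 6 to 17
    risehour = next(h for h in range(12)
                    if rad[h] > 0 and (h == 0 or rad[h - 1] <= 0))
    sethour = next(h for h in range(12, 24)
                   if rad[h] <= 0 and rad[h - 1] > 0)
    tmax_hour = 0.67 * (sethour - risehour) + risehour
    tmin_hour = risehour - 1
    return risehour, sethour, tmax_hour, tmin_hour   # (6, 18, 14.04, 5)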
| 26.320755
| 89
| 0.602151
|
import numpy as np
import pandas as pd
import metsim
from metsim.defaults import PARAMS as params
from metsim.defaults import CONSTS as consts
tiny_rad_fract = np.zeros(366)
def disaggregate(df_daily):
dates_hourly = pd.date_range(metsim.start, metsim.stop, freq='H')
df_hourly = pd.DataFrame(index=dates_hourly)
_disagg_shortwave(df_daily, df_hourly)
_disagg_temp( df_daily, df_hourly)
_disagg_precip( df_daily, df_hourly)
_disagg_thermal( df_daily, df_hourly)
_disagg_wind( df_daily, df_hourly)
return df_hourly
def _disagg_temp(df_daily, df_hourly):
set_min_max_hour(df_daily, df_hourly)
def _disagg_precip(df_daily, df_hourly):
pass
def _disagg_thermal(df_daily, df_hourly):
pass
def _disagg_wind(df_daily, df_hourly):
pass
def _disagg_shortwave(df_daily, df_hourly):
tiny_step_per_hour = int(3600 / consts['SRADDT'])
tmp_rad = df_daily['s_swrad']
n_days = len(tmp_rad)
hourlyrad = np.zeros(n_days*24+1)
for i in range(n_days):
for j in range(24):
for k in range(tiny_step_per_hour):
tinystep = j*tiny_step_per_hour + k
if tinystep < 0:
tinystep += 24*tiny_step_per_hour
if tinystep > 24*tiny_step_per_hour - 1:
tinystep -= 24*tiny_step_per_hour
hourlyrad[i*24+j] += tiny_rad_fract[df_daily['day_of_year'][i]][tinystep]
hourlyrad[i*24+j] *= tmp_rad[i]
df_hourly['s_swrad'] = hourlyrad
def set_min_max_hour(df_daily, df_hourly):
hourly_rad = df_hourly['s_swrad']
n_days = len(df_daily)
t_max = np.zeros(n_days)
t_min = np.zeros(n_days)
for i in range(n_days):
risehour = sethour = -999
for hour in range(12):
if (hourly_rad[i*24+hour] > 0 and
(i*24+hour==0 or hourly_rad[i*24 + hour-1]<= 0)):
risehour = hour
for hour in range(12,24):
if (hourly_rad[i*24+hour] <= 0 and hourly_rad[i*24+hour-1]>0):
sethour = hour
if i == n_days -1 and sethour == -999:
sethour = 23
if risehour >=0 and sethour>=0:
            t_max[i] = 0.67 * (sethour - risehour) + risehour
            t_min[i] = risehour - 1
    df_daily['t_Tmin'] = t_min
    df_daily['t_Tmax'] = t_max
| true
| true
|
1c44f55e15605292078d004fd97a46496530c4c8
| 2,334
|
py
|
Python
|
Tools/MonoGenerator/install_name_tool.py
|
mortend/fuse-studio
|
ae299fc6bc04aa3db7b4e66034109ffe96b142b9
|
[
"MIT"
] | 324
|
2018-05-14T08:17:17.000Z
|
2022-02-21T14:50:07.000Z
|
Tools/MonoGenerator/install_name_tool.py
|
mortend/fuse-studio
|
ae299fc6bc04aa3db7b4e66034109ffe96b142b9
|
[
"MIT"
] | 27
|
2018-05-14T15:17:46.000Z
|
2021-04-20T12:01:38.000Z
|
Tools/MonoGenerator/install_name_tool.py
|
mortend/fuse-studio
|
ae299fc6bc04aa3db7b4e66034109ffe96b142b9
|
[
"MIT"
] | 53
|
2018-05-14T07:56:17.000Z
|
2022-01-04T06:33:11.000Z
|
import subprocess
import os
import fnmatch
from os import path
import shutil
def glob_recursive(path, f):
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, f):
yield root + "/" + filename
def otool(s, basepath_filters):
o = subprocess.Popen(['/usr/bin/otool', '-L', s], stdout=subprocess.PIPE)
for l in o.stdout:
if l[0] == '\t':
lib = l.split(' ', 1)[0][1:]
if (type(basepath_filters) is list and [x for x in basepath_filters if lib.startswith(x)]):
yield lib
def get_all_req_dependencies(lib, source_base_paths):
need = set([lib])
done = set()
while need:
needed = set(need)
need = set()
for f in needed:
need.update(otool(f, source_base_paths))
done.update(needed)
need.difference_update(done)
return done
def fixup_all_dylib_references(base_path, prefix, source_base_paths):
included_dylib_paths = {}
for f in glob_recursive(base_path, "*.dylib"):
rel = f[len(base_path):]
included_dylib_paths[path.basename(rel)] = rel
subprocess.check_call(['install_name_tool', '-id', prefix + rel, f])
# Another time, but fixup references to all bundled dylib files
for f in glob_recursive(base_path, "*.dylib"):
print('Fixing dependency paths for ' + f)
for ref in otool(f, source_base_paths):
print(' processing dep ' + ref)
if path.basename(ref) in included_dylib_paths:
newPath = prefix + included_dylib_paths[path.basename(ref)]
subprocess.check_call(['install_name_tool', '-change', ref, newPath, f])
def copy_lib_and_dependencies(from_path, to_path, with_prefix, base_paths):
real_from_path = path.realpath(from_path)
deps = get_all_req_dependencies(real_from_path, base_paths)
lib_path = to_path
for cur_lib in deps:
cur_lib_path = path.join(lib_path, path.basename(cur_lib))
shutil.copy(cur_lib, cur_lib_path)
# Add symlink to specific library.
if path.islink(from_path):
os.symlink(path.basename(real_from_path), path.join(lib_path, path.basename(from_path)))
def add_rpath(exe_path, rpath):
subprocess.check_call(['install_name_tool', '-add_rpath', rpath, exe_path])
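# --- Illustrative sketch (not part of the original script) ---
# get_all_req_dependencies computes a transitive closure with a simple
# worklist: keep asking otool for the dependencies of newly discovered
# libraries until nothing new appears. The same loop is shown below on a
# hypothetical in-memory graph instead of real otool output, so it runs
# anywhere.
def _dependency_closure_demo():
    graph = {
        'libA.dylib': ['libB.dylib', 'libC.dylib'],
        'libB.dylib': ['libC.dylib'],
        'libC.dylib': [],
    }
    need, done = {'libA.dylib'}, set()
    while need:
        needed = set(need)
        need = set()
        for lib in needed:
            need.update(graph.get(lib, []))
        done.update(needed)
        need.difference_update(done)
    return done   # {'libA.dylib', 'libB.dylib', 'libC.dylib'}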
| 36.46875
| 103
| 0.658098
|
import subprocess
import os
import fnmatch
from os import path
import shutil
def glob_recursive(path, f):
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, f):
yield root + "/" + filename
def otool(s, basepath_filters):
o = subprocess.Popen(['/usr/bin/otool', '-L', s], stdout=subprocess.PIPE)
for l in o.stdout:
if l[0] == '\t':
lib = l.split(' ', 1)[0][1:]
if (type(basepath_filters) is list and [x for x in basepath_filters if lib.startswith(x)]):
yield lib
def get_all_req_dependencies(lib, source_base_paths):
need = set([lib])
done = set()
while need:
needed = set(need)
need = set()
for f in needed:
need.update(otool(f, source_base_paths))
done.update(needed)
need.difference_update(done)
return done
def fixup_all_dylib_references(base_path, prefix, source_base_paths):
included_dylib_paths = {}
for f in glob_recursive(base_path, "*.dylib"):
rel = f[len(base_path):]
included_dylib_paths[path.basename(rel)] = rel
subprocess.check_call(['install_name_tool', '-id', prefix + rel, f])
for f in glob_recursive(base_path, "*.dylib"):
print('Fixing dependency paths for ' + f)
for ref in otool(f, source_base_paths):
print(' processing dep ' + ref)
if path.basename(ref) in included_dylib_paths:
newPath = prefix + included_dylib_paths[path.basename(ref)]
subprocess.check_call(['install_name_tool', '-change', ref, newPath, f])
def copy_lib_and_dependencies(from_path, to_path, with_prefix, base_paths):
real_from_path = path.realpath(from_path)
deps = get_all_req_dependencies(real_from_path, base_paths)
lib_path = to_path
for cur_lib in deps:
cur_lib_path = path.join(lib_path, path.basename(cur_lib))
shutil.copy(cur_lib, cur_lib_path)
if path.islink(from_path):
os.symlink(path.basename(real_from_path), path.join(lib_path, path.basename(from_path)))
def add_rpath(exe_path, rpath):
subprocess.check_call(['install_name_tool', '-add_rpath', rpath, exe_path])
| true
| true
|
1c44f57d976221e4da59508cd5ba0dfcab34b1ad
| 1,169
|
py
|
Python
|
lintcode/Sort/830. String Sort.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | 23
|
2019-08-02T12:02:47.000Z
|
2022-03-09T15:24:16.000Z
|
lintcode/Sort/830. String Sort.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | null | null | null |
lintcode/Sort/830. String Sort.py
|
yanshengjia/algorithm
|
0608d286be9c93d51768d47f21e569c6b0be9cda
|
[
"MIT"
] | 21
|
2019-12-22T04:47:32.000Z
|
2021-09-12T14:29:35.000Z
|
"""
Given a string, sort its characters primarily by frequency (most frequent first) and secondarily by dictionary order.
Example1
Input: str = "bloomberg"
Output: "bbooeglmr"
Explanation:
'b' and 'o' appear most frequently, but 'b' comes before 'o' in dictionary order, so 'b' is ranked first, followed by 'o', and so on.
Solution:
Custom Sort.
We need to write a compare function according to the requirement.
"""
# Python2
# > 90%
# Time: O(NlogN), where N is the length of str
# Space: O(N)
class Solution:
"""
@param str: the string that needs to be sorted
@return: sorted string
"""
def stringSort(self, str):
# Write your code here
d = dict()
for c in str:
d[c] = d.get(c, 0) + 1
def compare(a, b):
if d[a] == d[b]:
if a < b:
return -1
elif a > b:
return 1
else:
return 0
else:
return d[b] - d[a]
l = list(str)
l.sort(cmp=compare)
return ''.join(l)
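# --- Usage sketch (not part of the original solution) ---
# Per the docstring, Solution().stringSort("bloomberg") should return
# "bbooeglmr". On Python 3, where list.sort() no longer accepts cmp=, the same
# ordering can be expressed with a key function (count descending, then the
# character itself); this variant is included only as an illustration.
def string_sort_py3(s):
    from collections import Counter
    counts = Counter(s)
    return ''.join(sorted(s, key=lambda c: (-counts[c], c)))

# string_sort_py3("bloomberg") == "bbooeglmr"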
| 25.413043
| 151
| 0.538067
|
class Solution:
def stringSort(self, str):
d = dict()
for c in str:
d[c] = d.get(c, 0) + 1
def compare(a, b):
if d[a] == d[b]:
if a < b:
return -1
elif a > b:
return 1
else:
return 0
else:
return d[b] - d[a]
l = list(str)
l.sort(cmp=compare)
return ''.join(l)
| true
| true
|
1c44f626da1edbfc30140cf9afcc3f8421b5b200
| 5,414
|
py
|
Python
|
tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 9
|
2019-06-05T06:48:07.000Z
|
2020-09-29T07:08:02.000Z
|
tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 2
|
2021-11-10T20:21:47.000Z
|
2022-02-10T04:12:28.000Z
|
tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 3
|
2019-06-28T02:28:27.000Z
|
2021-07-06T08:16:19.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.from_tensor_slices()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_dataset_ops
class SingleThreadedFlatMapDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over its input and flattens the result."""
def __init__(self, input_dataset, map_func):
"""See `Dataset.flat_map()` for details."""
self._input_dataset = input_dataset
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
defun_kwargs={"_executor": "SINGLE_THREADED_EXECUTOR"})
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
variant_tensor = gen_dataset_ops.flat_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
**self._flat_structure)
super(SingleThreadedFlatMapDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "SingleThreadedFlatMapDataset"
class FromTensorSlicesBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.from_tensor_slices()`."""
def benchmark_slice_repeat_batch(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).repeat(
num_epochs).batch(batch_size))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_repeat_batch_input_%d_batch_%d" % (input_size, batch_size))
def benchmark_reshape_slice_repeat(self):
input_size = 10000
reshape_dim = [100, 100]
num_epochs = 100
num_elements = num_epochs * reshape_dim[0]
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(
input_data.reshape(*reshape_dim)).repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="reshape_slice_repeat_input_%d" % input_size,
)
def benchmark_slice_repeat_sparse(self):
non_zeros_per_row_values = [0, 1, 5, 10, 100]
num_rows_values = [32, 64, 128, 1024]
for non_zeros_per_row in non_zeros_per_row_values:
tensor = sparse_tensor.SparseTensor(
indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
values=np.arange(non_zeros_per_row, dtype=np.int64),
dense_shape=[1000])
for num_rows in num_rows_values:
# TODO(b/147153744): Function-valued attributes with their own
# attributes are currently only supported in graph mode.
@def_function.function
def make_dataset():
batched = dataset_ops.Dataset.from_tensors(tensor).repeat(
num_rows).batch(num_rows) # pylint: disable=cell-var-from-loop
batched_tensor = get_single_element.get_single_element(batched)
dataset = dataset_ops.Dataset.from_tensors(batched_tensor).repeat()
return SingleThreadedFlatMapDataset(
dataset, dataset_ops.Dataset.from_tensor_slices)
self.run_and_report_benchmark(
make_dataset(),
num_elements=100000,
iters=5,
name="slice_repeat_sparse_elements_per_row_%d_num_rows_%d" %
(non_zeros_per_row, num_rows))
def benchmark_slice_batch_cache_repeat(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).batch(
batch_size).cache().repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_batch_cache_repeat_input_%d_batch_%d" %
(input_size, batch_size))
if __name__ == "__main__":
benchmark_base.test.main()
| 35.618421
| 103
| 0.707056
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_dataset_ops
class SingleThreadedFlatMapDataset(dataset_ops.UnaryDataset):
def __init__(self, input_dataset, map_func):
self._input_dataset = input_dataset
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
defun_kwargs={"_executor": "SINGLE_THREADED_EXECUTOR"})
self._structure = self._map_func.output_structure._element_spec
variant_tensor = gen_dataset_ops.flat_map_dataset(
input_dataset._variant_tensor,
self._map_func.function.captured_inputs,
f=self._map_func.function,
**self._flat_structure)
super(SingleThreadedFlatMapDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "SingleThreadedFlatMapDataset"
class FromTensorSlicesBenchmark(benchmark_base.DatasetBenchmarkBase):
def benchmark_slice_repeat_batch(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).repeat(
num_epochs).batch(batch_size))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_repeat_batch_input_%d_batch_%d" % (input_size, batch_size))
def benchmark_reshape_slice_repeat(self):
input_size = 10000
reshape_dim = [100, 100]
num_epochs = 100
num_elements = num_epochs * reshape_dim[0]
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(
input_data.reshape(*reshape_dim)).repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="reshape_slice_repeat_input_%d" % input_size,
)
def benchmark_slice_repeat_sparse(self):
non_zeros_per_row_values = [0, 1, 5, 10, 100]
num_rows_values = [32, 64, 128, 1024]
for non_zeros_per_row in non_zeros_per_row_values:
tensor = sparse_tensor.SparseTensor(
indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis],
values=np.arange(non_zeros_per_row, dtype=np.int64),
dense_shape=[1000])
for num_rows in num_rows_values:
@def_function.function
def make_dataset():
batched = dataset_ops.Dataset.from_tensors(tensor).repeat(
num_rows).batch(num_rows)
batched_tensor = get_single_element.get_single_element(batched)
dataset = dataset_ops.Dataset.from_tensors(batched_tensor).repeat()
return SingleThreadedFlatMapDataset(
dataset, dataset_ops.Dataset.from_tensor_slices)
self.run_and_report_benchmark(
make_dataset(),
num_elements=100000,
iters=5,
name="slice_repeat_sparse_elements_per_row_%d_num_rows_%d" %
(non_zeros_per_row, num_rows))
def benchmark_slice_batch_cache_repeat(self):
input_size = 10000
batch_size = 100
num_epochs = 100
num_elements = input_size * num_epochs // batch_size
input_data = np.random.randn(input_size)
dataset = (
dataset_ops.Dataset.from_tensor_slices(input_data).batch(
batch_size).cache().repeat(num_epochs))
self.run_and_report_benchmark(
dataset,
num_elements=num_elements,
name="slice_batch_cache_repeat_input_%d_batch_%d" %
(input_size, batch_size))
if __name__ == "__main__":
benchmark_base.test.main()
| true
| true
|
1c44f6f093c03a3c1325f256845fa710b31c5dd2
| 1,262
|
py
|
Python
|
2D_from_3D_nii.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
2D_from_3D_nii.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
2D_from_3D_nii.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
import cv2
import scipy.misc
import SimpleITK as sitk #reading MR images
import glob
readfolderT = glob.glob('path/EADC_HHP/*_MNI.nii.gz')
readfolderL = glob.glob('path/*_HHP_EADC.nii.gz')
TrainingImagesList = []
TrainingLabelsList = []
for i in range(len(readfolderT)):
y_folder = readfolderT[i]
yread = sitk.ReadImage(y_folder)
yimage = sitk.GetArrayFromImage(yread)
x = yimage[:184,:232,112:136]
x = scipy.rot90(x)
x = scipy.rot90(x)
for j in range(x.shape[2]):
TrainingImagesList.append((x[:184,:224,j]))
for i in range(len(readfolderL)):
y_folder = readfolderL[i]
yread = sitk.ReadImage(y_folder)
yimage = sitk.GetArrayFromImage(yread)
x = yimage[:184,:232,112:136]
x = scipy.rot90(x)
x = scipy.rot90(x)
for j in range(x.shape[2]):
TrainingLabelsList.append((x[:184,:224,j]))
for i in range(len(TrainingImagesList)):
xchangeL = TrainingImagesList[i]
xchangeL = cv2.resize(xchangeL,(128,128))
scipy.misc.imsave('path/Image/png_1C_images/'+str(i)+'.png',xchangeL)
for i in range(len(TrainingLabelsList)):
xchangeL = TrainingLabelsList[i]
xchangeL = cv2.resize(xchangeL,(128,128))
scipy.misc.imsave('path/Image/png_1C_labels/'+str(i)+'.png',xchangeL)
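# --- Illustrative sketch (not part of the original script) ---
# Each volume above contributes 24 axial slices (indices 112-135), and the two
# 90-degree rotations amount to a 180-degree flip in the first two axes. The
# toy array below (hypothetical dimensions) shows the resulting shapes using
# numpy, whose rot90 matches the behaviour relied on here.
def _slice_shape_demo():
    import numpy as np
    vol = np.zeros((184, 232, 160))
    x = vol[:184, :232, 112:136]        # 24 slices along the last axis
    x = np.rot90(np.rot90(x))           # 180-degree rotation in axes (0, 1)
    return x.shape                      # (184, 232, 24)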
| 26.291667
| 73
| 0.680666
|
import cv2
import scipy.misc
import SimpleITK as sitk
import glob
readfolderT = glob.glob('path/EADC_HHP/*_MNI.nii.gz')
readfolderL = glob.glob('path/*_HHP_EADC.nii.gz')
TrainingImagesList = []
TrainingLabelsList = []
for i in range(len(readfolderT)):
y_folder = readfolderT[i]
yread = sitk.ReadImage(y_folder)
yimage = sitk.GetArrayFromImage(yread)
x = yimage[:184,:232,112:136]
x = scipy.rot90(x)
x = scipy.rot90(x)
for j in range(x.shape[2]):
TrainingImagesList.append((x[:184,:224,j]))
for i in range(len(readfolderL)):
y_folder = readfolderL[i]
yread = sitk.ReadImage(y_folder)
yimage = sitk.GetArrayFromImage(yread)
x = yimage[:184,:232,112:136]
x = scipy.rot90(x)
x = scipy.rot90(x)
for j in range(x.shape[2]):
TrainingLabelsList.append((x[:184,:224,j]))
for i in range(len(TrainingImagesList)):
xchangeL = TrainingImagesList[i]
xchangeL = cv2.resize(xchangeL,(128,128))
scipy.misc.imsave('path/Image/png_1C_images/'+str(i)+'.png',xchangeL)
for i in range(len(TrainingLabelsList)):
xchangeL = TrainingLabelsList[i]
xchangeL = cv2.resize(xchangeL,(128,128))
scipy.misc.imsave('path/Image/png_1C_labels/'+str(i)+'.png',xchangeL)
| true
| true
|
1c44f89d86e4e31b4b6bb6ea684f07345c57a00b
| 4,184
|
py
|
Python
|
lexer.py
|
gmCrivelli/Lya-Compiler
|
f323b6affb39a496155169aa8ce678efb80c2f9b
|
[
"MIT"
] | null | null | null |
lexer.py
|
gmCrivelli/Lya-Compiler
|
f323b6affb39a496155169aa8ce678efb80c2f9b
|
[
"MIT"
] | null | null | null |
lexer.py
|
gmCrivelli/Lya-Compiler
|
f323b6affb39a496155169aa8ce678efb80c2f9b
|
[
"MIT"
] | null | null | null |
import sys
import ply.lex as lex
import re
class Lexer:
def __init__(self):
self.build()
def build(self):
self.lexer = lex.lex(self)
def input(self, input):
self.lexer.input(input)
def token(self):
        return self.lexer.token()  # keep the last token?
# Reserved
reserved = {
# Reserved words
'array': 'ARRAY',
'by': 'BY',
'chars': 'CHARS',
'dcl': 'DCL',
'do': 'DO',
'down': 'DOWN',
'else': 'ELSE',
'elsif': 'ELSIF',
'end': 'END',
'exit': 'EXIT',
'fi': 'FI',
'for': 'FOR',
'if': 'IF',
'in': 'IN',
'loc': 'LOC',
'type': 'TYPE',
'od': 'OD',
'proc': 'PROC',
'ref': 'REF',
'result': 'RESULT',
'return': 'RETURN',
'returns': 'RETURNS',
'syn': 'SYN',
'then': 'THEN',
'to': 'TO',
'while': 'WHILE',
# Predefined words
'abs': 'ABS',
'asc': 'ASC',
'bool': 'BOOL',
'char': 'CHAR',
'false': 'FALSE',
'int': 'INT',
'length': 'LENGTH',
'lower': 'LOWER',
'null': 'NULL',
'num': 'NUM',
'print': 'PRINT',
'read': 'READ',
'true': 'TRUE',
'upper': 'UPPER'
}
# Tokens
tokens = [
# Identifier
'ID',
# && || &
# Operations and Delimiters
'PLUS', 'MINUS', 'TIMES', 'DIVIDE',
'ASSIGN', 'COMMA', 'COLON', 'SEMI', 'ARROW',
'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET',
'LESS', 'LESSEQ', 'GREATER', 'GREATEREQ', 'EQUAL',
'AND', 'OR', 'STRCAT',
'INCREASE', 'DECREASE', 'MULCREASE', 'DIVCREASE', 'MODCREASE',
'DIFF', 'MOD','NOT',
# Literals
'ICONST', 'CCONST', 'SCONST'
] + list(reserved.values())
# Operations and Delimiters
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/(?!\*)'
t_ASSIGN = r'='
t_COMMA = r','
t_COLON = r':'
t_SEMI = r';'
t_ARROW = r'->'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LESS = r'<'
t_LESSEQ = r'<='
t_GREATER = r'>'
t_GREATEREQ = r'>='
t_EQUAL = r'=='
t_AND = r'&&'
t_OR = r'\|\|'
t_STRCAT = r'&'
t_INCREASE = r'\+='
t_DECREASE = r'-='
t_MULCREASE = r'\*='
t_DIVCREASE = r'/='
t_MODCREASE = r'%='
t_DIFF = r'!='
t_NOT = r'!'
t_MOD = r'%'
# Comments
    t_ignore_COMMENT = r'((/\*(.|\n)*?\*/)|//.*)'
# Identifier
def t_ID(self, t):
r'[A-Za-z_][a-zA-Z0-9_]*'
t.type = self.reserved.get(t.value, 'ID') # Check for reserved words
return t
def t_ICONST(self, t):
r'\d+'
t.value = int(t.value)
return t
def t_CCONST(self, t):
r'\'(\\\"|\\\'|[^\'\"])\''
t.value = ord(t.value[1:-1])
return t
def t_SCONST(self, t):
r'\"(\\\"|\\\'|[^\'\"\n])*\"'
ascii_list = []
for character in t.value:
ascii_list.append(ord(character))
t.value = ascii_list
return t
# Ignored characters
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error_STRING(self, t):
r'\".*'
print(str(t.lexer.lineno) + ": Unterminated string")
def t_error(self, t):
if(re.match("/\*.*", t.value) != None):
print(str(t.lexer.lineno) + ": Unterminated comment")
t.lexer.skip(len(t.value))
else:
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Run lexer on given file
def main():
file_name = sys.argv[1]
# Read given file
file = open(file_name, "r")
file_content = file.read()
l = Lexer()
#l.build()
# Give the lexer some input
l.lexer.input(file_content)
# Tokenize
while True:
tok = l.lexer.token()
if not tok:
break # No more input
print(tok)
if __name__ == "__main__": main()
| 21.791667
| 76
| 0.44718
|
import sys
import ply.lex as lex
import re
class Lexer:
def __init__(self):
self.build()
def build(self):
self.lexer = lex.lex(self)
def input(self, input):
self.lexer.input(input)
def token(self):
return self.lexer.token()
reserved = {
'array': 'ARRAY',
'by': 'BY',
'chars': 'CHARS',
'dcl': 'DCL',
'do': 'DO',
'down': 'DOWN',
'else': 'ELSE',
'elsif': 'ELSIF',
'end': 'END',
'exit': 'EXIT',
'fi': 'FI',
'for': 'FOR',
'if': 'IF',
'in': 'IN',
'loc': 'LOC',
'type': 'TYPE',
'od': 'OD',
'proc': 'PROC',
'ref': 'REF',
'result': 'RESULT',
'return': 'RETURN',
'returns': 'RETURNS',
'syn': 'SYN',
'then': 'THEN',
'to': 'TO',
'while': 'WHILE',
'abs': 'ABS',
'asc': 'ASC',
'bool': 'BOOL',
'char': 'CHAR',
'false': 'FALSE',
'int': 'INT',
'length': 'LENGTH',
'lower': 'LOWER',
'null': 'NULL',
'num': 'NUM',
'print': 'PRINT',
'read': 'READ',
'true': 'TRUE',
'upper': 'UPPER'
}
tokens = [
'ID',
'PLUS', 'MINUS', 'TIMES', 'DIVIDE',
'ASSIGN', 'COMMA', 'COLON', 'SEMI', 'ARROW',
'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET',
'LESS', 'LESSEQ', 'GREATER', 'GREATEREQ', 'EQUAL',
'AND', 'OR', 'STRCAT',
'INCREASE', 'DECREASE', 'MULCREASE', 'DIVCREASE', 'MODCREASE',
'DIFF', 'MOD','NOT',
'ICONST', 'CCONST', 'SCONST'
] + list(reserved.values())
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/(?!\*)'
t_ASSIGN = r'='
t_COMMA = r','
t_COLON = r':'
t_SEMI = r';'
t_ARROW = r'->'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LESS = r'<'
t_LESSEQ = r'<='
t_GREATER = r'>'
t_GREATEREQ = r'>='
t_EQUAL = r'=='
t_AND = r'&&'
t_OR = r'\|\|'
t_STRCAT = r'&'
t_INCREASE = r'\+='
t_DECREASE = r'-='
t_MULCREASE = r'\*='
t_DIVCREASE = r'/='
t_MODCREASE = r'%='
t_DIFF = r'!='
t_NOT = r'!'
t_MOD = r'%'
    t_ignore_COMMENT = r'((/\*(.|\n)*?\*/)|//.*)'
def t_ID(self, t):
t.type = self.reserved.get(t.value, 'ID')
return t
def t_ICONST(self, t):
t.value = int(t.value)
return t
def t_CCONST(self, t):
t.value = ord(t.value[1:-1])
return t
def t_SCONST(self, t):
ascii_list = []
for character in t.value:
ascii_list.append(ord(character))
t.value = ascii_list
return t
t_ignore = " \t"
def t_newline(self, t):
t.lexer.lineno += t.value.count("\n")
def t_error_STRING(self, t):
print(str(t.lexer.lineno) + ": Unterminated string")
def t_error(self, t):
if(re.match("/\*.*", t.value) != None):
print(str(t.lexer.lineno) + ": Unterminated comment")
t.lexer.skip(len(t.value))
else:
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
def main():
file_name = sys.argv[1]
file = open(file_name, "r")
file_content = file.read()
l = Lexer()
l.lexer.input(file_content)
while True:
tok = l.lexer.token()
if not tok:
break
print(tok)
if __name__ == "__main__": main()
| true
| true
|
1c44f91b81dc8bfac6086652fb149826007d78d1
| 3,229
|
py
|
Python
|
trac/upgrades/db18.py
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
trac/upgrades/db18.py
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
trac/upgrades/db18.py
|
rwbaumg/trac
|
a3b8eb6db4f4999fab421e31615bb8eb8da6fdba
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.db import Table, Column, Index, DatabaseManager
def do_upgrade(env, ver, cursor):
cursor.execute("CREATE TEMPORARY TABLE session_old AS SELECT * FROM session")
cursor.execute("DROP TABLE session")
cursor.execute("CREATE TEMPORARY TABLE ticket_change_old AS SELECT * FROM ticket_change")
cursor.execute("DROP TABLE ticket_change")
# A more normalized session schema where the attributes are stored in
# a separate table
tables = [Table('session', key=('sid', 'authenticated'))[
Column('sid'),
Column('authenticated', type='int'),
Column('last_visit', type='int'),
Index(['last_visit']),
Index(['authenticated'])],
Table('session_attribute', key=('sid', 'authenticated', 'name'))[
Column('sid'),
Column('authenticated', type='int'),
Column('name'),
Column('value')],
Table('ticket_change', key=('ticket', 'time', 'field'))[
Column('ticket', type='int'),
Column('time', type='int'),
Column('author'),
Column('field'),
Column('oldvalue'),
Column('newvalue'),
Index(['ticket']),
Index(['time'])]]
db_connector, _ = DatabaseManager(env).get_connector()
for table in tables:
for stmt in db_connector.to_sql(table):
cursor.execute(stmt)
# Add an index to the temporary table to speed up the conversion
cursor.execute("CREATE INDEX session_old_sid_idx ON session_old(sid)")
# Insert the sessions into the new table
with env.db_query as db:
cursor.execute("""
INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM session_old AS s LEFT JOIN session_old AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL
""" % db.cast('s2.var_value', 'int'))
cursor.execute("""
INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM session_old s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL
""")
# Insert ticket change data into the new table
cursor.execute("""
INSERT INTO ticket_change (ticket, time, author, field, oldvalue,
newvalue)
SELECT ticket, time, author, field, oldvalue, newvalue
FROM ticket_change_old
""")
cursor.execute("DROP TABLE session_old")
cursor.execute("DROP TABLE ticket_change_old")
| 41.397436
| 93
| 0.612264
|
from trac.db import Table, Column, Index, DatabaseManager
def do_upgrade(env, ver, cursor):
cursor.execute("CREATE TEMPORARY TABLE session_old AS SELECT * FROM session")
cursor.execute("DROP TABLE session")
cursor.execute("CREATE TEMPORARY TABLE ticket_change_old AS SELECT * FROM ticket_change")
cursor.execute("DROP TABLE ticket_change")
tables = [Table('session', key=('sid', 'authenticated'))[
Column('sid'),
Column('authenticated', type='int'),
Column('last_visit', type='int'),
Index(['last_visit']),
Index(['authenticated'])],
Table('session_attribute', key=('sid', 'authenticated', 'name'))[
Column('sid'),
Column('authenticated', type='int'),
Column('name'),
Column('value')],
Table('ticket_change', key=('ticket', 'time', 'field'))[
Column('ticket', type='int'),
Column('time', type='int'),
Column('author'),
Column('field'),
Column('oldvalue'),
Column('newvalue'),
Index(['ticket']),
Index(['time'])]]
db_connector, _ = DatabaseManager(env).get_connector()
for table in tables:
for stmt in db_connector.to_sql(table):
cursor.execute(stmt)
cursor.execute("CREATE INDEX session_old_sid_idx ON session_old(sid)")
with env.db_query as db:
cursor.execute("""
INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM session_old AS s LEFT JOIN session_old AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL
""" % db.cast('s2.var_value', 'int'))
cursor.execute("""
INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM session_old s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL
""")
cursor.execute("""
INSERT INTO ticket_change (ticket, time, author, field, oldvalue,
newvalue)
SELECT ticket, time, author, field, oldvalue, newvalue
FROM ticket_change_old
""")
cursor.execute("DROP TABLE session_old")
cursor.execute("DROP TABLE ticket_change_old")
| true
| true
|
1c44f9a6633a4f9e3a11d3413aa35fee4910ba64
| 6,115
|
py
|
Python
|
app/main.py
|
grow/buildbot
|
31e2bbb2cafb9b472b3c4b98b29b9595b90ba9ee
|
[
"MIT"
] | null | null | null |
app/main.py
|
grow/buildbot
|
31e2bbb2cafb9b472b3c4b98b29b9595b90ba9ee
|
[
"MIT"
] | null | null | null |
app/main.py
|
grow/buildbot
|
31e2bbb2cafb9b472b3c4b98b29b9595b90ba9ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from flask import request
from functools import wraps
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
import flask
import os
import mimetypes
import urllib2
import restfulgit
import repos_service
import jobs_service
from restfulgit import app_factory as restfulgit_app_factory
# Mount RestfulGit at /api/git so the temporary directories can be browsed.
class RestfulGitConfig(object):
RESTFULGIT_REPO_BASE_PATH = repos_service.get_workspace_root()
main_app = flask.Flask(__name__)
main_app.debug = True
restfulgit_app = restfulgit_app_factory.create_app(RestfulGitConfig)
app = DispatcherMiddleware(
main_app,
{
'/api/git': restfulgit_app,
},
)
def get_buildbot_password_or_die():
"""Fetches the buildbot password either from GCP metadata or from an environment variable."""
try:
url = 'http://metadata.google.internal/computeMetadata/v1/instance/attributes/buildbot-password'
headers = {'Metadata-Flavor': 'Google'}
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
return response.read()
except (urllib2.URLError, urllib2.HTTPError):
# Fall through to the environment variable.
return os.environ['BUILDBOT_PASSWORD']
def check_auth(username, password):
return username == 'admin' and password == get_buildbot_password_or_die()
def unauthorized():
return flask.Response('Unauthorized', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
def auth_required(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return unauthorized()
return f(*args, **kwargs)
return decorated
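# Illustrative note (assumption, not part of the original module): every view
# below is wrapped with @auth_required, so clients authenticate via HTTP Basic
# auth with the username/password checked in check_auth() above, e.g.
#   curl -u admin:<buildbot password> http://localhost:5000/api/jobs/sync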
@main_app.route('/', defaults={'path': ''})
@main_app.route('/<path:path>')
@auth_required
def catch_all(path):
return '404', 404
@main_app.route('/')
@auth_required
def index():
jobs = jobs_service.list_jobs()
builds = jobs_service.list_builds(limit=20)
return flask.render_template('index.html', builds=builds, jobs=jobs)
@main_app.route('/builds')
@auth_required
def builds():
builds = jobs_service.list_builds()
return flask.render_template('builds.html', builds=builds)
@main_app.route('/jobs')
@auth_required
def jobs():
jobs = jobs_service.list_jobs()
return flask.render_template('jobs.html', jobs=jobs)
@main_app.route('/job/<int:job_id>/browse/<path:ref>')
@auth_required
def job_browse_ref(job_id, ref):
raise NotImplementedError
job = jobs_service.get_job(job_id)
return flask.render_template('browse_ref.html', job=job, ref=ref)
@main_app.route('/builds/<int:build_id>')
@auth_required
def build(build_id):
build = jobs_service.get_build(build_id)
return flask.render_template('build.html', build=build)
@main_app.route('/api/jobs/<int:job_id>/contents/update', methods=['POST'])
@auth_required
def update_contents(job_id):
data = request.get_json()
repo = repos_service.get_repo(job_id)
try:
resp = repos_service.update(
repo=repo,
branch=data['branch'],
path=data['path'],
content=data['content'],
sha=data['sha'],
message=data['message'],
committer=data['committer'],
author=data['author'])
return flask.jsonify({'success': True, 'resp': resp})
except repos_service.Error as e:
return flask.jsonify({'success': False, 'error': str(e)})
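# Example request body for /api/jobs/<job_id>/contents/update (illustrative
# only; the field names are simply those read by the handler above):
#   {"branch": "...", "path": "...", "content": "...", "sha": "...",
#    "message": "...", "committer": "...", "author": "..."}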
@main_app.route('/api/jobs', methods=['POST'])
@auth_required
def create_job():
# TODO: better JSON API parsing and error responses.
data = request.get_json()
assert data.get('git_url')
assert data.get('remote')
assert data.get('env')
assert data['env'].get('WEBREVIEW_API_KEY')
job_id = jobs_service.create_job(
git_url=data['git_url'],
remote=data['remote'],
env=data['env'],
)
return flask.jsonify({'success': True, 'job_id': job_id})
@main_app.route('/api/jobs/<int:job_id>', methods=['GET'])
@auth_required
def get_job(job_id):
job = jobs_service.get_job(job_id)
return flask.jsonify({'success': True, 'job': job.serialize()})
@main_app.route('/api/jobs/<int:job_id>', methods=['DELETE'])
@auth_required
def delete_job(job_id):
job_id = jobs_service.delete_job(job_id)
return flask.jsonify({'success': True})
@main_app.route('/api/jobs/sync', methods=['GET', 'POST'])
@auth_required
def sync_jobs():
data = jobs_service.sync_all_jobs()
jobs_with_new_builds = [job_id for job_id in data if data[job_id]]
if jobs_with_new_builds:
message = 'Refs changed, enqueued builds from %s jobs.' % len(jobs_with_new_builds)
else:
message = 'No refs changed in any jobs, nothing to build.'
return flask.jsonify({'success': True, 'message': message})
@main_app.route('/api/jobs/sync_forks', methods=['GET', 'POST'])
@auth_required
def sync_forks():
job_ids_synced = jobs_service.sync_all_forks()
return flask.jsonify({'success': True, 'message': 'Synced %s forks.' % len(job_ids_synced)})
@main_app.route('/api/jobs/<int:job_id>/sync', methods=['GET', 'POST'])
@auth_required
def sync_job(job_id):
# Update refs and trigger all builds.
build_ids = jobs_service.sync_job(job_id)
if build_ids:
message = 'Refs changed, enqueued %s builds.' % len(build_ids)
else:
message = 'No refs changed, nothing to build.'
return flask.jsonify({'success': True, 'message': message})
@main_app.route('/api/jobs/<int:job_id>/sync_fork', methods=['GET', 'POST'])
@auth_required
def sync_fork(job_id):
build_ids = jobs_service.sync_fork(job_id)
return flask.jsonify({'success': True})
@main_app.route('/api/jobs/<int:job_id>/run', methods=['GET', 'POST'])
@auth_required
def run_job(job_id):
ref = request.args.get('ref')
commit_sha = request.args.get('commit_sha')
assert ref
assert commit_sha
# Trigger build of single ref and commit SHA.
build_id = jobs_service.enqueue_build(job_id, ref, commit_sha)
return flask.jsonify({'success': True, 'build_id': build_id, 'message': 'Build enqueued.'})
if __name__ == '__main__':
run_simple('localhost', 5000, app, use_reloader=True, use_debugger=True)
| 28.70892
| 100
| 0.720196
|
from flask import request
from functools import wraps
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
import flask
import os
import mimetypes
import urllib2
import restfulgit
import repos_service
import jobs_service
from restfulgit import app_factory as restfulgit_app_factory
class RestfulGitConfig(object):
RESTFULGIT_REPO_BASE_PATH = repos_service.get_workspace_root()
main_app = flask.Flask(__name__)
main_app.debug = True
restfulgit_app = restfulgit_app_factory.create_app(RestfulGitConfig)
app = DispatcherMiddleware(
main_app,
{
'/api/git': restfulgit_app,
},
)
def get_buildbot_password_or_die():
try:
url = 'http://metadata.google.internal/computeMetadata/v1/instance/attributes/buildbot-password'
headers = {'Metadata-Flavor': 'Google'}
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
return response.read()
except (urllib2.URLError, urllib2.HTTPError):
return os.environ['BUILDBOT_PASSWORD']
def check_auth(username, password):
return username == 'admin' and password == get_buildbot_password_or_die()
def unauthorized():
return flask.Response('Unauthorized', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
def auth_required(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return unauthorized()
return f(*args, **kwargs)
return decorated
@main_app.route('/', defaults={'path': ''})
@main_app.route('/<path:path>')
@auth_required
def catch_all(path):
return '404', 404
@main_app.route('/')
@auth_required
def index():
jobs = jobs_service.list_jobs()
builds = jobs_service.list_builds(limit=20)
return flask.render_template('index.html', builds=builds, jobs=jobs)
@main_app.route('/builds')
@auth_required
def builds():
builds = jobs_service.list_builds()
return flask.render_template('builds.html', builds=builds)
@main_app.route('/jobs')
@auth_required
def jobs():
jobs = jobs_service.list_jobs()
return flask.render_template('jobs.html', jobs=jobs)
@main_app.route('/job/<int:job_id>/browse/<path:ref>')
@auth_required
def job_browse_ref(job_id, ref):
raise NotImplementedError
job = jobs_service.get_job(job_id)
return flask.render_template('browse_ref.html', job=job, ref=ref)
@main_app.route('/builds/<int:build_id>')
@auth_required
def build(build_id):
build = jobs_service.get_build(build_id)
return flask.render_template('build.html', build=build)
@main_app.route('/api/jobs/<int:job_id>/contents/update', methods=['POST'])
@auth_required
def update_contents(job_id):
data = request.get_json()
repo = repos_service.get_repo(job_id)
try:
resp = repos_service.update(
repo=repo,
branch=data['branch'],
path=data['path'],
content=data['content'],
sha=data['sha'],
message=data['message'],
committer=data['committer'],
author=data['author'])
return flask.jsonify({'success': True, 'resp': resp})
except repos_service.Error as e:
return flask.jsonify({'success': False, 'error': str(e)})
@main_app.route('/api/jobs', methods=['POST'])
@auth_required
def create_job():
data = request.get_json()
assert data.get('git_url')
assert data.get('remote')
assert data.get('env')
assert data['env'].get('WEBREVIEW_API_KEY')
job_id = jobs_service.create_job(
git_url=data['git_url'],
remote=data['remote'],
env=data['env'],
)
return flask.jsonify({'success': True, 'job_id': job_id})
@main_app.route('/api/jobs/<int:job_id>', methods=['GET'])
@auth_required
def get_job(job_id):
job = jobs_service.get_job(job_id)
return flask.jsonify({'success': True, 'job': job.serialize()})
@main_app.route('/api/jobs/<int:job_id>', methods=['DELETE'])
@auth_required
def delete_job(job_id):
job_id = jobs_service.delete_job(job_id)
return flask.jsonify({'success': True})
@main_app.route('/api/jobs/sync', methods=['GET', 'POST'])
@auth_required
def sync_jobs():
data = jobs_service.sync_all_jobs()
jobs_with_new_builds = [job_id for job_id in data if data[job_id]]
if jobs_with_new_builds:
message = 'Refs changed, enqueued builds from %s jobs.' % len(jobs_with_new_builds)
else:
message = 'No refs changed in any jobs, nothing to build.'
return flask.jsonify({'success': True, 'message': message})
@main_app.route('/api/jobs/sync_forks', methods=['GET', 'POST'])
@auth_required
def sync_forks():
job_ids_synced = jobs_service.sync_all_forks()
return flask.jsonify({'success': True, 'message': 'Synced %s forks.' % len(job_ids_synced)})
@main_app.route('/api/jobs/<int:job_id>/sync', methods=['GET', 'POST'])
@auth_required
def sync_job(job_id):
build_ids = jobs_service.sync_job(job_id)
if build_ids:
message = 'Refs changed, enqueued %s builds.' % len(build_ids)
else:
message = 'No refs changed, nothing to build.'
return flask.jsonify({'success': True, 'message': message})
@main_app.route('/api/jobs/<int:job_id>/sync_fork', methods=['GET', 'POST'])
@auth_required
def sync_fork(job_id):
build_ids = jobs_service.sync_fork(job_id)
return flask.jsonify({'success': True})
@main_app.route('/api/jobs/<int:job_id>/run', methods=['GET', 'POST'])
@auth_required
def run_job(job_id):
ref = request.args.get('ref')
commit_sha = request.args.get('commit_sha')
assert ref
assert commit_sha
build_id = jobs_service.enqueue_build(job_id, ref, commit_sha)
return flask.jsonify({'success': True, 'build_id': build_id, 'message': 'Build enqueued.'})
if __name__ == '__main__':
run_simple('localhost', 5000, app, use_reloader=True, use_debugger=True)
| true
| true
|
1c44fad39347b668a6b0cde118732cfb3c342041
| 428
|
py
|
Python
|
mozillians/funfacts/admin.py
|
LeoMcA/vouched-mozillians
|
e0bb3b1628eaae7474e73935f7a7604bfca14da1
|
[
"BSD-3-Clause"
] | null | null | null |
mozillians/funfacts/admin.py
|
LeoMcA/vouched-mozillians
|
e0bb3b1628eaae7474e73935f7a7604bfca14da1
|
[
"BSD-3-Clause"
] | null | null | null |
mozillians/funfacts/admin.py
|
LeoMcA/vouched-mozillians
|
e0bb3b1628eaae7474e73935f7a7604bfca14da1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from models import FunFact
class FunFactAdmin(admin.ModelAdmin):
readonly_fields = ['result', 'created', 'updated']
list_display = ['name', 'created', 'updated', 'result', 'is_published']
def is_published(self, obj):
return obj.published
is_published.boolean = True
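    # Note (illustrative, not in the original file): boolean = True tells the
    # Django admin to render this computed column as the usual boolean icon
    # instead of the raw True/False text.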
def result(self, obj):
return obj.execute()
admin.site.register(FunFact, FunFactAdmin)
| 23.777778
| 75
| 0.693925
|
from django.contrib import admin
from models import FunFact
class FunFactAdmin(admin.ModelAdmin):
readonly_fields = ['result', 'created', 'updated']
list_display = ['name', 'created', 'updated', 'result', 'is_published']
def is_published(self, obj):
return obj.published
is_published.boolean = True
def result(self, obj):
return obj.execute()
admin.site.register(FunFact, FunFactAdmin)
| true
| true
|
1c44faf0d31227ad4ca5dc4f15ca05e49951e313
| 2,311
|
py
|
Python
|
scripts/only_testing.py
|
hbery/ML_Image_Compression_Ratio_Analysis
|
16b21091bc4e3ced62f94f0e68ee302c1da5bf1e
|
[
"MIT"
] | null | null | null |
scripts/only_testing.py
|
hbery/ML_Image_Compression_Ratio_Analysis
|
16b21091bc4e3ced62f94f0e68ee302c1da5bf1e
|
[
"MIT"
] | null | null | null |
scripts/only_testing.py
|
hbery/ML_Image_Compression_Ratio_Analysis
|
16b21091bc4e3ced62f94f0e68ee302c1da5bf1e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Script for testing model
:Date: 06.2021
:Author: Adam Twardosz (a.twardosz98@gmail.com, https://github.com/hbery)
"""
import os, sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
from keras.models import load_model, Sequential
from keras.layers import Softmax
from utils import banner
def main():
""" ~~~~ PREPARE DATA ~~~~ """
if len(sys.argv) < 3:
print(f"Usage: {os.path.basename(sys.argv[0])} <folder with batches> <'model_name'> [ <destination folder> ]")
sys.exit(-1)
cwd = os.getcwd()
folder = os.path.basename(sys.argv[1])
base_path = os.path.abspath(folder)
dst_path = ""
if len(sys.argv) > 3:
dst_path = os.path.abspath(sys.argv[3])
else:
dst_path = base_path
model_name = os.path.basename(sys.argv[2])
model_path = os.path.join(cwd, "models", model_name)
statistics = os.path.join(cwd, 'statistics')
default_line_length = 65
if not os.path.isdir(statistics):
os.mkdir(statistics)
dir_files = os.listdir(base_path)
test_files = list(filter(lambda file: "test" in file, dir_files))
print(banner("MODEL"))
model = load_model(model_path)
print("⇊ Adding Softmax Layer to model\n")
prob_model = Sequential([model, Softmax()])
print(prob_model.summary(line_length=default_line_length))
print()
""" ~~~~ TEST MODEL'S ACCURACY ~~~~ """
print(banner("TESTING", length=default_line_length))
nasa_predictions = []
nasa_labels = []
nature_predictions = []
nature_labels = []
for test_file in test_files:
# Loading from *.npz
with np.load(os.path.join(base_path, test_file)) as test_batch:
# Storing real labels
nasa_labels.extend(test_batch["nsltest"])
nature_labels.extend(test_batch["ntltest"])
# Predicting labels and storing
nasa_predictions.extend(prob_model.predict(test_batch["nsdtest"]))
nature_predictions.extend(prob_model.predict(test_batch["ntdtest"]))
# Save data for plotting
stats_path = os.path.join(dst_path, f'{model_name}_stats.npz')
np.savez(stats_path,
nasa_predictions=nasa_predictions,
nasa_labels=nasa_labels,
nature_predictions=nature_predictions,
nature_labels=nature_labels
)
print(f"⮔ Statistics saved as: {stats_path}".center(default_line_length))
"""MAIN """
if __name__ == "__main__":
main()
| 25.966292
| 112
| 0.716573
|
import os, sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
from keras.models import load_model, Sequential
from keras.layers import Softmax
from utils import banner
def main():
if len(sys.argv) < 3:
print(f"Usage: {os.path.basename(sys.argv[0])} <folder with batches> <'model_name'> [ <destination folder> ]")
sys.exit(-1)
cwd = os.getcwd()
folder = os.path.basename(sys.argv[1])
base_path = os.path.abspath(folder)
dst_path = ""
if len(sys.argv) > 3:
dst_path = os.path.abspath(sys.argv[3])
else:
dst_path = base_path
model_name = os.path.basename(sys.argv[2])
model_path = os.path.join(cwd, "models", model_name)
statistics = os.path.join(cwd, 'statistics')
default_line_length = 65
if not os.path.isdir(statistics):
os.mkdir(statistics)
dir_files = os.listdir(base_path)
test_files = list(filter(lambda file: "test" in file, dir_files))
print(banner("MODEL"))
model = load_model(model_path)
print("⇊ Adding Softmax Layer to model\n")
prob_model = Sequential([model, Softmax()])
print(prob_model.summary(line_length=default_line_length))
print()
print(banner("TESTING", length=default_line_length))
nasa_predictions = []
nasa_labels = []
nature_predictions = []
nature_labels = []
for test_file in test_files:
with np.load(os.path.join(base_path, test_file)) as test_batch:
nasa_labels.extend(test_batch["nsltest"])
nature_labels.extend(test_batch["ntltest"])
nasa_predictions.extend(prob_model.predict(test_batch["nsdtest"]))
nature_predictions.extend(prob_model.predict(test_batch["ntdtest"]))
stats_path = os.path.join(dst_path, f'{model_name}_stats.npz')
np.savez(stats_path,
nasa_predictions=nasa_predictions,
nasa_labels=nasa_labels,
nature_predictions=nature_predictions,
nature_labels=nature_labels
)
print(f"⮔ Statistics saved as: {stats_path}".center(default_line_length))
if __name__ == "__main__":
main()
| true
| true
|
1c44fb63685476648bd3255a1f3890ca91c4616c
| 1,161
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_pie03.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-25T06:08:09.000Z
|
2019-11-01T02:33:56.000Z
|
xlsxwriter/test/comparison/test_chart_pie03.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | 13
|
2019-07-14T00:29:05.000Z
|
2019-11-26T06:16:46.000Z
|
xlsxwriter/test/comparison/test_chart_pie03.py
|
adgear/XlsxWriter
|
79bcaad28d57ac29038b1c74bccc6d611b7a385e
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_pie03.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'pie'})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$3',
'values': '=Sheet1!$B$1:$B$3',
})
chart.set_legend({'delete_series': [1]})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 23.22
| 79
| 0.552972
| true
| true
|
|
1c44fb8d47ce2162e5654444a77105860b099dee
| 140
|
py
|
Python
|
facts/html_helpers.py
|
ilivans/web-scrapers
|
677c8dc5cd1d1691e45d5b92a1988a23e2288d0b
|
[
"MIT"
] | null | null | null |
facts/html_helpers.py
|
ilivans/web-scrapers
|
677c8dc5cd1d1691e45d5b92a1988a23e2288d0b
|
[
"MIT"
] | null | null | null |
facts/html_helpers.py
|
ilivans/web-scrapers
|
677c8dc5cd1d1691e45d5b92a1988a23e2288d0b
|
[
"MIT"
] | null | null | null |
import re
_tag_re = re.compile("<.*?>")
def remove_tags(raw_html):
clean_text = re.sub(_tag_re, " ", raw_html)
return clean_text
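# Example (illustrative, not in the original module): each tag is replaced by a
# single space, so remove_tags("<p>Hello <b>world</b></p>") returns
# " Hello  world  ".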
| 15.555556
| 47
| 0.657143
|
import re
_tag_re = re.compile("<.*?>")
def remove_tags(raw_html):
clean_text = re.sub(_tag_re, " ", raw_html)
return clean_text
| true
| true
|
1c44fc3b23f0e10433a56e60bc63fd9bd8d6414d
| 860
|
py
|
Python
|
docs/en/conf.py
|
alirezah52/esp-faq
|
070e1f96180df986a89d3313eea12822dda18d30
|
[
"Apache-2.0"
] | 24
|
2020-06-23T09:05:59.000Z
|
2022-03-25T20:05:55.000Z
|
docs/en/conf.py
|
xuhongv/esp-faq
|
56e6cb20ed86a10b5ecb3d147f80177fcf016335
|
[
"Apache-2.0"
] | 6
|
2020-12-07T11:52:12.000Z
|
2022-03-04T13:08:08.000Z
|
docs/en/conf.py
|
xuhongv/esp-faq
|
56e6cb20ed86a10b5ecb3d147f80177fcf016335
|
[
"Apache-2.0"
] | 15
|
2020-09-21T11:34:13.000Z
|
2022-03-20T05:00:28.000Z
|
# -*- coding: utf-8 -*-
#
# English Language RTD & Sphinx config file
#
# Uses ../conf_common.py for most non-language-specific settings.
# Importing conf_common adds all the non-language-specific
# parts to this conf module
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from conf_common import *
# General information about the project.
project = u'ESP-FAQ'
copyright = u'2020, Espressif Systems (Shanghai) Co., Ltd.'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'ESP-FAQ',
u'2020, Espressif Systems (Shanghai) Co., Ltd.', 'manual'),
]
| 29.655172
| 74
| 0.723256
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from conf_common import *
project = u'ESP-FAQ'
copyright = u'2020, Espressif Systems (Shanghai) Co., Ltd.'
language = 'en'
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'ESP-FAQ',
u'2020, Espressif Systems (Shanghai) Co., Ltd.', 'manual'),
]
| true
| true
|
1c44fd2bcdb1fc734b3a5f7c936bba90459bf43a
| 61,989
|
py
|
Python
|
salt/states/pkg.py
|
MrMarvin/salt
|
34620811f935450baa5d84a5e776c8fac5ba88d4
|
[
"Apache-2.0"
] | null | null | null |
salt/states/pkg.py
|
MrMarvin/salt
|
34620811f935450baa5d84a5e776c8fac5ba88d4
|
[
"Apache-2.0"
] | null | null | null |
salt/states/pkg.py
|
MrMarvin/salt
|
34620811f935450baa5d84a5e776c8fac5ba88d4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Installation of packages using OS package managers such as yum or apt-get
=========================================================================
Salt can manage software packages via the pkg state module, packages can be
set up to be installed, latest, removed and purged. Package management
declarations are typically rather simple:
.. code-block:: yaml
vim:
pkg.installed
A more involved example involves pulling from a custom repository.
Note that the pkgrepo has a require_in clause.
This is necessary and can not be replaced by a require clause in the pkg.
.. code-block:: yaml
base:
pkgrepo.managed:
- humanname: Logstash PPA
- name: ppa:wolfnet/logstash
- dist: precise
- file: /etc/apt/sources.list.d/logstash.list
- keyid: 28B04E4A
- keyserver: keyserver.ubuntu.com
- require_in:
- pkg: logstash
logstash:
pkg.installed
'''
# Import python libs
from __future__ import absolute_import
import logging
import os
import re
# Import salt libs
import salt.utils
from salt.output import nested
from salt.utils import namespaced_function as _namespaced_function
from salt.utils.odict import OrderedDict as _OrderedDict
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
from salt.modules.pkg_resource import _repack_pkgs
# Import 3rd-party libs
import salt.ext.six as six
# pylint: disable=invalid-name
_repack_pkgs = _namespaced_function(_repack_pkgs, globals())
if salt.utils.is_windows():
from salt.modules.win_pkg import _get_package_info
from salt.modules.win_pkg import get_repo_data
from salt.modules.win_pkg import _get_latest_pkg_version
from salt.modules.win_pkg import _reverse_cmp_pkg_versions
_get_package_info = _namespaced_function(_get_package_info, globals())
get_repo_data = _namespaced_function(get_repo_data, globals())
_get_latest_pkg_version = \
_namespaced_function(_get_latest_pkg_version, globals())
_reverse_cmp_pkg_versions = \
_namespaced_function(_reverse_cmp_pkg_versions, globals())
# The following imports are used by the namespaced win_pkg funcs
# and need to be included in their globals.
# pylint: disable=import-error,unused-import
try:
import msgpack
except ImportError:
import msgpack_pure as msgpack
from distutils.version import LooseVersion # pylint: disable=no-name-in-module
# pylint: enable=import-error,unused-import
# pylint: enable=invalid-name
log = logging.getLogger(__name__)
def __virtual__():
'''
Only make these states available if a pkg provider has been detected or
assigned for this minion
'''
return 'pkg.install' in __salt__
def __gen_rtag():
'''
Return the location of the refresh tag
'''
return os.path.join(__opts__['cachedir'], 'pkg_refresh')
def _fulfills_version_spec(versions, oper, desired_version):
'''
Returns True if any of the installed versions match the specified version,
otherwise returns False
'''
for ver in versions:
if salt.utils.compare_versions(ver1=ver,
oper=oper,
ver2=desired_version,
cmp_func=__salt__.get('version_cmp')):
return True
return False
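# Illustrative example (not part of the original module): with an installed
# version list of ['2.2.15-30.el6.centos'], calling
# _fulfills_version_spec(versions, '>=', '2.2.15-29.el6.centos') returns True,
# because the version comparison treats release 30 as newer than release 29.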
def _find_unpurge_targets(desired):
'''
Find packages which are marked to be purged but can't yet be removed
because they are dependencies for other installed packages. These are the
packages which will need to be 'unpurged' because they are part of
pkg.installed states. This really just applies to Debian-based Linuxes.
'''
return [
x for x in desired
if x in __salt__['pkg.list_pkgs'](purge_desired=True)
]
def _find_remove_targets(name=None,
version=None,
pkgs=None,
**kwargs):
'''
Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Return a list of packages targeted for removal, or a
    state-style result dict when there is nothing to remove.
'''
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if pkgs:
to_remove = _repack_pkgs(pkgs)
if not to_remove:
# Badly-formatted SLS
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted pkgs parameter. See '
'minion log.'}
else:
_normalize_name = __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
to_remove = {_normalize_name(name): version}
cver = cur_pkgs.get(name, [])
version_spec = False
# Find out which packages will be targeted in the call to pkg.remove
# Check current versions against specified versions
targets = []
problems = []
for pkgname, pkgver in six.iteritems(to_remove):
cver = cur_pkgs.get(pkgname, [])
# Package not installed, no need to remove
if not cver:
continue
# No version specified and pkg is installed
elif __salt__['pkg_resource.version_clean'](pkgver) is None:
targets.append(pkgname)
continue
version_spec = True
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
if not match:
msg = 'Invalid version specification {0!r} for package ' \
'{1!r}.'.format(pkgver, pkgname)
problems.append(msg)
else:
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
# A comparison operator of "=" is redundant, but possible.
# Change it to "==" so that the version comparison works
if comparison in ['=', '']:
comparison = '=='
if not _fulfills_version_spec(cver, comparison, verstr):
                log.debug('Current version ({0}) did not match ({1}) specified ({2}), skipping remove {3}'.format(cver, comparison, verstr, pkgname))
else:
targets.append(pkgname)
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if not targets:
# All specified packages are already absent
msg = (
'All specified packages{0} are already absent.'
.format(' (matching specified versions)' if version_spec else '')
)
return {'name': name,
'changes': {},
'result': True,
'comment': msg}
return targets
def _find_install_targets(name=None,
version=None,
pkgs=None,
sources=None,
skip_suggestions=False,
pkg_verify=False,
normalize=True,
**kwargs):
'''
Inspect the arguments to pkg.installed and discover what packages need to
    be installed. Return the desired packages along with the install, unpurge,
    and reinstall targets, or a state-style result dict when there is nothing
    to do.
'''
if all((pkgs, sources)):
return {'name': name,
'changes': {},
'result': False,
'comment': 'Only one of "pkgs" and "sources" is permitted.'}
# dict for packages that fail pkg.verify and their altered files
altered_files = {}
# Get the ignore_types list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(x.get('ignore_types') is not None
for x in pkg_verify
if type(x) is _OrderedDict
and 'ignore_types' in x):
ignore_types = next(x.get('ignore_types')
for x in pkg_verify
if 'ignore_types' in x)
else:
ignore_types = []
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if any((pkgs, sources)):
if pkgs:
desired = _repack_pkgs(pkgs)
elif sources:
desired = __salt__['pkg_resource.pack_sources'](sources)
if not desired:
# Badly-formatted SLS
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted {0!r} parameter. See '
'minion log.'.format('pkgs' if pkgs
else 'sources')}
to_unpurge = _find_unpurge_targets(desired)
else:
if salt.utils.is_windows():
pkginfo = _get_package_info(name)
if not pkginfo:
return {'name': name,
'changes': {},
'result': False,
'comment': 'Package {0} not found in the '
'repository.'.format(name)}
if version is None:
version = _get_latest_pkg_version(pkginfo)
if normalize:
_normalize_name = __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
desired = {_normalize_name(name): version}
else:
desired = {name: version}
to_unpurge = _find_unpurge_targets(desired)
cver = cur_pkgs.get(name, [])
if name not in to_unpurge:
if version and version in cver and not pkg_verify:
# The package is installed and is the correct version
return {'name': name,
'changes': {},
'result': True,
'comment': 'Version {0} of package {1!r} is already '
'installed.'.format(version, name)}
# if cver is not an empty string, the package is already installed
elif cver and version is None and not pkg_verify:
# The package is installed
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already '
'installed.'.format(name)}
version_spec = False
# Find out which packages will be targeted in the call to pkg.install
if sources:
targets = []
to_reinstall = []
for x in desired:
if x not in cur_pkgs:
targets.append(x)
elif pkg_verify:
retval = __salt__['pkg.verify'](x, ignore_types=ignore_types)
if retval:
to_reinstall.append(x)
altered_files[x] = retval
else:
# Check for alternate package names if strict processing is not
# enforced.
# Takes extra time. Disable for improved performance
if not skip_suggestions:
# Perform platform-specific pre-flight checks
problems = _preflight_check(desired, **kwargs)
comments = []
if problems.get('no_suggest'):
comments.append(
'The following package(s) were not found, and no possible '
'matches were found in the package db: '
'{0}'.format(', '.join(sorted(problems['no_suggest'])))
)
if problems.get('suggest'):
for pkgname, suggestions in six.iteritems(problems['suggest']):
comments.append(
'Package {0!r} not found (possible matches: {1})'
.format(pkgname, ', '.join(suggestions))
)
if comments:
if len(comments) > 1:
comments.append('')
return {'name': name,
'changes': {},
'result': False,
'comment': '. '.join(comments).rstrip()}
# Check current versions against desired versions
targets = {}
to_reinstall = {}
problems = []
for pkgname, pkgver in six.iteritems(desired):
cver = cur_pkgs.get(pkgname, [])
# Package not yet installed, so add to targets
if not cver:
targets[pkgname] = pkgver
continue
elif not __salt__['pkg_resource.check_extra_requirements'](pkgname,
pkgver):
targets[pkgname] = pkgver
continue
# No version specified and pkg is installed
elif __salt__['pkg_resource.version_clean'](pkgver) is None:
if pkg_verify:
retval = __salt__['pkg.verify'](pkgname, ignore_types=ignore_types)
if retval:
to_reinstall[pkgname] = pkgver
altered_files[pkgname] = retval
continue
version_spec = True
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
if not match:
msg = 'Invalid version specification {0!r} for package ' \
'{1!r}.'.format(pkgver, pkgname)
problems.append(msg)
else:
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
# A comparison operator of "=" is redundant, but possible.
# Change it to "==" so that the version comparison works
if comparison in ['=', '']:
comparison = '=='
if 'allow_updates' in kwargs:
if kwargs['allow_updates']:
comparison = '>='
if not _fulfills_version_spec(cver, comparison, verstr):
                    log.debug('Current version ({0}) did not match ({1}) desired ({2}), add to targets'.format(cver, comparison, verstr))
targets[pkgname] = pkgver
elif pkg_verify and comparison == '==':
retval = __salt__['pkg.verify'](pkgname, ignore_types=ignore_types)
if retval:
to_reinstall[pkgname] = pkgver
altered_files[pkgname] = retval
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if not any((targets, to_unpurge, to_reinstall)):
# All specified packages are installed
msg = (
'All specified packages are already installed{0}.'
.format(' and are at the desired version' if version_spec else '')
)
return {'name': name,
'changes': {},
'result': True,
'comment': msg}
return desired, targets, to_unpurge, to_reinstall, altered_files
def _verify_install(desired, new_pkgs):
'''
Determine whether or not the installed packages match what was requested in
the SLS file.
'''
ok = []
failed = []
for pkgname, pkgver in six.iteritems(desired):
cver = new_pkgs.get(pkgname)
if not cver:
failed.append(pkgname)
continue
elif not __salt__['pkg_resource.version_clean'](pkgver):
ok.append(pkgname)
continue
elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
ok.append(pkgname)
continue
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
# A comparison operator of "=" is redundant, but possible.
# Change it to "==" so that the version comparison works.
if comparison in ('=', ''):
comparison = '=='
if _fulfills_version_spec(cver, comparison, verstr):
ok.append(pkgname)
else:
failed.append(pkgname)
return ok, failed
def _get_desired_pkg(name, desired):
'''
Helper function that retrieves and nicely formats the desired pkg (and
version if specified) so that helpful information can be printed in the
comment for the state.
'''
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = ''
else:
oper = '='
return '{0}{1}{2}'.format(name, oper,
'' if not desired[name] else desired[name])
def _preflight_check(desired, fromrepo, **kwargs):
'''
Perform platform-specific checks on desired packages
'''
if 'pkg.check_db' not in __salt__:
return {}
ret = {'suggest': {}, 'no_suggest': []}
pkginfo = __salt__['pkg.check_db'](
*list(desired.keys()), fromrepo=fromrepo, **kwargs
)
for pkgname in pkginfo:
if pkginfo[pkgname]['found'] is False:
if pkginfo[pkgname]['suggestions']:
ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
else:
ret['no_suggest'].append(pkgname)
return ret
def _nested_output(obj):
'''
Serialize obj and format for output
'''
nested.__opts__ = __opts__
ret = nested.output(obj).rstrip()
return ret
def installed(
name,
version=None,
refresh=None,
fromrepo=None,
skip_verify=False,
skip_suggestions=False,
pkgs=None,
sources=None,
allow_updates=False,
pkg_verify=False,
normalize=True,
**kwargs):
'''
Ensure that the package is installed, and that it is the correct version
(if specified).
name
The name of the package to be installed. This parameter is ignored if
either "pkgs" or "sources" is used. Additionally, please note that this
option can only be used to install packages from a software repository.
To install a package file manually, use the "sources" option detailed
below.
fromrepo
Specify a repository from which to install
.. note::
Distros which use APT (Debian, Ubuntu, etc.) do not have a concept
of repositories, in the same way as YUM-based distros do. When a
source is added, it is assigned to a given release. Consider the
following source configuration:
.. code-block:: text
deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main
The packages provided by this source would be made available via
the ``precise`` release, therefore ``fromrepo`` would need to be
set to ``precise`` for Salt to install the package from this
source.
Having multiple sources in the same release may result in the
default install candidate being newer than what is desired. If this
is the case, the desired version must be specified using the
``version`` parameter.
If the ``pkgs`` parameter is being used to install multiple
packages in the same state, then instead of using ``version``,
use the method of version specification described in the **Multiple
Package Installation Options** section below.
Running the shell command ``apt-cache policy pkgname`` on a minion
can help elucidate the APT configuration and aid in properly
configuring states:
.. code-block:: bash
root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg'
ubuntu01:
ffmpeg:
Installed: (none)
Candidate: 7:0.10.11-1~precise1
Version table:
7:0.10.11-1~precise1 0
500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages
4:0.8.10-0ubuntu0.12.04.1 0
500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages
500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages
4:0.8.1-0ubuntu1 0
500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages
The release is located directly after the source's URL. The actual
release name is the part before the slash, so to install version
**4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or
``precise-security`` could be used for the ``fromrepo`` value.
skip_verify
Skip the GPG verification check for the package to be installed
skip_suggestions
Force strict package naming. Disables lookup of package alternatives.
.. versionadded:: 2014.1.1
version
Install a specific version of a package. This option is ignored if
either "pkgs" or "sources" is used. Currently, this option is supported
for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`,
:mod:`yumpkg <salt.modules.yumpkg>`, and
:mod:`zypper <salt.modules.zypper>`. The version number includes the
release designation where applicable, to allow Salt to target a
specific release of a given version. When in doubt, using the
``pkg.latest_version`` function for an uninstalled package will tell
you the version available.
.. code-block:: bash
# salt myminion pkg.latest_version httpd
myminion:
2.2.15-30.el6.centos
Also, while this function is not yet implemented for all pkg frontends,
:mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will
show all versions available in the various repositories for a given
package, irrespective of whether or not it is installed.
.. code-block:: bash
# salt myminion pkg.list_repo_pkgs httpd
myminion:
----------
base:
|_
----------
httpd:
2.2.15-29.el6.centos
updates:
|_
----------
httpd:
2.2.15-30.el6.centos
The version strings returned by either of these functions can be used
as version specifiers in pkg states.
refresh
Update the repo database of available packages prior to installing the
requested package.
hold
Force the package to be held at the current installed version.
Currently works with YUM & APT based systems.
.. versionadded:: 2014.7.0
allow_updates
Allow the package to be updated outside Salt's control (e.g. auto updates on Windows).
This means a package on the Minion can have a newer version than the latest available
in the repository without enforcing a re-installation of the package.
.. versionadded:: 2014.7.0
Example:
.. code-block:: yaml
httpd:
pkg.installed:
- fromrepo: mycustomrepo
- skip_verify: True
- skip_suggestions: True
- version: 2.0.6~ubuntu3
- refresh: True
- hold: False
pkg_verify
.. versionadded:: 2014.7.0
For requested packages that are already installed and would not be targeted for
upgrade or downgrade, use pkg.verify to determine if any of the files installed
by the package have been altered. If files have been altered, the reinstall
option of pkg.install is used to force a reinstall. Types to ignore can be
passed to pkg.verify (see example below). Currently, this option is supported
for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`.
Examples:
.. code-block:: yaml
httpd:
pkg.installed:
- version: 2.2.15-30.el6.centos
- pkg_verify: True
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
- pkg_verify:
- ignore_types: [config,doc]
normalize
Normalize the package name by removing the architecture. Default is True.
This is useful for poorly created packages which might include the
architecture as an actual part of the name such as kernel modules
which match a specific kernel version.
.. versionadded:: 2014.7.0
Example:
.. code-block:: yaml
gpfs.gplbin-2.6.32-279.31.1.el6.x86_64:
pkg.installed:
- normalize: False
**Multiple Package Installation Options: (not supported in Windows or
pkgng)**
pkgs
A list of packages to install from a software repository. All packages
listed under ``pkgs`` will be installed via a single command.
Example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar
- baz
- hold: True
``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`,
:mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>`, :mod:`yumpkg <salt.modules.yumpkg>`,
and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified
in the ``pkgs`` argument. For example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: 1.2.3-4
- baz
Additionally, :mod:`ebuild <salt.modules.ebuild>`,
:mod:`pacman <salt.modules.pacman>` and
:mod:`zypper <salt.modules.zypper>` support the ``<``, ``<=``, ``>=``, and
``>`` operators for more control over what versions will be installed. For
example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo
- bar: '>=1.2.3-4'
- baz
``NOTE:`` When using comparison operators, the expression must be enclosed
in quotes to avoid a YAML render error.
With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use
flag list and/or if the given packages should be in package.accept_keywords
file and/or the overlay from which you want the package to be installed.
For example:
.. code-block:: yaml
mypkgs:
pkg.installed:
- pkgs:
- foo: '~'
- bar: '~>=1.2:slot::overlay[use,-otheruse]'
- baz
names
A list of packages to install from a software repository. Each package
will be installed individually by the package manager.
.. warning::
Unlike ``pkgs``, the ``names`` parameter cannot specify a version.
In addition, it makes a separate call to the package management
frontend to install each package, whereas ``pkgs`` makes just a
single call. It is therefore recommended to use ``pkgs`` instead of
``names`` to install multiple packages, both for the additional
features and the performance improvement that it brings.
sources
A list of packages to install, along with the source URI or local path
from which to install each package. In the example below, ``foo``,
``bar``, ``baz``, etc. refer to the name of the package, as it would
appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt
CLI commands.
.. code-block:: yaml
mypkgs:
pkg.installed:
- sources:
- foo: salt://rpms/foo.rpm
- bar: http://somesite.org/bar.rpm
- baz: ftp://someothersite.org/baz.rpm
- qux: /minion/path/to/qux.rpm
install_recommends
Whether to install the packages marked as recommended. Default is True.
Currently only works with APT based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.installed:
- install_recommends: False
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
Currently only works with APT based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.installed:
- only_upgrade: True
'''
if isinstance(pkgs, list) and len(pkgs) == 0:
return {'name': name,
'changes': {},
'result': True,
'comment': 'No packages to install provided'}
kwargs['saltenv'] = __env__
rtag = __gen_rtag()
refresh = bool(
salt.utils.is_true(refresh)
or (os.path.isfile(rtag) and refresh is not False)
)
if not isinstance(pkg_verify, list):
pkg_verify = pkg_verify is True
if (pkg_verify or isinstance(pkg_verify, list)) and 'pkg.verify' not in __salt__:
return {'name': name,
'changes': {},
'result': False,
'comment': 'pkg.verify not implemented'}
if not isinstance(version, six.string_types) and version is not None:
version = str(version)
if version is not None and version == 'latest':
version = __salt__['pkg.latest_version'](name)
kwargs['allow_updates'] = allow_updates
result = _find_install_targets(name, version, pkgs, sources,
fromrepo=fromrepo,
skip_suggestions=skip_suggestions,
pkg_verify=pkg_verify,
normalize=normalize,
**kwargs)
try:
desired, targets, to_unpurge, to_reinstall, altered_files = result
except ValueError:
# _find_install_targets() found no targets or encountered an error
# check that the hold function is available
if 'pkg.hold' in __salt__:
if 'hold' in kwargs:
try:
if kwargs['hold']:
hold_ret = __salt__['pkg.hold'](
name=name, pkgs=pkgs, sources=sources
)
else:
hold_ret = __salt__['pkg.unhold'](
name=name, pkgs=pkgs, sources=sources
)
except (CommandExecutionError, SaltInvocationError) as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
if 'result' in hold_ret and not hold_ret['result']:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while '
'holding/unholding package(s): {0}'
.format(hold_ret['comment'])}
else:
modified_hold = [hold_ret[x] for x in hold_ret
if hold_ret[x]['changes']]
not_modified_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['changes']
and hold_ret[x]['result']]
failed_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['result']]
if modified_hold:
for i in modified_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
result['changes'][i['name']] = i['changes']
if not_modified_hold:
for i in not_modified_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
if failed_hold:
for i in failed_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
return result
if to_unpurge and 'lowpkg.unpurge' not in __salt__:
return {'name': name,
'changes': {},
'result': False,
'comment': 'lowpkg.unpurge not implemented'}
# Remove any targets not returned by _find_install_targets
if pkgs:
pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
elif sources:
oldsources = sources
sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets]
sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall])
comment = []
if __opts__['test']:
if targets:
if sources:
summary = ', '.join(targets)
else:
summary = ', '.join([_get_desired_pkg(x, targets)
for x in targets])
comment.append('The following packages are set to be '
'installed/updated: {0}.'.format(summary))
if to_unpurge:
comment.append(
'The following packages will have their selection status '
'changed from \'purge\' to \'install\': {0}.'
.format(', '.join(to_unpurge))
)
if to_reinstall:
# Add a comment for each package in to_reinstall with its pkg.verify output
for x in to_reinstall:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, to_reinstall)
comment.append('\nPackage {0} is set to be reinstalled because the '
'following files have been altered:'.format(pkgstr))
comment.append('\n' + _nested_output(altered_files[x]))
return {'name': name,
'changes': {},
'result': None,
'comment': ' '.join(comment)}
changes = {'installed': {}}
modified_hold = None
not_modified_hold = None
failed_hold = None
if targets or to_reinstall:
reinstall = bool(to_reinstall)
try:
pkg_ret = __salt__['pkg.install'](name,
refresh=refresh,
version=version,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=pkgs,
sources=sources,
reinstall=reinstall,
normalize=normalize,
**kwargs)
if os.path.isfile(rtag) and refresh:
os.remove(rtag)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while installing '
'package(s): {0}'.format(exc)}
if isinstance(pkg_ret, dict):
changes['installed'].update(pkg_ret)
elif isinstance(pkg_ret, six.string_types):
comment.append(pkg_ret)
if 'pkg.hold' in __salt__:
if 'hold' in kwargs:
try:
if kwargs['hold']:
hold_ret = __salt__['pkg.hold'](
name=name, pkgs=pkgs, sources=sources
)
else:
hold_ret = __salt__['pkg.unhold'](
name=name, pkgs=pkgs, sources=sources
)
except (CommandExecutionError, SaltInvocationError) as exc:
comment.append(str(exc))
return {'name': name,
'changes': changes,
'result': False,
'comment': ' '.join(comment)}
else:
if 'result' in hold_ret and not hold_ret['result']:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while '
'holding/unholding package(s): {0}'
.format(hold_ret['comment'])}
else:
modified_hold = [hold_ret[x] for x in hold_ret
if hold_ret[x]['changes']]
not_modified_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['changes']
and hold_ret[x]['result']]
failed_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['result']]
if to_unpurge:
changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)
# Analyze pkg.install results for packages in targets
if sources:
modified = [x for x in changes['installed'] if x in targets]
not_modified = [x for x in desired if x not in targets and x not in to_reinstall]
failed = [x for x in targets if x not in modified]
else:
ok, failed = \
_verify_install(
desired, __salt__['pkg.list_pkgs'](
versions_as_list=True, **kwargs
)
)
modified = [x for x in ok if x in targets]
not_modified = [x for x in ok if x not in targets and x not in to_reinstall]
failed = [x for x in failed if x in targets]
# If there was nothing unpurged, just set the changes dict to the contents
# of changes['installed'].
if not changes.get('purge_desired'):
changes = changes['installed']
if modified:
if sources:
summary = ', '.join(modified)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in modified])
if len(summary) < 20:
comment.append('The following packages were installed/updated: '
'{0}.'.format(summary))
else:
comment.append(
'{0} targeted package{1} {2} installed/updated.'.format(
len(modified),
's' if len(modified) > 1 else '',
'were' if len(modified) > 1 else 'was'
)
)
if modified_hold:
for i in modified_hold:
comment.append(i['comment'])
change_name = i['name']
if len(changes[change_name]['new']) > 0:
changes[change_name]['new'] += '\n'
changes[change_name]['new'] += '{0}'.format(i['changes']['new'])
if len(changes[change_name]['old']) > 0:
changes[change_name]['old'] += '\n'
changes[change_name]['old'] += '{0}'.format(i['changes']['old'])
# Any requested packages that were not targeted for install or reinstall
if not_modified:
if sources:
summary = ', '.join(not_modified)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in not_modified])
if len(not_modified) <= 20:
comment.append('The following packages were already installed: '
'{0}.'.format(summary))
else:
comment.append(
'{0} targeted package{1} {2} already installed.'.format(
len(not_modified),
's' if len(not_modified) > 1 else '',
'were' if len(not_modified) > 1 else 'was'
)
)
if not_modified_hold:
for i in not_modified_hold:
comment.append(i['comment'])
result = True
if failed:
if sources:
summary = ', '.join(failed)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in failed])
comment.insert(0, 'The following packages failed to '
'install/update: {0}.'.format(summary))
result = False
if failed_hold:
for i in failed_hold:
comment.append(i['comment'])
result = False
# Get the ignore_types list if any from the pkg_verify argument
if isinstance(pkg_verify, list) and any(x.get('ignore_types') is not None
for x in pkg_verify
if isinstance(x, _OrderedDict)
and 'ignore_types' in x):
ignore_types = next(x.get('ignore_types')
for x in pkg_verify
if 'ignore_types' in x)
else:
ignore_types = []
# Rerun pkg.verify for packages in to_reinstall to determine failed
modified = []
failed = []
for x in to_reinstall:
retval = __salt__['pkg.verify'](x, ignore_types=ignore_types)
if retval:
failed.append(x)
altered_files[x] = retval
else:
modified.append(x)
if modified:
# Add a comment for each package in modified with its pkg.verify output
for x in modified:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, desired)
comment.append('\nPackage {0} was reinstalled. The following files '
'were remediated:'.format(pkgstr))
comment.append(_nested_output(altered_files[x]))
if failed:
# Add a comment for each package in failed with its pkg.verify output
for x in failed:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, desired)
comment.append(
'\nReinstall was not successful for package {0}. The following '
'files could not be remediated:'.format(pkgstr)
)
comment.append(_nested_output(altered_files[x]))
result = False
return {'name': name,
'changes': changes,
'result': result,
'comment': ' '.join(comment)}
def latest(
name,
refresh=None,
fromrepo=None,
skip_verify=False,
pkgs=None,
**kwargs):
'''
    Ensure that the named package is installed and is at the latest available
    version. If the package can be updated, this state function will update
the package. Generally it is better for the
:mod:`installed <salt.states.pkg.installed>` function to be
used, as :mod:`latest <salt.states.pkg.latest>` will update the package
whenever a new package is available.
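    A minimal usage sketch (illustrative only; the package and repository
    names are hypothetical, and the parameters are described below):
    .. code-block:: yaml
        httpd:
          pkg.latest:
            - fromrepo: my-custom-repo
            - skip_verify: True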
name
The name of the package to maintain at the latest available version.
This parameter is ignored if "pkgs" is used.
fromrepo
Specify a repository from which to install
skip_verify
Skip the GPG verification check for the package to be installed
Multiple Package Installation Options:
(Not yet supported for: Windows, FreeBSD, OpenBSD, MacOS, and Solaris
pkgutil)
pkgs
A list of packages to maintain at the latest available version.
.. code-block:: yaml
mypkgs:
pkg.latest:
- pkgs:
- foo
- bar
- baz
install_recommends
        Whether to install the packages marked as recommended. Default is True.
        Currently this only works with APT-based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.latest:
- install_recommends: False
only_upgrade
        Only upgrade the packages if they are already installed. Default is False.
        Currently this only works with APT-based systems.
.. versionadded:: Lithium
.. code-block:: yaml
httpd:
pkg.latest:
- only_upgrade: True
'''
rtag = __gen_rtag()
refresh = bool(
salt.utils.is_true(refresh)
or (os.path.isfile(rtag) and refresh is not False)
)
if kwargs.get('sources'):
return {'name': name,
'changes': {},
'result': False,
'comment': 'The "sources" parameter is not supported.'}
elif pkgs:
desired_pkgs = list(_repack_pkgs(pkgs).keys())
if not desired_pkgs:
# Badly-formatted SLS
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted "pkgs" parameter. See '
'minion log.'}
else:
desired_pkgs = [name]
cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
try:
avail = __salt__['pkg.latest_version'](*desired_pkgs,
fromrepo=fromrepo,
refresh=refresh,
**kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while checking the '
'newest available version of package(s): {0}'
.format(exc)}
# Remove the rtag if it exists, ensuring only one refresh per salt run
# (unless overridden with refresh=True)
if os.path.isfile(rtag) and refresh:
os.remove(rtag)
# Repack the cur/avail data if only a single package is being checked
if isinstance(cur, six.string_types):
cur = {desired_pkgs[0]: cur}
if isinstance(avail, six.string_types):
avail = {desired_pkgs[0]: avail}
targets = {}
problems = []
for pkg in desired_pkgs:
if not avail[pkg]:
if not cur[pkg]:
msg = 'No information found for {0!r}.'.format(pkg)
log.error(msg)
problems.append(msg)
elif not cur[pkg] \
or salt.utils.compare_versions(
ver1=cur[pkg],
oper='<',
ver2=avail[pkg],
cmp_func=__salt__.get('version_cmp')):
targets[pkg] = avail[pkg]
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if targets:
# Find up-to-date packages
if not pkgs:
# There couldn't have been any up-to-date packages if this state
# only targeted a single package and is being allowed to proceed to
# the install step.
up_to_date = []
else:
up_to_date = [x for x in pkgs if x not in targets]
if __opts__['test']:
to_be_upgraded = ', '.join(sorted(targets))
comment = 'The following packages are set to be ' \
'installed/upgraded: ' \
'{0}.'.format(to_be_upgraded)
if up_to_date:
up_to_date_nb = len(up_to_date)
if up_to_date_nb <= 10:
up_to_date_sorted = sorted(up_to_date)
up_to_date_details = ', '.join(
'{0} ({1})'.format(name, cur[name])
for name in up_to_date_sorted
)
comment += (
' The following packages are already '
'up-to-date: {0}.'
).format(up_to_date_details)
else:
comment += ' {0} packages are already up-to-date.'.format(
up_to_date_nb
)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
# Build updated list of pkgs to exclude non-targeted ones
targeted_pkgs = list(targets.keys()) if pkgs else None
try:
# No need to refresh, if a refresh was necessary it would have been
# performed above when pkg.latest_version was run.
changes = __salt__['pkg.install'](name,
refresh=False,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=targeted_pkgs,
**kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while installing '
'package(s): {0}'.format(exc)}
if changes:
# Find failed and successful updates
failed = [x for x in targets
if not changes.get(x) or changes[x]['new'] != targets[x]]
successful = [x for x in targets if x not in failed]
comments = []
if failed:
msg = 'The following packages failed to update: ' \
'{0}.'.format(', '.join(sorted(failed)))
comments.append(msg)
if successful:
msg = 'The following packages were successfully ' \
'installed/upgraded: ' \
'{0}.'.format(', '.join(sorted(successful)))
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = 'The following packages were already up-to-date: ' \
'{0}.'.format(', '.join(sorted(up_to_date)))
else:
msg = '{0} packages were already up-to-date. '.format(
len(up_to_date))
comments.append(msg)
return {'name': name,
'changes': changes,
'result': False if failed else True,
'comment': ' '.join(comments)}
else:
if len(targets) > 10:
comment = ('{0} targeted packages failed to update. '
'See debug log for details.'.format(len(targets)))
elif len(targets) > 1:
comment = ('The following targeted packages failed to update. '
'See debug log for details: ({0}).'
.format(', '.join(sorted(targets))))
else:
comment = 'Package {0} failed to ' \
'update.'.format(next(iter(list(targets.keys()))))
if up_to_date:
if len(up_to_date) <= 10:
comment += ' The following packages were already ' \
'up-to-date: ' \
'{0}'.format(', '.join(sorted(up_to_date)))
else:
comment += '{0} packages were already ' \
'up-to-date.'.format(len(up_to_date))
return {'name': name,
'changes': changes,
'result': False,
'comment': comment}
else:
if len(desired_pkgs) > 10:
comment = 'All {0} packages are up-to-date.'.format(
len(desired_pkgs))
elif len(desired_pkgs) > 1:
comment = 'All packages are up-to-date ' \
'({0}).'.format(', '.join(sorted(desired_pkgs)))
else:
comment = 'Package {0} is already ' \
'up-to-date.'.format(desired_pkgs[0])
return {'name': name,
'changes': {},
'result': True,
'comment': comment}
def _uninstall(action='remove', name=None, version=None, pkgs=None, **kwargs):
'''
Common function for package removal
'''
if action not in ('remove', 'purge'):
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalid action {0!r}. '
'This is probably a bug.'.format(action)}
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while parsing targets: '
'{0}'.format(exc)}
targets = _find_remove_targets(name, version, pkgs, **kwargs)
if isinstance(targets, dict) and 'result' in targets:
return targets
elif not isinstance(targets, list):
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while checking targets: '
'{0}'.format(targets)}
if action == 'purge':
old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
removed=True,
**kwargs)
targets.extend([x for x in pkg_params if x in old_removed])
targets.sort()
if not targets:
return {'name': name,
'changes': {},
'result': True,
'comment': 'None of the targeted packages are installed'
'{0}'.format(' or partially installed'
if action == 'purge' else '')}
if __opts__['test']:
return {'name': name,
'changes': {},
'result': None,
'comment': 'The following packages will be {0}d: '
'{1}.'.format(action, ', '.join(targets))}
changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, **kwargs)
new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
failed = [x for x in pkg_params if x in new]
if action == 'purge':
new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
removed=True,
**kwargs)
failed.extend([x for x in pkg_params if x in new_removed])
failed.sort()
if failed:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'The following packages failed to {0}: '
'{1}.'.format(action, ', '.join(failed))}
comments = []
not_installed = sorted([x for x in pkg_params if x not in targets])
if not_installed:
comments.append('The following packages were not installed: '
'{0}.'.format(', '.join(not_installed)))
comments.append('The following packages were {0}d: '
'{1}.'.format(action, ', '.join(targets)))
else:
comments.append('All targeted packages were {0}d.'.format(action))
return {'name': name,
'changes': changes,
'result': True,
'comment': ' '.join(comments)}
def removed(name, version=None, pkgs=None, **kwargs):
'''
Verify that a package is not installed, calling ``pkg.remove`` if necessary
to remove the package.
name
The name of the package to be removed.
version
The version of the package that should be removed. Don't do anything if
        the package is installed with a non-matching version.
Multiple Package Options:
pkgs
A list of packages to remove. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed. It accepts
version numbers as well.
.. versionadded:: 0.16.0
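        For example, an illustrative SLS entry (the package names are hypothetical):
        .. code-block:: yaml
            old-tools:
              pkg.removed:
                - pkgs:
                  - foo
                  - bar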
'''
try:
return _uninstall(action='remove', name=name, version=version,
pkgs=pkgs, **kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
def purged(name, version=None, pkgs=None, **kwargs):
'''
Verify that a package is not installed, calling ``pkg.purge`` if necessary
to purge the package. All configuration files are also removed.
name
The name of the package to be purged.
version
The version of the package that should be removed. Don't do anything if
        the package is installed with a non-matching version.
Multiple Package Options:
pkgs
A list of packages to purge. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed. It accepts
version numbers as well.
.. versionadded:: 0.16.0
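        For example, an illustrative SLS entry (the package name and version are hypothetical):
        .. code-block:: yaml
            badpkg:
              pkg.purged:
                - version: 1.2.3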
'''
try:
return _uninstall(action='purge', name=name, version=version,
pkgs=pkgs, **kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
def uptodate(name, refresh=False):
'''
.. versionadded:: 2014.7.0
Verify that the system is completely up to date.
name
The name has no functional value and is only used as a tracking
reference
refresh
refresh the package database before checking for new upgrades
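    For example, an illustrative SLS entry:
    .. code-block:: yaml
        update-system:
          pkg.uptodate:
            - refresh: True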
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': 'Failed to update.'}
if 'pkg.list_upgrades' not in __salt__:
ret['comment'] = 'State pkg.uptodate is not available'
return ret
if isinstance(refresh, bool):
try:
packages = __salt__['pkg.list_upgrades'](refresh=refresh)
except Exception as exc:
ret['comment'] = str(exc)
return ret
else:
ret['comment'] = 'refresh must be a boolean.'
return ret
if not packages:
ret['comment'] = 'System is already up-to-date.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'System update will be performed'
ret['result'] = None
return ret
updated = __salt__['pkg.upgrade'](refresh=refresh)
if updated.get('result') is False:
ret.update(updated)
elif updated:
ret['changes'] = updated
ret['comment'] = 'Upgrade successful.'
ret['result'] = True
else:
ret['comment'] = 'Upgrade failed.'
return ret
def mod_init(low):
'''
Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
    a state run, significantly improving the speed of package management
    during that run.
It sets a flag for a number of reasons, primarily due to timeline logic.
    When originally setting up the mod_init for pkg, a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init`
'''
ret = True
if 'pkg.ex_mod_init' in __salt__:
ret = __salt__['pkg.ex_mod_init'](low)
if low['fun'] == 'installed' or low['fun'] == 'latest':
rtag = __gen_rtag()
if not os.path.exists(rtag):
salt.utils.fopen(rtag, 'w+').write('')
return ret
return False
def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
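    For example (illustrative), two separate states such as:
    .. code-block:: yaml
        vim:
          pkg.installed
        emacs:
          pkg.installed
    would be aggregated so that the surviving low data carries
    ``pkgs: [vim, emacs]`` and the package manager is invoked only once.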
'''
pkgs = []
agg_enabled = [
'installed',
'latest',
'removed',
'purged',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = salt.utils.gen_state_tag(chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pkg':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkgs:
if 'pkgs' in low:
low['pkgs'].extend(pkgs)
else:
low['pkgs'] = pkgs
return low
| 36.81057
| 148
| 0.527384
|
from __future__ import absolute_import
import logging
import os
import re
import salt.utils
from salt.output import nested
from salt.utils import namespaced_function as _namespaced_function
from salt.utils.odict import OrderedDict as _OrderedDict
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
from salt.modules.pkg_resource import _repack_pkgs
import salt.ext.six as six
_repack_pkgs = _namespaced_function(_repack_pkgs, globals())
if salt.utils.is_windows():
from salt.modules.win_pkg import _get_package_info
from salt.modules.win_pkg import get_repo_data
from salt.modules.win_pkg import _get_latest_pkg_version
from salt.modules.win_pkg import _reverse_cmp_pkg_versions
_get_package_info = _namespaced_function(_get_package_info, globals())
get_repo_data = _namespaced_function(get_repo_data, globals())
_get_latest_pkg_version = \
_namespaced_function(_get_latest_pkg_version, globals())
_reverse_cmp_pkg_versions = \
_namespaced_function(_reverse_cmp_pkg_versions, globals())
try:
import msgpack
except ImportError:
import msgpack_pure as msgpack
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
def __virtual__():
return 'pkg.install' in __salt__
def __gen_rtag():
return os.path.join(__opts__['cachedir'], 'pkg_refresh')
def _fulfills_version_spec(versions, oper, desired_version):
for ver in versions:
if salt.utils.compare_versions(ver1=ver,
oper=oper,
ver2=desired_version,
cmp_func=__salt__.get('version_cmp')):
return True
return False
def _find_unpurge_targets(desired):
return [
x for x in desired
if x in __salt__['pkg.list_pkgs'](purge_desired=True)
]
def _find_remove_targets(name=None,
version=None,
pkgs=None,
**kwargs):
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if pkgs:
to_remove = _repack_pkgs(pkgs)
if not to_remove:
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted pkgs parameter. See '
'minion log.'}
else:
_normalize_name = __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
to_remove = {_normalize_name(name): version}
cver = cur_pkgs.get(name, [])
version_spec = False
targets = []
problems = []
for pkgname, pkgver in six.iteritems(to_remove):
cver = cur_pkgs.get(pkgname, [])
if not cver:
continue
elif __salt__['pkg_resource.version_clean'](pkgver) is None:
targets.append(pkgname)
continue
version_spec = True
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
if not match:
msg = 'Invalid version specification {0!r} for package ' \
'{1!r}.'.format(pkgver, pkgname)
problems.append(msg)
else:
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
if comparison in ['=', '']:
comparison = '=='
if not _fulfills_version_spec(cver, comparison, verstr):
                log.debug('Current version ({0}) did not match ({1}) specified ({2}), skipping remove {3}'.format(cver, comparison, verstr, pkgname))
else:
targets.append(pkgname)
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if not targets:
msg = (
'All specified packages{0} are already absent.'
.format(' (matching specified versions)' if version_spec else '')
)
return {'name': name,
'changes': {},
'result': True,
'comment': msg}
return targets
def _find_install_targets(name=None,
version=None,
pkgs=None,
sources=None,
skip_suggestions=False,
pkg_verify=False,
normalize=True,
**kwargs):
if all((pkgs, sources)):
return {'name': name,
'changes': {},
'result': False,
'comment': 'Only one of "pkgs" and "sources" is permitted.'}
altered_files = {}
if isinstance(pkg_verify, list) and any(x.get('ignore_types') is not None
for x in pkg_verify
if type(x) is _OrderedDict
and 'ignore_types' in x):
ignore_types = next(x.get('ignore_types')
for x in pkg_verify
if 'ignore_types' in x)
else:
ignore_types = []
cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
if any((pkgs, sources)):
if pkgs:
desired = _repack_pkgs(pkgs)
elif sources:
desired = __salt__['pkg_resource.pack_sources'](sources)
if not desired:
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted {0!r} parameter. See '
'minion log.'.format('pkgs' if pkgs
else 'sources')}
to_unpurge = _find_unpurge_targets(desired)
else:
if salt.utils.is_windows():
pkginfo = _get_package_info(name)
if not pkginfo:
return {'name': name,
'changes': {},
'result': False,
'comment': 'Package {0} not found in the '
'repository.'.format(name)}
if version is None:
version = _get_latest_pkg_version(pkginfo)
if normalize:
_normalize_name = __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
desired = {_normalize_name(name): version}
else:
desired = {name: version}
to_unpurge = _find_unpurge_targets(desired)
cver = cur_pkgs.get(name, [])
if name not in to_unpurge:
if version and version in cver and not pkg_verify:
return {'name': name,
'changes': {},
'result': True,
'comment': 'Version {0} of package {1!r} is already '
'installed.'.format(version, name)}
elif cver and version is None and not pkg_verify:
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already '
'installed.'.format(name)}
version_spec = False
if sources:
targets = []
to_reinstall = []
for x in desired:
if x not in cur_pkgs:
targets.append(x)
elif pkg_verify:
retval = __salt__['pkg.verify'](x, ignore_types=ignore_types)
if retval:
to_reinstall.append(x)
altered_files[x] = retval
else:
if not skip_suggestions:
problems = _preflight_check(desired, **kwargs)
comments = []
if problems.get('no_suggest'):
comments.append(
'The following package(s) were not found, and no possible '
'matches were found in the package db: '
'{0}'.format(', '.join(sorted(problems['no_suggest'])))
)
if problems.get('suggest'):
for pkgname, suggestions in six.iteritems(problems['suggest']):
comments.append(
'Package {0!r} not found (possible matches: {1})'
.format(pkgname, ', '.join(suggestions))
)
if comments:
if len(comments) > 1:
comments.append('')
return {'name': name,
'changes': {},
'result': False,
'comment': '. '.join(comments).rstrip()}
targets = {}
to_reinstall = {}
problems = []
for pkgname, pkgver in six.iteritems(desired):
cver = cur_pkgs.get(pkgname, [])
if not cver:
targets[pkgname] = pkgver
continue
elif not __salt__['pkg_resource.check_extra_requirements'](pkgname,
pkgver):
targets[pkgname] = pkgver
continue
elif __salt__['pkg_resource.version_clean'](pkgver) is None:
if pkg_verify:
retval = __salt__['pkg.verify'](pkgname, ignore_types=ignore_types)
if retval:
to_reinstall[pkgname] = pkgver
altered_files[pkgname] = retval
continue
version_spec = True
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
if not match:
msg = 'Invalid version specification {0!r} for package ' \
'{1!r}.'.format(pkgver, pkgname)
problems.append(msg)
else:
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
if comparison in ['=', '']:
comparison = '=='
if 'allow_updates' in kwargs:
if kwargs['allow_updates']:
comparison = '>='
if not _fulfills_version_spec(cver, comparison, verstr):
                    log.debug('Current version ({0}) did not match ({1}) desired ({2}), add to targets'.format(cver, comparison, verstr))
targets[pkgname] = pkgver
elif pkg_verify and comparison == '==':
retval = __salt__['pkg.verify'](pkgname, ignore_types=ignore_types)
if retval:
to_reinstall[pkgname] = pkgver
altered_files[pkgname] = retval
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if not any((targets, to_unpurge, to_reinstall)):
msg = (
'All specified packages are already installed{0}.'
.format(' and are at the desired version' if version_spec else '')
)
return {'name': name,
'changes': {},
'result': True,
'comment': msg}
return desired, targets, to_unpurge, to_reinstall, altered_files
def _verify_install(desired, new_pkgs):
ok = []
failed = []
for pkgname, pkgver in six.iteritems(desired):
cver = new_pkgs.get(pkgname)
if not cver:
failed.append(pkgname)
continue
elif not __salt__['pkg_resource.version_clean'](pkgver):
ok.append(pkgname)
continue
elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
ok.append(pkgname)
continue
match = re.match('^([<>])?(=)?([^<>=]+)$', pkgver)
gt_lt, eq, verstr = match.groups()
comparison = gt_lt or ''
comparison += eq or ''
if comparison in ('=', ''):
comparison = '=='
if _fulfills_version_spec(cver, comparison, verstr):
ok.append(pkgname)
else:
failed.append(pkgname)
return ok, failed
def _get_desired_pkg(name, desired):
if not desired[name] or desired[name].startswith(('<', '>', '=')):
oper = ''
else:
oper = '='
return '{0}{1}{2}'.format(name, oper,
'' if not desired[name] else desired[name])
def _preflight_check(desired, fromrepo, **kwargs):
if 'pkg.check_db' not in __salt__:
return {}
ret = {'suggest': {}, 'no_suggest': []}
pkginfo = __salt__['pkg.check_db'](
*list(desired.keys()), fromrepo=fromrepo, **kwargs
)
for pkgname in pkginfo:
if pkginfo[pkgname]['found'] is False:
if pkginfo[pkgname]['suggestions']:
ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
else:
ret['no_suggest'].append(pkgname)
return ret
def _nested_output(obj):
nested.__opts__ = __opts__
ret = nested.output(obj).rstrip()
return ret
def installed(
name,
version=None,
refresh=None,
fromrepo=None,
skip_verify=False,
skip_suggestions=False,
pkgs=None,
sources=None,
allow_updates=False,
pkg_verify=False,
normalize=True,
**kwargs):
if isinstance(pkgs, list) and len(pkgs) == 0:
return {'name': name,
'changes': {},
'result': True,
'comment': 'No packages to install provided'}
kwargs['saltenv'] = __env__
rtag = __gen_rtag()
refresh = bool(
salt.utils.is_true(refresh)
or (os.path.isfile(rtag) and refresh is not False)
)
if not isinstance(pkg_verify, list):
pkg_verify = pkg_verify is True
if (pkg_verify or isinstance(pkg_verify, list)) and 'pkg.verify' not in __salt__:
return {'name': name,
'changes': {},
'result': False,
'comment': 'pkg.verify not implemented'}
if not isinstance(version, six.string_types) and version is not None:
version = str(version)
if version is not None and version == 'latest':
version = __salt__['pkg.latest_version'](name)
kwargs['allow_updates'] = allow_updates
result = _find_install_targets(name, version, pkgs, sources,
fromrepo=fromrepo,
skip_suggestions=skip_suggestions,
pkg_verify=pkg_verify,
normalize=normalize,
**kwargs)
try:
desired, targets, to_unpurge, to_reinstall, altered_files = result
except ValueError:
if 'pkg.hold' in __salt__:
if 'hold' in kwargs:
try:
if kwargs['hold']:
hold_ret = __salt__['pkg.hold'](
name=name, pkgs=pkgs, sources=sources
)
else:
hold_ret = __salt__['pkg.unhold'](
name=name, pkgs=pkgs, sources=sources
)
except (CommandExecutionError, SaltInvocationError) as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
if 'result' in hold_ret and not hold_ret['result']:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while '
'holding/unholding package(s): {0}'
.format(hold_ret['comment'])}
else:
modified_hold = [hold_ret[x] for x in hold_ret
if hold_ret[x]['changes']]
not_modified_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['changes']
and hold_ret[x]['result']]
failed_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['result']]
if modified_hold:
for i in modified_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
result['changes'][i['name']] = i['changes']
if not_modified_hold:
for i in not_modified_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
if failed_hold:
for i in failed_hold:
result['comment'] += ' {0}'.format(i['comment'])
result['result'] = i['result']
return result
if to_unpurge and 'lowpkg.unpurge' not in __salt__:
return {'name': name,
'changes': {},
'result': False,
'comment': 'lowpkg.unpurge not implemented'}
if pkgs:
pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
elif sources:
oldsources = sources
sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets]
sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall])
comment = []
if __opts__['test']:
if targets:
if sources:
summary = ', '.join(targets)
else:
summary = ', '.join([_get_desired_pkg(x, targets)
for x in targets])
comment.append('The following packages are set to be '
'installed/updated: {0}.'.format(summary))
if to_unpurge:
comment.append(
'The following packages will have their selection status '
'changed from \'purge\' to \'install\': {0}.'
.format(', '.join(to_unpurge))
)
if to_reinstall:
for x in to_reinstall:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, to_reinstall)
comment.append('\nPackage {0} is set to be reinstalled because the '
'following files have been altered:'.format(pkgstr))
comment.append('\n' + _nested_output(altered_files[x]))
return {'name': name,
'changes': {},
'result': None,
'comment': ' '.join(comment)}
changes = {'installed': {}}
modified_hold = None
not_modified_hold = None
failed_hold = None
if targets or to_reinstall:
reinstall = bool(to_reinstall)
try:
pkg_ret = __salt__['pkg.install'](name,
refresh=refresh,
version=version,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=pkgs,
sources=sources,
reinstall=reinstall,
normalize=normalize,
**kwargs)
if os.path.isfile(rtag) and refresh:
os.remove(rtag)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while installing '
'package(s): {0}'.format(exc)}
if isinstance(pkg_ret, dict):
changes['installed'].update(pkg_ret)
elif isinstance(pkg_ret, six.string_types):
comment.append(pkg_ret)
if 'pkg.hold' in __salt__:
if 'hold' in kwargs:
try:
if kwargs['hold']:
hold_ret = __salt__['pkg.hold'](
name=name, pkgs=pkgs, sources=sources
)
else:
hold_ret = __salt__['pkg.unhold'](
name=name, pkgs=pkgs, sources=sources
)
except (CommandExecutionError, SaltInvocationError) as exc:
comment.append(str(exc))
return {'name': name,
'changes': changes,
'result': False,
'comment': ' '.join(comment)}
else:
if 'result' in hold_ret and not hold_ret['result']:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while '
'holding/unholding package(s): {0}'
.format(hold_ret['comment'])}
else:
modified_hold = [hold_ret[x] for x in hold_ret
if hold_ret[x]['changes']]
not_modified_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['changes']
and hold_ret[x]['result']]
failed_hold = [hold_ret[x] for x in hold_ret
if not hold_ret[x]['result']]
if to_unpurge:
changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)
if sources:
modified = [x for x in changes['installed'] if x in targets]
not_modified = [x for x in desired if x not in targets and x not in to_reinstall]
failed = [x for x in targets if x not in modified]
else:
ok, failed = \
_verify_install(
desired, __salt__['pkg.list_pkgs'](
versions_as_list=True, **kwargs
)
)
modified = [x for x in ok if x in targets]
not_modified = [x for x in ok if x not in targets and x not in to_reinstall]
failed = [x for x in failed if x in targets]
if not changes.get('purge_desired'):
changes = changes['installed']
if modified:
if sources:
summary = ', '.join(modified)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in modified])
if len(summary) < 20:
comment.append('The following packages were installed/updated: '
'{0}.'.format(summary))
else:
comment.append(
'{0} targeted package{1} {2} installed/updated.'.format(
len(modified),
's' if len(modified) > 1 else '',
'were' if len(modified) > 1 else 'was'
)
)
if modified_hold:
for i in modified_hold:
comment.append(i['comment'])
change_name = i['name']
if len(changes[change_name]['new']) > 0:
changes[change_name]['new'] += '\n'
changes[change_name]['new'] += '{0}'.format(i['changes']['new'])
if len(changes[change_name]['old']) > 0:
changes[change_name]['old'] += '\n'
changes[change_name]['old'] += '{0}'.format(i['changes']['old'])
if not_modified:
if sources:
summary = ', '.join(not_modified)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in not_modified])
if len(not_modified) <= 20:
comment.append('The following packages were already installed: '
'{0}.'.format(summary))
else:
comment.append(
'{0} targeted package{1} {2} already installed.'.format(
len(not_modified),
's' if len(not_modified) > 1 else '',
'were' if len(not_modified) > 1 else 'was'
)
)
if not_modified_hold:
for i in not_modified_hold:
comment.append(i['comment'])
result = True
if failed:
if sources:
summary = ', '.join(failed)
else:
summary = ', '.join([_get_desired_pkg(x, desired)
for x in failed])
comment.insert(0, 'The following packages failed to '
'install/update: {0}.'.format(summary))
result = False
if failed_hold:
for i in failed_hold:
comment.append(i['comment'])
result = False
if isinstance(pkg_verify, list) and any(x.get('ignore_types') is not None
for x in pkg_verify
if isinstance(x, _OrderedDict)
and 'ignore_types' in x):
ignore_types = next(x.get('ignore_types')
for x in pkg_verify
if 'ignore_types' in x)
else:
ignore_types = []
modified = []
failed = []
for x in to_reinstall:
retval = __salt__['pkg.verify'](x, ignore_types=ignore_types)
if retval:
failed.append(x)
altered_files[x] = retval
else:
modified.append(x)
if modified:
for x in modified:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, desired)
comment.append('\nPackage {0} was reinstalled. The following files '
'were remediated:'.format(pkgstr))
comment.append(_nested_output(altered_files[x]))
if failed:
for x in failed:
if sources:
pkgstr = x
else:
pkgstr = _get_desired_pkg(x, desired)
comment.append(
'\nReinstall was not successful for package {0}. The following '
'files could not be remediated:'.format(pkgstr)
)
comment.append(_nested_output(altered_files[x]))
result = False
return {'name': name,
'changes': changes,
'result': result,
'comment': ' '.join(comment)}
def latest(
name,
refresh=None,
fromrepo=None,
skip_verify=False,
pkgs=None,
**kwargs):
rtag = __gen_rtag()
refresh = bool(
salt.utils.is_true(refresh)
or (os.path.isfile(rtag) and refresh is not False)
)
if kwargs.get('sources'):
return {'name': name,
'changes': {},
'result': False,
'comment': 'The "sources" parameter is not supported.'}
elif pkgs:
desired_pkgs = list(_repack_pkgs(pkgs).keys())
if not desired_pkgs:
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalidly formatted "pkgs" parameter. See '
'minion log.'}
else:
desired_pkgs = [name]
cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
try:
avail = __salt__['pkg.latest_version'](*desired_pkgs,
fromrepo=fromrepo,
refresh=refresh,
**kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while checking the '
'newest available version of package(s): {0}'
.format(exc)}
if os.path.isfile(rtag) and refresh:
os.remove(rtag)
if isinstance(cur, six.string_types):
cur = {desired_pkgs[0]: cur}
if isinstance(avail, six.string_types):
avail = {desired_pkgs[0]: avail}
targets = {}
problems = []
for pkg in desired_pkgs:
if not avail[pkg]:
if not cur[pkg]:
msg = 'No information found for {0!r}.'.format(pkg)
log.error(msg)
problems.append(msg)
elif not cur[pkg] \
or salt.utils.compare_versions(
ver1=cur[pkg],
oper='<',
ver2=avail[pkg],
cmp_func=__salt__.get('version_cmp')):
targets[pkg] = avail[pkg]
if problems:
return {'name': name,
'changes': {},
'result': False,
'comment': ' '.join(problems)}
if targets:
if not pkgs:
# only targeted a single package and is being allowed to proceed to
# the install step.
up_to_date = []
else:
up_to_date = [x for x in pkgs if x not in targets]
if __opts__['test']:
to_be_upgraded = ', '.join(sorted(targets))
comment = 'The following packages are set to be ' \
'installed/upgraded: ' \
'{0}.'.format(to_be_upgraded)
if up_to_date:
up_to_date_nb = len(up_to_date)
if up_to_date_nb <= 10:
up_to_date_sorted = sorted(up_to_date)
up_to_date_details = ', '.join(
'{0} ({1})'.format(name, cur[name])
for name in up_to_date_sorted
)
comment += (
' The following packages are already '
'up-to-date: {0}.'
).format(up_to_date_details)
else:
comment += ' {0} packages are already up-to-date.'.format(
up_to_date_nb
)
return {'name': name,
'changes': {},
'result': None,
'comment': comment}
# Build updated list of pkgs to exclude non-targeted ones
targeted_pkgs = list(targets.keys()) if pkgs else None
try:
# No need to refresh, if a refresh was necessary it would have been
# performed above when pkg.latest_version was run.
changes = __salt__['pkg.install'](name,
refresh=False,
fromrepo=fromrepo,
skip_verify=skip_verify,
pkgs=targeted_pkgs,
**kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while installing '
'package(s): {0}'.format(exc)}
if changes:
# Find failed and successful updates
failed = [x for x in targets
if not changes.get(x) or changes[x]['new'] != targets[x]]
successful = [x for x in targets if x not in failed]
comments = []
if failed:
msg = 'The following packages failed to update: ' \
'{0}.'.format(', '.join(sorted(failed)))
comments.append(msg)
if successful:
msg = 'The following packages were successfully ' \
'installed/upgraded: ' \
'{0}.'.format(', '.join(sorted(successful)))
comments.append(msg)
if up_to_date:
if len(up_to_date) <= 10:
msg = 'The following packages were already up-to-date: ' \
'{0}.'.format(', '.join(sorted(up_to_date)))
else:
msg = '{0} packages were already up-to-date. '.format(
len(up_to_date))
comments.append(msg)
return {'name': name,
'changes': changes,
'result': False if failed else True,
'comment': ' '.join(comments)}
else:
if len(targets) > 10:
comment = ('{0} targeted packages failed to update. '
'See debug log for details.'.format(len(targets)))
elif len(targets) > 1:
comment = ('The following targeted packages failed to update. '
'See debug log for details: ({0}).'
.format(', '.join(sorted(targets))))
else:
comment = 'Package {0} failed to ' \
'update.'.format(next(iter(list(targets.keys()))))
if up_to_date:
if len(up_to_date) <= 10:
comment += ' The following packages were already ' \
'up-to-date: ' \
'{0}'.format(', '.join(sorted(up_to_date)))
else:
comment += '{0} packages were already ' \
'up-to-date.'.format(len(up_to_date))
return {'name': name,
'changes': changes,
'result': False,
'comment': comment}
else:
if len(desired_pkgs) > 10:
comment = 'All {0} packages are up-to-date.'.format(
len(desired_pkgs))
elif len(desired_pkgs) > 1:
comment = 'All packages are up-to-date ' \
'({0}).'.format(', '.join(sorted(desired_pkgs)))
else:
comment = 'Package {0} is already ' \
'up-to-date.'.format(desired_pkgs[0])
return {'name': name,
'changes': {},
'result': True,
'comment': comment}
def _uninstall(action='remove', name=None, version=None, pkgs=None, **kwargs):
if action not in ('remove', 'purge'):
return {'name': name,
'changes': {},
'result': False,
'comment': 'Invalid action {0!r}. '
'This is probably a bug.'.format(action)}
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while parsing targets: '
'{0}'.format(exc)}
targets = _find_remove_targets(name, version, pkgs, **kwargs)
if isinstance(targets, dict) and 'result' in targets:
return targets
elif not isinstance(targets, list):
return {'name': name,
'changes': {},
'result': False,
'comment': 'An error was encountered while checking targets: '
'{0}'.format(targets)}
if action == 'purge':
old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
removed=True,
**kwargs)
targets.extend([x for x in pkg_params if x in old_removed])
targets.sort()
if not targets:
return {'name': name,
'changes': {},
'result': True,
'comment': 'None of the targeted packages are installed'
'{0}'.format(' or partially installed'
if action == 'purge' else '')}
if __opts__['test']:
return {'name': name,
'changes': {},
'result': None,
'comment': 'The following packages will be {0}d: '
'{1}.'.format(action, ', '.join(targets))}
changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, **kwargs)
new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
failed = [x for x in pkg_params if x in new]
if action == 'purge':
new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
removed=True,
**kwargs)
failed.extend([x for x in pkg_params if x in new_removed])
failed.sort()
if failed:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'The following packages failed to {0}: '
'{1}.'.format(action, ', '.join(failed))}
comments = []
not_installed = sorted([x for x in pkg_params if x not in targets])
if not_installed:
comments.append('The following packages were not installed: '
'{0}.'.format(', '.join(not_installed)))
comments.append('The following packages were {0}d: '
'{1}.'.format(action, ', '.join(targets)))
else:
comments.append('All targeted packages were {0}d.'.format(action))
return {'name': name,
'changes': changes,
'result': True,
'comment': ' '.join(comments)}
def removed(name, version=None, pkgs=None, **kwargs):
try:
return _uninstall(action='remove', name=name, version=version,
pkgs=pkgs, **kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
def purged(name, version=None, pkgs=None, **kwargs):
try:
return _uninstall(action='purge', name=name, version=version,
pkgs=pkgs, **kwargs)
except CommandExecutionError as exc:
return {'name': name,
'changes': {},
'result': False,
'comment': str(exc)}
def uptodate(name, refresh=False):
ret = {'name': name,
'changes': {},
'result': False,
'comment': 'Failed to update.'}
if 'pkg.list_upgrades' not in __salt__:
ret['comment'] = 'State pkg.uptodate is not available'
return ret
if isinstance(refresh, bool):
try:
packages = __salt__['pkg.list_upgrades'](refresh=refresh)
except Exception as exc:
ret['comment'] = str(exc)
return ret
else:
ret['comment'] = 'refresh must be a boolean.'
return ret
if not packages:
ret['comment'] = 'System is already up-to-date.'
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'System update will be performed'
ret['result'] = None
return ret
updated = __salt__['pkg.upgrade'](refresh=refresh)
if updated.get('result') is False:
ret.update(updated)
elif updated:
ret['changes'] = updated
ret['comment'] = 'Upgrade successful.'
ret['result'] = True
else:
ret['comment'] = 'Upgrade failed.'
return ret
def mod_init(low):
ret = True
if 'pkg.ex_mod_init' in __salt__:
ret = __salt__['pkg.ex_mod_init'](low)
if low['fun'] == 'installed' or low['fun'] == 'latest':
rtag = __gen_rtag()
if not os.path.exists(rtag):
salt.utils.fopen(rtag, 'w+').write('')
return ret
return False
def mod_aggregate(low, chunks, running):
pkgs = []
agg_enabled = [
'installed',
'latest',
'removed',
'purged',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = salt.utils.gen_state_tag(chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pkg':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkgs:
if 'pkgs' in low:
low['pkgs'].extend(pkgs)
else:
low['pkgs'] = pkgs
return low
| true
| true
|
1c44fd6d83836c8a950ff68c726d4e52c4f08086
| 12,639
|
py
|
Python
|
eval/libMemo.py
|
PurdueDualityLab/memoized-regex-engine
|
e7edcb0033a1eba90589e7831733f6527d9c4909
|
[
"MIT"
] | 5
|
2020-10-05T14:24:06.000Z
|
2021-02-27T23:01:00.000Z
|
eval/libMemo.py
|
PurdueDualityLab/memoized-regex-engine
|
e7edcb0033a1eba90589e7831733f6527d9c4909
|
[
"MIT"
] | 2
|
2020-09-30T16:48:24.000Z
|
2020-09-30T16:48:52.000Z
|
eval/libMemo.py
|
PurdueDualityLab/memoized-regex-engine
|
e7edcb0033a1eba90589e7831733f6527d9c4909
|
[
"MIT"
] | 1
|
2021-02-02T05:12:06.000Z
|
2021-02-02T05:12:06.000Z
|
"""Memoization: utils associated with memoization experiments
"""
# Import libLF
import os
import sys
sys.path.append(os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], 'eval', 'lib'))
import libLF
# Other imports
import json
import re
import tempfile
import pandas as pd
###
# Constants
###
class ProtoRegexEngine:
"""One stop shop for interacting with the Prototype Regex Engine
Don't instantiate this. Everything is static.
"""
CLI = os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], "src-simple", "re")
class SELECTION_SCHEME:
SS_None = "no memoization"
SS_Full = "full memoization"
SS_InDeg = "selective: indeg>1"
SS_Loop = "selective: loop"
scheme2cox = {
SS_None: "none",
SS_Full: "full",
SS_InDeg: "indeg",
SS_Loop: "loop",
}
all = scheme2cox.keys()
allMemo = [ SS_Full, SS_InDeg, SS_Loop ]
class ENCODING_SCHEME:
ES_None = "no encoding"
ES_Negative = "negative encoding"
ES_RLE = "RLE"
ES_RLE_TUNED = "RLE-tuned"
scheme2cox = {
ES_None: "none",
ES_Negative: "neg",
ES_RLE: "rle",
# ES_RLE_TUNED: "rle-tuned", # TODO Work out the right math here
}
all = scheme2cox.keys()
@staticmethod
def buildQueryFile(pattern, input, filePrefix="protoRegexEngineQueryFile-"):
"""Build a query file
pattern: string
input: string
[filePrefix]: string
returns: tmp fileName. Caller should unlink.
"""
fd, name = tempfile.mkstemp(suffix=".json", prefix=filePrefix)
os.close(fd)
with open(name, 'w') as outStream:
json.dump({
"pattern": pattern,
"input": input,
}, outStream)
return name
@staticmethod
def query(selectionScheme, encodingScheme, queryFile, timeout=None):
"""Query the engine
selectionScheme: SELECTION_SCHEME
encodingScheme: ENCODING_SCHEME
queryFile: file path
timeout: integer seconds before raising subprocess.TimeoutExpired
returns: EngineMeasurements
raises: on rc != 0, or on timeout
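    Example (illustrative; the pattern, input, and timeout are hypothetical,
    and the engine CLI is assumed to have been built):
      queryFile = ProtoRegexEngine.buildQueryFile("(a|a)+$", "a"*20 + "!")
      try:
        meas = ProtoRegexEngine.query(
          ProtoRegexEngine.SELECTION_SCHEME.SS_Full,
          ProtoRegexEngine.ENCODING_SCHEME.ES_None,
          queryFile, timeout=30)
      finally:
        os.unlink(queryFile)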
"""
rc, stdout, stderr = libLF.runcmd_OutAndErr(
args= [ ProtoRegexEngine.CLI,
ProtoRegexEngine.SELECTION_SCHEME.scheme2cox[selectionScheme],
ProtoRegexEngine.ENCODING_SCHEME.scheme2cox[encodingScheme],
'-f', queryFile ],
timeout=timeout
)
if rc != 0:
if "syntax error" in stderr:
raise SyntaxError("Engine raised syntax error\n rc: {}\nstdout:\n{}\n\nstderr:\n{}".format(rc, stdout, stderr))
else:
raise BaseException('Invocation failed; rc {} stdout\n {}\n\nstderr\n {}'.format(rc, stdout, stderr))
res = re.search(r"Need (\d+) bits", stdout)
if res:
libLF.log("Wished for {} bits".format(res.group(1)))
# libLF.log("stderr: <" + stderr + ">")
return ProtoRegexEngine.EngineMeasurements(stderr.strip(), "-no match-" in stdout)
class EngineMeasurements:
"""Engine measurements
This is a Python-native version of the JSON object
emitted by the regex engine.
It offers some assurance of type safety.
"""
def __init__(self, measAsJSON, misMatched):
obj = json.loads(measAsJSON)
self._unpackInputInfo(obj['inputInfo'])
self._unpackMemoizationInfo(obj['memoizationInfo'])
self._unpackSimulationInfo(obj['simulationInfo'])
self.matched = not misMatched
def _unpackInputInfo(self, dict):
self.ii_lenW = int(dict['lenW'])
self.ii_nStates = int(dict['nStates'])
def _unpackMemoizationInfo(self, dict):
self.mi_config_encoding = dict['config']['encoding']
self.mi_config_vertexSelection = dict['config']['vertexSelection']
self.mi_results_maxObservedAsymptoticCostsPerVertex = [
int(cost) for cost in dict['results']['maxObservedAsymptoticCostsPerMemoizedVertex']
]
self.mi_results_maxObservedMemoryBytesPerVertex = [
int(cost) for cost in dict['results']['maxObservedMemoryBytesPerMemoizedVertex']
]
self.mi_results_nSelectedVertices = int(dict['results']['nSelectedVertices'])
self.mi_results_lenW = int(dict['results']['lenW'])
def _unpackSimulationInfo(self, dict):
self.si_nTotalVisits = int(dict['nTotalVisits'])
self.si_simTimeUS = int(dict['simTimeUS'])
self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])
self.si_nPossibleTotalVisitsWithMemoization = int(dict['nPossibleTotalVisitsWithMemoization'])
self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])
###
# Input classes
###
class SimpleRegex:
"""Simple regex for use with a memoized regex engine.
Can be pattern ("all") or pattern+evilInput ("SL")
"""
def __init__(self):
self.pattern = None
self.evilInputs = []
return
def initFromNDJSON(self, line):
obj = json.loads(line)
self.pattern = obj['pattern']
self.evilInputs = []
if 'evilInputs' in obj:
for _ei in obj['evilInputs']:
_ei['couldParse'] = True # Hack
ei = libLF.EvilInput()
ei.initFromDict(_ei)
self.evilInputs.append(ei)
return self
###
# Output classes
###
class MemoizationStaticAnalysis:
"""Represents the result of regex pattern static analysis for memoization purposes"""
def __init__(self):
self.pattern = None
self.policy2nSelectedVertices = {}
def initFromRaw(self, pattern, policy2nSelectedVertices):
self.pattern = pattern
self.policy2nSelectedVertices = policy2nSelectedVertices
# All memoization policies measured?
s1 = set(policy2nSelectedVertices.keys())
s2 = set(policy2nSelectedVertices.keys())
assert s1 <= s2 <= s1
return self
def initFromNDJSON(self, jsonStr):
obj = libLF.fromNDJSON(jsonStr)
return self.initFromDict(obj)
def initFromDict(self, obj):
self.pattern = obj['pattern']
self.policy2nSelectedVertices = obj['policy2nSelectedVertices']
return self
def toNDJSON(self):
_dict = {
'pattern': self.pattern,
'policy2nSelectedVertices': self.policy2nSelectedVertices
}
return json.dumps(_dict)
class MemoizationDynamicAnalysis:
"""Represents the result of regex pattern dynamic analysis for memoization purposes"""
def __init__(self):
self.pattern = None
self.automatonSize = -1
self.phiInDeg = -1
self.phiQuantifier = -1
self.inputLength = -1
self.evilInput = None # If an SL regex
self.nPumps = -1 # If an SL regex
# Set these if you run a production regex analysis
self.productionEnginePumps = -1
self.perlBehavior = ""
self.phpBehavior = ""
self.csharpBehavior = ""
self.selectionPolicy_to_enc2spaceAlgo = {} # Numeric space cost in algorithmic measure
self.selectionPolicy_to_enc2spaceBytes = {} # Numeric space cost in bytes
self.selectionPolicy_to_enc2time = {} # Numeric time cost
for scheme in ProtoRegexEngine.SELECTION_SCHEME.scheme2cox.keys():
if scheme != ProtoRegexEngine.SELECTION_SCHEME.SS_None:
self.selectionPolicy_to_enc2spaceAlgo[scheme] = {}
self.selectionPolicy_to_enc2spaceBytes[scheme] = {}
self.selectionPolicy_to_enc2time[scheme] = {}
def initFromRaw(self, pattern, automatonSize, phiInDeg, phiQuantifier, inputLength, evilInput, nPumps, selectionPolicy_to_enc2spaceAlgo, selectionPolicy_to_enc2spaceBytes, selectionPolicy_to_enc2time):
self.pattern = pattern
self.automatonSize = automatonSize
self.phiInDeg = phiInDeg
self.phiQuantifier = phiQuantifier
self.inputLength = inputLength
self.evilInput = evilInput
self.nPumps = nPumps
self.selectionPolicy_to_enc2time = selectionPolicy_to_enc2time
self.selectionPolicy_to_enc2spaceAlgo = selectionPolicy_to_enc2spaceAlgo
self.selectionPolicy_to_enc2spaceBytes = selectionPolicy_to_enc2spaceBytes
return self
def initFromNDJSON(self, jsonStr):
obj = libLF.fromNDJSON(jsonStr)
return self.initFromDict(obj)
def initFromDict(self, obj):
self.pattern = obj['pattern']
self.automatonSize = obj['automatonSize']
self.phiInDeg = obj['phiInDeg']
self.phiQuantifier = obj['phiQuantifier']
self.inputLength = obj['inputLength']
if obj['evilInput'] is not None:
ei = libLF.EvilInput()
ei.initFromNDJSON(obj['evilInput'])
self.evilInput = ei
else:
self.evilInput = None
self.nPumps = obj['nPumps']
self.productionEnginePumps = obj['productionEnginePumps']
self.perlBehavior = obj['perlBehavior']
self.phpBehavior = obj['phpBehavior']
self.csharpBehavior = obj['csharpBehavior']
self.selectionPolicy_to_enc2time = obj['selectionPolicy_to_enc2time']
self.selectionPolicy_to_enc2spaceAlgo = obj['selectionPolicy_to_enc2spaceAlgo']
self.selectionPolicy_to_enc2spaceBytes = obj['selectionPolicy_to_enc2spaceBytes']
return self
def toNDJSON(self):
_dict = {
'pattern': self.pattern,
'automatonSize': self.automatonSize,
'phiInDeg': self.phiInDeg,
'phiQuantifier': self.phiQuantifier,
'inputLength': self.inputLength,
'evilInput': self.evilInput.toNDJSON() if self.evilInput else None,
'nPumps': self.nPumps,
'perlBehavior': self.perlBehavior,
'productionEnginePumps': self.productionEnginePumps,
'selectionPolicy_to_enc2time': self.selectionPolicy_to_enc2time,
'selectionPolicy_to_enc2spaceAlgo': self.selectionPolicy_to_enc2spaceAlgo,
'selectionPolicy_to_enc2spaceBytes': self.selectionPolicy_to_enc2spaceBytes,
}
return json.dumps(_dict)
def validate(self):
"""Returns True if everything looks OK, else raises an error"""
assert self.automatonSize >= 0, "No automaton"
assert self.phiInDeg >= 0, "Negative |Phi_in-deg|?"
assert self.phiQuantifier >= 0, "Negative |Phi_quantifier|?"
assert self.inputLength > 0, "no input"
# Full space cost (algorithmic) for Phi=Q should be |Q| * |w|
fullSpaceCostAlgo = self.selectionPolicy_to_enc2spaceAlgo[
ProtoRegexEngine.SELECTION_SCHEME.SS_Full
][
ProtoRegexEngine.ENCODING_SCHEME.ES_None
]
    # The |Q| * (|w|+1) bound should be the "bigger" one -- the difference can
    # arise due to pump strings being > 1 character long
    assert fullSpaceCostAlgo <= self.automatonSize * (self.inputLength+1), \
      "fullSpaceCost {} is not <= {} * ({}+1)".format(fullSpaceCostAlgo, self.automatonSize, self.inputLength)
# Full table should have the most space complexity
for selectionScheme, enc2space in self.selectionPolicy_to_enc2spaceAlgo.items():
for encodingScheme, spaceCost in enc2space.items():
assert spaceCost <= fullSpaceCostAlgo, \
"General fullSpaceCost < cost for {}-{}".format(selectionScheme, encodingScheme)
assert spaceCost <= enc2space[ProtoRegexEngine.ENCODING_SCHEME.ES_None], \
"Phi-specific fullSpaceCost < cost for {}-{}".format(selectionScheme, encodingScheme)
return True
def toDataFrame(self):
"""Return a pandas DataFrame
This expands the selection-encoding dictionaries
"""
rows = []
for selectionPolicy, d in self.selectionPolicy_to_enc2time.items():
for encodingPolicy, _ in d.items():
rows.append( {
"pattern": self.pattern,
"|Q|": self.automatonSize,
"|Phi_{in-deg > 1}|": self.phiInDeg,
"|Phi_{quantifier}|": self.phiQuantifier,
"|w|": self.inputLength + 1, # Count the null byte
"SL": True,
"nPumps": self.nPumps,
"perlBehavior": self.perlBehavior,
"phpBehavior": self.phpBehavior,
"csharpBehavior": self.csharpBehavior,
"productionEnginePumps": self.productionEnginePumps,
"selectionPolicy": selectionPolicy,
"encodingPolicy": encodingPolicy,
"timeCost": self.selectionPolicy_to_enc2time[selectionPolicy][encodingPolicy],
"spaceCostAlgo": self.selectionPolicy_to_enc2spaceAlgo[selectionPolicy][encodingPolicy],
"spaceCostBytes": self.selectionPolicy_to_enc2spaceBytes[selectionPolicy][encodingPolicy],
})
return pd.DataFrame(data=rows)
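# --- Editorial usage sketch (not part of the original module, hedged) ---
# A minimal example of how MemoizationDynamicAnalysis records might be consumed
# once written to disk: parse each NDJSON line, sanity-check it, and expand the
# results into a single DataFrame. The file name below is hypothetical.
def loadDynamicAnalysisFrames(ndjsonPath="dynamic-analysis.ndjson"):
  frames = []
  with open(ndjsonPath, 'r') as inStream:
    for line in inStream:
      line = line.strip()
      if not line:
        continue
      mda = MemoizationDynamicAnalysis().initFromNDJSON(line)
      mda.validate()
      frames.append(mda.toDataFrame())
  return pd.concat(frames, ignore_index=True)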
| 36.008547
| 203
| 0.664293
|
import os
import sys
sys.path.append(os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], 'eval', 'lib'))
import libLF
import json
import re
import tempfile
import pandas as pd
class ProtoRegexEngine:
CLI = os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], "src-simple", "re")
class SELECTION_SCHEME:
SS_None = "no memoization"
SS_Full = "full memoization"
SS_InDeg = "selective: indeg>1"
SS_Loop = "selective: loop"
scheme2cox = {
SS_None: "none",
SS_Full: "full",
SS_InDeg: "indeg",
SS_Loop: "loop",
}
all = scheme2cox.keys()
allMemo = [ SS_Full, SS_InDeg, SS_Loop ]
class ENCODING_SCHEME:
ES_None = "no encoding"
ES_Negative = "negative encoding"
ES_RLE = "RLE"
ES_RLE_TUNED = "RLE-tuned"
scheme2cox = {
ES_None: "none",
ES_Negative: "neg",
ES_RLE: "rle",
    }
    all = scheme2cox.keys()
@staticmethod
def buildQueryFile(pattern, input, filePrefix="protoRegexEngineQueryFile-"):
fd, name = tempfile.mkstemp(suffix=".json", prefix=filePrefix)
os.close(fd)
with open(name, 'w') as outStream:
json.dump({
"pattern": pattern,
"input": input,
}, outStream)
return name
@staticmethod
def query(selectionScheme, encodingScheme, queryFile, timeout=None):
rc, stdout, stderr = libLF.runcmd_OutAndErr(
args= [ ProtoRegexEngine.CLI,
ProtoRegexEngine.SELECTION_SCHEME.scheme2cox[selectionScheme],
ProtoRegexEngine.ENCODING_SCHEME.scheme2cox[encodingScheme],
'-f', queryFile ],
timeout=timeout
)
if rc != 0:
if "syntax error" in stderr:
raise SyntaxError("Engine raised syntax error\n rc: {}\nstdout:\n{}\n\nstderr:\n{}".format(rc, stdout, stderr))
else:
raise BaseException('Invocation failed; rc {} stdout\n {}\n\nstderr\n {}'.format(rc, stdout, stderr))
res = re.search(r"Need (\d+) bits", stdout)
if res:
libLF.log("Wished for {} bits".format(res.group(1)))
return ProtoRegexEngine.EngineMeasurements(stderr.strip(), "-no match-" in stdout)
class EngineMeasurements:
def __init__(self, measAsJSON, misMatched):
obj = json.loads(measAsJSON)
self._unpackInputInfo(obj['inputInfo'])
self._unpackMemoizationInfo(obj['memoizationInfo'])
self._unpackSimulationInfo(obj['simulationInfo'])
self.matched = not misMatched
def _unpackInputInfo(self, dict):
self.ii_lenW = int(dict['lenW'])
self.ii_nStates = int(dict['nStates'])
def _unpackMemoizationInfo(self, dict):
self.mi_config_encoding = dict['config']['encoding']
self.mi_config_vertexSelection = dict['config']['vertexSelection']
self.mi_results_maxObservedAsymptoticCostsPerVertex = [
int(cost) for cost in dict['results']['maxObservedAsymptoticCostsPerMemoizedVertex']
]
self.mi_results_maxObservedMemoryBytesPerVertex = [
int(cost) for cost in dict['results']['maxObservedMemoryBytesPerMemoizedVertex']
]
self.mi_results_nSelectedVertices = int(dict['results']['nSelectedVertices'])
self.mi_results_lenW = int(dict['results']['lenW'])
def _unpackSimulationInfo(self, dict):
self.si_nTotalVisits = int(dict['nTotalVisits'])
self.si_simTimeUS = int(dict['simTimeUS'])
self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])
self.si_nPossibleTotalVisitsWithMemoization = int(dict['nPossibleTotalVisitsWithMemoization'])
self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])
class SimpleRegex:
def __init__(self):
self.pattern = None
self.evilInputs = []
return
def initFromNDJSON(self, line):
obj = json.loads(line)
self.pattern = obj['pattern']
self.evilInputs = []
if 'evilInputs' in obj:
for _ei in obj['evilInputs']:
_ei['couldParse'] = True
ei = libLF.EvilInput()
ei.initFromDict(_ei)
self.evilInputs.append(ei)
return self
class MemoizationStaticAnalysis:
def __init__(self):
self.pattern = None
self.policy2nSelectedVertices = {}
def initFromRaw(self, pattern, policy2nSelectedVertices):
self.pattern = pattern
self.policy2nSelectedVertices = policy2nSelectedVertices
s1 = set(policy2nSelectedVertices.keys())
s2 = set(policy2nSelectedVertices.keys())
assert s1 <= s2 <= s1
return self
def initFromNDJSON(self, jsonStr):
obj = libLF.fromNDJSON(jsonStr)
return self.initFromDict(obj)
def initFromDict(self, obj):
self.pattern = obj['pattern']
self.policy2nSelectedVertices = obj['policy2nSelectedVertices']
return self
def toNDJSON(self):
_dict = {
'pattern': self.pattern,
'policy2nSelectedVertices': self.policy2nSelectedVertices
}
return json.dumps(_dict)
class MemoizationDynamicAnalysis:
def __init__(self):
self.pattern = None
self.automatonSize = -1
self.phiInDeg = -1
self.phiQuantifier = -1
self.inputLength = -1
self.evilInput = None
self.nPumps = -1
self.productionEnginePumps = -1
self.perlBehavior = ""
self.phpBehavior = ""
self.csharpBehavior = ""
self.selectionPolicy_to_enc2spaceAlgo = {}
self.selectionPolicy_to_enc2spaceBytes = {}
self.selectionPolicy_to_enc2time = {}
for scheme in ProtoRegexEngine.SELECTION_SCHEME.scheme2cox.keys():
if scheme != ProtoRegexEngine.SELECTION_SCHEME.SS_None:
self.selectionPolicy_to_enc2spaceAlgo[scheme] = {}
self.selectionPolicy_to_enc2spaceBytes[scheme] = {}
self.selectionPolicy_to_enc2time[scheme] = {}
def initFromRaw(self, pattern, automatonSize, phiInDeg, phiQuantifier, inputLength, evilInput, nPumps, selectionPolicy_to_enc2spaceAlgo, selectionPolicy_to_enc2spaceBytes, selectionPolicy_to_enc2time):
self.pattern = pattern
self.automatonSize = automatonSize
self.phiInDeg = phiInDeg
self.phiQuantifier = phiQuantifier
self.inputLength = inputLength
self.evilInput = evilInput
self.nPumps = nPumps
self.selectionPolicy_to_enc2time = selectionPolicy_to_enc2time
self.selectionPolicy_to_enc2spaceAlgo = selectionPolicy_to_enc2spaceAlgo
self.selectionPolicy_to_enc2spaceBytes = selectionPolicy_to_enc2spaceBytes
return self
def initFromNDJSON(self, jsonStr):
obj = libLF.fromNDJSON(jsonStr)
return self.initFromDict(obj)
def initFromDict(self, obj):
self.pattern = obj['pattern']
self.automatonSize = obj['automatonSize']
self.phiInDeg = obj['phiInDeg']
self.phiQuantifier = obj['phiQuantifier']
self.inputLength = obj['inputLength']
if obj['evilInput'] is not None:
ei = libLF.EvilInput()
ei.initFromNDJSON(obj['evilInput'])
self.evilInput = ei
else:
self.evilInput = None
self.nPumps = obj['nPumps']
self.productionEnginePumps = obj['productionEnginePumps']
self.perlBehavior = obj['perlBehavior']
self.phpBehavior = obj['phpBehavior']
self.csharpBehavior = obj['csharpBehavior']
self.selectionPolicy_to_enc2time = obj['selectionPolicy_to_enc2time']
self.selectionPolicy_to_enc2spaceAlgo = obj['selectionPolicy_to_enc2spaceAlgo']
self.selectionPolicy_to_enc2spaceBytes = obj['selectionPolicy_to_enc2spaceBytes']
return self
def toNDJSON(self):
_dict = {
'pattern': self.pattern,
'automatonSize': self.automatonSize,
'phiInDeg': self.phiInDeg,
'phiQuantifier': self.phiQuantifier,
'inputLength': self.inputLength,
'evilInput': self.evilInput.toNDJSON() if self.evilInput else None,
'nPumps': self.nPumps,
'perlBehavior': self.perlBehavior,
'productionEnginePumps': self.productionEnginePumps,
'selectionPolicy_to_enc2time': self.selectionPolicy_to_enc2time,
'selectionPolicy_to_enc2spaceAlgo': self.selectionPolicy_to_enc2spaceAlgo,
'selectionPolicy_to_enc2spaceBytes': self.selectionPolicy_to_enc2spaceBytes,
}
return json.dumps(_dict)
def validate(self):
assert self.automatonSize >= 0, "No automaton"
assert self.phiInDeg >= 0, "Negative |Phi_in-deg|?"
assert self.phiQuantifier >= 0, "Negative |Phi_quantifier|?"
assert self.inputLength > 0, "no input"
fullSpaceCostAlgo = self.selectionPolicy_to_enc2spaceAlgo[
ProtoRegexEngine.SELECTION_SCHEME.SS_Full
][
ProtoRegexEngine.ENCODING_SCHEME.ES_None
]
assert fullSpaceCostAlgo <= self.automatonSize * (self.inputLength+1), \
"fullSpaceCost {} is not >= {} * {}".format(fullSpaceCostAlgo, self.automatonSize, self.inputLength)
for selectionScheme, enc2space in self.selectionPolicy_to_enc2spaceAlgo.items():
for encodingScheme, spaceCost in enc2space.items():
assert spaceCost <= fullSpaceCostAlgo, \
"General fullSpaceCost < cost for {}-{}".format(selectionScheme, encodingScheme)
assert spaceCost <= enc2space[ProtoRegexEngine.ENCODING_SCHEME.ES_None], \
"Phi-specific fullSpaceCost < cost for {}-{}".format(selectionScheme, encodingScheme)
return True
def toDataFrame(self):
rows = []
for selectionPolicy, d in self.selectionPolicy_to_enc2time.items():
for encodingPolicy, _ in d.items():
rows.append( {
"pattern": self.pattern,
"|Q|": self.automatonSize,
"|Phi_{in-deg > 1}|": self.phiInDeg,
"|Phi_{quantifier}|": self.phiQuantifier,
"|w|": self.inputLength + 1,
"SL": True,
"nPumps": self.nPumps,
"perlBehavior": self.perlBehavior,
"phpBehavior": self.phpBehavior,
"csharpBehavior": self.csharpBehavior,
"productionEnginePumps": self.productionEnginePumps,
"selectionPolicy": selectionPolicy,
"encodingPolicy": encodingPolicy,
"timeCost": self.selectionPolicy_to_enc2time[selectionPolicy][encodingPolicy],
"spaceCostAlgo": self.selectionPolicy_to_enc2spaceAlgo[selectionPolicy][encodingPolicy],
"spaceCostBytes": self.selectionPolicy_to_enc2spaceBytes[selectionPolicy][encodingPolicy],
})
return pd.DataFrame(data=rows)
| true
| true
|
1c44ff9ade270ab368d3086fd8c6ded1212a389e
| 12,078
|
py
|
Python
|
Ray_ACNet.py
|
kiototeko/PRIMAL2
|
331ca7ba11d48483694594a9f2029d76238668bb
|
[
"MIT"
] | null | null | null |
Ray_ACNet.py
|
kiototeko/PRIMAL2
|
331ca7ba11d48483694594a9f2029d76238668bb
|
[
"MIT"
] | null | null | null |
Ray_ACNet.py
|
kiototeko/PRIMAL2
|
331ca7ba11d48483694594a9f2029d76238668bb
|
[
"MIT"
] | 1
|
2020-12-10T00:01:44.000Z
|
2020-12-10T00:01:44.000Z
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np
# parameters for training
GRAD_CLIP = 10.0
KEEP_PROB1 = 1 # was 0.5
KEEP_PROB2 = 1 # was 0.7
RNN_SIZE = 512
GOAL_REPR_SIZE = 12
# Used to initialize weights for policy and value output layers (Do we need to use that? Maybe not now)
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class ACNet:
def __init__(self, scope, a_size, trainer, TRAINING, NUM_CHANNEL, OBS_SIZE, GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False, RELATIONAL_LEARNING=False):
with tf.variable_scope(str(scope) + '/qvalues'):
self.trainer = trainer
# The input size may require more work to fit the interface.
self.inputs = tf.placeholder(shape=[None, NUM_CHANNEL, OBS_SIZE, OBS_SIZE], dtype=tf.float32)
self.goal_pos = tf.placeholder(shape=[None, 3], dtype=tf.float32)
self.myinput = tf.transpose(self.inputs, perm=[0, 2, 3, 1])
self.policy, self.value, self.state_out, self.state_in, self.state_init, self.valids = self._build_net(
self.myinput, self.goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING)
if TRAINING:
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
self.train_valid = tf.placeholder(shape=[None, a_size], dtype=tf.float32)
self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
self.train_value = tf.placeholder(tf.float32, [None])
self.train_policy = tf.placeholder(tf.float32, [None])
self.train_imitation = tf.placeholder(tf.float32, [None]) # NEED THIS
self.optimal_actions = tf.placeholder(tf.int32, [None]) # NEED THIS
self.optimal_actions_onehot = tf.one_hot(self.optimal_actions, a_size, dtype=tf.float32) # NEED THIS
self.train_valids= tf.placeholder(tf.float32, [None,1])
# Loss Functions
self.value_loss = 0.1 * tf.reduce_mean(
self.train_value * tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
self.entropy = - tf.reduce_mean(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))
self.policy_loss = - 0.5 * tf.reduce_mean(self.train_policy*
tf.log(tf.clip_by_value(self.responsible_outputs, 1e-15, 1.0)) * self.advantages)
self.valid_loss = - 16 * tf.reduce_mean(self.train_valids * tf.log(tf.clip_by_value(self.valids, 1e-10, 1.0)) * \
self.train_valid + tf.log(
tf.clip_by_value(1 - self.valids, 1e-10, 1.0)) * (1 - self.train_valid))
self.loss = self.value_loss + self.policy_loss + self.valid_loss - self.entropy * 0.01
# IMPORTANT: 0 * self.value_loss is important so we can
# fetch the gradients properly
self.imitation_loss = 0 * self.value_loss + tf.reduce_mean(self.train_imitation*
tf.keras.backend.categorical_crossentropy(self.optimal_actions_onehot, self.policy))
# Get gradients from local network using local losses and
# normalize the gradients using clipping
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)
# Apply local gradients to global network
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE + '/qvalues')
if self.trainer:
self.apply_grads = self.trainer.apply_gradients(zip(self.grads, global_vars))
self.local_vars = local_vars
# now the gradients for imitation loss
self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
self.i_var_norms = tf.global_norm(local_vars)
self.i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)
# Apply local gradients to global network
if self.trainer:
self.apply_imitation_grads = self.trainer.apply_gradients(zip(self.i_grads, global_vars))
if GLOBAL_NETWORK:
print("\n\n\n\n is a global network\n\n\n\n")
weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.tempGradients = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32) for w in weightVars]
self.apply_grads = self.trainer.apply_gradients(zip(self.tempGradients, weightVars))
#self.clippedGrads, norms = tf.clip_by_global_norm(self.tempGradients, GRAD_CLIP)
#self.apply_grads = self.trainer.apply_gradients(zip(self.clippedGrads, weightVars))
print("Hello World... From " + str(scope)) # :)
def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING):
def conv_mlp(inputs, kernal_size, output_size):
inputs = tf.reshape(inputs, [-1, 1, kernal_size, 1])
conv = layers.conv2d(inputs=inputs, padding="VALID", num_outputs=output_size,
kernel_size=[1, kernal_size], stride=1,
data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
return conv
def VGG_Block(inputs):
def conv_2d(inputs, kernal_size, output_size):
conv = layers.conv2d(inputs=inputs, padding="SAME", num_outputs=output_size,
kernel_size=[kernal_size[0], kernal_size[1]], stride=1,
data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
return conv
conv1 = conv_2d(inputs, [3, 3], RNN_SIZE // 4)
conv1a = conv_2d(conv1, [3, 3], RNN_SIZE // 4)
conv1b = conv_2d(conv1a, [3, 3], RNN_SIZE // 4)
pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])
return pool1
#From here on, these are functions used for the relational module which were obtained from https://github.com/RLOpensource/Relational_Deep_Reinforcement_Learning/blob/5945fab3fe6c2f344ab7ac78c95c8d1aee7f6e3b/core.py
#Except the mlp function
def flatten(nnk, shape):
flatten = tf.reshape(nnk, [-1, shape[1]*shape[2]*shape[3]])
return flatten
def mlp(x): #this function was added as it was missing in the code I used
for i in range(2):
x = tf.layers.dense(inputs=x, units=x.get_shape()[2], activation=tf.nn.relu)
return x
def query_key_value(nnk, shape):
flatten = tf.reshape(nnk, [-1, shape[1]*shape[2], shape[3]])
after_layer = [tf.layers.dense(inputs=flatten, units=shape[3], activation=tf.nn.relu) for i in range(3)]
return after_layer[0], after_layer[1], after_layer[2], flatten
def self_attention(query, key, value):
key_dim_size = float(key.get_shape().as_list()[-1])
key = tf.transpose(key, perm=[0, 2, 1])
S = tf.matmul(query, key) / tf.sqrt(key_dim_size)
attention_weight = tf.nn.softmax(S)
A = tf.matmul(attention_weight, value)
shape = A.get_shape()
return A, attention_weight, [s.value for s in shape]
def layer_normalization(x):
feature_shape = x.get_shape()[-1:]
mean, variance = tf.nn.moments(x, [2], keep_dims=True)
beta = tf.Variable(tf.zeros(feature_shape), trainable=False)
gamma = tf.Variable(tf.ones(feature_shape), trainable=False)
return gamma * (x - mean) / tf.sqrt(variance + 1e-8) + beta
def residual(x, inp, residual_time):
x = x + inp
x = layer_normalization(x)
return x
def feature_wise_max(x):
return tf.reduce_max(x, axis=2)
def relational_module(x):
shape = x.get_shape()
query, key, value, E = query_key_value(x, shape)
normalized_query = layer_normalization(query)
normalized_key = layer_normalization(key)
normalized_value = layer_normalization(value)
A, attention_weight, shape = self_attention(normalized_query, normalized_key, normalized_value)
A_mlp = mlp(A)
E_hat = residual(A_mlp, E, 2)
max_E_hat = feature_wise_max(E_hat)
return max_E_hat
w_init = layers.variance_scaling_initializer()
vgg1 = VGG_Block(inputs)
vgg2 = VGG_Block(vgg1)
if RELATIONAL_LEARNING:
vgg2 = relational_module(vgg2) #We add relational module in here
#An error occurs here because of the size
conv3 = layers.conv2d(inputs=vgg2, padding="VALID", num_outputs=RNN_SIZE - GOAL_REPR_SIZE, kernel_size=[2, 2],
stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=None)
flat = tf.nn.relu(layers.flatten(conv3))
goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)
hidden_input = tf.concat([flat, goal_layer], 1)
h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)
d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)
h2 = layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)
d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)
self.h3 = tf.nn.relu(d2 + hidden_input)
# Recurrent network for temporal dependencies
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
state_in = (c_in, h_in)
rnn_in = tf.expand_dims(self.h3, [0])
step_size = tf.shape(inputs)[:1]
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
state_out = (lstm_c[:1, :], lstm_h[:1, :])
self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])
policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,
weights_initializer=normalized_columns_initializer(1. / float(a_size)),
biases_initializer=None, activation_fn=None)
policy = tf.nn.softmax(policy_layer)
policy_sig = tf.sigmoid(policy_layer)
value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
weights_initializer=normalized_columns_initializer(1.0), biases_initializer=None,
activation_fn=None)
return policy, value, state_out, state_in, state_init, policy_sig
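# --- Editorial usage sketch (hedged) ---
# A minimal example of how an ACNet might be constructed for training. The
# scope name, action-space size, observation shape and learning rate are
# illustrative assumptions, not values taken from this repository.
def build_example_network():
    trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
    # Local and global scope coincide here, so the network applies gradients
    # to its own variables; a real A3C setup would pass distinct scopes.
    return ACNet('global', a_size=5, trainer=trainer, TRAINING=True,
                 NUM_CHANNEL=8, OBS_SIZE=11, GLOBAL_NET_SCOPE='global')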
| 50.962025
| 223
| 0.621792
|
import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np
GRAD_CLIP = 10.0
KEEP_PROB1 = 1
KEEP_PROB2 = 1
RNN_SIZE = 512
GOAL_REPR_SIZE = 12
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
class ACNet:
def __init__(self, scope, a_size, trainer, TRAINING, NUM_CHANNEL, OBS_SIZE, GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False, RELATIONAL_LEARNING=False):
with tf.variable_scope(str(scope) + '/qvalues'):
self.trainer = trainer
self.inputs = tf.placeholder(shape=[None, NUM_CHANNEL, OBS_SIZE, OBS_SIZE], dtype=tf.float32)
self.goal_pos = tf.placeholder(shape=[None, 3], dtype=tf.float32)
self.myinput = tf.transpose(self.inputs, perm=[0, 2, 3, 1])
self.policy, self.value, self.state_out, self.state_in, self.state_init, self.valids = self._build_net(
self.myinput, self.goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING)
if TRAINING:
self.actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)
self.train_valid = tf.placeholder(shape=[None, a_size], dtype=tf.float32)
self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])
self.train_value = tf.placeholder(tf.float32, [None])
self.train_policy = tf.placeholder(tf.float32, [None])
self.train_imitation = tf.placeholder(tf.float32, [None])
self.optimal_actions = tf.placeholder(tf.int32, [None])
self.optimal_actions_onehot = tf.one_hot(self.optimal_actions, a_size, dtype=tf.float32)
self.train_valids= tf.placeholder(tf.float32, [None,1])
self.value_loss = 0.1 * tf.reduce_mean(
self.train_value * tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))
self.entropy = - tf.reduce_mean(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))
self.policy_loss = - 0.5 * tf.reduce_mean(self.train_policy*
tf.log(tf.clip_by_value(self.responsible_outputs, 1e-15, 1.0)) * self.advantages)
self.valid_loss = - 16 * tf.reduce_mean(self.train_valids * tf.log(tf.clip_by_value(self.valids, 1e-10, 1.0)) * \
self.train_valid + tf.log(
tf.clip_by_value(1 - self.valids, 1e-10, 1.0)) * (1 - self.train_valid))
self.loss = self.value_loss + self.policy_loss + self.valid_loss - self.entropy * 0.01
self.imitation_loss = 0 * self.value_loss + tf.reduce_mean(self.train_imitation*
tf.keras.backend.categorical_crossentropy(self.optimal_actions_onehot, self.policy))
local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')
self.gradients = tf.gradients(self.loss, local_vars)
self.var_norms = tf.global_norm(local_vars)
self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, GRAD_CLIP)
global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE + '/qvalues')
if self.trainer:
self.apply_grads = self.trainer.apply_gradients(zip(self.grads, global_vars))
self.local_vars = local_vars
self.i_gradients = tf.gradients(self.imitation_loss, local_vars)
self.i_var_norms = tf.global_norm(local_vars)
self.i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)
if self.trainer:
self.apply_imitation_grads = self.trainer.apply_gradients(zip(self.i_grads, global_vars))
if GLOBAL_NETWORK:
print("\n\n\n\n is a global network\n\n\n\n")
weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.tempGradients = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32) for w in weightVars]
self.apply_grads = self.trainer.apply_gradients(zip(self.tempGradients, weightVars))
print("Hello World... From " + str(scope))
def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING):
def conv_mlp(inputs, kernal_size, output_size):
inputs = tf.reshape(inputs, [-1, 1, kernal_size, 1])
conv = layers.conv2d(inputs=inputs, padding="VALID", num_outputs=output_size,
kernel_size=[1, kernal_size], stride=1,
data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
return conv
def VGG_Block(inputs):
def conv_2d(inputs, kernal_size, output_size):
conv = layers.conv2d(inputs=inputs, padding="SAME", num_outputs=output_size,
kernel_size=[kernal_size[0], kernal_size[1]], stride=1,
data_format="NHWC", weights_initializer=w_init, activation_fn=tf.nn.relu)
return conv
conv1 = conv_2d(inputs, [3, 3], RNN_SIZE // 4)
conv1a = conv_2d(conv1, [3, 3], RNN_SIZE // 4)
conv1b = conv_2d(conv1a, [3, 3], RNN_SIZE // 4)
pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])
return pool1
def flatten(nnk, shape):
flatten = tf.reshape(nnk, [-1, shape[1]*shape[2]*shape[3]])
return flatten
def mlp(x):
for i in range(2):
x = tf.layers.dense(inputs=x, units=x.get_shape()[2], activation=tf.nn.relu)
return x
def query_key_value(nnk, shape):
flatten = tf.reshape(nnk, [-1, shape[1]*shape[2], shape[3]])
after_layer = [tf.layers.dense(inputs=flatten, units=shape[3], activation=tf.nn.relu) for i in range(3)]
return after_layer[0], after_layer[1], after_layer[2], flatten
def self_attention(query, key, value):
key_dim_size = float(key.get_shape().as_list()[-1])
key = tf.transpose(key, perm=[0, 2, 1])
S = tf.matmul(query, key) / tf.sqrt(key_dim_size)
attention_weight = tf.nn.softmax(S)
A = tf.matmul(attention_weight, value)
shape = A.get_shape()
return A, attention_weight, [s.value for s in shape]
def layer_normalization(x):
feature_shape = x.get_shape()[-1:]
mean, variance = tf.nn.moments(x, [2], keep_dims=True)
beta = tf.Variable(tf.zeros(feature_shape), trainable=False)
gamma = tf.Variable(tf.ones(feature_shape), trainable=False)
return gamma * (x - mean) / tf.sqrt(variance + 1e-8) + beta
def residual(x, inp, residual_time):
x = x + inp
x = layer_normalization(x)
return x
def feature_wise_max(x):
return tf.reduce_max(x, axis=2)
def relational_module(x):
shape = x.get_shape()
query, key, value, E = query_key_value(x, shape)
normalized_query = layer_normalization(query)
normalized_key = layer_normalization(key)
normalized_value = layer_normalization(value)
A, attention_weight, shape = self_attention(normalized_query, normalized_key, normalized_value)
A_mlp = mlp(A)
E_hat = residual(A_mlp, E, 2)
max_E_hat = feature_wise_max(E_hat)
return max_E_hat
w_init = layers.variance_scaling_initializer()
vgg1 = VGG_Block(inputs)
vgg2 = VGG_Block(vgg1)
if RELATIONAL_LEARNING:
vgg2 = relational_module(vgg2)
conv3 = layers.conv2d(inputs=vgg2, padding="VALID", num_outputs=RNN_SIZE - GOAL_REPR_SIZE, kernel_size=[2, 2],
stride=1, data_format="NHWC", weights_initializer=w_init, activation_fn=None)
flat = tf.nn.relu(layers.flatten(conv3))
goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)
hidden_input = tf.concat([flat, goal_layer], 1)
h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)
d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)
h2 = layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)
d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)
self.h3 = tf.nn.relu(d2 + hidden_input)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)
c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
state_init = [c_init, h_init]
c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
state_in = (c_in, h_in)
rnn_in = tf.expand_dims(self.h3, [0])
step_size = tf.shape(inputs)[:1]
state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)
lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,
time_major=False)
lstm_c, lstm_h = lstm_state
state_out = (lstm_c[:1, :], lstm_h[:1, :])
self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])
policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,
weights_initializer=normalized_columns_initializer(1. / float(a_size)),
biases_initializer=None, activation_fn=None)
policy = tf.nn.softmax(policy_layer)
policy_sig = tf.sigmoid(policy_layer)
value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,
weights_initializer=normalized_columns_initializer(1.0), biases_initializer=None,
activation_fn=None)
return policy, value, state_out, state_in, state_init, policy_sig
| true
| true
|
1c44fff0dab22be13688f184324423bc17c6ff1b
| 1,546
|
py
|
Python
|
sktimeline/models/user.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 2
|
2016-06-14T17:02:42.000Z
|
2016-10-24T14:49:25.000Z
|
sktimeline/models/user.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 3
|
2016-06-27T13:20:53.000Z
|
2017-03-18T14:21:27.000Z
|
sktimeline/models/user.py
|
aaronmauro/sktimeline
|
3a83b8973959c2d6bf49021cd8efb0ead81b9395
|
[
"MIT"
] | 2
|
2016-06-14T17:03:05.000Z
|
2016-09-01T14:18:44.000Z
|
from sktimeline import db
from passlib.hash import sha256_crypt
class User(db.Model):
__tablename__ = 'users'
    uid = db.Column(db.Integer, primary_key=True) #todo: maybe write migration to rename to id to be consistent
username = db.Column(db.String(20), unique=True, default=None)
# todo: write migration to name `password`
passwords = db.Column(db.String(100), default=None)
email = db.Column(db.String(50), default=None)
settings = db.Column(db.Text, default=None)
tracking = db.Column(db.Text, default=None)
rank = db.Column(db.Integer, default=None)
twitter_feed_settings = db.relationship('TwitterFeedSetting', backref='user', lazy='select')
slack_feed_settings = db.relationship('SlackFeedSetting', backref='user', lazy='select')
github_feed_settings = db.relationship('GithubFeedSetting', backref='user', lazy='select')
def __init__(self, username, password, email):
self.username = username
self.passwords = sha256_crypt.encrypt(password)
self.email = email
def password_is_correct(self, password):
return sha256_crypt.verify(password, self.passwords)
@classmethod
def username_exists(cls, username):
# todo: look if this query.filter method is proper way to query
return cls.query.filter(cls.username == username).count() > 0
@classmethod
def load_by_username(cls, username):
return cls.query.filter_by(username=username).first()
def __repr__(self):
return '<User %r>' % self.username
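# --- Editorial usage sketch (hedged) ---
# Illustrates the intended flow: register a user, then verify a password at
# login. The credentials are made up, and a Flask app context with a bound
# SQLAlchemy session (db.session) is assumed.
def example_register_and_login(username, email, password):
    if User.username_exists(username):
        return False
    db.session.add(User(username, password, email))
    db.session.commit()
    candidate = User.load_by_username(username)
    return candidate is not None and candidate.password_is_correct(password)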
| 37.707317
| 111
| 0.701811
|
from sktimeline import db
from passlib.hash import sha256_crypt
class User(db.Model):
__tablename__ = 'users'
uid = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, default=None)
passwords = db.Column(db.String(100), default=None)
email = db.Column(db.String(50), default=None)
settings = db.Column(db.Text, default=None)
tracking = db.Column(db.Text, default=None)
rank = db.Column(db.Integer, default=None)
twitter_feed_settings = db.relationship('TwitterFeedSetting', backref='user', lazy='select')
slack_feed_settings = db.relationship('SlackFeedSetting', backref='user', lazy='select')
github_feed_settings = db.relationship('GithubFeedSetting', backref='user', lazy='select')
def __init__(self, username, password, email):
self.username = username
self.passwords = sha256_crypt.encrypt(password)
self.email = email
def password_is_correct(self, password):
return sha256_crypt.verify(password, self.passwords)
@classmethod
def username_exists(cls, username):
return cls.query.filter(cls.username == username).count() > 0
@classmethod
def load_by_username(cls, username):
return cls.query.filter_by(username=username).first()
def __repr__(self):
return '<User %r>' % self.username
| true
| true
|
1c45000beb56342f4006bcd9799b6608ea26d13c
| 7,595
|
py
|
Python
|
hsi/gui/widgets/QParamRegionWidget.py
|
morrocoy/hsi
|
da6a2923dff831e927aaea04ba657ddcb1b7e4eb
|
[
"MIT"
] | 1
|
2021-03-29T14:37:03.000Z
|
2021-03-29T14:37:03.000Z
|
hsi/gui/widgets/QParamRegionWidget.py
|
morrocoy/hsi
|
da6a2923dff831e927aaea04ba657ddcb1b7e4eb
|
[
"MIT"
] | null | null | null |
hsi/gui/widgets/QParamRegionWidget.py
|
morrocoy/hsi
|
da6a2923dff831e927aaea04ba657ddcb1b7e4eb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 10:35:08 2021
@author: kpapke
"""
import numpy as np
from ...bindings.Qt import QtWidgets, QtGui, QtCore
from ...log import logmanager
logger = logmanager.getLogger(__name__)
__all__ = ['QParamRegionWidget']
class QParamRegionWidget(QtWidgets.QWidget):
""" Config widget with two spinboxes that control the parameter bounds."""
sigValueChanged = QtCore.Signal(str, list)
def __init__(self, *args, **kwargs):
""" Constructor
"""
parent = kwargs.get('parent', None)
super(QParamRegionWidget, self).__init__(parent=parent)
if len(args) == 1:
kwargs['parent'] = args[0]
elif len(args) == 2:
kwargs['name'] = args[0]
kwargs['parent'] = args[1]
elif len(args) == 3:
kwargs['name'] = args[0]
kwargs['value'] = args[1]
kwargs['parent'] = args[2]
elif len(args) == 4:
kwargs['name'] = args[0]
kwargs['value'] = args[1]
kwargs['scale'] = args[2]
kwargs['parent'] = args[3]
self.name = kwargs.get('name', None) # parameter name
self.label = kwargs.get('label', self.name) # parameter label
self.dvalue = [None, None] # default value
self.scale = kwargs.get('scale', 1.) # scale for presentation
# set default value
val = kwargs.get('value', [None, None])
self.setValueDefault(val)
self.varLabel = QtWidgets.QLabel()
self.lowerBoundSpinBox = QtWidgets.QDoubleSpinBox(self)
self.upperBoundSpinBox = QtWidgets.QDoubleSpinBox(self)
# configure widget views
self._setupViews(*args, **kwargs)
# connect signals
self.lowerBoundSpinBox.valueChanged.connect(
lambda val: self._triggerSigValueChanged((val, None)))
self.upperBoundSpinBox.valueChanged.connect(
lambda val: self._triggerSigValueChanged((None, val)))
def _setupViews(self, *args, **kwargs):
self.mainLayout = QtWidgets.QFormLayout()
self.mainLayout.setContentsMargins(0, 0, 0, 0) # left, top, right, bottom
self.mainLayout.setSpacing(3)
self.setLayout(self.mainLayout)
self.varLabel.setText(self.label)
self.varLabel.setIndent(5)
self.varLabel.setMinimumWidth(50)
self.varLabel.setStyleSheet("border: 0px;")
# maxWidth = kwargs.get('maximumWidth', 67)
singleStep = kwargs.get('singleStep', 0.1)
decimals = kwargs.get('decimals', 3)
# self.setMaximumWidth(maxWidth)
self.setSingleStep(singleStep)
self.setDecimals(decimals)
self.setBounds([-1e5, 1e5])
self.setEnabled(True)
# set value
if self.dvalue[0] is None:
self.lowerBoundSpinBox.setValue(self.lowerBoundSpinBox.minimum())
else:
self.lowerBoundSpinBox.setValue(self.dvalue[0] * self.scale)
if self.dvalue[1] is None:
self.upperBoundSpinBox.setValue(self.upperBoundSpinBox.maximum())
else:
self.upperBoundSpinBox.setValue(self.dvalue[1] * self.scale)
layout = QtGui.QHBoxLayout()
layout.addWidget(self.lowerBoundSpinBox)
layout.addWidget(self.upperBoundSpinBox)
self.mainLayout.addRow(self.varLabel, layout)
def _triggerSigValueChanged(self, bounds=[None, None]):
lbnd, ubnd = bounds
if lbnd is None:
lbnd = self.lowerBoundSpinBox.value()
if ubnd is None:
ubnd = self.upperBoundSpinBox.value()
lbnd = lbnd / self.scale
ubnd = ubnd / self.scale
self.sigValueChanged.emit(self.name, [lbnd, ubnd])
def reset(self):
if self.dvalue[0] is None:
self.lowerBoundSpinBox.setValue(self.lowerBoundSpinBox.minimum())
else:
self.lowerBoundSpinBox.setValue(self.dvalue[0] * self.scale)
if self.dvalue[1] is None:
self.upperBoundSpinBox.setValue(self.upperBoundSpinBox.maximum())
else:
self.upperBoundSpinBox.setValue(self.dvalue[1] * self.scale)
def setDecimals(self, val):
self.lowerBoundSpinBox.setDecimals(val)
self.upperBoundSpinBox.setDecimals(val)
pass
def setEnabled(self, val):
self.lowerBoundSpinBox.setEnabled(val)
self.upperBoundSpinBox.setEnabled(val)
def setLabel(self, label):
self.label = label
self.varLabel.setText(label)
def setMaximumWidth(self, val):
super(QParamRegionWidget, self).setMaximumWidth(val)
width = int((val - 50) // 2 - 8)
self.lowerBoundSpinBox.setMaximumWidth(width)
self.upperBoundSpinBox.setMaximumWidth(width)
def setName(self, name, label=None):
self.name = name
if label is not None:
self.label = label
self.varLabel.setText(label)
def setBounds(self, val=[None, None]):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument `val` must be list, tuple or "
"1D ndarray of length 2. Got {}".format(range))
lbnd, ubnd = bounds
if lbnd is None:
lbnd = -1e5
else:
lbnd = self.scale * lbnd
if ubnd is None:
ubnd = 1e5
else:
ubnd = self.scale * ubnd
self.lowerBoundSpinBox.setRange(lbnd, ubnd)
self.upperBoundSpinBox.setRange(lbnd, ubnd)
def setScale(self, val):
lbnd, ubnd = self.value()
lbnd = lbnd / self.scale * val
ubnd = ubnd / self.scale * val
self.scale = val
self.lowerBoundSpinBox.setValue(lbnd)
self.upperBoundSpinBox.setValue(ubnd)
def setSingleStep(self, val):
self.lowerBoundSpinBox.setSingleStep(val)
self.upperBoundSpinBox.setSingleStep(val)
def setValue(self, val):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument val must be list, tuple or "
"1D ndarray of length 2. Got {}".format(val))
lbnd, ubnd = bounds
if lbnd is None:
lbnd = self.lowerBoundSpinBox.minimum()
else:
lbnd = self.scale * lbnd
if ubnd is None:
ubnd = self.upperBoundSpinBox.maximum()
else:
ubnd = self.scale * ubnd
self.lowerBoundSpinBox.blockSignals(True)
self.upperBoundSpinBox.blockSignals(True)
self.lowerBoundSpinBox.setValue(lbnd)
self.upperBoundSpinBox.setValue(ubnd)
self.lowerBoundSpinBox.blockSignals(False)
self.upperBoundSpinBox.blockSignals(False)
self._triggerSigValueChanged()
def setValueDefault(self, val):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument val must be list, tuple or "
"1D ndarray of length 2. Got {}".format(val))
self.dvalue = bounds
def value(self):
lbnd = 1./self.scale * self.lowerBoundSpinBox.value()
ubnd = 1./self.scale * self.upperBoundSpinBox.value()
return [lbnd, ubnd]
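# --- Editorial usage sketch (hedged) ---
# Shows how the widget might be wired up inside a running Qt application.
# The parameter name, bounds and scale below are illustrative assumptions.
def example_widget(parent=None):
    widget = QParamRegionWidget(name='oxygenation', label='StO2',
                                value=[0.0, 1.0], scale=100., parent=parent)
    widget.setBounds([0.0, 1.0])
    widget.sigValueChanged.connect(
        lambda name, bounds: logger.debug("%s changed to %s", name, bounds))
    return widget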
| 31.255144
| 81
| 0.596972
|
import numpy as np
from ...bindings.Qt import QtWidgets, QtGui, QtCore
from ...log import logmanager
logger = logmanager.getLogger(__name__)
__all__ = ['QParamRegionWidget']
class QParamRegionWidget(QtWidgets.QWidget):
sigValueChanged = QtCore.Signal(str, list)
def __init__(self, *args, **kwargs):
parent = kwargs.get('parent', None)
super(QParamRegionWidget, self).__init__(parent=parent)
if len(args) == 1:
kwargs['parent'] = args[0]
elif len(args) == 2:
kwargs['name'] = args[0]
kwargs['parent'] = args[1]
elif len(args) == 3:
kwargs['name'] = args[0]
kwargs['value'] = args[1]
kwargs['parent'] = args[2]
elif len(args) == 4:
kwargs['name'] = args[0]
kwargs['value'] = args[1]
kwargs['scale'] = args[2]
kwargs['parent'] = args[3]
self.name = kwargs.get('name', None)
self.label = kwargs.get('label', self.name)
self.dvalue = [None, None]
self.scale = kwargs.get('scale', 1.)
val = kwargs.get('value', [None, None])
self.setValueDefault(val)
self.varLabel = QtWidgets.QLabel()
self.lowerBoundSpinBox = QtWidgets.QDoubleSpinBox(self)
self.upperBoundSpinBox = QtWidgets.QDoubleSpinBox(self)
self._setupViews(*args, **kwargs)
self.lowerBoundSpinBox.valueChanged.connect(
lambda val: self._triggerSigValueChanged((val, None)))
self.upperBoundSpinBox.valueChanged.connect(
lambda val: self._triggerSigValueChanged((None, val)))
def _setupViews(self, *args, **kwargs):
self.mainLayout = QtWidgets.QFormLayout()
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.mainLayout.setSpacing(3)
self.setLayout(self.mainLayout)
self.varLabel.setText(self.label)
self.varLabel.setIndent(5)
self.varLabel.setMinimumWidth(50)
self.varLabel.setStyleSheet("border: 0px;")
singleStep = kwargs.get('singleStep', 0.1)
decimals = kwargs.get('decimals', 3)
self.setSingleStep(singleStep)
self.setDecimals(decimals)
self.setBounds([-1e5, 1e5])
self.setEnabled(True)
if self.dvalue[0] is None:
self.lowerBoundSpinBox.setValue(self.lowerBoundSpinBox.minimum())
else:
self.lowerBoundSpinBox.setValue(self.dvalue[0] * self.scale)
if self.dvalue[1] is None:
self.upperBoundSpinBox.setValue(self.upperBoundSpinBox.maximum())
else:
self.upperBoundSpinBox.setValue(self.dvalue[1] * self.scale)
layout = QtGui.QHBoxLayout()
layout.addWidget(self.lowerBoundSpinBox)
layout.addWidget(self.upperBoundSpinBox)
self.mainLayout.addRow(self.varLabel, layout)
def _triggerSigValueChanged(self, bounds=[None, None]):
lbnd, ubnd = bounds
if lbnd is None:
lbnd = self.lowerBoundSpinBox.value()
if ubnd is None:
ubnd = self.upperBoundSpinBox.value()
lbnd = lbnd / self.scale
ubnd = ubnd / self.scale
self.sigValueChanged.emit(self.name, [lbnd, ubnd])
def reset(self):
if self.dvalue[0] is None:
self.lowerBoundSpinBox.setValue(self.lowerBoundSpinBox.minimum())
else:
self.lowerBoundSpinBox.setValue(self.dvalue[0] * self.scale)
if self.dvalue[1] is None:
self.upperBoundSpinBox.setValue(self.upperBoundSpinBox.maximum())
else:
self.upperBoundSpinBox.setValue(self.dvalue[1] * self.scale)
def setDecimals(self, val):
self.lowerBoundSpinBox.setDecimals(val)
self.upperBoundSpinBox.setDecimals(val)
pass
def setEnabled(self, val):
self.lowerBoundSpinBox.setEnabled(val)
self.upperBoundSpinBox.setEnabled(val)
def setLabel(self, label):
self.label = label
self.varLabel.setText(label)
def setMaximumWidth(self, val):
super(QParamRegionWidget, self).setMaximumWidth(val)
width = int((val - 50) // 2 - 8)
self.lowerBoundSpinBox.setMaximumWidth(width)
self.upperBoundSpinBox.setMaximumWidth(width)
def setName(self, name, label=None):
self.name = name
if label is not None:
self.label = label
self.varLabel.setText(label)
def setBounds(self, val=[None, None]):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument `val` must be list, tuple or "
"1D ndarray of length 2. Got {}".format(range))
lbnd, ubnd = bounds
if lbnd is None:
lbnd = -1e5
else:
lbnd = self.scale * lbnd
if ubnd is None:
ubnd = 1e5
else:
ubnd = self.scale * ubnd
self.lowerBoundSpinBox.setRange(lbnd, ubnd)
self.upperBoundSpinBox.setRange(lbnd, ubnd)
def setScale(self, val):
lbnd, ubnd = self.value()
lbnd = lbnd / self.scale * val
ubnd = ubnd / self.scale * val
self.scale = val
self.lowerBoundSpinBox.setValue(lbnd)
self.upperBoundSpinBox.setValue(ubnd)
def setSingleStep(self, val):
self.lowerBoundSpinBox.setSingleStep(val)
self.upperBoundSpinBox.setSingleStep(val)
def setValue(self, val):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument val must be list, tuple or "
"1D ndarray of length 2. Got {}".format(val))
lbnd, ubnd = bounds
if lbnd is None:
lbnd = self.lowerBoundSpinBox.minimum()
else:
lbnd = self.scale * lbnd
if ubnd is None:
ubnd = self.upperBoundSpinBox.maximum()
else:
ubnd = self.scale * ubnd
self.lowerBoundSpinBox.blockSignals(True)
self.upperBoundSpinBox.blockSignals(True)
self.lowerBoundSpinBox.setValue(lbnd)
self.upperBoundSpinBox.setValue(ubnd)
self.lowerBoundSpinBox.blockSignals(False)
self.upperBoundSpinBox.blockSignals(False)
self._triggerSigValueChanged()
def setValueDefault(self, val):
if val is None:
bounds = [None, None]
elif type(val) in [list, tuple, np.ndarray] and len(val) == 2:
bounds = [val[0], val[1]]
else:
raise ValueError("Argument val must be list, tuple or "
"1D ndarray of length 2. Got {}".format(val))
self.dvalue = bounds
def value(self):
lbnd = 1./self.scale * self.lowerBoundSpinBox.value()
ubnd = 1./self.scale * self.upperBoundSpinBox.value()
return [lbnd, ubnd]
| true
| true
|
1c45008460e21527f50631de2053f1a3242bd3bb
| 5,711
|
py
|
Python
|
src/main/python/ttconv/scc/codes/preambles_address_codes.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 66
|
2020-09-25T11:38:28.000Z
|
2022-03-23T15:15:34.000Z
|
src/main/python/ttconv/scc/codes/preambles_address_codes.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 217
|
2020-09-22T22:45:22.000Z
|
2022-03-31T23:02:15.000Z
|
src/main/python/ttconv/scc/codes/preambles_address_codes.py
|
xchange11/ttconv-1
|
6e67172af126fa0e90690044848f300c0173715c
|
[
"BSD-2-Clause"
] | 5
|
2020-09-25T09:24:17.000Z
|
2021-08-08T20:52:26.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2020, Sandflow Consulting LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""SCC Preamble Address Codes"""
from __future__ import annotations
from typing import Optional
from ttconv.scc.codes import SCC_COLOR_MAPPING
from ttconv.style_properties import NamedColors, TextDecorationType, \
FontStyleType, ColorType
_ROW_MAPPING = {
(0x01, 0x40): 1,
(0x01, 0x60): 2,
(0x02, 0x40): 3,
(0x02, 0x60): 4,
(0x05, 0x40): 5,
(0x05, 0x60): 6,
(0x06, 0x40): 7,
(0x06, 0x60): 8,
(0x07, 0x40): 9,
(0x07, 0x60): 10,
(0x00, 0x40): 11,
(0x03, 0x40): 12,
(0x03, 0x60): 13,
(0x04, 0x40): 14,
(0x04, 0x60): 15
}
class _SccPacDescriptionBits:
"""Helper class for SCC PAC description bits handling"""
def __init__(self, bits: int):
self._bits = bits
def get_underline(self) -> bool:
"""Returns whether the PAC description bits sets the underline decoration"""
return self._bits % 2 == 1
def get_italic(self) -> bool:
"""Returns whether the PAC description bits sets the italic style"""
return self._bits in (0x0E, 0x0F)
def get_color(self) -> Optional[ColorType]:
"""Returns the color from the PAC description bits"""
if self._bits not in list(range(0x00, 0x10)):
return None
if self._bits in (0x00, 0x01, 0x0E, 0x0F):
return NamedColors.white.value
return SCC_COLOR_MAPPING.get(self._bits, None)
def get_indent(self) -> Optional[int]:
"""Returns the column offset from the PAC description bits"""
if self._bits in list(range(0x10, 0x20)):
return ((self._bits - 0x10) - (self._bits % 2)) * 2
return None
class SccPreambleAddressCode:
"""SCC PAC definition"""
def __init__(self, byte_1: int, byte_2: int):
row = SccPreambleAddressCode._get_row(byte_1, byte_2)
if row is None:
raise ValueError("Failed to extract PAC row from specified bytes:", hex(byte_1), hex(byte_2))
desc_bits = SccPreambleAddressCode._get_description_bits(byte_2)
if desc_bits is None:
raise ValueError("Failed to extract PAC description from specified bytes:", hex(byte_1), hex(byte_2))
self._row = row
self._color: Optional[ColorType] = desc_bits.get_color()
self._indent: Optional[int] = desc_bits.get_indent()
    self._font_style: Optional[FontStyleType] = FontStyleType.italic if desc_bits.get_italic() else None
self._text_decoration: Optional[TextDecorationType] = \
TextDecorationType(underline=True) if desc_bits.get_underline() else None
self._channel = 2 if byte_1 & 0x08 else 1
def get_row(self) -> int:
"""Returns the PAC row"""
return self._row
def get_indent(self) -> Optional[int]:
"""Returns PAC column offset"""
return self._indent
def get_color(self) -> Optional[ColorType]:
"""Returns PAC color"""
return self._color
def get_font_style(self) -> Optional[FontStyleType]:
"""Returns PAC font style"""
return self._font_style
def get_text_decoration(self) -> Optional[TextDecorationType]:
"""Returns PAC text decoration"""
return self._text_decoration
def get_channel(self):
"""Returns PAC channel"""
return self._channel
def __eq__(self, other) -> bool:
"""Overrides default implementation"""
return isinstance(other, SccPreambleAddressCode) \
and self.get_row() == other.get_row() \
and self.get_indent() == other.get_indent() \
and self.get_color() == other.get_color() \
and self.get_font_style() == other.get_font_style() \
and self.get_text_decoration() == other.get_text_decoration()
@staticmethod
def find(byte_1: int, byte_2: int) -> Optional[SccPreambleAddressCode]:
"""Find the SCC PAC corresponding to the specified bytes"""
try:
return SccPreambleAddressCode(byte_1, byte_2)
except ValueError as _e:
return None
@staticmethod
def _get_row(byte_1: int, byte_2: int) -> Optional[int]:
"""Decodes SCC PAC row number from specified bytes"""
if byte_1 not in list(range(0x10, 0x20)):
return None
row_bits = ((byte_1 & 0x0F) % 0X08, byte_2 & 0x60)
return _ROW_MAPPING.get(row_bits, None)
@staticmethod
def _get_description_bits(byte_2: int) -> Optional[_SccPacDescriptionBits]:
"""Extracts descriptions bits from second byte of the input pair"""
if byte_2 not in list(range(0x40, 0x80)):
return None
return _SccPacDescriptionBits(byte_2 & 0x1F)
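# --- Editorial usage sketch (hedged) ---
# Decodes a single PAC byte pair. The byte values are illustrative; given the
# tables above, the pair 0x11/0x4E selects row 1 in white italics on channel 1.
def example_decode_pac(byte_1=0x11, byte_2=0x4E):
  pac = SccPreambleAddressCode.find(byte_1, byte_2)
  if pac is None:
    return None
  return {
    "row": pac.get_row(),
    "indent": pac.get_indent(),
    "color": pac.get_color(),
    "font_style": pac.get_font_style(),
    "underlined": pac.get_text_decoration() is not None,
    "channel": pac.get_channel(),
  }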
| 34.612121
| 107
| 0.706181
|
from __future__ import annotations
from typing import Optional
from ttconv.scc.codes import SCC_COLOR_MAPPING
from ttconv.style_properties import NamedColors, TextDecorationType, \
FontStyleType, ColorType
_ROW_MAPPING = {
(0x01, 0x40): 1,
(0x01, 0x60): 2,
(0x02, 0x40): 3,
(0x02, 0x60): 4,
(0x05, 0x40): 5,
(0x05, 0x60): 6,
(0x06, 0x40): 7,
(0x06, 0x60): 8,
(0x07, 0x40): 9,
(0x07, 0x60): 10,
(0x00, 0x40): 11,
(0x03, 0x40): 12,
(0x03, 0x60): 13,
(0x04, 0x40): 14,
(0x04, 0x60): 15
}
class _SccPacDescriptionBits:
def __init__(self, bits: int):
self._bits = bits
def get_underline(self) -> bool:
return self._bits % 2 == 1
def get_italic(self) -> bool:
return self._bits in (0x0E, 0x0F)
def get_color(self) -> Optional[ColorType]:
if self._bits not in list(range(0x00, 0x10)):
return None
if self._bits in (0x00, 0x01, 0x0E, 0x0F):
return NamedColors.white.value
return SCC_COLOR_MAPPING.get(self._bits, None)
def get_indent(self) -> Optional[int]:
if self._bits in list(range(0x10, 0x20)):
return ((self._bits - 0x10) - (self._bits % 2)) * 2
return None
class SccPreambleAddressCode:
def __init__(self, byte_1: int, byte_2: int):
row = SccPreambleAddressCode._get_row(byte_1, byte_2)
if row is None:
raise ValueError("Failed to extract PAC row from specified bytes:", hex(byte_1), hex(byte_2))
desc_bits = SccPreambleAddressCode._get_description_bits(byte_2)
if desc_bits is None:
raise ValueError("Failed to extract PAC description from specified bytes:", hex(byte_1), hex(byte_2))
self._row = row
self._color: Optional[ColorType] = desc_bits.get_color()
self._indent: Optional[int] = desc_bits.get_indent()
    self._font_style: Optional[FontStyleType] = FontStyleType.italic if desc_bits.get_italic() else None
self._text_decoration: Optional[TextDecorationType] = \
TextDecorationType(underline=True) if desc_bits.get_underline() else None
self._channel = 2 if byte_1 & 0x08 else 1
def get_row(self) -> int:
return self._row
def get_indent(self) -> Optional[int]:
return self._indent
def get_color(self) -> Optional[ColorType]:
return self._color
def get_font_style(self) -> Optional[FontStyleType]:
return self._font_style
def get_text_decoration(self) -> Optional[TextDecorationType]:
return self._text_decoration
def get_channel(self):
return self._channel
def __eq__(self, other) -> bool:
return isinstance(other, SccPreambleAddressCode) \
and self.get_row() == other.get_row() \
and self.get_indent() == other.get_indent() \
and self.get_color() == other.get_color() \
and self.get_font_style() == other.get_font_style() \
and self.get_text_decoration() == other.get_text_decoration()
@staticmethod
def find(byte_1: int, byte_2: int) -> Optional[SccPreambleAddressCode]:
try:
return SccPreambleAddressCode(byte_1, byte_2)
except ValueError as _e:
return None
@staticmethod
def _get_row(byte_1: int, byte_2: int) -> Optional[int]:
if byte_1 not in list(range(0x10, 0x20)):
return None
row_bits = ((byte_1 & 0x0F) % 0X08, byte_2 & 0x60)
return _ROW_MAPPING.get(row_bits, None)
@staticmethod
def _get_description_bits(byte_2: int) -> Optional[_SccPacDescriptionBits]:
if byte_2 not in list(range(0x40, 0x80)):
return None
return _SccPacDescriptionBits(byte_2 & 0x1F)
| true
| true
|
1c45021962b5771701ee306281be1ae1136b0046
| 612
|
py
|
Python
|
examples/download_video.py
|
kmpm/py-asyncio-goproapi
|
61e259052608657f56615e1dfd6c64e8627425dd
|
[
"MIT"
] | null | null | null |
examples/download_video.py
|
kmpm/py-asyncio-goproapi
|
61e259052608657f56615e1dfd6c64e8627425dd
|
[
"MIT"
] | 1
|
2018-11-07T09:29:31.000Z
|
2018-11-07T12:10:41.000Z
|
examples/download_video.py
|
kmpm/py-asyncio-goproapi
|
61e259052608657f56615e1dfd6c64e8627425dd
|
[
"MIT"
] | null | null | null |
from goprocam import GoProCamera, constants
import asyncio
gpCam = GoProCamera.GoPro()
videos_duration = [10, 30]
async def run():
await gpCam.connect()
await gpCam.video_settings("720p", "50")
await gpCam.gpControlSet(constants.Video.PROTUNE_VIDEO, constants.Video.ProTune.ON)
for i in videos_duration:
print("Recording and downloading " + str(i) + " seconds video")
await gpCam.downloadLastMedia(await gpCam.shoot_video(i), custom_filename="VIDEO_{0}.MP4".format(i))
await asyncio.sleep(2)
await gpCam.quit()
asyncio.get_event_loop().run_until_complete(run())
| 29.142857
| 108
| 0.720588
|
from goprocam import GoProCamera, constants
import asyncio
gpCam = GoProCamera.GoPro()
videos_duration = [10, 30]
async def run():
await gpCam.connect()
await gpCam.video_settings("720p", "50")
await gpCam.gpControlSet(constants.Video.PROTUNE_VIDEO, constants.Video.ProTune.ON)
for i in videos_duration:
print("Recording and downloading " + str(i) + " seconds video")
await gpCam.downloadLastMedia(await gpCam.shoot_video(i), custom_filename="VIDEO_{0}.MP4".format(i))
await asyncio.sleep(2)
await gpCam.quit()
asyncio.get_event_loop().run_until_complete(run())
| true
| true
|
1c450455c52286a916d561148f32bebb4a8a514b
| 3,759
|
py
|
Python
|
revitron/transmissiondata.py
|
YKato521/revitron-for-RevitPythonShell
|
031a87997a00902bf16ca9ef6bb05f5cae26e044
|
[
"MIT"
] | null | null | null |
revitron/transmissiondata.py
|
YKato521/revitron-for-RevitPythonShell
|
031a87997a00902bf16ca9ef6bb05f5cae26e044
|
[
"MIT"
] | null | null | null |
revitron/transmissiondata.py
|
YKato521/revitron-for-RevitPythonShell
|
031a87997a00902bf16ca9ef6bb05f5cae26e044
|
[
"MIT"
] | null | null | null |
"""
This submodule contains the ``TransmissionData`` class
which allows for editing the paths of linked files without opening a model.
"""
import re
import shutil
import os
import sys
class TransmissionData:
"""
A transmission data wrapper.
"""
refs = dict()
def __init__(self, hostPath):
"""
Inits a new TransmissionData instance.
Args:
hostPath (string): The path of the host model
"""
import revitron
if revitron.Document.isOpen(hostPath):
print('The host model must be closed to edit transmission data!')
sys.exit()
self.hostPath = revitron.DB.FilePath(hostPath)
self.data = revitron.DB.TransmissionData.ReadTransmissionData(self.hostPath)
for refId in self.data.GetAllExternalFileReferenceIds():
self.refs[refId.IntegerValue] = revitron.ExternalReference(self.data.GetLastSavedReferenceData(refId))
def listLinks(self):
"""
List all links in the host document.
"""
for _id in self.refs:
ref = self.refs[_id]
print(ref.path)
def moveLinksOnDisk(self, source, target):
"""
Moves all external CAD and RVT links on disk and relinks them.
Args:
source (string): The source directory
target (string): The target directory
"""
import revitron
source = re.sub(r'\\$', '', source) + os.sep
source = '^' + re.escape(source)
target = re.sub(r'\\$', '', target)
target = re.sub(r'\\', os.sep, target)
for _id in self.refs:
refId = revitron.DB.ElementId(_id)
ref = self.refs[_id]
if str(ref.type) in ['RevitLink', 'CADLink']:
if re.search(source, ref.path, re.IGNORECASE):
newPath = target + os.sep + re.sub(source, '', ref.path, flags=re.IGNORECASE)
else:
newPath = target + os.sep + os.path.basename(ref.path)
print(newPath)
if newPath != ref.path:
try:
os.makedirs(os.path.dirname(newPath))
print('Created {}'.format(os.path.dirname(newPath)))
except:
pass
try:
shutil.copyfile(ref.path, newPath)
except:
pass
self.data.SetDesiredReferenceData(refId, revitron.DB.FilePath(newPath), revitron.DB.PathType.Absolute, True)
self.write()
def replaceInPath(self, search, replace):
"""
Search and replace in all link paths of the document.
Args:
search (string): The search string
replace (string): The replacement string
"""
import revitron
for _id in self.refs:
refId = revitron.DB.ElementId(_id)
ref = self.refs[_id]
newPath = ref.path.replace(search, replace)
self.data.SetDesiredReferenceData(refId, revitron.DB.FilePath(newPath), revitron.DB.PathType.Absolute, True)
self.write()
def write(self):
"""
Writes the TransmissionData back to the model.
"""
import revitron
self.data.IsTransmitted = True
revitron.DB.TransmissionData.WriteTransmissionData(self.hostPath, self.data)
| 31.066116
| 128
| 0.514499
|
import re
import shutil
import os
import sys
class TransmissionData:
refs = dict()
def __init__(self, hostPath):
import revitron
if revitron.Document.isOpen(hostPath):
print('The host model must be closed to edit transmission data!')
sys.exit()
self.hostPath = revitron.DB.FilePath(hostPath)
self.data = revitron.DB.TransmissionData.ReadTransmissionData(self.hostPath)
for refId in self.data.GetAllExternalFileReferenceIds():
self.refs[refId.IntegerValue] = revitron.ExternalReference(self.data.GetLastSavedReferenceData(refId))
def listLinks(self):
for _id in self.refs:
ref = self.refs[_id]
print(ref.path)
def moveLinksOnDisk(self, source, target):
import revitron
source = re.sub(r'\\$', '', source) + os.sep
source = '^' + re.escape(source)
target = re.sub(r'\\$', '', target)
target = re.sub(r'\\', os.sep, target)
for _id in self.refs:
refId = revitron.DB.ElementId(_id)
ref = self.refs[_id]
if str(ref.type) in ['RevitLink', 'CADLink']:
if re.search(source, ref.path, re.IGNORECASE):
newPath = target + os.sep + re.sub(source, '', ref.path, flags=re.IGNORECASE)
else:
newPath = target + os.sep + os.path.basename(ref.path)
print(newPath)
if newPath != ref.path:
try:
os.makedirs(os.path.dirname(newPath))
print('Created {}'.format(os.path.dirname(newPath)))
except:
pass
try:
shutil.copyfile(ref.path, newPath)
except:
pass
self.data.SetDesiredReferenceData(refId, revitron.DB.FilePath(newPath), revitron.DB.PathType.Absolute, True)
self.write()
def replaceInPath(self, search, replace):
import revitron
for _id in self.refs:
refId = revitron.DB.ElementId(_id)
ref = self.refs[_id]
newPath = ref.path.replace(search, replace)
self.data.SetDesiredReferenceData(refId, revitron.DB.FilePath(newPath), revitron.DB.PathType.Absolute, True)
self.write()
def write(self):
import revitron
self.data.IsTransmitted = True
revitron.DB.TransmissionData.WriteTransmissionData(self.hostPath, self.data)
| true
| true
|
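A minimal usage sketch for the TransmissionData wrapper listed above. The host path and link directories are invented, revitron only works inside a Revit Python environment, and the import path is an assumption; this shows the intended call order rather than a standalone script.
# Hedged sketch; paths are invented and revitron requires a running Revit session.
from revitron.transmissiondata import TransmissionData   # assumed import path for the submodule above
data = TransmissionData(r'C:\projects\host.rvt')          # the host model must be closed
data.listLinks()                                          # print the current link paths
data.moveLinksOnDisk(r'C:\old\links', r'C:\new\links')    # copies files, relinks, then writes
data.replaceInPath('SERVER_A', 'SERVER_B')                # plain search/replace on paths, then writes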
1c45046affa5436f6f677300552086a4582337bc
| 2,492
|
py
|
Python
|
desktop/core/ext-py/jaeger-client-4.0.0/setup.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/jaeger-client-4.0.0/setup.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/jaeger-client-4.0.0/setup.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version = None
with open('jaeger_client/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
assert version is not None, \
'Could not determine version number from jaeger_client/__init__.py'
setup(
name='jaeger-client',
version=version,
url='https://github.com/jaegertracing/jaeger-client-python',
description='Jaeger Python OpenTracing Tracer implementation',
author='Yuri Shkuro',
author_email='ys@uber.com',
packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']),
include_package_data=True,
license='Apache License 2.0',
zip_safe=False,
keywords='jaeger, tracing, opentracing',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'threadloop>=1,<2',
'thrift',
'tornado>=4.3,<5',
'opentracing>=2.1,<3.0',
],
# Uncomment below if need to test with unreleased version of opentracing
# dependency_links=[
# 'git+ssh://git@github.com/opentracing/opentracing-python.git@BRANCHNAME#egg=opentracing',
# ],
test_suite='tests',
extras_require={
':python_version<"3"': [
'futures',
],
'tests': [
'mock==1.0.1',
'pycurl>=7.43,<8',
# pinned to avoid RemovedInPytest4Warning
'pytest>=3.7.0,<3.8.0',
'pytest-cov==2.5.1',
'coverage<4.4', # can remove after https://bitbucket.org/ned/coveragepy/issues/581/44b1-44-breaking-in-ci
'pytest-timeout==1.3.1',
'pytest-tornado',
# pin <3.2 as otherwise it requires pytest>=3.8
'pytest-benchmark[histogram]>=3.0.0rc1,<3.2',
'pytest-localserver',
'flake8',
'flake8-quotes',
'codecov',
'tchannel>=0.27', # This is only used in python 2
'opentracing_instrumentation>=2,<3',
'prometheus_client==0.3.1',
]
},
)
| 33.226667
| 118
| 0.573435
|
import re
from setuptools import setup, find_packages
version = None
with open('jaeger_client/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
assert version is not None, \
'Could not determine version number from jaeger_client/__init__.py'
setup(
name='jaeger-client',
version=version,
url='https://github.com/jaegertracing/jaeger-client-python',
description='Jaeger Python OpenTracing Tracer implementation',
author='Yuri Shkuro',
author_email='ys@uber.com',
packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']),
include_package_data=True,
license='Apache License 2.0',
zip_safe=False,
keywords='jaeger, tracing, opentracing',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'threadloop>=1,<2',
'thrift',
'tornado>=4.3,<5',
'opentracing>=2.1,<3.0',
],
test_suite='tests',
extras_require={
':python_version<"3"': [
'futures',
],
'tests': [
'mock==1.0.1',
'pycurl>=7.43,<8',
'pytest>=3.7.0,<3.8.0',
'pytest-cov==2.5.1',
'coverage<4.4',
'pytest-timeout==1.3.1',
'pytest-tornado',
'pytest-benchmark[histogram]>=3.0.0rc1,<3.2',
'pytest-localserver',
'flake8',
'flake8-quotes',
'codecov',
'tchannel>=0.27',
'opentracing_instrumentation>=2,<3',
'prometheus_client==0.3.1',
]
},
)
| true
| true
|
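A small, hedged illustration of what the __version__ regex in the setup.py above captures; the sample line is invented to mirror jaeger_client/__init__.py and is not taken from that file.
import re
line = "__version__ = '4.0.0'"          # invented sample line
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
print(m.group(2) if m else None)        # -> 4.0.0 (group 1 is the quote character, group 2 the version)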
1c450494cef97a82cf17c2e517bb7a3972d095f8
| 1,065
|
py
|
Python
|
plugins/k8s/resoto_plugin_k8s/resources/pod.py
|
MrMarvin/cloudkeeper
|
cdca21c1a3b945da6e53a5dbb37a437e1d46f557
|
[
"Apache-2.0"
] | 316
|
2021-07-08T12:54:19.000Z
|
2022-01-12T18:50:17.000Z
|
plugins/k8s/resoto_plugin_k8s/resources/pod.py
|
MrMarvin/cloudkeeper
|
cdca21c1a3b945da6e53a5dbb37a437e1d46f557
|
[
"Apache-2.0"
] | 110
|
2022-01-13T22:27:55.000Z
|
2022-03-30T22:26:50.000Z
|
plugins/k8s/resoto_plugin_k8s/resources/pod.py
|
MrMarvin/cloudkeeper
|
cdca21c1a3b945da6e53a5dbb37a437e1d46f557
|
[
"Apache-2.0"
] | 14
|
2021-08-23T08:29:29.000Z
|
2022-01-08T04:42:28.000Z
|
from kubernetes import client
from .common import KubernetesResource
from resotolib.baseresources import (
BaseInstance,
InstanceStatus,
)
from typing import ClassVar, Dict
from dataclasses import dataclass
@dataclass(eq=False)
class KubernetesPod(KubernetesResource, BaseInstance):
kind: ClassVar[str] = "kubernetes_pod"
api: ClassVar[object] = client.CoreV1Api
list_method: ClassVar[str] = "list_pod_for_all_namespaces"
attr_map: ClassVar[Dict] = {"instance_status": lambda r: r.status.phase}
instance_status_map: ClassVar[Dict[str, InstanceStatus]] = {
"Pending": InstanceStatus.BUSY,
"Running": InstanceStatus.RUNNING,
"Failed": InstanceStatus.TERMINATED,
"Succeeded": InstanceStatus.BUSY,
}
def _instance_status_setter(self, value: str) -> None:
self._instance_status = self.instance_status_map.get(
value, InstanceStatus.UNKNOWN
)
KubernetesPod.instance_status = property(
KubernetesPod._instance_status_getter, KubernetesPod._instance_status_setter
)
| 30.428571
| 80
| 0.73615
|
from kubernetes import client
from .common import KubernetesResource
from resotolib.baseresources import (
BaseInstance,
InstanceStatus,
)
from typing import ClassVar, Dict
from dataclasses import dataclass
@dataclass(eq=False)
class KubernetesPod(KubernetesResource, BaseInstance):
kind: ClassVar[str] = "kubernetes_pod"
api: ClassVar[object] = client.CoreV1Api
list_method: ClassVar[str] = "list_pod_for_all_namespaces"
attr_map: ClassVar[Dict] = {"instance_status": lambda r: r.status.phase}
instance_status_map: ClassVar[Dict[str, InstanceStatus]] = {
"Pending": InstanceStatus.BUSY,
"Running": InstanceStatus.RUNNING,
"Failed": InstanceStatus.TERMINATED,
"Succeeded": InstanceStatus.BUSY,
}
def _instance_status_setter(self, value: str) -> None:
self._instance_status = self.instance_status_map.get(
value, InstanceStatus.UNKNOWN
)
KubernetesPod.instance_status = property(
KubernetesPod._instance_status_getter, KubernetesPod._instance_status_setter
)
| true
| true
|
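A hedged, standalone restatement of the pod-phase mapping used by KubernetesPod above. The InstanceStatus enum here is a simplified stand-in for resotolib's class, so only the lookup-with-fallback behaviour is demonstrated.
from enum import Enum
class InstanceStatus(Enum):              # simplified stand-in, not resotolib's definition
    RUNNING = "running"
    BUSY = "busy"
    TERMINATED = "terminated"
    UNKNOWN = "unknown"
status_map = {
    "Pending": InstanceStatus.BUSY,
    "Running": InstanceStatus.RUNNING,
    "Failed": InstanceStatus.TERMINATED,
    "Succeeded": InstanceStatus.BUSY,
}
print(status_map.get("Running", InstanceStatus.UNKNOWN))   # InstanceStatus.RUNNING
print(status_map.get("Evicted", InstanceStatus.UNKNOWN))   # unknown phases fall back to UNKNOWN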
1c450601610f97294ac129d9fba539453ebcde59
| 3,852
|
py
|
Python
|
utool/util_win32.py
|
Erotemic/utool
|
9fbbceefed71ab4b38ab806b998fefc9b873f205
|
[
"Apache-2.0"
] | 8
|
2017-10-31T03:57:37.000Z
|
2021-01-15T15:40:23.000Z
|
utool/util_win32.py
|
Erotemic/utool
|
9fbbceefed71ab4b38ab806b998fefc9b873f205
|
[
"Apache-2.0"
] | 6
|
2016-07-22T21:49:52.000Z
|
2021-11-08T01:00:40.000Z
|
utool/util_win32.py
|
Erotemic/utool
|
9fbbceefed71ab4b38ab806b998fefc9b873f205
|
[
"Apache-2.0"
] | 6
|
2016-06-15T23:11:44.000Z
|
2021-11-07T14:23:42.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from os.path import join, normpath, pathsep, dirname # NOQA
def get_regstr(regtype, var, val):
regtype_map = {
'REG_EXPAND_SZ': 'hex(2):',
'REG_DWORD': 'dword:',
'REG_BINARY': None,
'REG_MULTI_SZ': None,
'REG_SZ': '',
}
# It is not a good idea to write these variables...
EXCLUDE = ['USERPROFILE', 'USERNAME', 'SYSTEM32']
if var in EXCLUDE:
return ''
def quotes(str_):
return '"' + str_.replace('"', r'\"') + '"'
sanitized_var = quotes(var)
if regtype == 'REG_EXPAND_SZ':
# Weird encoding
#bin_ = binascii.hexlify(hex_)
#val_ = ','.join([''.join(hex2) for hex2 in hex2zip])
#import binascii # NOQA
x = val
ascii_ = x.encode("ascii")
hex_ = ascii_.encode("hex")
hex_ = x.encode("hex")
hex2zip = zip(hex_[0::2], hex_[1::2])
spacezip = [('0', '0')] * len(hex2zip)
hex3zip = zip(hex2zip, spacezip)
sanitized_val = ','.join([''.join(hex2) + ',' + ''.join(space) for hex2, space in hex3zip])
elif regtype == 'REG_DWORD':
sanitized_val = '%08d' % int(val)
else:
sanitized_val = quotes(val)
# Comment with the human-readable nonhex version of the string
comment = '; ' + var + '=' + val
regstr = sanitized_var + '=' + regtype_map[regtype] + sanitized_val
return comment + '\n' + regstr
def make_regfile_str(key, varval_list, rtype):
# Input: list of (var, val) tuples
# key to put varval list in
# rtype - type of registry variables
envtxt_list = ['Windows Registry Editor Version 5.00',
'',
key]
print('\n'.join(map(repr, varval_list)))
varval_list = filter(lambda x: isinstance(x, tuple), varval_list)
vartxt_list = [get_regstr(rtype, var, val) for (var, val) in varval_list]
envtxt_list.extend(vartxt_list)
regfile_str = '\n'.join(envtxt_list)
return regfile_str
def add_to_win32_PATH(script_fpath, *add_path_list):
r"""
Writes a registry script to update the PATH variable into the sync registry
CommandLine:
python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_win32 import * # NOQA
>>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
>>> new_path = ut.get_argval('--newpath', str, default=None)
>>> result = add_to_win32_PATH(script_fpath, new_path)
>>> print(result)
"""
import utool as ut
write_dir = dirname(script_fpath)
key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
rtype = 'REG_EXPAND_SZ'
# Read current PATH values
win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
#new_path_list = unique_ordered(win_pathlist, rob_pathlist)
print('\n'.join(new_path_list))
pathtxt = pathsep.join(new_path_list)
varval_list = [('Path', pathtxt)]
regfile_str = make_regfile_str(key, varval_list, rtype)
ut.view_directory(write_dir)
print(regfile_str)
ut.writeto(script_fpath, regfile_str, mode='wb')
print('Please have an admin run the script. You may need to restart')
if __name__ == '__main__':
"""
CommandLine:
python -m utool.util_win32
python -m utool.util_win32 --allexamples
python -m utool.util_win32 --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| 36.685714
| 111
| 0.630322
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from os.path import join, normpath, pathsep, dirname
def get_regstr(regtype, var, val):
regtype_map = {
'REG_EXPAND_SZ': 'hex(2):',
'REG_DWORD': 'dword:',
'REG_BINARY': None,
'REG_MULTI_SZ': None,
'REG_SZ': '',
}
EXCLUDE = ['USERPROFILE', 'USERNAME', 'SYSTEM32']
if var in EXCLUDE:
return ''
def quotes(str_):
return '"' + str_.replace('"', r'\"') + '"'
sanitized_var = quotes(var)
if regtype == 'REG_EXPAND_SZ':
x = val
ascii_ = x.encode("ascii")
hex_ = ascii_.encode("hex")
hex_ = x.encode("hex")
hex2zip = zip(hex_[0::2], hex_[1::2])
spacezip = [('0', '0')] * len(hex2zip)
hex3zip = zip(hex2zip, spacezip)
sanitized_val = ','.join([''.join(hex2) + ',' + ''.join(space) for hex2, space in hex3zip])
elif regtype == 'REG_DWORD':
sanitized_val = '%08d' % int(val)
else:
sanitized_val = quotes(val)
comment = '; ' + var + '=' + val
regstr = sanitized_var + '=' + regtype_map[regtype] + sanitized_val
return comment + '\n' + regstr
def make_regfile_str(key, varval_list, rtype):
envtxt_list = ['Windows Registry Editor Version 5.00',
'',
key]
print('\n'.join(map(repr, varval_list)))
varval_list = filter(lambda x: isinstance(x, tuple), varval_list)
vartxt_list = [get_regstr(rtype, var, val) for (var, val) in varval_list]
envtxt_list.extend(vartxt_list)
regfile_str = '\n'.join(envtxt_list)
return regfile_str
def add_to_win32_PATH(script_fpath, *add_path_list):
import utool as ut
write_dir = dirname(script_fpath)
key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
rtype = 'REG_EXPAND_SZ'
win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
print('\n'.join(new_path_list))
pathtxt = pathsep.join(new_path_list)
varval_list = [('Path', pathtxt)]
regfile_str = make_regfile_str(key, varval_list, rtype)
ut.view_directory(write_dir)
print(regfile_str)
ut.writeto(script_fpath, regfile_str, mode='wb')
print('Please have an admin run the script. You may need to restart')
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
import utool as ut
ut.doctest_funcs()
| true
| true
|
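The REG_EXPAND_SZ branch of get_regstr above relies on str.encode("hex"), which only exists on Python 2. A hedged Python 3 sketch of the same comma-separated hex layout (e.g. "41,00,42,00" for "AB") could look like this; whether the original intended a trailing null terminator is unclear, so none is added here either.
def reg_expand_sz_hex(val):
    # bytes.hex() replaces the Python 2 str.encode('hex') used above
    pairs = val.encode('ascii').hex()                       # 'AB' -> '4142'
    return ','.join(pairs[i:i + 2] + ',00' for i in range(0, len(pairs), 2))
print(reg_expand_sz_hex('AB'))                              # 41,00,42,00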
1c4506292da685c618215c514c153bc431358b30
| 914
|
py
|
Python
|
helpers.py
|
maxwelldemaio/books
|
adeeb85cc8bd19198dd0ba430d4fb26b5a96b60e
|
[
"MIT"
] | null | null | null |
helpers.py
|
maxwelldemaio/books
|
adeeb85cc8bd19198dd0ba430d4fb26b5a96b60e
|
[
"MIT"
] | null | null | null |
helpers.py
|
maxwelldemaio/books
|
adeeb85cc8bd19198dd0ba430d4fb26b5a96b60e
|
[
"MIT"
] | 1
|
2021-03-01T05:59:33.000Z
|
2021-03-01T05:59:33.000Z
|
import json
import os
import requests
from flask import redirect, render_template, session
from functools import wraps
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return render_template("apology.html")
return f(*args, **kwargs)
return decorated_function
# Obtain response JSON from GoodReads API
def obtain_response(isbn):
res = requests.get("https://www.goodreads.com/book/review_counts.json",
params={"key": os.getenv("API_KEY"), "isbns": f"{isbn}"})
data = res.json()
ratingsCount = data["books"][0]["ratings_count"]
averageRating = data["books"][0]["average_rating"]
return [ratingsCount, averageRating]
def hashPass(password):
pass
| 24.702703
| 80
| 0.666302
|
import json
import os
import requests
from flask import redirect, render_template, session
from functools import wraps
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return render_template("apology.html")
return f(*args, **kwargs)
return decorated_function
def obtain_response(isbn):
res = requests.get("https://www.goodreads.com/book/review_counts.json",
params={"key": os.getenv("API_KEY"), "isbns": f"{isbn}"})
data = res.json()
ratingsCount = data["books"][0]["ratings_count"]
averageRating = data["books"][0]["average_rating"]
return [ratingsCount, averageRating]
def hashPass(password):
pass
| true
| true
|
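A hedged sketch of wiring the login_required decorator above onto a Flask route; the app object, route name, and secret key are assumptions for illustration, not part of the original helpers.py.
from flask import Flask, session
from helpers import login_required      # assumes the module above is importable as 'helpers'
app = Flask(__name__)
app.secret_key = "change-me"            # sessions need a secret key
@app.route("/books")
@login_required                         # renders apology.html unless session["user_id"] is set
def books():
    return "reading list"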
1c4506701b04228b402dcf017737b7b97e102a97
| 5,977
|
py
|
Python
|
2017/iker/day15.py
|
bbglab/adventofcode
|
65b6d8331d10f229b59232882d60024b08d69294
|
[
"MIT"
] | null | null | null |
2017/iker/day15.py
|
bbglab/adventofcode
|
65b6d8331d10f229b59232882d60024b08d69294
|
[
"MIT"
] | null | null | null |
2017/iker/day15.py
|
bbglab/adventofcode
|
65b6d8331d10f229b59232882d60024b08d69294
|
[
"MIT"
] | 3
|
2016-12-02T09:20:42.000Z
|
2021-12-01T13:31:07.000Z
|
"""
--- Day 15: Dueling Generators ---
Here, you encounter a pair of dueling generators. The generators, called generator A and generator B, are trying to agree on a sequence of numbers. However, one of them is malfunctioning, and so the sequences don't always match.
As they do this, a judge waits for each of them to generate its next value, compares the lowest 16 bits of both values, and keeps track of the number of times those parts of the values match.
The generators both work on the same principle. To create its next value, a generator will take the previous value it produced, multiply it by a factor (generator A uses 16807; generator B uses 48271), and then keep the remainder of dividing that resulting product by 2147483647. That final remainder is the value it produces next.
To calculate each generator's first value, it instead uses a specific starting value as its "previous value" (as listed in your puzzle input).
For example, suppose that for starting values, generator A uses 65, while generator B uses 8921. Then, the first five pairs of generated values are:
--Gen. A-- --Gen. B--
1092455 430625591
1181022009 1233683848
245556042 1431495498
1744312007 137874439
1352636452 285222916
In binary, these pairs are (with generator A's value first in each pair):
00000000000100001010101101100111
00011001101010101101001100110111
01000110011001001111011100111001
01001001100010001000010110001000
00001110101000101110001101001010
01010101010100101110001101001010
01100111111110000001011011000111
00001000001101111100110000000111
01010000100111111001100000100100
00010001000000000010100000000100
Here, you can see that the lowest (here, rightmost) 16 bits of the third value match: 1110001101001010. Because of this one match, after processing these five pairs, the judge would have added only 1 to its total.
To get a significant sample, the judge would like to consider 40 million pairs. (In the example above, the judge would eventually find a total of 588 pairs that match in their lowest 16 bits.)
After 40 million pairs, what is the judge's final count?
--- Part Two ---
In the interest of trying to align a little better, the generators get more picky about the numbers they actually give to the judge.
They still generate values in the same way, but now they only hand a value to the judge when it meets their criteria:
Generator A looks for values that are multiples of 4.
Generator B looks for values that are multiples of 8.
Each generator functions completely independently: they both go through values entirely on their own, only occasionally handing an acceptable value to the judge, and otherwise working through the same sequence of values as before until they find one.
The judge still waits for each generator to provide it with a value before comparing them (using the same comparison method as before). It keeps track of the order it receives values; the first values from each generator are compared, then the second values from each generator, then the third values, and so on.
Using the example starting values given above, the generators now produce the following first five values each:
--Gen. A-- --Gen. B--
1352636452 1233683848
1992081072 862516352
530830436 1159784568
1980017072 1616057672
740335192 412269392
These values have the following corresponding binary values:
01010000100111111001100000100100
01001001100010001000010110001000
01110110101111001011111010110000
00110011011010001111010010000000
00011111101000111101010001100100
01000101001000001110100001111000
01110110000001001010100110110000
01100000010100110001010101001000
00101100001000001001111001011000
00011000100100101011101101010000
Unfortunately, even though this change makes more bits similar on average, none of these values' lowest 16 bits match. Now, it's not until the 1056th pair that the judge finds the first match:
--Gen. A-- --Gen. B--
1023762912 896885216
00111101000001010110000111100000
00110101011101010110000111100000
This change makes the generators much slower, and the judge is getting impatient; it is now only willing to consider 5 million pairs. (Using the values from the example above, after five million pairs, the judge would eventually find a total of 309 pairs that match in their lowest 16 bits.)
After 5 million pairs, but using this new generator logic, what is the judge's final count?
"""
factor_A = 16807
factor_B = 48271
divider = 2147483647
test_start_value_A = 65
test_start_value_B = 8921
input_A = 116
input_B = 299
def generator(start_value, factor):
val = start_value
while True:
val = val * factor % divider
yield val
def compare(start_A, start_B, rounds):
matches = 0
for i, values in enumerate(zip(generator(start_A, factor_A), generator(start_B, factor_B))):
if i >= rounds:
return matches
else:
vA, vB = values
if vA.to_bytes(100, 'big')[-2:] == vB.to_bytes(100, 'big')[-2:]:
matches += 1
def test1():
assert 588 == compare(test_start_value_A, test_start_value_B, 40*10**6)
def part1():
print(compare(input_A, input_B, 40*10**6))
def picky_generator(start_value, factor, multipleof):
val = start_value
while True:
val = val * factor % divider
if val % multipleof == 0:
yield val
def compare2(start_A, start_B, rounds):
matches = 0
for i, values in enumerate(zip(picky_generator(start_A, factor_A, 4), picky_generator(start_B, factor_B, 8))):
if i >= rounds:
return matches
else:
vA, vB = values
if vA.to_bytes(100, 'big')[-2:] == vB.to_bytes(100, 'big')[-2:]:
matches += 1
def test2():
assert 309 == compare2(test_start_value_A, test_start_value_B, 5*10**6)
def part2():
print(compare2(input_A, input_B, 5*10**6))
if __name__ == '__main__':
# test1()
# part1()
# test2()
part2()
| 35.577381
| 331
| 0.752886
|
factor_A = 16807
factor_B = 48271
divider = 2147483647
test_start_value_A = 65
test_start_value_B = 8921
input_A = 116
input_B = 299
def generator(start_value, factor):
val = start_value
while True:
val = val * factor % divider
yield val
def compare(start_A, start_B, rounds):
matches = 0
for i, values in enumerate(zip(generator(start_A, factor_A), generator(start_B, factor_B))):
if i >= rounds:
return matches
else:
vA, vB = values
if vA.to_bytes(100, 'big')[-2:] == vB.to_bytes(100, 'big')[-2:]:
matches += 1
def test1():
assert 588 == compare(test_start_value_A, test_start_value_B, 40*10**6)
def part1():
print(compare(input_A, input_B, 40*10**6))
def picky_generator(start_value, factor, multipleof):
val = start_value
while True:
val = val * factor % divider
if val % multipleof == 0:
yield val
def compare2(start_A, start_B, rounds):
matches = 0
for i, values in enumerate(zip(picky_generator(start_A, factor_A, 4), picky_generator(start_B, factor_B, 8))):
if i >= rounds:
return matches
else:
vA, vB = values
if vA.to_bytes(100, 'big')[-2:] == vB.to_bytes(100, 'big')[-2:]:
matches += 1
def test2():
assert 309 == compare2(test_start_value_A, test_start_value_B, 5*10**6)
def part2():
print(compare2(input_A, input_B, 5*10**6))
if __name__ == '__main__':
part2()
| true
| true
|
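A quick, hedged spot-check that the generator() function above reproduces the first sample values quoted in the puzzle text (65 * 16807 mod 2147483647 = 1092455, and 8921 * 48271 mod 2147483647 = 430625591); it assumes the day15.py definitions are in scope.
gen_a = generator(65, factor_A)          # expects 1092455, 1181022009, ...
gen_b = generator(8921, factor_B)        # expects 430625591, 1233683848, ...
print(next(gen_a), next(gen_b))          # 1092455 430625591
print(next(gen_a), next(gen_b))          # 1181022009 1233683848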
1c4507651df4cfeb751a19ff84991c40d5064f9e
| 1,553
|
py
|
Python
|
python/asdl/rust/__main__.py
|
DuckLogic/rust-asdlr
|
e900640f1973f334e30746d7f1caceff703662a7
|
[
"MIT"
] | null | null | null |
python/asdl/rust/__main__.py
|
DuckLogic/rust-asdlr
|
e900640f1973f334e30746d7f1caceff703662a7
|
[
"MIT"
] | 2
|
2022-01-10T02:18:07.000Z
|
2022-01-10T06:41:02.000Z
|
python/asdl/rust/__main__.py
|
DuckLogic/rust-astlib
|
e900640f1973f334e30746d7f1caceff703662a7
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import click
import sys
import asdl
from . import GeneratorMode, write_source, AUTOGEN_MESSAGE
@click.command()
@click.argument('input-filename')
@click.option('--rust-file', '-R', 'rust_filename', type=click.Path(), required=True)
@click.option('--dump-module', '-D', is_flag=True)
@click.option(
'--mode', '-m', 'mode_names',
help="The mode of operation, specifying what to generate (default: only AST)",
type=click.Choice(tuple(mode.value for mode in GeneratorMode)),
default=("ast",), multiple=True
)
def generate(input_filename, rust_filename, mode_names=('ast',), dump_module=False):
input_filename = Path(input_filename)
rust_filename = Path(rust_filename)
modes = [GeneratorMode(name) for name in mode_names]
auto_gen_msg = AUTOGEN_MESSAGE.format("/".join(Path(__file__).parts[-2:]))
mod = asdl.parse(input_filename)
if dump_module:
print('Parsed Module:')
try:
from prettyprinter import register_pretty, \
install_extras, \
pprint as pretty_print
except ImportError:
print("WARN: Failed to import 'prettyprinter'", file=sys.stderr)
pretty_print = print
else:
install_extras()
pretty_print(mod)
if not asdl.check(mod):
sys.exit(1)
with rust_filename.open("w") as rust_file:
rust_file.write(auto_gen_msg)
write_source(mod, rust_file, modes=modes)
print(f"{rust_filename}, regenerated.")
if __name__ == "__main__":
generate()
| 32.354167
| 85
| 0.660657
|
from pathlib import Path
import click
import sys
import asdl
from . import GeneratorMode, write_source, AUTOGEN_MESSAGE
@click.command()
@click.argument('input-filename')
@click.option('--rust-file', '-R', 'rust_filename', type=click.Path(), required=True)
@click.option('--dump-module', '-D', is_flag=True)
@click.option(
'--mode', '-m', 'mode_names',
help="The mode of operation, specifying what to generate (default: only AST)",
type=click.Choice(tuple(mode.value for mode in GeneratorMode)),
default=("ast",), multiple=True
)
def generate(input_filename, rust_filename, mode_names=('ast',), dump_module=False):
input_filename = Path(input_filename)
rust_filename = Path(rust_filename)
modes = [GeneratorMode(name) for name in mode_names]
auto_gen_msg = AUTOGEN_MESSAGE.format("/".join(Path(__file__).parts[-2:]))
mod = asdl.parse(input_filename)
if dump_module:
print('Parsed Module:')
try:
from prettyprinter import register_pretty, \
install_extras, \
pprint as pretty_print
except ImportError:
print("WARN: Failed to import 'prettyprinter'", file=sys.stderr)
pretty_print = print
else:
install_extras()
pretty_print(mod)
if not asdl.check(mod):
sys.exit(1)
with rust_filename.open("w") as rust_file:
rust_file.write(auto_gen_msg)
write_source(mod, rust_file, modes=modes)
print(f"{rust_filename}, regenerated.")
if __name__ == "__main__":
generate()
| true
| true
|
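A hedged way to exercise the click command above without a shell, using click's CliRunner; "generate" refers to the decorated command in the module, the file names are invented, and the asdl package plus the generator module must be importable for this to actually run.
from click.testing import CliRunner
runner = CliRunner()
result = runner.invoke(generate, ["Python.asdl", "--rust-file", "ast.rs", "-m", "ast"])
print(result.exit_code)
print(result.output)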
1c4507b1e693bc14907ab2b1dfd524207e7fbaf4
| 1,076
|
py
|
Python
|
scenarios/camerainseaport/src/list_all_camera.py
|
rdsea/HINC
|
2e94321f2f31b4deff08d08a4c128b958a469a3f
|
[
"Apache-2.0"
] | 1
|
2021-05-18T13:03:47.000Z
|
2021-05-18T13:03:47.000Z
|
scenarios/camerainseaport/src/list_all_camera.py
|
rdsea/HINC
|
2e94321f2f31b4deff08d08a4c128b958a469a3f
|
[
"Apache-2.0"
] | 11
|
2020-07-16T03:17:28.000Z
|
2022-02-12T03:05:48.000Z
|
scenarios/camerainseaport/src/list_all_camera.py
|
rdsea/HINC
|
2e94321f2f31b4deff08d08a4c128b958a469a3f
|
[
"Apache-2.0"
] | 1
|
2018-04-13T07:45:28.000Z
|
2018-04-13T07:45:28.000Z
|
import requests
import sys
import os
import json
import pycurl
from urllib.parse import urlparse
'''
This shows a simple example of dealing with protocol interoperability with a camera.
1) A customer searches for cameras in a location
'''
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--provider_url', default='http://localhost:3000/camera', help='URL of the IoT Camera Provider')
parser.add_argument('--lon', default='108.1494449', help='longitude')
parser.add_argument('--lat', default='16.0723458', help='latitude')
parser.add_argument('--distance', default='10000', help='default in meters')
args = parser.parse_args()
'''
Using camera ID to look for the latest video
'''
def camera_data_handle(camera):
print(camera)
# Search for cameras close to a location
##TODO check values
headers = {
'Cache-Control': "no-cache"
}
url=args.provider_url+"/list"
response = requests.request("GET", url, headers=headers)
#print(response.text)
list_of_cameras =response.json()
for camera in list_of_cameras:
camera_data_handle(camera)
| 25.619048
| 116
| 0.749071
|
import requests
import sys
import os
import json
import pycurl
from urllib.parse import urlparse
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--provider_url', default='http://localhost:3000/camera', help='URL of the IoT Camera Provider')
parser.add_argument('--lon', default='108.1494449', help='longitude')
parser.add_argument('--lat', default='16.0723458', help='latitude')
parser.add_argument('--distance', default='10000', help='default in meters')
args = parser.parse_args()
def camera_data_handle(camera):
print(camera)
headers = {
'Cache-Control': "no-cache"
}
url=args.provider_url+"/list"
response = requests.request("GET", url, headers=headers)
list_of_cameras =response.json()
for camera in list_of_cameras:
camera_data_handle(camera)
| true
| true
|
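The --lon/--lat/--distance arguments in the script above are parsed but never used; a hedged sketch of an actual location filter over the returned camera list might look like the following. The "lat"/"lon" keys on each camera dict are assumptions about the provider's JSON, not something the original confirms.
import math
def haversine_m(lat1, lon1, lat2, lon2):
    # great-circle distance in metres
    r = 6371000.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp, dl = math.radians(lat2 - lat1), math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))
nearby = [c for c in list_of_cameras
          if haversine_m(float(args.lat), float(args.lon), c["lat"], c["lon"]) <= float(args.distance)]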
1c4507d2f3b8880e6d7d9479a647f9a24833791f
| 6,797
|
py
|
Python
|
h1/model/storage_object.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/model/storage_object.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/model/storage_object.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class StorageObject(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
'name': (str,), # noqa: E501
'size': (float,), # noqa: E501
'created_on': (datetime,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
'size': 'size', # noqa: E501
'created_on': 'createdOn', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""StorageObject - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
size (float): [optional] # noqa: E501
created_on (datetime): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 38.619318
| 110
| 0.571281
|
import re
import sys
from h1.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class StorageObject(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
return {
'id': (str,),
'name': (str,),
'size': (float,),
'created_on': (datetime,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id',
'name': 'name',
'size': 'size',
'created_on': 'createdOn',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true
| true
|
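A hedged example of constructing the generated StorageObject model above with keyword arguments matching its attribute_map; the field values are invented.
from datetime import datetime, timezone
obj = StorageObject(id="obj-1", name="backup.tar", size=1048576.0,
                    created_on=datetime(2021, 1, 1, tzinfo=timezone.utc))
print(obj.name, obj.size)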
1c450927f120af0dc58091790586492d57fafa7d
| 29,753
|
py
|
Python
|
lobot.py
|
mrschue/lobot
|
d4e55d6086b2546709190f2e377e83bced58d004
|
[
"MIT"
] | 2
|
2019-03-16T15:32:51.000Z
|
2019-03-20T12:54:03.000Z
|
lobot.py
|
mrschue/lobot
|
d4e55d6086b2546709190f2e377e83bced58d004
|
[
"MIT"
] | 2
|
2020-09-27T17:07:01.000Z
|
2020-09-27T18:12:48.000Z
|
lobot.py
|
mrschue/lobot
|
d4e55d6086b2546709190f2e377e83bced58d004
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import json
from PyInquirer import style_from_dict, prompt
from prettytable import PrettyTable
import os
import subprocess
import boto3
from botocore.exceptions import ClientError
import datetime
import time
import socket
GLOBAL_CONFIG = {}
# Global dictionary that maps AWS usernames to a description of the images that use them.
USERNAME_TO_AMI = {"ec2-user": "For Amazon Linux AMI, Fedora AMI, Suse AMI",
"ubuntu": "For Ubuntu AMI",
"centos": "For Centos AMI",
"admin": "For Debian AMI"}
# Attributes lobot will fetch from the AWS database
STANDARD_ATTRIBUTES = ["Name", "KeyName", "InstanceId", "InstanceType", "PublicIpAddress", "Uptime", "State"]
# This dictionary maps region codes to readable region names.
# https://docs.aws.amazon.com/general/latest/gr/rande.html
REGION_TO_READABLE_NAME = {
"us-east-1": "US East (N. Virginia)",
"us-east-2": "US East (Ohio)",
"us-west-1": "US West (N. California)",
"us-west-2": "US West (Oregon)",
"ap-south-1": "Asia Pacific (Mumbai)",
"ap-northeast-3": "Asia Pacific (Osaka Local)",
"ap-northeast-2": "Asia Pacific (Seoul)",
"ap-southeast-1": "Asia Pacific (Singapore)",
"ap-southeast-2": "Asia Pacific (Sydney)",
"ap-northeast-1": "Asia Pacific (Tokyo)",
"ca-central-1": "Canada (Central)",
"cn-north-1": "China (Beijing)",
"cn-northwest-1": "China (Ningxia)",
"eu-central-1": "EU (Frankfurt)",
"eu-west-1": "EU (Ireland)",
"eu-west-2": "EU (London)",
"eu-west-3": "EU (Paris)",
"eu-north-1": "EU (Stockholm)",
"sa-east-1": "South America (São Paulo)"}
def read_config(filepath=os.path.dirname(os.path.realpath(__file__))+"/config.cfg"):
"""
Auxiliary function to parse the config files.
"""
config_dict = {}
with open(filepath, "r") as config_file:
config_content = config_file.readlines()
for line in config_content:
if line in ("", "\n"):
continue
if line.strip()[0] == "#":
continue
key, value = line.split(":", maxsplit=1)
key = key.strip()
value = value.strip()
if value in ("True", "true", "1"):
value = True
if value in ("False", "false", "0"):
value = False
config_dict[key] = value
return config_dict
def check_port(port):
"""
Checks if a port is available for SSH forwarding.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = False
try:
sock.bind(("0.0.0.0", port))
result = True
except:
result = False
sock.close()
return result
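# Hedged usage note (illustrative only): check_port() probes whether a local port is free,
# so a caller can scan upwards from the default forwarding port before opening an SSH tunnel:
#     candidate = 8889
#     while not check_port(candidate):
#         candidate += 1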
def timedelta_hours_minutes(timedelta):
"""
Formatting function for uptime.
"""
return timedelta.days * 24 + timedelta.seconds//3600, (timedelta.seconds//60)%60
def load_prices(used_instance_types, region_name):
"""
Load current EC2 price-list from AWS.
"""
pricing = boto3.client("pricing")
price_map = {}
known_instance_types = []
product_list = []
for used_type in used_instance_types:
if used_type not in known_instance_types:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
filters = [{'Type' :'TERM_MATCH', 'Field':'operatingSystem', 'Value':'Linux' },
{'Type' :'TERM_MATCH', 'Field':'location', 'Value': location_name},
{'Type' :'TERM_MATCH', 'Field':'instanceType', 'Value':used_type},
{'Type' :'TERM_MATCH', 'Field':'currentGeneration', 'Value':'Yes'}]
product_list += [json.loads(product) for product in pricing.get_products(ServiceCode="AmazonEC2", Filters=filters)["PriceList"]]
for product in product_list:
technical_info = product["product"]["attributes"]
try:
on_demand_info = product["terms"]["OnDemand"]
except KeyError:
continue
funny_key = list(on_demand_info.keys())[0]
if len(on_demand_info.keys()) > 1:
print("ALERT - MANY FUNNY KEYS")
on_demand_info = on_demand_info[funny_key]["priceDimensions"]
funny_key = list(on_demand_info.keys())[0]
if len(on_demand_info.keys()) > 1:
print("ALERT - MANY FUNNY KEYS")
on_demand_info = on_demand_info[funny_key]
price_desc = on_demand_info["description"]
price_unit = on_demand_info["unit"]
price_per_unit_in_usd = float(on_demand_info["pricePerUnit"]["USD"])
if price_per_unit_in_usd == 0:
continue
info_dict = {"pricePerUnit (*)":price_per_unit_in_usd, "unit":price_unit, "instanceFamily":technical_info["instanceFamily"]}
price_map[technical_info["instanceType"]] = info_dict
known_instance_types.append(technical_info["instanceType"])
del pricing
return price_map
def merge_price_map(instances, price_map):
"""
Auxiliary function to merge prices into the table of instances.
"""
for idx, inst in enumerate(instances):
info = price_map.get(inst["InstanceType"], None)
if info is not None:
inst.update(info)
else:
print("Warning: "+str(inst["InstanceType"])+" is not known")
return instances
def imageid_to_name(image_id):
ec2 = boto3.client("ec2")
image_info = ec2.describe_images(ImageIds=[image_id])["Images"][0]
image_name = image_info.get("Name", "")
return image_name
def get_current_instances(interesting_attributes=STANDARD_ATTRIBUTES, include_prices=True, region_name=None):
"""
Fetch all available instances as well as their interesting attributes and possibly price information for
the given region.
"""
assert("InstanceType" in interesting_attributes)
if region_name is None:
ec2 = boto3.client("ec2")
region_name = ec2.meta.region_name
else:
ec2 = boto3.client("ec2", region_name=region_name)
reservations = ec2.describe_instances()["Reservations"]
used_types =[]
instances = []
for res in reservations:
instances += res["Instances"]
# Unpack tags and state
for idx, inst in enumerate(instances):
for attribute in interesting_attributes:
if not attribute in inst:
inst[attribute] = None
if "State" in inst:
inst["State"] = inst["State"]["Name"]
if inst["InstanceType"] not in used_types:
used_types.append(inst["InstanceType"])
if "Uptime" in interesting_attributes:
if inst["State"] != "running":
uptime = timedelta_hours_minutes(datetime.timedelta(0))
else:
uptime = timedelta_hours_minutes(datetime.datetime.now(datetime.timezone.utc) - inst["LaunchTime"])
inst["Uptime"] = "{}h {}m".format(*uptime)
try:
tags = inst["Tags"]
for tag in tags:
inst[tag["Key"]] = tag["Value"]
inst.pop("Tags", None)
except KeyError:
inst["Name"] = ""
try:
if "ImageName" in interesting_attributes:
image_id = inst["ImageId"]
image_name = imageid_to_name(image_id)
inst.pop("ImageId", None)
inst["ImageName"] = image_name
except KeyError:
inst["ImageName"] = ""
placement = inst["Placement"]
for k,v in placement.items():
inst[k] = v
instances[idx] = {k:v for k,v in inst.items() if k in interesting_attributes}
if include_prices:
price_map = load_prices(used_types, region_name=region_name)
instances = merge_price_map(instances, price_map)
del ec2
return (instances, used_types, region_name)
def start_instance(instance, region_name, waiting_periods=7):
"""
Sends the START signal to a stopped instance and waits for the instance to change state to
'RUNNING'.
"""
if instance["State"] in ("running", "pending"):
print("No need to start this one. Maybe have some patience.")
else:
ec2 = boto3.client("ec2", region_name=region_name)
# Do a dryrun first to verify permissions
response = None
try:
ec2.start_instances(InstanceIds=[instance["InstanceId"]], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
# Dry run succeeded, run start_instances without dry run
try:
response = ec2.start_instances(InstanceIds=[instance["InstanceId"]], DryRun=False)
print("START signal sent, waiting for reachability ...")
waiter = ec2.get_waiter("instance_running")
waiter.wait(InstanceIds=[instance["InstanceId"]])
current_info = ec2.describe_instances(InstanceIds=[instance["InstanceId"]])["Reservations"][0]["Instances"][0]
if "PublicIpAddress" in current_info:
print("Instance reachable, address: "+current_info["PublicIpAddress"])
except ClientError as e:
print(e)
del ec2
return response
def stop_instance(instance, region_name):
"""
Sends the STOP signal to a given instance and waits for the instance to change its state
to 'STOPPED'.
"""
confirm_prompt = {
'type': 'confirm',
'message': 'Do you really want to stop \"'+instance["Name"]+'\"?',
'name': 'stop',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["stop"]
if not chosen_confirmation:
print(" ----> Canceling.")
return
if instance["State"] in ("stopped", "stopping"):
print("------> Instance is already stopped or stopping.")
else:
ec2 = boto3.client("ec2", region_name=region_name)
response = None
try:
ec2.stop_instances(InstanceIds=[instance["InstanceId"]], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
try:
response = ec2.stop_instances(InstanceIds=[instance["InstanceId"]], DryRun=False)
print("STOP signal sent, waiting for full stop. This might take a while.")
waiter = ec2.get_waiter("instance_stopped")
waiter.wait(InstanceIds=[instance["InstanceId"]])
print("Instance stopped.")
except ClientError as e:
print(e)
return response
def connect_instance(instance):
"""
This function tries to open an interactive SSH session to the instance.
"""
# Check if key is available
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
if os.path.exists(key_path):
subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]])
else:
raise ValueError("Key"+key_name+".pem is not available in my 'keys' folder.")
def start_jupyter(instance, local_port=8889):
"""
This function tries to SSH onto the instance, remotely start a Jupyter notebook server, and forward the
given local port to it.
"""
# Check if the key is available
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
if os.path.exists(key_path):
output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
if len(output) == 0:
print("Starting jupyter server remotely...")
subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "screen", "-dm", "bash", "-c", "\"jupyter", "notebook", "--no-browser", "--port=8889\""])
time.sleep(3)
output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
print("\t ... done")
else:
print("Jupyter server found, did not start a new server.")
one_up = 0
while (one_up < 3):
if check_port(local_port + one_up):
server_prompt = {
'type': 'list',
'name': 'server',
'message': 'Port '+str(local_port + one_up)+' available. Connect?',
'choices': output
}
jupyter_instance = prompt.prompt(server_prompt)["server"]
remote_hostport = jupyter_instance.split("/")[2]
command = ["nohup", "ssh", "-i", key_path, "-N", "-L", str(local_port + one_up)+":"+remote_hostport, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]]
process = subprocess.Popen(command, preexec_fn=os.setpgrp)
print("Port forwarding PID: "+str(process.pid))
print(jupyter_instance.replace(str(remote_hostport), str(local_port + one_up), 1))
print("")
break
else:
print("Local port "+str(local_port)+" is taken. Maybe you are already connected?")
one_up += 1
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
return output
def change_remote_username():
"""
For interacting with the OS running on the remote, you'll need to know the corresponding username.
Most AMIs have a specific one.
The list is curated as global variable.
"""
global GLOBAL_CONFIG
available_names = [k+" - "+v for k, v in USERNAME_TO_AMI.items()]
username_prompt = {
'type': 'list',
'name': 'username',
'message': 'Current username: '+GLOBAL_CONFIG["aws_username"]+'. Which username do you want use instead?',
'choices': available_names
}
chosen_name = prompt.prompt(username_prompt)["username"].split(" - ")[0]
GLOBAL_CONFIG["aws_username"] = chosen_name
def kill_jupyters(instance):
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
# UNFINISHED
def display_instances(instances, region_name):
"""
This is the core status table of lobot. It displays all available instances for the currently active region.
It will contain the following info:
- Instance ID and Name-tag (if available).
- State of the instance (stopped, stopping, starting, running).
- Type of the instance, its price per unit, and the unit.
- If started, the current uptime.
- Required private key to be available in the 'keys' folder.
- Instance's public IP address.
"""
print("\n")
if region_name is not None:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
print("Instances for region: \n\t\t"+str(region_name)+" ["+location_name+"]\n")
if len(instances) > 0:
keys = sorted(instances[0].keys())
instance_table = PrettyTable(keys)
instances = sorted(instances, key=lambda x: (0 if x["State"] == "running" else 1, x["State"]), reverse=False)
for instance in instances:
items = sorted(instance.items(), key=lambda x: x[0])
instance_table.add_row([v for k,v in items])
print(instance_table)
if GLOBAL_CONFIG["load_prices"]:
print("\t(*)\tlisted prices are in $ and for on-demand Linux (w/o SQL) in region '"+region_name+"' only.\n\t\t They might be unreliable in some cases - please confirm prices at: https://aws.amazon.com/de/ec2/pricing/on-demand/")
print("\n\n")
else:
print("\n\n")
if region_name is not None:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
print("No instances in region '"+str(region_name)+"' ["+location_name+"] available.")
else:
print("No instances in this region.")
print("\n\n")
def change_type(instance, region_name, available_instances):
"""
This creates a prompt to change the type of a given instance.
The available types can be changed in 'instance_types.cfg'.
If one is picked, the type of the instance is changed.
"""
assert(instance["State"] == "stopped")
ec2 = boto3.client("ec2", region_name=region_name)
choices = [k+" :: "+v for k, v in available_instances.items()]
type_prompt = {
'type': 'list',
'name': 'type',
'message': 'Current type: '+instance["InstanceType"]+'. Which type do you want instead?',
'choices': choices
}
chosen_type = prompt.prompt(type_prompt)["type"].split(" :: ")[0]
ec2.modify_instance_attribute(InstanceId=instance["InstanceId"], Attribute='instanceType', Value=chosen_type)
def change_name(instance, region_name):
"""
This creates a prompt for the new name-tag of an instance and changes the name when provided.
"""
ec2 = boto3.client("ec2", region_name)
name_prompt = {
'type': 'input',
'name': 'instance_name',
'message': 'Current name: '+instance["Name"]+'. Which name do you want instead?',
}
chosen_name = prompt.prompt(name_prompt)["instance_name"]
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to change the name \"'+instance["Name"]+'\" to \"'+chosen_name+'\"?',
'name': 'change_name',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["change_name"]
if not chosen_confirmation:
print("-----------> Name was not changed.")
else:
new_name_tag = {"Key":"Name", "Value":chosen_name}
ec2.create_tags(Resources=[instance["InstanceId"]], Tags=[new_name_tag])
print("Name should be changed now!")
time.sleep(0.5)
def deploy(instance):
"""
Takes all files from the 'deploy' folder in the lobot directory and uploads
them to the remote machines '~/lobot/deploy' folder.
"""
print("?")
deploy_path = os.path.dirname(os.path.realpath(__file__))+"/deploy/"
print("\nContent of \"deploy\" folder:")
for filename in os.listdir(deploy_path):
print("\t\t"+filename)
print("\t\t - - -")
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to copy the content of the \"deploy\" folder to the remote machine?',
'name': 'deploy',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["deploy"]
if chosen_confirmation:
if not os.path.exists(deploy_path):
print("No \"deploy\" folder in the script's directory \""+os.path.dirname(os.path.realpath(__file__)))
return
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
command = ["scp", "-i", key_path, "-r", deploy_path+".", GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]+":lobot/deploy/"]
if os.path.exists(key_path):
ls_command = ["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "ls", "-ll", "~/lobot/deploy"]
ls_returncode = subprocess.call(ls_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if ls_returncode == 2:
return_code = subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "mkdir", "~/lobot", ";", "mkdir", "~/lobot/deploy"])
if subprocess.call(command) == 0:
print("Copied to \"~/lobot/deploy\" on remote machine.")
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
def fetch(instance):
"""
Fetches all files from '~/lobot/fetch' on the remote machine and puts
them in './fetch' on the local machine.
"""
fetch_path = os.path.dirname(os.path.realpath(__file__))+"/fetch/"
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
command = ["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "ls", "-ll", "~/lobot/fetch"]
if os.path.exists(key_path):
print("Output of \"ls -ll ~/lobot/fetch\" on remote machine:")
return_code = subprocess.call(command)
if return_code == 2:
return_code = subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "mkdir", "~/lobot", ";", "mkdir", "~/lobot/fetch"])
print("\"~/lobot/fetch\" folder created remotely, is empty")
return
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to copy the content of the remote \"~/lobot/fetch\" folder to the local machine?',
'name': 'fetch',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["fetch"]
if chosen_confirmation:
if not os.path.exists(fetch_path):
print("No \"fetch\" folder in the script's directory \""+os.path.dirname(os.path.realpath(__file__)))
return
command = ["scp", "-i", key_path, "-r", GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]+":lobot/fetch/", fetch_path]
if os.path.exists(key_path):
subprocess.call(command)
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
def ask_instance(instances):
"""
Creates the prompt for picking from the list of instances available in the current region.
"""
sorted_list = sorted(instances, key=lambda x: x["State"])
choices = [inst["InstanceId"]+" :: ("+inst["State"]+", "+inst["Name"]+")" for inst in sorted_list] + ["Change region", "Change username (SSH)"]
instance_prompt = {
'type': 'list',
'name': 'instance',
'message': 'Choose instance, change region, or change SSH username:',
'choices': choices
}
answer = prompt.prompt(instance_prompt)['instance'].split(" :: ")[0]
return answer
def change_region(current_region_name):
"""
    lobot only ever operates on one region at a time.
This function creates a prompt to pick from all available regions and indicates the one
that is currently active.
"""
ec2 = boto3.client("ec2")
known_regions = [region['RegionName'] for region in ec2.describe_regions()['Regions']]
for region_idx, region_name in enumerate(known_regions):
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
known_regions[region_idx] = region_name + " - " + location_name
region_prompt = {
'type': 'list',
'name': 'region',
'message': 'Current region: '+str(current_region_name)+'. Which region do you want instead?',
'choices': known_regions
}
chosen_region = prompt.prompt(region_prompt)['region'].split(" - ")[0]
return chosen_region
def detailed_info(instance, region_name):
"""
Prints detailed info for a given instance, such as:
- used AMI
- Availability Zone
- Number of CPU cores
"""
ec2 = boto3.client("ec2", region_name=region_name)
current_info = ec2.describe_instances(InstanceIds=[instance["InstanceId"]])["Reservations"][0]["Instances"][0]
relevant_info = {}
table = PrettyTable(["Key", "Value"])
relevant_info["AMI Id"] = current_info["ImageId"]
try:
relevant_info["AMI Name"] = imageid_to_name(relevant_info["AMI Id"])
except ClientError:
print("\nAMI Id could not be mapped to name ..")
relevant_info["Availability Zone"] = current_info["Placement"]["AvailabilityZone"]
relevant_info["Number of CPU cores"] = current_info["CpuOptions"]["CoreCount"]
print("")
for info_name, info_content in relevant_info.items():
table.add_row([info_name, info_content])
print(table)
if __name__ == "__main__":
GLOBAL_CONFIG = read_config()
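    # read_config() returns a plain dict parsed from "key: value" lines, skipping "#" comments
    # and coercing true/false-style values to booleans. The keys this script actually looks up
    # are aws_username, aws_region and load_prices, so a minimal config.cfg could look like
    # this (illustrative values only):
    #   aws_username: ubuntu
    #   aws_region: eu-central-1
    #   load_prices: True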
recommended_instance_types = read_config(os.path.dirname(os.path.realpath(__file__))+"/instance_types.cfg")
# If not specified, takes default configured region.
try:
client_region_name = GLOBAL_CONFIG["aws_region"]
    except KeyError:
client_region_name = boto3.client("ec2").meta.region_name
# Check if there is a "keys" folder. If not, create one
print("\n")
created_folder = False
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys"
if not os.path.isdir(key_path):
print("No \"keys\" folder. Creating one ...")
os.mkdir(key_path)
        created_folder = True
fetch_path = os.path.dirname(os.path.realpath(__file__))+"/fetch"
if not os.path.isdir(fetch_path):
print("No \"fetch\" folder. Creating one ...")
os.mkdir(fetch_path)
created_folder = True
deploy_path = os.path.dirname(os.path.realpath(__file__))+"/deploy"
if not os.path.isdir(deploy_path):
print("No \"deploy\" folder. Creating one ...")
os.mkdir(deploy_path)
created_folder = True
if created_folder:
input("\nENTER to continue ..")
while True:
client_region_name = GLOBAL_CONFIG["aws_region"]
os.system("clear")
#print("Loading instances")
instances, used_types, client_region_name = get_current_instances(region_name=client_region_name, include_prices=GLOBAL_CONFIG["load_prices"])
#print("\t ... done")
display_instances(instances, region_name=client_region_name)
time.sleep(0.5)
# Choose instance
chosen_instance = ask_instance(instances)
if chosen_instance == "Change region":
GLOBAL_CONFIG["aws_region"] = change_region(current_region_name=client_region_name)
time.sleep(1)
continue
elif chosen_instance == "Change username (SSH)":
change_remote_username()
time.sleep(1)
continue
else:
for inst in instances:
if inst["InstanceId"] == chosen_instance:
chosen_instance = inst
# Choose action
options = []
options.append("Details")
instance_name = chosen_instance["Name"]
deploy_option_name = "Deploy data to \""+str(instance_name)+"\""
        fetch_option_name = "Fetch data from \""+str(instance_name)+"\""
if chosen_instance["State"] == "running" and chosen_instance["PublicIpAddress"] is not None:
options.append("Open shell (SSH)")
options.append("Jupyter")
options.append(deploy_option_name)
options.append(fetch_option_name)
options.append("Change name")
options.append("Stop")
elif chosen_instance["State"] in ("terminated", "terminating"):
options = ["Nothing to do here."]
else:
options.append("Start")
options.append("Change name")
options.append("Change type")
time.sleep(2)
chosen_action = prompt.prompt({'type':"list", "name":"action", "message": "What do you want to do?", "choices":options})["action"]
if chosen_action == "Start":
response = start_instance(chosen_instance, region_name=client_region_name)
if chosen_action == "Stop":
response = stop_instance(chosen_instance, region_name=client_region_name)
if chosen_action == "Open shell (SSH)":
connect_instance(chosen_instance)
if chosen_action == "Jupyter":
process = start_jupyter(chosen_instance)
if chosen_action == "Kill Jupyters":
kill_jupyters(chosen_instance)
if chosen_action == "Change type":
change_type(chosen_instance, region_name=client_region_name, available_instances=recommended_instance_types)
if chosen_action == "Change name":
change_name(chosen_instance, region_name=client_region_name)
if chosen_action == deploy_option_name:
deploy(chosen_instance)
if chosen_action == fetch_option_name:
fetch(chosen_instance)
if chosen_action == "Details":
detailed_info(chosen_instance, region_name=client_region_name)
time.sleep(0.5)
input("\n\nENTER to reload script ..")
| 44.674174
| 240
| 0.618828
|
import json
from PyInquirer import style_from_dict, prompt
from prettytable import PrettyTable
import os
import subprocess
import boto3
from botocore.exceptions import ClientError
import datetime
import time
import socket
GLOBAL_CONFIG = {}
USERNAME_TO_AMI = {"ec2-user": "For Amazon Linux AMI, Fedora AMI, Suse AMI",
"ubuntu": "For Ubuntu AMI",
"centos": "For Centos AMI",
"admin": "For Debian AMI"}
STANDARD_ATTRIBUTES = ["Name", "KeyName", "InstanceId", "InstanceType", "PublicIpAddress", "Uptime", "State"]
REGION_TO_READABLE_NAME = {
"us-east-1": "US East (N. Virginia)",
"us-east-2": "US East (Ohio)",
"us-west-1": "US West (N. California)",
"us-west-2": "US West (Oregon)",
"ap-south-1": "Asia Pacific (Mumbai)",
"ap-northeast-3": "Asia Pacific (Osaka Local)",
"ap-northeast-2": "Asia Pacific (Seoul)",
"ap-southeast-1": "Asia Pacific (Singapore)",
"ap-southeast-2": "Asia Pacific (Sydney)",
"ap-northeast-1": "Asia Pacific (Tokyo)",
"ca-central-1": "Canada (Central)",
"cn-north-1": "China (Beijing)",
"cn-northwest-1": "China (Ningxia)",
"eu-central-1": "EU (Frankfurt)",
"eu-west-1": "EU (Ireland)",
"eu-west-2": "EU (London)",
"eu-west-3": "EU (Paris)",
"eu-north-1": "EU (Stockholm)",
"sa-east-1": "South America (São Paulo)"}
def read_config(filepath=os.path.dirname(os.path.realpath(__file__))+"/config.cfg"):
config_dict = {}
with open(filepath, "r") as config_file:
config_content = config_file.readlines()
for line in config_content:
if line in ("", "\n"):
continue
if line.strip()[0] == "#":
continue
key, value = line.split(":", maxsplit=1)
key = key.strip()
value = value.strip()
if value in ("True", "true", "1"):
value = True
if value in ("False", "false", "0"):
value = False
config_dict[key] = value
return config_dict
def check_port(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = False
try:
sock.bind(("0.0.0.0", port))
result = True
except:
result = False
sock.close()
return result
def timedelta_hours_minutes(timedelta):
return timedelta.days * 24 + timedelta.seconds//3600, (timedelta.seconds//60)%60
def load_prices(used_instance_types, region_name):
pricing = boto3.client("pricing")
price_map = {}
known_instance_types = []
product_list = []
for used_type in used_instance_types:
if used_type not in known_instance_types:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
filters = [{'Type' :'TERM_MATCH', 'Field':'operatingSystem', 'Value':'Linux' },
{'Type' :'TERM_MATCH', 'Field':'location', 'Value': location_name},
{'Type' :'TERM_MATCH', 'Field':'instanceType', 'Value':used_type},
{'Type' :'TERM_MATCH', 'Field':'currentGeneration', 'Value':'Yes'}]
product_list += [json.loads(product) for product in pricing.get_products(ServiceCode="AmazonEC2", Filters=filters)["PriceList"]]
for product in product_list:
technical_info = product["product"]["attributes"]
try:
on_demand_info = product["terms"]["OnDemand"]
except KeyError:
continue
funny_key = list(on_demand_info.keys())[0]
if len(on_demand_info.keys()) > 1:
print("ALERT - MANY FUNNY KEYS")
on_demand_info = on_demand_info[funny_key]["priceDimensions"]
funny_key = list(on_demand_info.keys())[0]
if len(on_demand_info.keys()) > 1:
print("ALERT - MANY FUNNY KEYS")
on_demand_info = on_demand_info[funny_key]
price_desc = on_demand_info["description"]
price_unit = on_demand_info["unit"]
price_per_unit_in_usd = float(on_demand_info["pricePerUnit"]["USD"])
if price_per_unit_in_usd == 0:
continue
info_dict = {"pricePerUnit (*)":price_per_unit_in_usd, "unit":price_unit, "instanceFamily":technical_info["instanceFamily"]}
price_map[technical_info["instanceType"]] = info_dict
known_instance_types.append(technical_info["instanceType"])
del pricing
return price_map
def merge_price_map(instances, price_map):
for idx, inst in enumerate(instances):
info = price_map.get(inst["InstanceType"], None)
if info is not None:
inst.update(info)
else:
print("Warning: "+str(inst["InstanceType"])+" is not known")
return instances
def imageid_to_name(image_id):
ec2 = boto3.client("ec2")
image_info = ec2.describe_images(ImageIds=[image_id])["Images"][0]
image_name = image_info.get("Name", "")
return image_name
def get_current_instances(interesting_attributes=STANDARD_ATTRIBUTES, include_prices=True, region_name=None):
assert("InstanceType" in interesting_attributes)
if region_name is None:
ec2 = boto3.client("ec2")
region_name = ec2.meta.region_name
else:
ec2 = boto3.client("ec2", region_name=region_name)
reservations = ec2.describe_instances()["Reservations"]
used_types =[]
instances = []
for res in reservations:
instances += res["Instances"]
for idx, inst in enumerate(instances):
for attribute in interesting_attributes:
if not attribute in inst:
inst[attribute] = None
if "State" in inst:
inst["State"] = inst["State"]["Name"]
if inst["InstanceType"] not in used_types:
used_types.append(inst["InstanceType"])
if "Uptime" in interesting_attributes:
if inst["State"] != "running":
uptime = timedelta_hours_minutes(datetime.timedelta(0))
else:
uptime = timedelta_hours_minutes(datetime.datetime.now(datetime.timezone.utc) - inst["LaunchTime"])
inst["Uptime"] = "{}h {}m".format(*uptime)
try:
tags = inst["Tags"]
for tag in tags:
inst[tag["Key"]] = tag["Value"]
inst.pop("Tags", None)
except KeyError:
inst["Name"] = ""
try:
if "ImageName" in interesting_attributes:
image_id = inst["ImageId"]
image_name = imageid_to_name(image_id)
inst.pop("ImageId", None)
inst["ImageName"] = image_name
except KeyError:
inst["ImageName"] = ""
placement = inst["Placement"]
for k,v in placement.items():
inst[k] = v
instances[idx] = {k:v for k,v in inst.items() if k in interesting_attributes}
if include_prices:
price_map = load_prices(used_types, region_name=region_name)
instances = merge_price_map(instances, price_map)
del ec2
return (instances, used_types, region_name)
def start_instance(instance, region_name, waiting_periods=7):
if instance["State"] in ("running", "pending"):
print("No need to start this one. Maybe have some patience.")
else:
ec2 = boto3.client("ec2", region_name=region_name)
response = None
try:
ec2.start_instances(InstanceIds=[instance["InstanceId"]], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
try:
response = ec2.start_instances(InstanceIds=[instance["InstanceId"]], DryRun=False)
print("START signal sent, waiting for reachability ...")
waiter = ec2.get_waiter("instance_running")
waiter.wait(InstanceIds=[instance["InstanceId"]])
current_info = ec2.describe_instances(InstanceIds=[instance["InstanceId"]])["Reservations"][0]["Instances"][0]
if "PublicIpAddress" in current_info:
print("Instance reachable, address: "+current_info["PublicIpAddress"])
except ClientError as e:
print(e)
del ec2
return response
def stop_instance(instance, region_name):
confirm_prompt = {
'type': 'confirm',
'message': 'Do you really want to stop \"'+instance["Name"]+'\"?',
'name': 'stop',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["stop"]
if not chosen_confirmation:
print(" ----> Canceling.")
return
if instance["State"] in ("stopped", "stopping"):
print("------> Instance is already stopped or stopping.")
else:
ec2 = boto3.client("ec2", region_name=region_name)
response = None
try:
ec2.stop_instances(InstanceIds=[instance["InstanceId"]], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
raise
try:
response = ec2.stop_instances(InstanceIds=[instance["InstanceId"]], DryRun=False)
print("STOP signal sent, waiting for full stop. This might take a while.")
waiter = ec2.get_waiter("instance_stopped")
waiter.wait(InstanceIds=[instance["InstanceId"]])
print("Instance stopped.")
except ClientError as e:
print(e)
return response
def connect_instance(instance):
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
if os.path.exists(key_path):
subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]])
else:
raise ValueError("Key"+key_name+".pem is not available in my 'keys' folder.")
def start_jupyter(instance, local_port=8889):
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
if os.path.exists(key_path):
output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
if len(output) == 0:
print("Starting jupyter server remotely...")
subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "screen", "-dm", "bash", "-c", "\"jupyter", "notebook", "--no-browser", "--port=8889\""])
time.sleep(3)
output = str(subprocess.run(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "jupyter", "notebook", "list"], stdout=subprocess.PIPE).stdout).split("\\n")[1:-1]
print("\t ... done")
else:
print("Jupyter server found, did not start a new server.")
one_up = 0
while (one_up < 3):
if check_port(local_port + one_up):
server_prompt = {
'type': 'list',
'name': 'server',
'message': 'Port '+str(local_port + one_up)+' available. Connect?',
'choices': output
}
jupyter_instance = prompt.prompt(server_prompt)["server"]
remote_hostport = jupyter_instance.split("/")[2]
command = ["nohup", "ssh", "-i", key_path, "-N", "-L", str(local_port + one_up)+":"+remote_hostport, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]]
process = subprocess.Popen(command, preexec_fn=os.setpgrp)
print("Port forwarding PID: "+str(process.pid))
print(jupyter_instance.replace(str(remote_hostport), str(local_port + one_up), 1))
print("")
break
else:
print("Local port "+str(local_port)+" is taken. Maybe you are already connected?")
one_up += 1
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
return output
def change_remote_username():
global GLOBAL_CONFIG
available_names = [k+" - "+v for k, v in USERNAME_TO_AMI.items()]
username_prompt = {
'type': 'list',
'name': 'username',
'message': 'Current username: '+GLOBAL_CONFIG["aws_username"]+'. Which username do you want use instead?',
'choices': available_names
}
chosen_name = prompt.prompt(username_prompt)["username"].split(" - ")[0]
GLOBAL_CONFIG["aws_username"] = chosen_name
def kill_jupyters(instance):
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
def display_instances(instances, region_name):
print("\n")
if region_name is not None:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
print("Instances for region: \n\t\t"+str(region_name)+" ["+location_name+"]\n")
if len(instances) > 0:
keys = sorted(instances[0].keys())
instance_table = PrettyTable(keys)
instances = sorted(instances, key=lambda x: (0 if x["State"] == "running" else 1, x["State"]), reverse=False)
for instance in instances:
items = sorted(instance.items(), key=lambda x: x[0])
instance_table.add_row([v for k,v in items])
print(instance_table)
if GLOBAL_CONFIG["load_prices"]:
print("\t(*)\tlisted prices are in $ and for on-demand Linux (w/o SQL) in region '"+region_name+"' only.\n\t\t They might be unreliable in some cases - please confirm prices at: https://aws.amazon.com/de/ec2/pricing/on-demand/")
print("\n\n")
else:
print("\n\n")
if region_name is not None:
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
print("No instances in region '"+str(region_name)+"' ["+location_name+"] available.")
else:
print("No instances in this region.")
print("\n\n")
def change_type(instance, region_name, available_instances):
assert(instance["State"] == "stopped")
ec2 = boto3.client("ec2", region_name=region_name)
choices = [k+" :: "+v for k, v in available_instances.items()]
type_prompt = {
'type': 'list',
'name': 'type',
'message': 'Current type: '+instance["InstanceType"]+'. Which type do you want instead?',
'choices': choices
}
chosen_type = prompt.prompt(type_prompt)["type"].split(" :: ")[0]
ec2.modify_instance_attribute(InstanceId=instance["InstanceId"], Attribute='instanceType', Value=chosen_type)
def change_name(instance, region_name):
ec2 = boto3.client("ec2", region_name)
name_prompt = {
'type': 'input',
'name': 'instance_name',
'message': 'Current name: '+instance["Name"]+'. Which name do you want instead?',
}
chosen_name = prompt.prompt(name_prompt)["instance_name"]
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to change the name \"'+instance["Name"]+'\" to \"'+chosen_name+'\"?',
'name': 'change_name',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["change_name"]
if not chosen_confirmation:
print("-----------> Name was not changed.")
else:
new_name_tag = {"Key":"Name", "Value":chosen_name}
ec2.create_tags(Resources=[instance["InstanceId"]], Tags=[new_name_tag])
print("Name should be changed now!")
time.sleep(0.5)
def deploy(instance):
print("?")
deploy_path = os.path.dirname(os.path.realpath(__file__))+"/deploy/"
print("\nContent of \"deploy\" folder:")
for filename in os.listdir(deploy_path):
print("\t\t"+filename)
print("\t\t - - -")
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to copy the content of the \"deploy\" folder to the remote machine?',
'name': 'deploy',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["deploy"]
if chosen_confirmation:
if not os.path.exists(deploy_path):
print("No \"deploy\" folder in the script's directory \""+os.path.dirname(os.path.realpath(__file__)))
return
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
command = ["scp", "-i", key_path, "-r", deploy_path+".", GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]+":lobot/deploy/"]
if os.path.exists(key_path):
ls_command = ["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "ls", "-ll", "~/lobot/deploy"]
ls_returncode = subprocess.call(ls_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if ls_returncode == 2:
return_code = subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "mkdir", "~/lobot", ";", "mkdir", "~/lobot/deploy"])
if subprocess.call(command) == 0:
print("Copied to \"~/lobot/deploy\" on remote machine.")
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
def fetch(instance):
fetch_path = os.path.dirname(os.path.realpath(__file__))+"/fetch/"
key_name = instance["KeyName"]
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys/"+key_name+".pem"
command = ["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "ls", "-ll", "~/lobot/fetch"]
if os.path.exists(key_path):
print("Output of \"ls -ll ~/lobot/fetch\" on remote machine:")
return_code = subprocess.call(command)
if return_code == 2:
return_code = subprocess.call(["ssh", "-i", key_path, GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"], "mkdir", "~/lobot", ";", "mkdir", "~/lobot/fetch"])
print("\"~/lobot/fetch\" folder created remotely, is empty")
return
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
confirm_prompt = {
'type': 'confirm',
'message': 'Do you want to copy the content of the remote \"~/lobot/fetch\" folder to the local machine?',
'name': 'fetch',
'default': False,
}
chosen_confirmation = prompt.prompt(confirm_prompt)["fetch"]
if chosen_confirmation:
if not os.path.exists(fetch_path):
print("No \"fetch\" folder in the script's directory \""+os.path.dirname(os.path.realpath(__file__)))
return
command = ["scp", "-i", key_path, "-r", GLOBAL_CONFIG["aws_username"]+"@"+instance["PublicIpAddress"]+":lobot/fetch/", fetch_path]
if os.path.exists(key_path):
subprocess.call(command)
else:
raise ValueError("Key"+key_name+".pem is not available in my keys folder")
def ask_instance(instances):
sorted_list = sorted(instances, key=lambda x: x["State"])
choices = [inst["InstanceId"]+" :: ("+inst["State"]+", "+inst["Name"]+")" for inst in sorted_list] + ["Change region", "Change username (SSH)"]
instance_prompt = {
'type': 'list',
'name': 'instance',
'message': 'Choose instance, change region, or change SSH username:',
'choices': choices
}
answer = prompt.prompt(instance_prompt)['instance'].split(" :: ")[0]
return answer
def change_region(current_region_name):
ec2 = boto3.client("ec2")
known_regions = [region['RegionName'] for region in ec2.describe_regions()['Regions']]
for region_idx, region_name in enumerate(known_regions):
try:
location_name = REGION_TO_READABLE_NAME[region_name]
except KeyError:
raise KeyError("Region "+str(region_name)+" does not have a readable name. Please check https://docs.aws.amazon.com/general/latest/gr/rande.html and update the REGION_TO_READABLE_NAME dictionary")
known_regions[region_idx] = region_name + " - " + location_name
region_prompt = {
'type': 'list',
'name': 'region',
'message': 'Current region: '+str(current_region_name)+'. Which region do you want instead?',
'choices': known_regions
}
chosen_region = prompt.prompt(region_prompt)['region'].split(" - ")[0]
return chosen_region
def detailed_info(instance, region_name):
ec2 = boto3.client("ec2", region_name=region_name)
current_info = ec2.describe_instances(InstanceIds=[instance["InstanceId"]])["Reservations"][0]["Instances"][0]
relevant_info = {}
table = PrettyTable(["Key", "Value"])
relevant_info["AMI Id"] = current_info["ImageId"]
try:
relevant_info["AMI Name"] = imageid_to_name(relevant_info["AMI Id"])
except ClientError:
print("\nAMI Id could not be mapped to name ..")
relevant_info["Availability Zone"] = current_info["Placement"]["AvailabilityZone"]
relevant_info["Number of CPU cores"] = current_info["CpuOptions"]["CoreCount"]
print("")
for info_name, info_content in relevant_info.items():
table.add_row([info_name, info_content])
print(table)
if __name__ == "__main__":
GLOBAL_CONFIG = read_config()
recommended_instance_types = read_config(os.path.dirname(os.path.realpath(__file__))+"/instance_types.cfg")
try:
client_region_name = GLOBAL_CONFIG["aws_region"]
    except KeyError:
client_region_name = boto3.client("ec2").meta.region_name
print("\n")
created_folder = False
key_path = os.path.dirname(os.path.realpath(__file__))+"/keys"
if not os.path.isdir(key_path):
print("No \"keys\" folder. Creating one ...")
os.mkdir(key_path)
        created_folder = True
fetch_path = os.path.dirname(os.path.realpath(__file__))+"/fetch"
if not os.path.isdir(fetch_path):
print("No \"fetch\" folder. Creating one ...")
os.mkdir(fetch_path)
created_folder = True
deploy_path = os.path.dirname(os.path.realpath(__file__))+"/deploy"
if not os.path.isdir(deploy_path):
print("No \"deploy\" folder. Creating one ...")
os.mkdir(deploy_path)
created_folder = True
if created_folder:
input("\nENTER to continue ..")
while True:
client_region_name = GLOBAL_CONFIG["aws_region"]
os.system("clear")
instances, used_types, client_region_name = get_current_instances(region_name=client_region_name, include_prices=GLOBAL_CONFIG["load_prices"])
display_instances(instances, region_name=client_region_name)
time.sleep(0.5)
chosen_instance = ask_instance(instances)
if chosen_instance == "Change region":
GLOBAL_CONFIG["aws_region"] = change_region(current_region_name=client_region_name)
time.sleep(1)
continue
elif chosen_instance == "Change username (SSH)":
change_remote_username()
time.sleep(1)
continue
else:
for inst in instances:
if inst["InstanceId"] == chosen_instance:
chosen_instance = inst
options = []
options.append("Details")
instance_name = chosen_instance["Name"]
deploy_option_name = "Deploy data to \""+str(instance_name)+"\""
        fetch_option_name = "Fetch data from \""+str(instance_name)+"\""
if chosen_instance["State"] == "running" and chosen_instance["PublicIpAddress"] is not None:
options.append("Open shell (SSH)")
options.append("Jupyter")
options.append(deploy_option_name)
options.append(fetch_option_name)
options.append("Change name")
options.append("Stop")
elif chosen_instance["State"] in ("terminated", "terminating"):
options = ["Nothing to do here."]
else:
options.append("Start")
options.append("Change name")
options.append("Change type")
time.sleep(2)
chosen_action = prompt.prompt({'type':"list", "name":"action", "message": "What do you want to do?", "choices":options})["action"]
if chosen_action == "Start":
response = start_instance(chosen_instance, region_name=client_region_name)
if chosen_action == "Stop":
response = stop_instance(chosen_instance, region_name=client_region_name)
if chosen_action == "Open shell (SSH)":
connect_instance(chosen_instance)
if chosen_action == "Jupyter":
process = start_jupyter(chosen_instance)
if chosen_action == "Kill Jupyters":
kill_jupyters(chosen_instance)
if chosen_action == "Change type":
change_type(chosen_instance, region_name=client_region_name, available_instances=recommended_instance_types)
if chosen_action == "Change name":
change_name(chosen_instance, region_name=client_region_name)
if chosen_action == deploy_option_name:
deploy(chosen_instance)
if chosen_action == fetch_option_name:
fetch(chosen_instance)
if chosen_action == "Details":
detailed_info(chosen_instance, region_name=client_region_name)
time.sleep(0.5)
input("\n\nENTER to reload script ..")
| true
| true
|
1c45093a445be08922386a784e09521a402ff9a8
| 1,191
|
py
|
Python
|
IPython/terminal/tests/test_help.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | 26
|
2018-02-14T23:52:58.000Z
|
2021-08-16T13:50:03.000Z
|
IPython/terminal/tests/test_help.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
IPython/terminal/tests/test_help.py
|
chebee7i/ipython
|
85b169fa3afc3d374973295c7f1409ededddbaca
|
[
"BSD-3-Clause-Clear"
] | 10
|
2018-08-13T19:38:39.000Z
|
2020-04-19T03:02:00.000Z
|
"""Test help output of various IPython entry points"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import IPython.testing.tools as tt
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_ipython_help():
tt.help_all_output_test()
def test_profile_help():
tt.help_all_output_test("profile")
def test_profile_list_help():
tt.help_all_output_test("profile list")
def test_profile_create_help():
tt.help_all_output_test("profile create")
def test_locate_help():
tt.help_all_output_test("locate")
def test_locate_profile_help():
tt.help_all_output_test("locate profile")
| 31.342105
| 78
| 0.455919
|
import IPython.testing.tools as tt
def test_ipython_help():
tt.help_all_output_test()
def test_profile_help():
tt.help_all_output_test("profile")
def test_profile_list_help():
tt.help_all_output_test("profile list")
def test_profile_create_help():
tt.help_all_output_test("profile create")
def test_locate_help():
tt.help_all_output_test("locate")
def test_locate_profile_help():
tt.help_all_output_test("locate profile")
| true
| true
|
1c4509e4ca560671d16f2a9bb8671aab3ebd9e45
| 696
|
py
|
Python
|
BOJ/03000~03999/3100~3199/3154.py
|
shinkeonkim/today-ps
|
f3e5e38c5215f19579bb0422f303a9c18c626afa
|
[
"Apache-2.0"
] | 2
|
2020-01-29T06:54:41.000Z
|
2021-11-07T13:23:27.000Z
|
BOJ/03000~03999/3100~3199/3154.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
BOJ/03000~03999/3100~3199/3154.py
|
shinkeonkim/Today_PS
|
bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44
|
[
"Apache-2.0"
] | null | null | null |
def f(a, b):
    # Manhattan distance between digits a and b on a phone-style keypad:
    # 1-9 occupy rows 0-2 of a 3x3 grid and 0 sits at row 3, column 1.
    L = [[3,1],[0,0],[0,1],[0,2],[1,0],[1,1],[1,2],[2,0],[2,1],[2,2]]
    return abs(L[a][0] - L[b][0]) + abs(L[a][1] - L[b][1])
M = list(map(int,input().split(":")))
C = 111111
ans = ""
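# Enumerate alternative two-digit displays: the hour field shifted by multiples of 24 and the
# minute field by multiples of 60, each kept within 0..99. Keep the display whose consecutive
# key presses need the least total keypad travel (per f above); ties go to the
# lexicographically smaller "hh:mm" string.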
for i in range(-3,10):
for j in range(-3,10):
h = (M[0] + 24*i)
m = (M[1] + 60*j)
if h > 99 or m > 99 or h < 0 or m < 0:
continue
s = list(map(int,list("%02d%02d" % (h,m))))
cnt = 0
for k in range(3):
cnt += f(s[k+1],s[k])
k = "%02d:%02d" % (h,m)
if cnt < C:
C = cnt
ans = "%02d:%02d" % (h,m)
if cnt == C and k < ans:
ans = k
print(ans)
| 25.777778
| 69
| 0.360632
|
def f(a, b):
L = [[3,1],[0,0],[0,1],[0,2],[1,0],[1,1],[1,2],[2,0],[2,1],[2,2]]
return abs(L[a][0] - L[b][0]) + abs(L[a][1] - L[b][1])
M = list(map(int,input().split(":")))
C = 111111
ans = ""
for i in range(-3,10):
for j in range(-3,10):
h = (M[0] + 24*i)
m = (M[1] + 60*j)
if h > 99 or m > 99 or h < 0 or m < 0:
continue
s = list(map(int,list("%02d%02d" % (h,m))))
cnt = 0
for k in range(3):
cnt += f(s[k+1],s[k])
k = "%02d:%02d" % (h,m)
if cnt < C:
C = cnt
ans = "%02d:%02d" % (h,m)
if cnt == C and k < ans:
ans = k
print(ans)
| true
| true
|
1c450b5dfde3363bf54ac0a21e4761a8d8692d5c
| 238
|
py
|
Python
|
5_kyu/product_of_consecutive_fib_numbers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
5_kyu/product_of_consecutive_fib_numbers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
5_kyu/product_of_consecutive_fib_numbers.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
def productFib(prod):
fib = [0, 1]
while fib[-1] * fib[-2] < prod:
fib.append(fib[-1] + fib[-2])
if fib[-1] * fib[-2] == prod:
return [fib[-2], fib[-1], True]
else:
return [fib[-2], fib[-1], False]
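# Illustrative examples, hand-checked against the function above:
#   productFib(714) -> [21, 34, True]    (21 * 34 == 714)
#   productFib(800) -> [34, 55, False]   (34 * 55 = 1870 is the first product reaching 800)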
| 26.444444
| 40
| 0.466387
|
def productFib(prod):
fib = [0, 1]
while fib[-1] * fib[-2] < prod:
fib.append(fib[-1] + fib[-2])
if fib[-1] * fib[-2] == prod:
return [fib[-2], fib[-1], True]
else:
return [fib[-2], fib[-1], False]
| true
| true
|
1c450b7bb090a0fc1273a64843f8c0d46cc1f084
| 1,080
|
py
|
Python
|
var/spack/repos/builtin/packages/hazelcast/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/hazelcast/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/hazelcast/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class Hazelcast(MavenPackage):
"""Hazelcast is an open-source distributed in-memory data
store and computation platform. It provides a wide variety
of distributed data structures and concurrency primitives."""
homepage = "http://www.hazelcast.com/"
url = "https://github.com/hazelcast/hazelcast/archive/v3.12.8.tar.gz"
version('4.0.2', sha256='4f01682583ae6603365ac7a24c568d7598cc3c1cbd736e5c6ed98bd75e39ffa3')
version('4.0.1', sha256='c9c7d5cbcf70c5e1eb72890df2b4104639f7543f11c6ac5d3e80cd2d4a0d2181')
version('3.12.8', sha256='65d0e131fc993f9517e8ce9ae5af9515f1b8038304abaaf9da535bdef1d71726')
version('3.12.7', sha256='0747de968082bc50202f825b4010be28a3885b3dbcee4b83cbe21b2f8b26a7e0')
version('3.11.7', sha256='c9f636b8813027d4cc24459bd27740549f89b4f11f62a868079bcb5b41d9b2bb')
depends_on('java@8:', type=('build', 'run'))
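    # With this package file available in a Spack repo, a pinned build could be requested
    # along the lines of "spack install hazelcast@4.0.2" (illustrative; any version listed
    # above would work the same way).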
| 49.090909
| 96
| 0.773148
|
class Hazelcast(MavenPackage):
homepage = "http://www.hazelcast.com/"
url = "https://github.com/hazelcast/hazelcast/archive/v3.12.8.tar.gz"
version('4.0.2', sha256='4f01682583ae6603365ac7a24c568d7598cc3c1cbd736e5c6ed98bd75e39ffa3')
version('4.0.1', sha256='c9c7d5cbcf70c5e1eb72890df2b4104639f7543f11c6ac5d3e80cd2d4a0d2181')
version('3.12.8', sha256='65d0e131fc993f9517e8ce9ae5af9515f1b8038304abaaf9da535bdef1d71726')
version('3.12.7', sha256='0747de968082bc50202f825b4010be28a3885b3dbcee4b83cbe21b2f8b26a7e0')
version('3.11.7', sha256='c9f636b8813027d4cc24459bd27740549f89b4f11f62a868079bcb5b41d9b2bb')
depends_on('java@8:', type=('build', 'run'))
| true
| true
|
1c450c4040159fc28c9e4f9f9a5503948dc55c72
| 10,445
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/windows/win_scheduled_task_stat.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/windows/win_scheduled_task_stat.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/windows/win_scheduled_task_stat.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_scheduled_task_stat
version_added: "2.5"
short_description: Get information about Windows Scheduled Tasks
description:
- Will return whether the folder and task exists.
- Returns the names of tasks in the folder specified.
- Use M(win_scheduled_task) to configure a scheduled task.
options:
path:
description: The folder path where the task lives.
type: str
default: \
name:
description:
- The name of the scheduled task to get information for.
- If C(name) is set and exists, will return information on the task itself.
type: str
seealso:
- module: win_scheduled_task
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Get information about a folder
win_scheduled_task_stat:
path: \folder name
register: task_folder_stat
- name: Get information about a task in the root folder
win_scheduled_task_stat:
name: task name
register: task_stat
- name: Get information about a task in a custom folder
win_scheduled_task_stat:
path: \folder name
name: task name
register: task_stat
'''
RETURN = r'''
actions:
description: A list of actions.
returned: name is specified and task exists
type: list
sample: [
{
"Arguments": "/c echo hi",
"Id": null,
"Path": "cmd.exe",
"Type": "TASK_ACTION_EXEC",
"WorkingDirectory": null
}
]
folder_exists:
description: Whether the folder set at path exists.
returned: always
type: bool
sample: true
folder_task_count:
description: The number of tasks that exist in the folder.
returned: always
type: int
sample: 2
folder_task_names:
description: A list of tasks that exist in the folder.
returned: always
type: list
sample: [ 'Task 1', 'Task 2' ]
principal:
description: Details on the principal configured to run the task.
returned: name is specified and task exists
type: complex
contains:
display_name:
description: The name of the user/group that is displayed in the Task
Scheduler UI.
returned: ''
type: str
sample: Administrator
group_id:
description: The group that will run the task.
returned: ''
type: str
sample: BUILTIN\Administrators
id:
description: The ID for the principal.
returned: ''
type: str
sample: Author
logon_type:
description: The logon method that the task will run with.
returned: ''
type: str
sample: TASK_LOGON_INTERACTIVE_TOKEN
run_level:
description: The level of user rights used to run the task.
returned: ''
type: str
sample: TASK_RUNLEVEL_LUA
user_id:
description: The user that will run the task.
returned: ''
type: str
sample: SERVER\Administrator
registration_info:
description: Details on the task registration info.
returned: name is specified and task exists
type: complex
contains:
author:
      description: The author of the task.
returned: ''
type: str
sample: SERVER\Administrator
date:
      description: The date when the task was registered.
returned: ''
type: str
sample: '2017-01-01T10:00:00'
description:
description: The description of the task.
returned: ''
type: str
sample: task description
documentation:
description: The documentation of the task.
returned: ''
type: str
sample: task documentation
security_descriptor:
description: The security descriptor of the task.
returned: ''
type: str
sample: security descriptor
source:
description: The source of the task.
returned: ''
type: str
sample: source
uri:
description: The URI/path of the task.
returned: ''
type: str
sample: \task\task name
version:
description: The version of the task.
returned: ''
type: str
sample: 1.0
settings:
description: Details on the task settings.
returned: name is specified and task exists
type: complex
contains:
allow_demand_start:
      description: Whether the task can be started by using either the Run
        command or the Context menu.
returned: ''
type: bool
sample: true
allow_hard_terminate:
      description: Whether the task can be terminated by using TerminateProcess.
returned: ''
type: bool
sample: true
compatibility:
description: The compatibility level of the task
returned: ''
type: int
sample: 2
delete_expired_task_after:
description: The amount of time the Task Scheduler will wait before
deleting the task after it expires.
returned: ''
type: str
sample: PT10M
disallow_start_if_on_batteries:
description: Whether the task will not be started if the computer is
running on battery power.
returned: ''
type: bool
sample: false
disallow_start_on_remote_app_session:
description: Whether the task will not be started when in a remote app
session.
returned: ''
type: bool
sample: true
enabled:
description: Whether the task is enabled.
returned: ''
type: bool
sample: true
execution_time_limit:
description: The amount of time allowed to complete the task.
returned: ''
type: str
sample: PT72H
hidden:
description: Whether the task is hidden in the UI.
returned: ''
type: bool
sample: false
idle_settings:
description: The idle settings of the task.
returned: ''
type: dict
sample: {
"idle_duration": "PT10M",
"restart_on_idle": false,
"stop_on_idle_end": true,
"wait_timeout": "PT1H"
}
maintenance_settings:
description: The maintenance settings of the task.
returned: ''
type: str
sample: null
mulitple_instances:
description: Indicates the behaviour when starting a task that is already
running.
returned: ''
type: int
sample: 2
network_settings:
description: The network settings of the task.
returned: ''
type: dict
sample: {
"id": null,
"name": null
}
priority:
description: The priority level of the task.
returned: ''
type: int
sample: 7
restart_count:
description: The number of times that the task will attempt to restart
on failures.
returned: ''
type: int
sample: 0
restart_interval:
description: How long the Task Scheduler will attempt to restart the
task.
returned: ''
type: str
sample: PT15M
run_only_id_idle:
description: Whether the task will run if the computer is in an idle
state.
returned: ''
type: bool
sample: true
run_only_if_network_available:
description: Whether the task will run only when a network is available.
returned: ''
type: bool
sample: false
start_when_available:
description: Whether the task can start at any time after its scheduled
time has passed.
returned: ''
type: bool
sample: false
stop_if_going_on_batteries:
description: Whether the task will be stopped if the computer begins to
run on battery power.
returned: ''
type: bool
sample: true
use_unified_scheduling_engine:
description: Whether the task will use the unified scheduling engine.
returned: ''
type: bool
sample: false
volatile:
description: Whether the task is volatile.
returned: ''
type: bool
sample: false
wake_to_run:
description: Whether the task will wake the computer when it is time to
run the task.
returned: ''
type: bool
sample: false
state:
description: Details on the state of the task
returned: name is specified and task exists
type: complex
contains:
last_run_time:
description: The time the registered task was last run.
returned: ''
type: str
sample: '2017-09-20T20:50:00'
last_task_result:
description: The results that were returned the last time the task was
run.
returned: ''
type: int
sample: 267009
next_run_time:
description: The time when the task is next scheduled to run.
returned: ''
type: str
sample: '2017-09-20T22:50:00'
number_of_missed_runs:
description: The number of times a task has missed a scheduled run.
returned: ''
type: int
sample: 1
status:
description: The status of the task, whether it is running, stopped, etc.
returned: ''
type: str
sample: TASK_STATE_RUNNING
task_exists:
description: Whether the task at the folder exists.
returned: name is specified
type: bool
sample: true
triggers:
description: A list of triggers.
returned: name is specified and task exists
type: list
sample: [
{
"delay": "PT15M",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"start_boundary": null,
"type": "TASK_TRIGGER_BOOT"
},
{
"days_of_month": "5,15,30",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"months_of_year": "june,december",
"random_delay": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"run_on_last_day_of_month": true,
"start_boundary": "2017-09-20T03:44:38",
"type": "TASK_TRIGGER_MONTHLY"
}
]
'''
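# A registered result can gate later tasks; an illustrative playbook snippet that uses only
# field names documented in the RETURN block above:
#
#   - name: show the task's actions when it exists
#     debug:
#       msg: "{{ task_stat.actions }}"
#     when: task_stat.task_exists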
| 27.486842
| 92
| 0.629009
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_scheduled_task_stat
version_added: "2.5"
short_description: Get information about Windows Scheduled Tasks
description:
- Will return whether the folder and task exists.
- Returns the names of tasks in the folder specified.
- Use M(win_scheduled_task) to configure a scheduled task.
options:
path:
description: The folder path where the task lives.
type: str
default: \
name:
description:
- The name of the scheduled task to get information for.
- If C(name) is set and exists, will return information on the task itself.
type: str
seealso:
- module: win_scheduled_task
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Get information about a folder
win_scheduled_task_stat:
path: \folder name
register: task_folder_stat
- name: Get information about a task in the root folder
win_scheduled_task_stat:
name: task name
register: task_stat
- name: Get information about a task in a custom folder
win_scheduled_task_stat:
path: \folder name
name: task name
register: task_stat
'''
RETURN = r'''
actions:
description: A list of actions.
returned: name is specified and task exists
type: list
sample: [
{
"Arguments": "/c echo hi",
"Id": null,
"Path": "cmd.exe",
"Type": "TASK_ACTION_EXEC",
"WorkingDirectory": null
}
]
folder_exists:
description: Whether the folder set at path exists.
returned: always
type: bool
sample: true
folder_task_count:
description: The number of tasks that exist in the folder.
returned: always
type: int
sample: 2
folder_task_names:
description: A list of tasks that exist in the folder.
returned: always
type: list
sample: [ 'Task 1', 'Task 2' ]
principal:
description: Details on the principal configured to run the task.
returned: name is specified and task exists
type: complex
contains:
display_name:
description: The name of the user/group that is displayed in the Task
Scheduler UI.
returned: ''
type: str
sample: Administrator
group_id:
description: The group that will run the task.
returned: ''
type: str
sample: BUILTIN\Administrators
id:
description: The ID for the principal.
returned: ''
type: str
sample: Author
logon_type:
description: The logon method that the task will run with.
returned: ''
type: str
sample: TASK_LOGON_INTERACTIVE_TOKEN
run_level:
description: The level of user rights used to run the task.
returned: ''
type: str
sample: TASK_RUNLEVEL_LUA
user_id:
description: The user that will run the task.
returned: ''
type: str
sample: SERVER\Administrator
registration_info:
description: Details on the task registration info.
returned: name is specified and task exists
type: complex
contains:
author:
      description: The author of the task.
returned: ''
type: str
sample: SERVER\Administrator
date:
      description: The date when the task was registered.
returned: ''
type: str
sample: '2017-01-01T10:00:00'
description:
description: The description of the task.
returned: ''
type: str
sample: task description
documentation:
description: The documentation of the task.
returned: ''
type: str
sample: task documentation
security_descriptor:
description: The security descriptor of the task.
returned: ''
type: str
sample: security descriptor
source:
description: The source of the task.
returned: ''
type: str
sample: source
uri:
description: The URI/path of the task.
returned: ''
type: str
sample: \task\task name
version:
description: The version of the task.
returned: ''
type: str
sample: 1.0
settings:
description: Details on the task settings.
returned: name is specified and task exists
type: complex
contains:
allow_demand_start:
      description: Whether the task can be started by using either the Run
        command or the Context menu.
returned: ''
type: bool
sample: true
allow_hard_terminate:
      description: Whether the task can be terminated by using TerminateProcess.
returned: ''
type: bool
sample: true
compatibility:
description: The compatibility level of the task
returned: ''
type: int
sample: 2
delete_expired_task_after:
description: The amount of time the Task Scheduler will wait before
deleting the task after it expires.
returned: ''
type: str
sample: PT10M
disallow_start_if_on_batteries:
description: Whether the task will not be started if the computer is
running on battery power.
returned: ''
type: bool
sample: false
disallow_start_on_remote_app_session:
description: Whether the task will not be started when in a remote app
session.
returned: ''
type: bool
sample: true
enabled:
description: Whether the task is enabled.
returned: ''
type: bool
sample: true
execution_time_limit:
description: The amount of time allowed to complete the task.
returned: ''
type: str
sample: PT72H
hidden:
description: Whether the task is hidden in the UI.
returned: ''
type: bool
sample: false
idle_settings:
description: The idle settings of the task.
returned: ''
type: dict
sample: {
"idle_duration": "PT10M",
"restart_on_idle": false,
"stop_on_idle_end": true,
"wait_timeout": "PT1H"
}
maintenance_settings:
description: The maintenance settings of the task.
returned: ''
type: str
sample: null
mulitple_instances:
description: Indicates the behaviour when starting a task that is already
running.
returned: ''
type: int
sample: 2
network_settings:
description: The network settings of the task.
returned: ''
type: dict
sample: {
"id": null,
"name": null
}
priority:
description: The priority level of the task.
returned: ''
type: int
sample: 7
restart_count:
description: The number of times that the task will attempt to restart
on failures.
returned: ''
type: int
sample: 0
restart_interval:
description: How long the Task Scheduler will attempt to restart the
task.
returned: ''
type: str
sample: PT15M
run_only_id_idle:
description: Whether the task will run if the computer is in an idle
state.
returned: ''
type: bool
sample: true
run_only_if_network_available:
description: Whether the task will run only when a network is available.
returned: ''
type: bool
sample: false
start_when_available:
description: Whether the task can start at any time after its scheduled
time has passed.
returned: ''
type: bool
sample: false
stop_if_going_on_batteries:
description: Whether the task will be stopped if the computer begins to
run on battery power.
returned: ''
type: bool
sample: true
use_unified_scheduling_engine:
description: Whether the task will use the unified scheduling engine.
returned: ''
type: bool
sample: false
volatile:
description: Whether the task is volatile.
returned: ''
type: bool
sample: false
wake_to_run:
description: Whether the task will wake the computer when it is time to
run the task.
returned: ''
type: bool
sample: false
state:
description: Details on the state of the task
returned: name is specified and task exists
type: complex
contains:
last_run_time:
description: The time the registered task was last run.
returned: ''
type: str
sample: '2017-09-20T20:50:00'
last_task_result:
description: The results that were returned the last time the task was
run.
returned: ''
type: int
sample: 267009
next_run_time:
description: The time when the task is next scheduled to run.
returned: ''
type: str
sample: '2017-09-20T22:50:00'
number_of_missed_runs:
description: The number of times a task has missed a scheduled run.
returned: ''
type: int
sample: 1
status:
description: The status of the task, whether it is running, stopped, etc.
returned: ''
type: str
sample: TASK_STATE_RUNNING
task_exists:
description: Whether the task at the folder exists.
returned: name is specified
type: bool
sample: true
triggers:
description: A list of triggers.
returned: name is specified and task exists
type: list
sample: [
{
"delay": "PT15M",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"start_boundary": null,
"type": "TASK_TRIGGER_BOOT"
},
{
"days_of_month": "5,15,30",
"enabled": true,
"end_boundary": null,
"execution_time_limit": null,
"id": null,
"months_of_year": "june,december",
"random_delay": null,
"repetition": {
"duration": null,
"interval": null,
"stop_at_duration_end": false
},
"run_on_last_day_of_month": true,
"start_boundary": "2017-09-20T03:44:38",
"type": "TASK_TRIGGER_MONTHLY"
}
]
'''
| true
| true
|
1c450c6fcbe3b62b2247c2fb25a8112f6abca6f6
| 60,369
|
py
|
Python
|
Lib/optparse.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 52,316
|
2015-01-01T15:56:25.000Z
|
2022-03-31T23:19:01.000Z
|
Lib/optparse.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 25,286
|
2015-03-03T23:18:02.000Z
|
2022-03-31T23:17:27.000Z
|
Lib/optparse.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 31,623
|
2015-01-01T13:29:37.000Z
|
2022-03-31T19:55:06.000Z
|
"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError',
'check_choice']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.help_position = self.max_help_position = \
min(max_help_position, max(width - 20, indent_increment * 2))
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = max(self.width - self.help_position, 11)
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
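# Illustrative sketch, not from the original optparse source: the radix
# detection performed by _parse_num()/_parse_int() above.  The helper name is
# hypothetical; the assertions hold for this implementation.
def _demo_parse_int():
    assert _parse_int("0x10") == 16   # "0x" prefix -> hexadecimal
    assert _parse_int("0b101") == 5   # "0b" prefix -> binary (prefix stripped)
    assert _parse_int("010") == 8     # leading "0" -> octal
    assert _parse_int("42") == 42     # anything else -> decimal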
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
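# Illustrative sketch, not from the original optparse source: the checker
# protocol used by TYPE_CHECKER below -- check(option, opt, value) returns the
# converted value or raises OptionValueError.  The _Stub class is a
# hypothetical stand-in carrying only the attributes the checkers read; real
# code passes Option instances.
def _demo_checkers():
    class _Stub:
        type = "int"
        choices = ["red", "green"]
    stub = _Stub()
    assert check_builtin(stub, "-n", "0x10") == 16
    assert check_choice(stub, "-c", "red") == "red"
    try:
        check_builtin(stub, "-n", "not-a-number")
    except OptionValueError as err:
        print(err)   # option -n: invalid integer value: 'not-a-number'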
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names.
if isinstance(self.type, type):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
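# Illustrative sketch, not from the original optparse source: Option used in
# isolation (normally OptionParser.add_option() constructs these).  Shows the
# defaults filled in by the CHECK_METHODS and how check_value() routes through
# TYPE_CHECKER.  The helper name is hypothetical.
def _demo_option():
    opt = Option("-n", "--num", type="int")
    assert opt.action == "store"        # _check_action() default
    assert opt.dest == "num"            # _check_dest(): gleaned from "--num"
    assert opt.takes_value()
    assert opt.check_value("--num", "0x10") == 16   # via check_builtin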
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
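# Illustrative sketch, not from the original optparse source: the Values
# container on its own -- attribute storage, the "careful" vs. "loose" update
# modes, and ensure_value() as used by the append/count actions.  The helper
# name is hypothetical.
def _demo_values():
    vals = Values({"verbose": True})
    vals._update_careful({"verbose": False, "unknown": 1})
    assert vals.verbose is False
    assert not hasattr(vals, "unknown")       # careful mode ignores new keys
    vals._update_loose({"unknown": 1})
    assert vals.unknown == 1                  # loose mode takes every key
    assert vals.ensure_value("hits", 0) == 0  # creates missing attributes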
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the main
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
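# Illustrative sketch, not from the original optparse source: option groups
# exist purely to structure the help output; because they share the parser's
# option mappings, parsing is unchanged.  The helper and option names are
# hypothetical, and OptionParser is defined below, so call this only after the
# module has been fully loaded.
def _demo_option_group():
    parser = OptionParser(prog="demo")
    group = OptionGroup(parser, "Dangerous Options",
                        "Caution: use at your own risk.")
    group.add_option("--wipe", action="store_true", dest="wipe")
    parser.add_option_group(group)
    (opts, args) = parser.parse_args(["--wipe"])
    assert opts.wipe is True
    print(parser.format_help())   # the group appears under its own heading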
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
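# Illustrative sketch, not from the original optparse source: an end-to-end
# run of parse_args() plus the interspersed-args switch documented in the
# OptionParser docstring.  Helper, program, and option names are hypothetical.
def _demo_parse_args():
    parser = OptionParser(prog="demo")
    parser.add_option("-o", "--output", dest="output")
    parser.add_option("-v", action="count", dest="verbosity", default=0)
    (opts, args) = parser.parse_args(["-vv", "input.txt", "-o", "out.txt"])
    assert opts.verbosity == 2
    assert opts.output == "out.txt"
    assert args == ["input.txt"]        # leftover positional arguments
    parser.disable_interspersed_args()
    (opts, args) = parser.parse_args(["input.txt", "-o", "out.txt"])
    assert opts.output is None          # parsing stopped at "input.txt"
    assert args == ["input.txt", "-o", "out.txt"]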
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
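# Illustrative sketch, not from the original optparse source: abbreviation
# matching as applied to long options -- exact matches win, a unique prefix is
# expanded, and an ambiguous prefix raises AmbiguousOptionError.  The plain
# dict below stands in for the parser's _long_opt mapping.
def _demo_match_abbrev():
    wordmap = {"--file": None, "--filter": None, "--verbose": None}
    assert _match_abbrev("--file", wordmap) == "--file"     # exact match
    assert _match_abbrev("--verb", wordmap) == "--verbose"  # unique prefix
    try:
        _match_abbrev("--fil", wordmap)
    except AmbiguousOptionError as err:
        print(err)   # ambiguous option: --fil (--file, --filter?)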
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
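# Illustrative sketch, not from the original optparse source: %prog expansion
# in usage/version strings and %default expansion in help text, as described
# in the classes above.  Program and option names are hypothetical.
def _demo_help_expansion():
    parser = OptionParser(usage="%prog [options] FILE", prog="frobnicate",
                          version="%prog 1.0")
    parser.add_option("--retries", type="int", default=3,
                      help="number of retries [default: %default]")
    print(parser.get_usage())     # Usage: frobnicate [options] FILE
    print(parser.get_version())   # frobnicate 1.0
    print(parser.format_help())   # the --retries line ends with "[default: 3]"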
avg_line_length: 35.891201 | max_line_length: 79 | alphanum_fraction: 0.582633
content_no_comment (the same file with docstrings and most comments stripped):
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError',
'check_choice']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
class OptionValueError (OptParseError):
class BadOptionError (OptParseError):
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.help_position = self.max_help_position = \
min(max_help_position, max(width - 20, indent_increment * 2))
self.current_indent = 0
self.level = 0
self.help_width = None
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else:
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = max(self.width - self.help_position, 11)
def format_option_strings(self, option):
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x":
radix = 16
elif val[:2].lower() == "0b":
radix = 2
val = val[2:] or "0"
elif val[:1] == "0":
radix = 8
else:
radix = 10
return type(val, radix)
def _parse_int(val):
return _parse_num(val, int)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
TYPED_ACTIONS = ("store",
"append",
"callback")
ALWAYS_TYPED_ACTIONS = ("store",
"append")
CONST_ACTIONS = ("store_const",
"append_const")
TYPES = ("string", "int", "long", "float", "complex", "choice")
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
CHECK_METHODS = None
def __init__(self, *opts, **attrs):
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
self._set_attrs(attrs)
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
self.type = "choice"
else:
self.type = "string"
else:
if isinstance(self.type, type):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the main
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
self.allow_interspersed_args = True
def disable_interspersed_args(self):
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
        #     the leftover arguments -- ie. what's left after removing
        #     options and their arguments (the "l" stands for
        #     "leftover" or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
return (values, args)
def _process_args(self, largs, rargs, values):
while rargs:
arg = rargs[0]
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return
def _match_long_opt(self, opt):
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else:
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
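# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It exercises the
# OptionParser/Option machinery defined above; the option names and the sample
# argv below are illustrative assumptions, not taken from the original source.
if __name__ == "__main__":
    _demo_parser = OptionParser(usage="%prog [options] file", version="%prog 1.0")
    _demo_parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                            default=False, help="print status messages")
    _demo_parser.add_option("-n", "--count", type="int", dest="count", default=1,
                            help="number of repetitions")
    _opts, _args = _demo_parser.parse_args(["-v", "--count", "3", "input.txt"])
    # Expected: _opts.verbose is True, _opts.count == 3, _args == ["input.txt"]
    print(_opts, _args)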
| true
| true
|
1c450d522f192a94ed707858331d204858a968c7
| 10,430
|
py
|
Python
|
frappe/email/email_body.py
|
omirajkar/vmsfrappe
|
da65f47850944ea234fda0ca390bacb9dac39336
|
[
"MIT"
] | 1
|
2020-01-14T17:06:07.000Z
|
2020-01-14T17:06:07.000Z
|
frappe/email/email_body.py
|
omirajkar/vmsfrappe
|
da65f47850944ea234fda0ca390bacb9dac39336
|
[
"MIT"
] | null | null | null |
frappe/email/email_body.py
|
omirajkar/vmsfrappe
|
da65f47850944ea234fda0ca390bacb9dac39336
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, re
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import (get_url, scrub_urls, strip, expand_relative_urls, cint,
split_emails, to_markdown, markdown, encode, random_string, parse_addr)
import email.utils
from six import iteritems
from email.mime.multipart import MIMEMultipart
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
content=None, reply_to=None, cc=[], email_account=None, expose_recipients=None,
inline_images=[]):
"""send an html email as multipart with attachments and all"""
content = content or msg
emailobj = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc, email_account=email_account, expose_recipients=expose_recipients)
if not content.strip().startswith("<"):
content = markdown(content)
emailobj.set_html(content, text_content, footer=footer,
print_html=print_html, formatted=formatted, inline_images=inline_images)
if isinstance(attachments, dict):
attachments = [attachments]
for attach in (attachments or []):
emailobj.add_attachment(**attach)
return emailobj
class EMail:
"""
Wrapper on the email module. Email object represents emails to be sent to the client.
Also provides a clean way to add binary `FileData` attachments
Also sets all messages as multipart/alternative for cleaner reading in text-only clients
"""
def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=(), email_account=None, expose_recipients=None):
from email import Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, basestring):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = split_emails(recipients)
# remove null
recipients = filter(None, (strip(r) for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.expose_recipients = expose_recipients
self.msg_root = MIMEMultipart('mixed')
self.msg_multipart = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_multipart)
self.cc = cc or []
self.html_set = False
self.email_account = email_account or get_outgoing_email_account()
def set_html(self, message, text_content = None, footer=None, print_html=None,
formatted=None, inline_images=None):
"""Attach message in the html portion of multipart/alternative"""
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html, email_account=self.email_account)
# this is the first html part of a multi-part message,
# convert to text well
if not self.html_set:
if text_content:
self.set_text(expand_relative_urls(text_content))
else:
self.set_html_as_text(expand_relative_urls(formatted))
self.set_part_html(formatted, inline_images)
self.html_set = True
def set_text(self, message):
"""
Attach message in the text portion of multipart/alternative
"""
from email.mime.text import MIMEText
part = MIMEText(message, 'plain', 'utf-8')
self.msg_multipart.attach(part)
def set_part_html(self, message, inline_images):
from email.mime.text import MIMEText
if inline_images:
related = MIMEMultipart('related')
for image in inline_images:
# images in dict like {filename:'', filecontent:'raw'}
content_id = random_string(10)
# replace filename in message with CID
message = re.sub('''src=['"]{0}['"]'''.format(image.get('filename')),
'src="cid:{0}"'.format(content_id), message)
self.add_attachment(image.get('filename'), image.get('filecontent'),
None, content_id=content_id, parent=related)
html_part = MIMEText(message, 'html', 'utf-8')
related.attach(html_part)
self.msg_multipart.attach(related)
else:
self.msg_multipart.attach(MIMEText(message, 'html', 'utf-8'))
def set_html_as_text(self, html):
"""return html2text"""
self.set_text(to_markdown(html))
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
"""Append the message with MIME content to the root node (as attachment)"""
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
"""attach a file from the `FileData` table"""
from frappe.utils.file_manager import get_file
res = get_file(n)
if not res:
return
self.add_attachment(res[0], res[1])
def add_attachment(self, fname, fcontent, content_type=None,
parent=None, content_id=None):
"""add attachment"""
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if content_type is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
# Note: we should handle calculating the charset
if isinstance(fcontent, unicode):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
# Encode the payload using Base64
from email import encoders
encoders.encode_base64(part)
# Set the filename parameter
if fname:
part.add_header(b'Content-Disposition',
("attachment; filename=\"%s\"" % fname).encode('utf-8'))
if content_id:
part.add_header(b'Content-ID', '<{0}>'.format(content_id))
if not parent:
parent = self.msg_root
parent.attach(part)
def add_pdf_attachment(self, name, html, options=None):
self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')
def validate(self):
"""validate the Email Addresses"""
from frappe.utils import validate_email_add
if not self.sender:
self.sender = self.email_account.default_sender
validate_email_add(strip(self.sender), True)
self.reply_to = validate_email_add(strip(self.reply_to) or self.sender, True)
self.replace_sender()
self.recipients = [strip(r) for r in self.recipients]
self.cc = [strip(r) for r in self.cc]
for e in self.recipients + (self.cc or []):
validate_email_add(e, True)
def replace_sender(self):
if cint(self.email_account.always_use_account_email_id_as_sender):
self.set_header('X-Original-From', self.sender)
sender_name, sender_email = parse_addr(self.sender)
self.sender = email.utils.formataddr((sender_name or self.email_account.name, self.email_account.email_id))
def set_message_id(self, message_id, is_notification=False):
if message_id:
self.msg_root["Message-Id"] = '<' + message_id + '>'
else:
self.msg_root["Message-Id"] = get_message_id()
self.msg_root["isnotification"] = '<notification>'
if is_notification:
self.msg_root["isnotification"] = '<notification>'
def set_in_reply_to(self, in_reply_to):
"""Used to send the Message-Id of a received email back as In-Reply-To"""
self.msg_root["In-Reply-To"] = in_reply_to
def make(self):
"""build into msg_root"""
headers = {
"Subject": strip(self.subject),
"From": self.sender,
"To": ', '.join(self.recipients) if self.expose_recipients=="header" else "<!--recipient-->",
"Date": email.utils.formatdate(),
"Reply-To": self.reply_to if self.reply_to else None,
"CC": ', '.join(self.cc) if self.cc and self.expose_recipients=="header" else None,
'X-Frappe-Site': get_url(),
}
# reset headers as values may be changed.
for key, val in iteritems(headers):
self.set_header(key, val)
# call hook to enable apps to modify msg_root before sending
for hook in frappe.get_hooks("make_email_body_message"):
frappe.get_attr(hook)(self)
def set_header(self, key, value):
key = encode(key)
value = encode(value)
if self.msg_root.has_key(key):
del self.msg_root[key]
self.msg_root[key] = value
def as_string(self):
"""validate, build message and convert to string"""
self.validate()
self.make()
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None, email_account=None):
if not email_account:
email_account = get_outgoing_email_account(False)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"content": message,
"signature": get_signature(email_account),
"footer": get_footer(email_account, footer),
"title": subject,
"print_html": print_html,
"subject": subject
})
return scrub_urls(rendered_email)
def get_message_id():
'''Returns Message ID created from doctype and name'''
return "<{unique}@{site}>".format(
site=frappe.local.site,
unique=email.utils.make_msgid(random_string(10)).split('@')[0].split('<')[1])
def get_signature(email_account):
if email_account and email_account.add_signature and email_account.signature:
return "<br><br>" + email_account.signature
else:
return ""
def get_footer(email_account, footer=None):
"""append a footer (signature)"""
footer = footer or ""
if email_account and email_account.footer:
footer += '<div style="margin: 15px auto;">{0}</div>'.format(email_account.footer)
footer += "<!--unsubscribe link here-->"
company_address = frappe.db.get_default("email_footer_address")
if company_address:
footer += '<div style="margin: 15px auto; text-align: center; color: #8d99a6">{0}</div>'\
.format(company_address.replace("\n", "<br>"))
if not cint(frappe.db.get_default("disable_standard_email_footer")):
for default_mail_footer in frappe.get_hooks("default_mail_footer"):
footer += '<div style="margin: 15px auto;">{0}</div>'.format(default_mail_footer)
return footer
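# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). get_email() needs a
# bootstrapped Frappe site, because EMail reads the outgoing email account from
# the database, so the call is shown as a commented example; the addresses and
# subject are illustrative only.
#
#     emailobj = get_email(
#         recipients="jane@example.com, john@example.com",
#         sender="noreply@example.com",
#         subject="Weekly report",
#         content="## Report\n\nAll systems nominal.",  # markdown is converted to HTML
#     )
#     raw_mime = emailobj.as_string()  # validates, sets headers, returns MIME text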
| 33.754045
| 139
| 0.727229
|
from __future__ import unicode_literals
import frappe, re
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import (get_url, scrub_urls, strip, expand_relative_urls, cint,
split_emails, to_markdown, markdown, encode, random_string, parse_addr)
import email.utils
from six import iteritems
from email.mime.multipart import MIMEMultipart
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
content=None, reply_to=None, cc=[], email_account=None, expose_recipients=None,
inline_images=[]):
content = content or msg
emailobj = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc, email_account=email_account, expose_recipients=expose_recipients)
if not content.strip().startswith("<"):
content = markdown(content)
emailobj.set_html(content, text_content, footer=footer,
print_html=print_html, formatted=formatted, inline_images=inline_images)
if isinstance(attachments, dict):
attachments = [attachments]
for attach in (attachments or []):
emailobj.add_attachment(**attach)
return emailobj
class EMail:
def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=(), email_account=None, expose_recipients=None):
from email import Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, basestring):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = split_emails(recipients)
recipients = filter(None, (strip(r) for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.expose_recipients = expose_recipients
self.msg_root = MIMEMultipart('mixed')
self.msg_multipart = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_multipart)
self.cc = cc or []
self.html_set = False
self.email_account = email_account or get_outgoing_email_account()
def set_html(self, message, text_content = None, footer=None, print_html=None,
formatted=None, inline_images=None):
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html, email_account=self.email_account)
if not self.html_set:
if text_content:
self.set_text(expand_relative_urls(text_content))
else:
self.set_html_as_text(expand_relative_urls(formatted))
self.set_part_html(formatted, inline_images)
self.html_set = True
def set_text(self, message):
from email.mime.text import MIMEText
part = MIMEText(message, 'plain', 'utf-8')
self.msg_multipart.attach(part)
def set_part_html(self, message, inline_images):
from email.mime.text import MIMEText
if inline_images:
related = MIMEMultipart('related')
for image in inline_images:
content_id = random_string(10)
message = re.sub('''src=['"]{0}['"]'''.format(image.get('filename')),
'src="cid:{0}"'.format(content_id), message)
self.add_attachment(image.get('filename'), image.get('filecontent'),
None, content_id=content_id, parent=related)
html_part = MIMEText(message, 'html', 'utf-8')
related.attach(html_part)
self.msg_multipart.attach(related)
else:
self.msg_multipart.attach(MIMEText(message, 'html', 'utf-8'))
def set_html_as_text(self, html):
self.set_text(to_markdown(html))
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
from frappe.utils.file_manager import get_file
res = get_file(n)
if not res:
return
self.add_attachment(res[0], res[1])
def add_attachment(self, fname, fcontent, content_type=None,
parent=None, content_id=None):
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if content_type is None:
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
if isinstance(fcontent, unicode):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
from email import encoders
encoders.encode_base64(part)
if fname:
part.add_header(b'Content-Disposition',
("attachment; filename=\"%s\"" % fname).encode('utf-8'))
if content_id:
part.add_header(b'Content-ID', '<{0}>'.format(content_id))
if not parent:
parent = self.msg_root
parent.attach(part)
def add_pdf_attachment(self, name, html, options=None):
self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')
def validate(self):
from frappe.utils import validate_email_add
if not self.sender:
self.sender = self.email_account.default_sender
validate_email_add(strip(self.sender), True)
self.reply_to = validate_email_add(strip(self.reply_to) or self.sender, True)
self.replace_sender()
self.recipients = [strip(r) for r in self.recipients]
self.cc = [strip(r) for r in self.cc]
for e in self.recipients + (self.cc or []):
validate_email_add(e, True)
def replace_sender(self):
if cint(self.email_account.always_use_account_email_id_as_sender):
self.set_header('X-Original-From', self.sender)
sender_name, sender_email = parse_addr(self.sender)
self.sender = email.utils.formataddr((sender_name or self.email_account.name, self.email_account.email_id))
def set_message_id(self, message_id, is_notification=False):
if message_id:
self.msg_root["Message-Id"] = '<' + message_id + '>'
else:
self.msg_root["Message-Id"] = get_message_id()
self.msg_root["isnotification"] = '<notification>'
if is_notification:
self.msg_root["isnotification"] = '<notification>'
def set_in_reply_to(self, in_reply_to):
self.msg_root["In-Reply-To"] = in_reply_to
def make(self):
headers = {
"Subject": strip(self.subject),
"From": self.sender,
"To": ', '.join(self.recipients) if self.expose_recipients=="header" else "<!--recipient-->",
"Date": email.utils.formatdate(),
"Reply-To": self.reply_to if self.reply_to else None,
"CC": ', '.join(self.cc) if self.cc and self.expose_recipients=="header" else None,
'X-Frappe-Site': get_url(),
}
for key, val in iteritems(headers):
self.set_header(key, val)
for hook in frappe.get_hooks("make_email_body_message"):
frappe.get_attr(hook)(self)
def set_header(self, key, value):
key = encode(key)
value = encode(value)
if self.msg_root.has_key(key):
del self.msg_root[key]
self.msg_root[key] = value
def as_string(self):
self.validate()
self.make()
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None, email_account=None):
if not email_account:
email_account = get_outgoing_email_account(False)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"content": message,
"signature": get_signature(email_account),
"footer": get_footer(email_account, footer),
"title": subject,
"print_html": print_html,
"subject": subject
})
return scrub_urls(rendered_email)
def get_message_id():
return "<{unique}@{site}>".format(
site=frappe.local.site,
unique=email.utils.make_msgid(random_string(10)).split('@')[0].split('<')[1])
def get_signature(email_account):
if email_account and email_account.add_signature and email_account.signature:
return "<br><br>" + email_account.signature
else:
return ""
def get_footer(email_account, footer=None):
footer = footer or ""
if email_account and email_account.footer:
footer += '<div style="margin: 15px auto;">{0}</div>'.format(email_account.footer)
footer += "<!--unsubscribe link here-->"
company_address = frappe.db.get_default("email_footer_address")
if company_address:
footer += '<div style="margin: 15px auto; text-align: center; color: #8d99a6">{0}</div>'\
.format(company_address.replace("\n", "<br>"))
if not cint(frappe.db.get_default("disable_standard_email_footer")):
for default_mail_footer in frappe.get_hooks("default_mail_footer"):
footer += '<div style="margin: 15px auto;">{0}</div>'.format(default_mail_footer)
return footer
| true
| true
|
1c450d8cf947536398acc4c04ba15817d15671ab
| 2,418
|
py
|
Python
|
hummingbot/connector/exchange/huobi/huobi_utils.py
|
cardosofede/hummingbot
|
d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca
|
[
"Apache-2.0"
] | 542
|
2021-12-17T22:34:31.000Z
|
2022-03-31T14:36:23.000Z
|
hummingbot/connector/exchange/huobi/huobi_utils.py
|
cardosofede/hummingbot
|
d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca
|
[
"Apache-2.0"
] | 291
|
2021-12-17T20:07:53.000Z
|
2022-03-31T11:07:23.000Z
|
hummingbot/connector/exchange/huobi/huobi_utils.py
|
cardosofede/hummingbot
|
d1df085bb879a06a7dc77d4fdc8ff6f13d8726ca
|
[
"Apache-2.0"
] | 220
|
2021-12-17T12:41:23.000Z
|
2022-03-31T23:03:22.000Z
|
import re
from decimal import Decimal
from typing import Optional, Tuple
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.connector.exchange.huobi.huobi_ws_post_processor import HuobiWSPostProcessor
from hummingbot.core.data_type.trade_fee import TradeFeeSchema
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
DEFAULT_FEES = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("0.002"),
taker_percent_fee_decimal=Decimal("0.002"),
)
RE_4_LETTERS_QUOTE = re.compile(r"^(\w+)(usdt|husd|usdc)$")
RE_3_LETTERS_QUOTE = re.compile(r"^(\w+)(btc|eth|trx)$")
RE_2_LETTERS_QUOTE = re.compile(r"^(\w+)(ht)$")
CENTRALIZED = True
EXAMPLE_PAIR = "ETH-USDT"
BROKER_ID = "AAc484720a"
def split_trading_pair(trading_pair: str) -> Optional[Tuple[str, str]]:
try:
m = RE_4_LETTERS_QUOTE.match(trading_pair)
if m is None:
m = RE_3_LETTERS_QUOTE.match(trading_pair)
if m is None:
m = RE_2_LETTERS_QUOTE.match(trading_pair)
return m.group(1), m.group(2)
# Exceptions are now logged as warnings in trading pair fetcher
except Exception:
return None
def convert_from_exchange_trading_pair(exchange_trading_pair: str) -> Optional[str]:
if split_trading_pair(exchange_trading_pair) is None:
return None
# Huobi uses lowercase (btcusdt)
base_asset, quote_asset = split_trading_pair(exchange_trading_pair)
return f"{base_asset.upper()}-{quote_asset.upper()}"
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
# Huobi uses lowercase (btcusdt)
return hb_trading_pair.replace("-", "").lower()
def build_api_factory() -> WebAssistantsFactory:
api_factory = WebAssistantsFactory(ws_post_processors=[HuobiWSPostProcessor()])
return api_factory
KEYS = {
"huobi_api_key":
ConfigVar(key="huobi_api_key",
prompt="Enter your Huobi API key >>> ",
required_if=using_exchange("huobi"),
is_secure=True,
is_connect_key=True),
"huobi_secret_key":
ConfigVar(key="huobi_secret_key",
prompt="Enter your Huobi secret key >>> ",
required_if=using_exchange("huobi"),
is_secure=True,
is_connect_key=True),
}
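# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a quick round-trip through
# the pair-conversion helpers defined above. The pairs used are illustrative.
if __name__ == "__main__":
    assert split_trading_pair("btcusdt") == ("btc", "usdt")
    assert convert_from_exchange_trading_pair("btcusdt") == "BTC-USDT"
    assert convert_to_exchange_trading_pair("ETH-USDT") == "ethusdt"
    print("huobi_utils pair helpers OK")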
| 33.123288
| 92
| 0.698511
|
import re
from decimal import Decimal
from typing import Optional, Tuple
from hummingbot.client.config.config_methods import using_exchange
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.connector.exchange.huobi.huobi_ws_post_processor import HuobiWSPostProcessor
from hummingbot.core.data_type.trade_fee import TradeFeeSchema
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
DEFAULT_FEES = TradeFeeSchema(
maker_percent_fee_decimal=Decimal("0.002"),
taker_percent_fee_decimal=Decimal("0.002"),
)
RE_4_LETTERS_QUOTE = re.compile(r"^(\w+)(usdt|husd|usdc)$")
RE_3_LETTERS_QUOTE = re.compile(r"^(\w+)(btc|eth|trx)$")
RE_2_LETTERS_QUOTE = re.compile(r"^(\w+)(ht)$")
CENTRALIZED = True
EXAMPLE_PAIR = "ETH-USDT"
BROKER_ID = "AAc484720a"
def split_trading_pair(trading_pair: str) -> Optional[Tuple[str, str]]:
try:
m = RE_4_LETTERS_QUOTE.match(trading_pair)
if m is None:
m = RE_3_LETTERS_QUOTE.match(trading_pair)
if m is None:
m = RE_2_LETTERS_QUOTE.match(trading_pair)
return m.group(1), m.group(2)
except Exception:
return None
def convert_from_exchange_trading_pair(exchange_trading_pair: str) -> Optional[str]:
if split_trading_pair(exchange_trading_pair) is None:
return None
base_asset, quote_asset = split_trading_pair(exchange_trading_pair)
return f"{base_asset.upper()}-{quote_asset.upper()}"
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
return hb_trading_pair.replace("-", "").lower()
def build_api_factory() -> WebAssistantsFactory:
api_factory = WebAssistantsFactory(ws_post_processors=[HuobiWSPostProcessor()])
return api_factory
KEYS = {
"huobi_api_key":
ConfigVar(key="huobi_api_key",
prompt="Enter your Huobi API key >>> ",
required_if=using_exchange("huobi"),
is_secure=True,
is_connect_key=True),
"huobi_secret_key":
ConfigVar(key="huobi_secret_key",
prompt="Enter your Huobi secret key >>> ",
required_if=using_exchange("huobi"),
is_secure=True,
is_connect_key=True),
}
| true
| true
|
1c450ed72a7fafccbd98ee6d00b861adbfb2e6c6
| 1,681
|
py
|
Python
|
src/django_pg_hll/bulk_update.py
|
M1ha-Shvn/django-pg-hll
|
2530f63c95e02410c710b31b8a34470fbc06fa88
|
[
"BSD-3-Clause"
] | 2
|
2020-09-08T10:10:39.000Z
|
2021-06-08T19:16:51.000Z
|
src/django_pg_hll/bulk_update.py
|
M1ha-Shvn/django-pg-hll
|
2530f63c95e02410c710b31b8a34470fbc06fa88
|
[
"BSD-3-Clause"
] | 4
|
2020-09-08T13:53:27.000Z
|
2021-11-05T14:17:40.000Z
|
src/django_pg_hll/bulk_update.py
|
M1hacka/django-pg-hll
|
2530f63c95e02410c710b31b8a34470fbc06fa88
|
[
"BSD-3-Clause"
] | 1
|
2020-09-07T15:35:22.000Z
|
2020-09-07T15:35:22.000Z
|
"""
django-pg-bulk-update support.
"""
from django.db.models.sql import Query
from .compatibility import django_pg_bulk_update_available
from .fields import HllField
from .values import HllEmpty, HllValue, HllCombinedExpression
# As django-pg-bulk-update library is not required, import only if it exists
if django_pg_bulk_update_available():
from django_pg_bulk_update.set_functions import ConcatSetFunction
from django_pg_bulk_update.compatibility import get_field_db_type
else:
class ConcatSetFunction:
pass
def get_field_db_type(field, conn):
raise NotImplementedError
class HllConcatFunction(ConcatSetFunction):
names = {'hll_concat'}
supported_field_classes = {'HllField'}
def _parse_null_default(self, field, connection, **kwargs):
kwargs['null_default'] = kwargs.get('null_default', HllEmpty())
return super(HllConcatFunction, self)._parse_null_default(field, connection, **kwargs)
def format_field_value(self, field, val, connection, cast_type=False, **kwargs):
if not isinstance(field, HllField):
return super(HllConcatFunction, self).format_field_value(field, val, connection, cast_type=cast_type,
**kwargs)
if not isinstance(val, (HllValue, HllCombinedExpression)):
raise ValueError('val should be HllValue instance')
compiler = Query(field.model).get_compiler(connection=connection)
sql, params = val.as_sql(compiler, connection)
if cast_type:
sql = 'CAST(%s AS %s)' % (sql, get_field_db_type(field, connection))
return sql, tuple(params)
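# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). Assuming the optional
# django-pg-bulk-update package is installed and exposes its usual
# bulk_update(model, values, set_functions=...) helper, the set function above
# is picked up by the 'hll_concat' name registered in `names`. MyModel and
# hll_field are hypothetical; HllEmpty (imported above) stands in for a real
# hll value.
#
#     from django_pg_bulk_update import bulk_update
#
#     bulk_update(MyModel,
#                 [{'id': 1, 'hll_field': HllEmpty()}],
#                 set_functions={'hll_field': 'hll_concat'})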
| 36.543478
| 113
| 0.700178
|
from django.db.models.sql import Query
from .compatibility import django_pg_bulk_update_available
from .fields import HllField
from .values import HllEmpty, HllValue, HllCombinedExpression
if django_pg_bulk_update_available():
from django_pg_bulk_update.set_functions import ConcatSetFunction
from django_pg_bulk_update.compatibility import get_field_db_type
else:
class ConcatSetFunction:
pass
def get_field_db_type(field, conn):
raise NotImplementedError
class HllConcatFunction(ConcatSetFunction):
names = {'hll_concat'}
supported_field_classes = {'HllField'}
def _parse_null_default(self, field, connection, **kwargs):
kwargs['null_default'] = kwargs.get('null_default', HllEmpty())
return super(HllConcatFunction, self)._parse_null_default(field, connection, **kwargs)
def format_field_value(self, field, val, connection, cast_type=False, **kwargs):
if not isinstance(field, HllField):
return super(HllConcatFunction, self).format_field_value(field, val, connection, cast_type=cast_type,
**kwargs)
if not isinstance(val, (HllValue, HllCombinedExpression)):
raise ValueError('val should be HllValue instance')
compiler = Query(field.model).get_compiler(connection=connection)
sql, params = val.as_sql(compiler, connection)
if cast_type:
sql = 'CAST(%s AS %s)' % (sql, get_field_db_type(field, connection))
return sql, tuple(params)
| true
| true
|
1c450f4f0df5c3af0c2e624475ff2ba3c604f2e3
| 5,208
|
py
|
Python
|
qsimov/connectors/parser.py
|
daviddavo/QSimov
|
2df523e911374553c6fa9caf2b895fd62bc46eed
|
[
"MIT"
] | null | null | null |
qsimov/connectors/parser.py
|
daviddavo/QSimov
|
2df523e911374553c6fa9caf2b895fd62bc46eed
|
[
"MIT"
] | null | null | null |
qsimov/connectors/parser.py
|
daviddavo/QSimov
|
2df523e911374553c6fa9caf2b895fd62bc46eed
|
[
"MIT"
] | null | null | null |
"""Module with gate name parsing stuff.
This module has all name parsing stuff
"""
import numpy as np
import re
__rep__ = re.compile(r"^([a-zA-Z0-9]+)" +
r"(\((?:(?:(?:[a-zA-Z]+)|" +
r"(?:[\+\-]?[0-9]+(?:\.[0-9]+)?(?:e[\+\-][0-9]+)?))" +
r"\,\s*)*(?:(?:(?:[a-zA-Z]+)|" +
r"(?:[\+\-]?[0-9]+(?:\.[0-9]+)?" +
r"(?:e[\+\-][0-9]+)?)))\))?(\-1)?$")
def parseGroups(groups):
"""Parse the result of getGroups function, passed as parameter."""
errored = False
g1 = groups[0]
g4 = groups[2] is not None
if groups[1] is not None:
aux = groups[1][1:-1].split(",")
g2 = len(aux)
g3 = []
for attr in aux:
attr = attr.strip()
if len(attr) == 0:
errored = True
break
is_neg = attr[0] == '-'
is_pos = attr[0] == '+'
if is_neg or is_pos:
attr = attr[1:]
if len(attr) == 0:
errored = True
break
if "." in attr:
attr = float(attr)
elif attr[0] in "0123456789":
attr = int(attr)
elif attr.lower() == "pi":
attr = np.pi
elif attr.lower() == "tau":
attr = 2 * np.pi
elif attr.lower() == "e":
attr = np.e
else:
print(attr)
errored = True
break
if is_neg:
attr = -attr
g3.append(attr)
else:
g2 = 0
g3 = None
if not errored:
return (g1, g2, g3, g4)
else:
return None
def getGroups(str_gate):
"""Get matching groups using __rep__ regular expression."""
res = __rep__.match(str_gate)
return parseGroups(res.groups()) if res is not None else None
__gateDict__ = {}
__gateDict__["x"] = ("X", 0, 0)
__gateDict__["not"] = ("X", 0, 0)
__gateDict__["sqrtnot"] = ("SqrtX", 0, 0)
__gateDict__["sqrtx"] = ("SqrtX", 0, 0)
__gateDict__["v"] = ("SqrtX", 0, 0)
__gateDict__["y"] = ("Y", 0, 0)
__gateDict__["z"] = ("Z", 0, 0)
__gateDict__["rx"] = ("RX", 1, 1)
__gateDict__["ry"] = ("RY", 1, 1)
__gateDict__["rz"] = ("RZ", 1, 1)
__gateDict__["r"] = ("R", 1, 1)
__gateDict__["phaseshift"] = ("R", 1, 1)
__gateDict__["phasechange"] = ("R", 1, 1)
__gateDict__["runity"] = ("RootPhase", 1, 1)
__gateDict__["rootphase"] = ("RootPhase", 1, 1)
__gateDict__["h"] = ("H", 0, 1)
__gateDict__["u"] = ("U", 1, 3)
__gateDict__["u3"] = ("U", 3, 3)
__gateDict__["u2"] = ("U2", 2, 2)
__gateDict__["u1"] = ("U1", 1, 1)
__gateDict__["d"] = ("HalfDeutsch", 1, 1)
__gateDict__["deutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["halfdeutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["partialdeutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["xx"] = ("XX", 3, 3)
__gateDict__["isingx"] = ("XX", 3, 3)
__gateDict__["isingxx"] = ("XX", 3, 3)
__gateDict__["yy"] = ("YY", 3, 3)
__gateDict__["isingy"] = ("YY", 3, 3)
__gateDict__["isingyy"] = ("YY", 3, 3)
__gateDict__["zz"] = ("ZZ", 3, 3)
__gateDict__["isingz"] = ("ZZ", 3, 3)
__gateDict__["isingzz"] = ("ZZ", 3, 3)
__gateDict__["swap"] = ("SWAP", 2, 2)
__gateDict__["iswap"] = ("ISWAP", 2, 2)
__gateDict__["sqrtswap"] = ("SqrtSWAP", 2, 2)
def getGateData(gateraw):
"""Get the data of the gate associated with the given string."""
gate = None
if type(gateraw) == str:
groups = getGroups(gateraw)
if not (groups is None):
gatename, nargs, args, invert = groups
gatename = gatename.lower()
if gatename in __gateDict__:
gatemet, minargs, maxargs = __gateDict__[gatename]
if gatename == "u":
if nargs == 3:
gatemet = "U"
minargs = 3
elif nargs == 2:
gatemet = "U2"
minargs, maxargs = 2, 2
elif nargs == 1:
gatemet = "U1"
minargs, maxargs = 1, 1
                if minargs <= nargs <= maxargs:  # I love Python
if nargs == 0:
gate = (gatemet, None, None, None, invert)
elif nargs == 1:
gate = (gatemet, args[0], None, None, invert)
elif nargs == 2:
gate = (gatemet, args[0], args[1], None, invert)
else:
gate = (gatemet, args[0], args[1], args[2], invert)
else:
# print("Received: " + gateraw)
# print("Parsed: " + gate)
raise ValueError(gatename + " gate number of args must " +
"be between " + str(minargs) +
" and " + str(maxargs))
else:
raise ValueError(gatename + " can't be used with QSimovAPI")
else:
raise ValueError(gateraw + " can't be used with QSimovAPI")
else:
raise ValueError("You can only use a string!")
return gate
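# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): parsing a few gate strings
# with the helpers defined above. The gate strings are illustrative.
if __name__ == "__main__":
    print(getGateData("X"))             # ('X', None, None, None, False)
    print(getGateData("RX(pi)"))        # ('RX', 3.141592653589793, None, None, False)
    print(getGateData("U(pi,0,pi)-1"))  # ('U', pi, 0, pi, True): inverted U gate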
| 34.039216
| 78
| 0.460061
|
import numpy as np
import re
__rep__ = re.compile(r"^([a-zA-Z0-9]+)" +
r"(\((?:(?:(?:[a-zA-Z]+)|" +
r"(?:[\+\-]?[0-9]+(?:\.[0-9]+)?(?:e[\+\-][0-9]+)?))" +
r"\,\s*)*(?:(?:(?:[a-zA-Z]+)|" +
r"(?:[\+\-]?[0-9]+(?:\.[0-9]+)?" +
r"(?:e[\+\-][0-9]+)?)))\))?(\-1)?$")
def parseGroups(groups):
errored = False
g1 = groups[0]
g4 = groups[2] is not None
if groups[1] is not None:
aux = groups[1][1:-1].split(",")
g2 = len(aux)
g3 = []
for attr in aux:
attr = attr.strip()
if len(attr) == 0:
errored = True
break
is_neg = attr[0] == '-'
is_pos = attr[0] == '+'
if is_neg or is_pos:
attr = attr[1:]
if len(attr) == 0:
errored = True
break
if "." in attr:
attr = float(attr)
elif attr[0] in "0123456789":
attr = int(attr)
elif attr.lower() == "pi":
attr = np.pi
elif attr.lower() == "tau":
attr = 2 * np.pi
elif attr.lower() == "e":
attr = np.e
else:
print(attr)
errored = True
break
if is_neg:
attr = -attr
g3.append(attr)
else:
g2 = 0
g3 = None
if not errored:
return (g1, g2, g3, g4)
else:
return None
def getGroups(str_gate):
res = __rep__.match(str_gate)
return parseGroups(res.groups()) if res is not None else None
__gateDict__ = {}
__gateDict__["x"] = ("X", 0, 0)
__gateDict__["not"] = ("X", 0, 0)
__gateDict__["sqrtnot"] = ("SqrtX", 0, 0)
__gateDict__["sqrtx"] = ("SqrtX", 0, 0)
__gateDict__["v"] = ("SqrtX", 0, 0)
__gateDict__["y"] = ("Y", 0, 0)
__gateDict__["z"] = ("Z", 0, 0)
__gateDict__["rx"] = ("RX", 1, 1)
__gateDict__["ry"] = ("RY", 1, 1)
__gateDict__["rz"] = ("RZ", 1, 1)
__gateDict__["r"] = ("R", 1, 1)
__gateDict__["phaseshift"] = ("R", 1, 1)
__gateDict__["phasechange"] = ("R", 1, 1)
__gateDict__["runity"] = ("RootPhase", 1, 1)
__gateDict__["rootphase"] = ("RootPhase", 1, 1)
__gateDict__["h"] = ("H", 0, 1)
__gateDict__["u"] = ("U", 1, 3)
__gateDict__["u3"] = ("U", 3, 3)
__gateDict__["u2"] = ("U2", 2, 2)
__gateDict__["u1"] = ("U1", 1, 1)
__gateDict__["d"] = ("HalfDeutsch", 1, 1)
__gateDict__["deutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["halfdeutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["partialdeutsch"] = ("HalfDeutsch", 1, 1)
__gateDict__["xx"] = ("XX", 3, 3)
__gateDict__["isingx"] = ("XX", 3, 3)
__gateDict__["isingxx"] = ("XX", 3, 3)
__gateDict__["yy"] = ("YY", 3, 3)
__gateDict__["isingy"] = ("YY", 3, 3)
__gateDict__["isingyy"] = ("YY", 3, 3)
__gateDict__["zz"] = ("ZZ", 3, 3)
__gateDict__["isingz"] = ("ZZ", 3, 3)
__gateDict__["isingzz"] = ("ZZ", 3, 3)
__gateDict__["swap"] = ("SWAP", 2, 2)
__gateDict__["iswap"] = ("ISWAP", 2, 2)
__gateDict__["sqrtswap"] = ("SqrtSWAP", 2, 2)
def getGateData(gateraw):
gate = None
if type(gateraw) == str:
groups = getGroups(gateraw)
if not (groups is None):
gatename, nargs, args, invert = groups
gatename = gatename.lower()
if gatename in __gateDict__:
gatemet, minargs, maxargs = __gateDict__[gatename]
if gatename == "u":
if nargs == 3:
gatemet = "U"
minargs = 3
elif nargs == 2:
gatemet = "U2"
minargs, maxargs = 2, 2
elif nargs == 1:
gatemet = "U1"
minargs, maxargs = 1, 1
if minargs <= nargs <= maxargs:
if nargs == 0:
gate = (gatemet, None, None, None, invert)
elif nargs == 1:
gate = (gatemet, args[0], None, None, invert)
elif nargs == 2:
gate = (gatemet, args[0], args[1], None, invert)
else:
gate = (gatemet, args[0], args[1], args[2], invert)
else:
raise ValueError(gatename + " gate number of args must " +
"be between " + str(minargs) +
" and " + str(maxargs))
else:
raise ValueError(gatename + " can't be used with QSimovAPI")
else:
raise ValueError(gateraw + " can't be used with QSimovAPI")
else:
raise ValueError("You can only use a string!")
return gate
| true
| true
|
1c45101cb058c0ae07cfe74d84621ac3871e7f5e
| 1,502
|
py
|
Python
|
setup.py
|
ivanfmartinez/pysonofflan
|
60d3f2ab2952207552c1e1ea3ebd796d984e427c
|
[
"MIT"
] | null | null | null |
setup.py
|
ivanfmartinez/pysonofflan
|
60d3f2ab2952207552c1e1ea3ebd796d984e427c
|
[
"MIT"
] | null | null | null |
setup.py
|
ivanfmartinez/pysonofflan
|
60d3f2ab2952207552c1e1ea3ebd796d984e427c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', 'click_log', 'websockets']
setup_requirements = []
test_requirements = ['pytest', 'tox', 'python-coveralls']
setup(
author="Andrew Beveridge",
author_email='andrew@beveridge.uk',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Interface for Sonoff devices running original Itead "
"firmware, in LAN mode.",
entry_points={
'console_scripts': [
'pysonofflan=pysonofflan.cli:cli',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pysonofflan',
name='pysonofflan',
packages=find_packages(include=['pysonofflan']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/beveradb/pysonofflan',
version='0.2.1',
zip_safe=False,
)
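# ---------------------------------------------------------------------------
# Editor's note (not part of the original file): the console_scripts entry
# above exposes a `pysonofflan` command after installation, dispatching to the
# click group pysonofflan.cli:cli. A rough programmatic equivalent, assuming
# the package is installed, would be:
#
#     from pysonofflan.cli import cli
#     cli(["--help"])  # prints CLI usage, then exits via click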
| 29.45098
| 70
| 0.643142
|
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ['Click>=7.0', 'click_log', 'websockets']
setup_requirements = []
test_requirements = ['pytest', 'tox', 'python-coveralls']
setup(
author="Andrew Beveridge",
author_email='andrew@beveridge.uk',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Interface for Sonoff devices running original Itead "
"firmware, in LAN mode.",
entry_points={
'console_scripts': [
'pysonofflan=pysonofflan.cli:cli',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='pysonofflan',
name='pysonofflan',
packages=find_packages(include=['pysonofflan']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/beveradb/pysonofflan',
version='0.2.1',
zip_safe=False,
)
| true
| true
|
1c451056684517a5a35b4eeda9fafd24b1138137
| 6,010
|
py
|
Python
|
tob-api/api/indy/agent.py
|
mehmetaydar/TheOrgBook
|
951fcdbc45d2b8f7f3a7887aac19c7f04b70e23a
|
[
"Apache-2.0"
] | 1
|
2021-02-23T14:15:42.000Z
|
2021-02-23T14:15:42.000Z
|
tob-api/api/indy/agent.py
|
mehmetaydar/TheOrgBook
|
951fcdbc45d2b8f7f3a7887aac19c7f04b70e23a
|
[
"Apache-2.0"
] | null | null | null |
tob-api/api/indy/agent.py
|
mehmetaydar/TheOrgBook
|
951fcdbc45d2b8f7f3a7887aac19c7f04b70e23a
|
[
"Apache-2.0"
] | null | null | null |
import os
import threading
from von_agent.nodepool import NodePool
from von_agent.wallet import Wallet
from tob_api import hyperledger_indy
from von_agent.agents import Issuer as VonIssuer
from von_agent.agents import Verifier as VonVerifier
from von_agent.agents import HolderProver as VonHolderProver
from typing import Set, Union
from api import apps
import logging
class Issuer:
def __init__(self):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
self.pool = NodePool(
'the-org-book-issuer',
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Issuer_Wallet'
issuer_type = 'virtual'
issuer_config = {'freshness_time':0}
issuer_creds = {'key':''}
self.__logger.debug("Issuer __init__>>> {} {} {}".format(issuer_type, issuer_config, issuer_creds))
issuer_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
issuer_type,
issuer_config,
issuer_creds)
self.__logger.debug("Issuer __init__>>> {} {} {}".format(issuer_type, issuer_config, issuer_creds))
self.instance = VonIssuer(
# self.pool,
issuer_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
return await self.instance.open()
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
class Verifier:
def __init__(self):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
self.pool = NodePool(
'the-org-book-verifier',
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Verifier_Wallet'
verifier_type = 'virtual'
verifier_config = {'freshness_time':0}
verifier_creds = {'key':''}
self.__logger.debug("Verifier __init__>>> {} {} {}".format(verifier_type, verifier_config, verifier_creds))
verifier_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
verifier_type,
verifier_config,
verifier_creds)
self.__logger.debug("Verifier __init__>>> {} {} {}".format(verifier_type, verifier_config, verifier_creds))
self.instance = VonVerifier(
# self.pool,
verifier_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
return await self.instance.open()
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
class Holder:
def __init__(self, legal_entity_id: str = None):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
thread_id = threading.get_ident()
self.pool = NodePool(
'the-org-book-holder-' + str(thread_id),
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Holder_Wallet' + '$$' + str(thread_id)
holder_type = os.environ.get('INDY_WALLET_TYPE')
if holder_type == 'remote':
# wallet_name = wallet_name + "$$" + str(thread_id)
holder_url = os.environ.get('INDY_WALLET_URL')
holder_config = {'endpoint':holder_url,'ping':'schema/','auth':'api-token-auth/','keyval':'keyval/','freshness_time':0}
holder_creds = {'auth_token':apps.get_remote_wallet_token(),'virtual_wallet':legal_entity_id}
self.__logger.debug('Using remote Cfg: {} Creds: {}'.format(holder_config, holder_creds))
else:
# TODO force to virtual for now
holder_type = 'virtual'
holder_config = {'freshness_time':0}
holder_creds = {'key':'','virtual_wallet':legal_entity_id}
self.__logger.debug('Using virtual Cfg: {} Creds: {}'.format(holder_config, holder_creds))
self.__logger.debug("Holder __init__>>> {} {} {}".format(holder_type, holder_config, holder_creds))
holder_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
holder_type,
holder_config,
holder_creds)
self.__logger.debug("Holder __init__>>> {} {} {}".format(holder_type, holder_config, holder_creds))
self.instance = VonHolderProver(
# self.pool,
holder_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
instance = await self.instance.open()
# TODO should only create this once, and only in the root wallet (virtual_wallet == None)
await self.instance.create_link_secret('secret')
return instance
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
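# Illustrative usage sketch (hypothetical, not from the module above): Issuer,
# Verifier and Holder are async context managers, so a caller opens the node
# pool and wallet with `async with`, assuming INDY_WALLET_SEED and the genesis
# transaction path are configured as the classes expect.
import asyncio

async def _example_issuer_usage():
    # __aenter__ opens the pool and wallet and yields the opened VonIssuer.
    async with Issuer() as issuer:
        return issuer

# A real caller would drive this from an event loop, for example:
# asyncio.get_event_loop().run_until_complete(_example_issuer_usage())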
| 34.94186
| 131
| 0.620632
|
import os
import threading
from von_agent.nodepool import NodePool
from von_agent.wallet import Wallet
from tob_api import hyperledger_indy
from von_agent.agents import Issuer as VonIssuer
from von_agent.agents import Verifier as VonVerifier
from von_agent.agents import HolderProver as VonHolderProver
from typing import Set, Union
from api import apps
import logging
class Issuer:
def __init__(self):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
self.pool = NodePool(
'the-org-book-issuer',
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Issuer_Wallet'
issuer_type = 'virtual'
issuer_config = {'freshness_time':0}
issuer_creds = {'key':''}
self.__logger.debug("Issuer __init__>>> {} {} {}".format(issuer_type, issuer_config, issuer_creds))
issuer_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
issuer_type,
issuer_config,
issuer_creds)
self.__logger.debug("Issuer __init__>>> {} {} {}".format(issuer_type, issuer_config, issuer_creds))
self.instance = VonIssuer(
issuer_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
return await self.instance.open()
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
class Verifier:
def __init__(self):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
self.pool = NodePool(
'the-org-book-verifier',
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Verifier_Wallet'
verifier_type = 'virtual'
verifier_config = {'freshness_time':0}
verifier_creds = {'key':''}
self.__logger.debug("Verifier __init__>>> {} {} {}".format(verifier_type, verifier_config, verifier_creds))
verifier_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
verifier_type,
verifier_config,
verifier_creds)
self.__logger.debug("Verifier __init__>>> {} {} {}".format(verifier_type, verifier_config, verifier_creds))
self.instance = VonVerifier(
verifier_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
return await self.instance.open()
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
class Holder:
def __init__(self, legal_entity_id: str = None):
WALLET_SEED = os.environ.get('INDY_WALLET_SEED')
        if not WALLET_SEED or len(WALLET_SEED) != 32:
raise Exception('INDY_WALLET_SEED must be set and be 32 characters long.')
self.__logger = logging.getLogger(__name__)
config = hyperledger_indy.config()
thread_id = threading.get_ident()
self.pool = NodePool(
'the-org-book-holder-' + str(thread_id),
config['genesis_txn_path'])
wallet_name = 'TheOrgBook_Holder_Wallet' + '$$' + str(thread_id)
holder_type = os.environ.get('INDY_WALLET_TYPE')
if holder_type == 'remote':
holder_url = os.environ.get('INDY_WALLET_URL')
holder_config = {'endpoint':holder_url,'ping':'schema/','auth':'api-token-auth/','keyval':'keyval/','freshness_time':0}
holder_creds = {'auth_token':apps.get_remote_wallet_token(),'virtual_wallet':legal_entity_id}
self.__logger.debug('Using remote Cfg: {} Creds: {}'.format(holder_config, holder_creds))
else:
holder_type = 'virtual'
holder_config = {'freshness_time':0}
holder_creds = {'key':'','virtual_wallet':legal_entity_id}
self.__logger.debug('Using virtual Cfg: {} Creds: {}'.format(holder_config, holder_creds))
self.__logger.debug("Holder __init__>>> {} {} {}".format(holder_type, holder_config, holder_creds))
holder_wallet = Wallet(
self.pool,
WALLET_SEED,
wallet_name,
holder_type,
holder_config,
holder_creds)
self.__logger.debug("Holder __init__>>> {} {} {}".format(holder_type, holder_config, holder_creds))
self.instance = VonHolderProver(
holder_wallet
)
async def __aenter__(self):
await self.pool.open()
await self.instance.wallet.create()
instance = await self.instance.open()
await self.instance.create_link_secret('secret')
return instance
async def __aexit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.__logger.error(exc_type, exc_value, traceback)
await self.instance.close()
await self.pool.close()
| true
| true
|
1c45137fe7f938199493e48688d1b72f051eeb5e
| 821
|
py
|
Python
|
toTheMoon/offer66_4_SearchInTwoDimensionalArray.py
|
jercas/offer66-leetcode-newcode
|
a2e5256f27dbfb23fc34119fc857cd9b00e28c03
|
[
"MIT"
] | null | null | null |
toTheMoon/offer66_4_SearchInTwoDimensionalArray.py
|
jercas/offer66-leetcode-newcode
|
a2e5256f27dbfb23fc34119fc857cd9b00e28c03
|
[
"MIT"
] | null | null | null |
toTheMoon/offer66_4_SearchInTwoDimensionalArray.py
|
jercas/offer66-leetcode-newcode
|
a2e5256f27dbfb23fc34119fc857cd9b00e28c03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 26 10:47:52 2019
@author: jercas
"""
"""
offer66-4
'Search in a two-dimensional array'
In a two-dimensional array in which every row has the same length, each row is sorted in
increasing order from left to right and each column is sorted in increasing order from top
to bottom. Write a function that, given such a two-dimensional array and an integer,
determines whether the array contains that integer.
"""
class Solution:
	# array: a two-dimensional list (list of rows)
def Find(self, target, array):
if len(array[0]) == 0:
return False
		n, m = len(array), len(array[0])
		# start the search from the top-right corner
		row, col = 0, m - 1
		while row < n and col >= 0:
if array[row][col] == target:
return True
elif array[row][col] > target:
col -= 1
else:
row += 1
return False
if __name__ == "__main__":
Q1, Q2 = [[1,2,8,9],[2,4,9,12],[4,7,10,13],[6,8,11,15]], [7, 5, 15, 1, 16, 0]
A = [True, False, True, True, False, False]
solution = Solution()
for i in range(6):
if solution.Find(Q2[i], Q1) == A[i]:
print("AC")
print(solution.Find(16, [[]]))
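# The search above starts at the top-right corner and, on each step, discards
# either one column (current value too large) or one row (current value too
# small), so it runs in O(rows + cols) time. A quick illustrative check on a
# hypothetical non-square input:
if __name__ == "__main__":
    rect = [[1, 3, 5], [2, 4, 6]]
    assert Solution().Find(4, rect) is True
    assert Solution().Find(7, rect) is False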
| 20.525
| 78
| 0.595615
|
class Solution:
def Find(self, target, array):
if len(array[0]) == 0:
return False
		n, m = len(array), len(array[0])
		row, col = 0, m - 1
		while row < n and col >= 0:
if array[row][col] == target:
return True
elif array[row][col] > target:
col -= 1
else:
row += 1
return False
if __name__ == "__main__":
Q1, Q2 = [[1,2,8,9],[2,4,9,12],[4,7,10,13],[6,8,11,15]], [7, 5, 15, 1, 16, 0]
A = [True, False, True, True, False, False]
solution = Solution()
for i in range(6):
if solution.Find(Q2[i], Q1) == A[i]:
print("AC")
print(solution.Find(16, [[]]))
| true
| true
|
1c4513e8f055ddeb4859242b1de268020ecb30ae
| 563
|
py
|
Python
|
examples/mnist/utils.py
|
gfrogat/prunhild
|
55769c6f2eca2748288c24826dd3bb14deaf5707
|
[
"MIT"
] | 28
|
2019-05-07T03:27:30.000Z
|
2022-02-02T19:49:12.000Z
|
examples/mnist/utils.py
|
gfrogat/prunhild
|
55769c6f2eca2748288c24826dd3bb14deaf5707
|
[
"MIT"
] | null | null | null |
examples/mnist/utils.py
|
gfrogat/prunhild
|
55769c6f2eca2748288c24826dd3bb14deaf5707
|
[
"MIT"
] | 5
|
2019-05-14T00:21:15.000Z
|
2021-11-25T13:26:44.000Z
|
def get_parameter_stats(model):
n_zero = 0.0
n_total = 0.0
for param in model.parameters():
# assume values smaller than 1e-7 (for 32bit) to be zero
n_zero += param.data.abs().le(1e-7).sum().item()
n_total += param.data.numel()
ratio_zero = n_zero / n_total
return n_zero, n_total, ratio_zero
def print_parameter_stats(parameter_stats):
n_zero, n_total, ratio_zero = parameter_stats
print(
"[Model] parameters zero: ({} / {} | {:.2f})".format(
n_zero, n_total, ratio_zero
)
)
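# Illustrative usage sketch (assumes PyTorch is installed; the tiny Sequential
# model below is hypothetical and stands in for any nn.Module):
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 4), nn.Linear(4, 2))
    print_parameter_stats(get_parameter_stats(model))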
| 28.15
| 64
| 0.614565
|
def get_parameter_stats(model):
n_zero = 0.0
n_total = 0.0
for param in model.parameters():
n_zero += param.data.abs().le(1e-7).sum().item()
n_total += param.data.numel()
ratio_zero = n_zero / n_total
return n_zero, n_total, ratio_zero
def print_parameter_stats(parameter_stats):
n_zero, n_total, ratio_zero = parameter_stats
print(
"[Model] parameters zero: ({} / {} | {:.2f})".format(
n_zero, n_total, ratio_zero
)
)
| true
| true
|
1c4517f681dbd5414de6d4df269356db3a4b654d
| 7,253
|
py
|
Python
|
tensorflow/python/debug/lib/source_remote_test.py
|
harunpehlivan/tensorflow
|
376e2cfdab31f4da251ea2e50992a9bf97fd171b
|
[
"Apache-2.0"
] | 16
|
2018-01-30T22:16:13.000Z
|
2021-07-18T10:00:55.000Z
|
tensorflow/python/debug/lib/source_remote_test.py
|
harunpehlivan/tensorflow
|
376e2cfdab31f4da251ea2e50992a9bf97fd171b
|
[
"Apache-2.0"
] | 3
|
2018-05-09T11:31:58.000Z
|
2021-01-27T12:26:21.000Z
|
tensorflow/python/debug/lib/source_remote_test.py
|
harunpehlivan/tensorflow
|
376e2cfdab31f4da251ea2e50992a9bf97fd171b
|
[
"Apache-2.0"
] | 13
|
2018-02-22T21:04:13.000Z
|
2020-11-17T11:38:36.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_remote."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import source_remote
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
return tf_inspect.stack()[1][2] - 1
class SendTracebacksTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
test_util.TensorFlowTestCase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address = "localhost:%d" % cls._server_port
(cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,
cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address_2 = "localhost:%d" % cls._server_port_2
cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
cls._server_2.stop_server().wait()
cls._server_thread_2.join()
test_util.TensorFlowTestCase.tearDownClass()
def tearDown(self):
ops.reset_default_graph()
self._server.clear_data()
self._server_2.clear_data()
super(SendTracebacksTest, self).tearDown()
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
"""Find the first trace of an op that belongs to the TF Python library."""
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace[0]):
return trace
def testSendGraphTracebacksToSingleDebugServer(self):
this_func_name = "testSendGraphTracebacksToSingleDebugServer"
with session.Session() as sess:
a = variables.Variable(21.0, name="a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="b")
b_lineno = line_number_above()
math_ops.add(a, b, name="x")
x_lineno = line_number_above()
send_stack = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
self._server_address, "dummy_run_key", send_stack, sess.graph)
tb = self._server.query_op_traceback("a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
self.assertEqual(
"a = variables.Variable(21.0, name=\"a\")",
self._server.query_source_file_line(__file__, a_lineno))
      # Files in the TensorFlow code base should not have been sent.
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
self._server.query_call_types())
self.assertEqual(["dummy_run_key"], self._server.query_call_keys())
self.assertEqual(
[sess.graph.version], self._server.query_graph_versions())
def testSendGraphTracebacksToTwoDebugServers(self):
this_func_name = "testSendGraphTracebacksToTwoDebugServers"
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
servers = [self._server, self._server_2]
for server in servers:
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
self.assertEqual(
"x = math_ops.add(a, b, name=\"two/x\")",
server.query_source_file_line(__file__, x_lineno))
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(x.op)
with self.assertRaises(ValueError):
server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
server.query_call_types())
self.assertEqual(["dummy_run_key"], server.query_call_keys())
self.assertEqual([sess.graph.version], server.query_graph_versions())
def testSendEagerTracebacksToSingleDebugServer(self):
this_func_name = "testSendEagerTracebacksToSingleDebugServer"
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_eager_tracebacks(self._server_address, send_traceback)
self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],
self._server.query_call_types())
self.assertIn((self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
if __name__ == "__main__":
googletest.main()
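# line_number_above() works because tf_inspect.stack()[1][2] is the caller's
# current line number, so subtracting one yields the line of the statement
# immediately above the call site. The same idea with only the standard
# library, shown as an illustrative sketch:
import inspect

def line_number_above_stdlib():
    # Index 2 of a stack frame record is the caller's line number.
    return inspect.stack()[1][2] - 1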
| 42.168605
| 88
| 0.724252
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import source_remote
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
return tf_inspect.stack()[1][2] - 1
class SendTracebacksTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
test_util.TensorFlowTestCase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address = "localhost:%d" % cls._server_port
(cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,
cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address_2 = "localhost:%d" % cls._server_port_2
cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))
@classmethod
def tearDownClass(cls):
cls._server.stop_server().wait()
cls._server_thread.join()
cls._server_2.stop_server().wait()
cls._server_thread_2.join()
test_util.TensorFlowTestCase.tearDownClass()
def tearDown(self):
ops.reset_default_graph()
self._server.clear_data()
self._server_2.clear_data()
super(SendTracebacksTest, self).tearDown()
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace[0]):
return trace
def testSendGraphTracebacksToSingleDebugServer(self):
this_func_name = "testSendGraphTracebacksToSingleDebugServer"
with session.Session() as sess:
a = variables.Variable(21.0, name="a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="b")
b_lineno = line_number_above()
math_ops.add(a, b, name="x")
x_lineno = line_number_above()
send_stack = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
self._server_address, "dummy_run_key", send_stack, sess.graph)
tb = self._server.query_op_traceback("a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
self.assertEqual(
"a = variables.Variable(21.0, name=\"a\")",
self._server.query_source_file_line(__file__, a_lineno))
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
self._server.query_call_types())
self.assertEqual(["dummy_run_key"], self._server.query_call_keys())
self.assertEqual(
[sess.graph.version], self._server.query_graph_versions())
def testSendGraphTracebacksToTwoDebugServers(self):
this_func_name = "testSendGraphTracebacksToTwoDebugServers"
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
servers = [self._server, self._server_2]
for server in servers:
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
self.assertEqual(
"x = math_ops.add(a, b, name=\"two/x\")",
server.query_source_file_line(__file__, x_lineno))
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(x.op)
with self.assertRaises(ValueError):
server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
server.query_call_types())
self.assertEqual(["dummy_run_key"], server.query_call_keys())
self.assertEqual([sess.graph.version], server.query_graph_versions())
def testSendEagerTracebacksToSingleDebugServer(self):
this_func_name = "testSendEagerTracebacksToSingleDebugServer"
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_eager_tracebacks(self._server_address, send_traceback)
self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],
self._server.query_call_types())
self.assertIn((self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
if __name__ == "__main__":
googletest.main()
| true
| true
|
1c4519051ae3887019459e07c09bc75536f88eb7
| 8,570
|
py
|
Python
|
fastreid/config/defaults.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | 1
|
2020-12-24T09:32:21.000Z
|
2020-12-24T09:32:21.000Z
|
fastreid/config/defaults.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | null | null | null |
fastreid/config/defaults.py
|
tenghehan/reid_without_id
|
d1d0ff273b1ef19fc6da8cbbf210527779b37455
|
[
"MIT"
] | null | null | null |
from .config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# MODEL
# -----------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = 'Baseline'
_C.MODEL.FREEZE_LAYERS = ['']
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
_C.MODEL.BACKBONE.DEPTH = "50x"
_C.MODEL.BACKBONE.LAST_STRIDE = 1
# Backbone feature dimension
_C.MODEL.BACKBONE.FEAT_DIM = 2048
# Normalization method for the convolution layers.
_C.MODEL.BACKBONE.NORM = "BN"
# If use IBN block in backbone
_C.MODEL.BACKBONE.WITH_IBN = False
# If use SE block in backbone
_C.MODEL.BACKBONE.WITH_SE = False
# If use Non-local block in backbone
_C.MODEL.BACKBONE.WITH_NL = False
# If use ImageNet pretrain model
_C.MODEL.BACKBONE.PRETRAIN = True
# Pretrain model path
_C.MODEL.BACKBONE.PRETRAIN_PATH = ''
# ---------------------------------------------------------------------------- #
# REID HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.HEADS = CN()
_C.MODEL.HEADS.NAME = "EmbeddingHead"
# Normalization method for the convolution layers.
_C.MODEL.HEADS.NORM = "BN"
# Number of identity
_C.MODEL.HEADS.NUM_CLASSES = 0
# Embedding dimension in head
_C.MODEL.HEADS.EMBEDDING_DIM = 0
# If use BNneck in embedding
_C.MODEL.HEADS.WITH_BNNECK = True
# Triplet feature using feature before(after) bnneck
_C.MODEL.HEADS.NECK_FEAT = "before" # options: before, after
# Pooling layer type
_C.MODEL.HEADS.POOL_LAYER = "avgpool"
# Classification layer type
_C.MODEL.HEADS.CLS_LAYER = "linear" # "arcSoftmax" or "circleSoftmax"
# Margin and Scale for margin-based classification layer
_C.MODEL.HEADS.MARGIN = 0.15
_C.MODEL.HEADS.SCALE = 128
# ---------------------------------------------------------------------------- #
# REID LOSSES options
# ---------------------------------------------------------------------------- #
_C.MODEL.LOSSES = CN()
_C.MODEL.LOSSES.NAME = ("CrossEntropyLoss",)
# Cross Entropy Loss options
_C.MODEL.LOSSES.CE = CN()
# if epsilon == 0, it means no label smooth regularization,
# if epsilon == -1, it means adaptive label smooth regularization
_C.MODEL.LOSSES.CE.EPSILON = 0.0
_C.MODEL.LOSSES.CE.ALPHA = 0.2
_C.MODEL.LOSSES.CE.SCALE = 1.0
# Triplet Loss options
_C.MODEL.LOSSES.TRI = CN()
_C.MODEL.LOSSES.TRI.MARGIN = 0.3
_C.MODEL.LOSSES.TRI.NORM_FEAT = False
_C.MODEL.LOSSES.TRI.HARD_MINING = True
_C.MODEL.LOSSES.TRI.SCALE = 1.0
# Circle Loss options
_C.MODEL.LOSSES.CIRCLE = CN()
_C.MODEL.LOSSES.CIRCLE.MARGIN = 0.25
_C.MODEL.LOSSES.CIRCLE.ALPHA = 128
_C.MODEL.LOSSES.CIRCLE.SCALE = 1.0
# Focal Loss options
_C.MODEL.LOSSES.FL = CN()
_C.MODEL.LOSSES.FL.ALPHA = 0.25
_C.MODEL.LOSSES.FL.GAMMA = 2
_C.MODEL.LOSSES.FL.SCALE = 1.0
# Path to a checkpoint file to be loaded to the model. You can find available models in the model zoo.
_C.MODEL.WEIGHTS = ""
# Values to be used for image normalization
_C.MODEL.PIXEL_MEAN = [0.485*255, 0.456*255, 0.406*255]
# Values to be used for image normalization
_C.MODEL.PIXEL_STD = [0.229*255, 0.224*255, 0.225*255]
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the image during training
_C.INPUT.SIZE_TRAIN = [256, 128]
# Size of the image during test
_C.INPUT.SIZE_TEST = [256, 128]
# Random probability for image horizontal flip
_C.INPUT.DO_FLIP = True
_C.INPUT.FLIP_PROB = 0.5
# Value of padding size
_C.INPUT.DO_PAD = True
_C.INPUT.PADDING_MODE = 'constant'
_C.INPUT.PADDING = 10
# Random color jitter
_C.INPUT.CJ = CN()
_C.INPUT.CJ.ENABLED = False
_C.INPUT.CJ.PROB = 0.8
_C.INPUT.CJ.BRIGHTNESS = 0.15
_C.INPUT.CJ.CONTRAST = 0.15
_C.INPUT.CJ.SATURATION = 0.1
_C.INPUT.CJ.HUE = 0.1
# Auto augmentation
_C.INPUT.DO_AUTOAUG = False
# Augmix augmentation
_C.INPUT.DO_AUGMIX = False
# Random Erasing
_C.INPUT.REA = CN()
_C.INPUT.REA.ENABLED = False
_C.INPUT.REA.PROB = 0.5
_C.INPUT.REA.MEAN = [0.596*255, 0.558*255, 0.497*255] # [0.485*255, 0.456*255, 0.406*255]
# Random Patch
_C.INPUT.RPT = CN()
_C.INPUT.RPT.ENABLED = False
_C.INPUT.RPT.PROB = 0.5
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training
_C.DATASETS.NAMES = ("Market1501",)
# List of the dataset names for testing
_C.DATASETS.TESTS = ("Market1501",)
# Combine trainset and testset joint training
_C.DATASETS.COMBINEALL = False
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# P/K Sampler for data loading
_C.DATALOADER.PK_SAMPLER = True
# Naive sampler which doesn't consider balanced identity sampling
_C.DATALOADER.NAIVE_WAY = False
# Number of instances for each person
_C.DATALOADER.NUM_INSTANCE = 4
_C.DATALOADER.NUM_WORKERS = 8
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# AUTOMATIC MIXED PRECISION
_C.SOLVER.AMP_ENABLED = False
# Optimizer
_C.SOLVER.OPT = "Adam"
_C.SOLVER.MAX_ITER = 120
_C.SOLVER.BASE_LR = 3e-4
_C.SOLVER.BIAS_LR_FACTOR = 1.
_C.SOLVER.HEADS_LR_FACTOR = 1.
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
# Multi-step learning rate options
_C.SOLVER.SCHED = "WarmupMultiStepLR"
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = [30, 55]
# Cosine annealing learning rate options
_C.SOLVER.DELAY_ITERS = 0
_C.SOLVER.ETA_MIN_LR = 3e-7
# Warmup options
_C.SOLVER.WARMUP_FACTOR = 0.1
_C.SOLVER.WARMUP_ITERS = 10
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.FREEZE_ITERS = 0
# SWA options
_C.SOLVER.SWA = CN()
_C.SOLVER.SWA.ENABLED = False
_C.SOLVER.SWA.ITER = 10
_C.SOLVER.SWA.PERIOD = 2
_C.SOLVER.SWA.LR_FACTOR = 10.
_C.SOLVER.SWA.ETA_MIN_LR = 3.5e-6
_C.SOLVER.SWA.LR_SCHED = False
_C.SOLVER.CHECKPOINT_PERIOD = 20
# Number of images per batch across all machines.
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 64
_C.TEST = CN()
_C.TEST.EVAL_PERIOD = 20
# Number of images per batch in one process.
_C.TEST.IMS_PER_BATCH = 64
_C.TEST.METRIC = "cosine"
_C.TEST.ROC_ENABLED = False
# Average query expansion
_C.TEST.AQE = CN()
_C.TEST.AQE.ENABLED = False
_C.TEST.AQE.ALPHA = 3.0
_C.TEST.AQE.QE_TIME = 1
_C.TEST.AQE.QE_K = 5
# Re-rank
_C.TEST.RERANK = CN()
_C.TEST.RERANK.ENABLED = False
_C.TEST.RERANK.K1 = 20
_C.TEST.RERANK.K2 = 6
_C.TEST.RERANK.LAMBDA = 0.3
# Precise batchnorm
_C.TEST.PRECISE_BN = CN()
_C.TEST.PRECISE_BN.ENABLED = False
_C.TEST.PRECISE_BN.DATASET = 'Market1501'
_C.TEST.PRECISE_BN.NUM_ITER = 300
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "logs/"
# Benchmark different cudnn algorithms.
# If input images have very different sizes, this option will have large overhead
# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
# If input images have the same or similar sizes, benchmark is often helpful.
_C.CUDNN_BENCHMARK = False
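# Illustrative usage sketch (assumes the CfgNode imported above follows the
# usual yacs-style API with clone/merge_from_list/freeze):
#
#     cfg = _C.clone()
#     cfg.merge_from_list(["MODEL.BACKBONE.WITH_IBN", True, "SOLVER.BASE_LR", 3.5e-4])
#     cfg.freeze()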
| 31.277372
| 102
| 0.592065
|
from .config import CfgNode as CN
_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = 'Baseline'
_C.MODEL.FREEZE_LAYERS = ['']
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
_C.MODEL.BACKBONE.DEPTH = "50x"
_C.MODEL.BACKBONE.LAST_STRIDE = 1
_C.MODEL.BACKBONE.FEAT_DIM = 2048
_C.MODEL.BACKBONE.NORM = "BN"
_C.MODEL.BACKBONE.WITH_IBN = False
_C.MODEL.BACKBONE.WITH_SE = False
_C.MODEL.BACKBONE.WITH_NL = False
_C.MODEL.BACKBONE.PRETRAIN = True
_C.MODEL.BACKBONE.PRETRAIN_PATH = ''
_C.MODEL.HEADS = CN()
_C.MODEL.HEADS.NAME = "EmbeddingHead"
_C.MODEL.HEADS.NORM = "BN"
_C.MODEL.HEADS.NUM_CLASSES = 0
_C.MODEL.HEADS.EMBEDDING_DIM = 0
_C.MODEL.HEADS.WITH_BNNECK = True
_C.MODEL.HEADS.NECK_FEAT = "before"
_C.MODEL.HEADS.POOL_LAYER = "avgpool"
_C.MODEL.HEADS.CLS_LAYER = "linear"
_C.MODEL.HEADS.MARGIN = 0.15
_C.MODEL.HEADS.SCALE = 128
_C.MODEL.LOSSES = CN()
_C.MODEL.LOSSES.NAME = ("CrossEntropyLoss",)
_C.MODEL.LOSSES.CE = CN()
_C.MODEL.LOSSES.CE.EPSILON = 0.0
_C.MODEL.LOSSES.CE.ALPHA = 0.2
_C.MODEL.LOSSES.CE.SCALE = 1.0
_C.MODEL.LOSSES.TRI = CN()
_C.MODEL.LOSSES.TRI.MARGIN = 0.3
_C.MODEL.LOSSES.TRI.NORM_FEAT = False
_C.MODEL.LOSSES.TRI.HARD_MINING = True
_C.MODEL.LOSSES.TRI.SCALE = 1.0
_C.MODEL.LOSSES.CIRCLE = CN()
_C.MODEL.LOSSES.CIRCLE.MARGIN = 0.25
_C.MODEL.LOSSES.CIRCLE.ALPHA = 128
_C.MODEL.LOSSES.CIRCLE.SCALE = 1.0
_C.MODEL.LOSSES.FL = CN()
_C.MODEL.LOSSES.FL.ALPHA = 0.25
_C.MODEL.LOSSES.FL.GAMMA = 2
_C.MODEL.LOSSES.FL.SCALE = 1.0
_C.MODEL.WEIGHTS = ""
_C.MODEL.PIXEL_MEAN = [0.485*255, 0.456*255, 0.406*255]
_C.MODEL.PIXEL_STD = [0.229*255, 0.224*255, 0.225*255]
_C.INPUT = CN()
_C.INPUT.SIZE_TRAIN = [256, 128]
_C.INPUT.SIZE_TEST = [256, 128]
_C.INPUT.DO_FLIP = True
_C.INPUT.FLIP_PROB = 0.5
_C.INPUT.DO_PAD = True
_C.INPUT.PADDING_MODE = 'constant'
_C.INPUT.PADDING = 10
_C.INPUT.CJ = CN()
_C.INPUT.CJ.ENABLED = False
_C.INPUT.CJ.PROB = 0.8
_C.INPUT.CJ.BRIGHTNESS = 0.15
_C.INPUT.CJ.CONTRAST = 0.15
_C.INPUT.CJ.SATURATION = 0.1
_C.INPUT.CJ.HUE = 0.1
_C.INPUT.DO_AUTOAUG = False
_C.INPUT.DO_AUGMIX = False
_C.INPUT.REA = CN()
_C.INPUT.REA.ENABLED = False
_C.INPUT.REA.PROB = 0.5
_C.INPUT.REA.MEAN = [0.596*255, 0.558*255, 0.497*255]
_C.INPUT.RPT = CN()
_C.INPUT.RPT.ENABLED = False
_C.INPUT.RPT.PROB = 0.5
_C.DATASETS = CN()
_C.DATASETS.NAMES = ("Market1501",)
_C.DATASETS.TESTS = ("Market1501",)
_C.DATASETS.COMBINEALL = False
_C.DATALOADER = CN()
_C.DATALOADER.PK_SAMPLER = True
_C.DATALOADER.NAIVE_WAY = False
# Number of instances for each person
_C.DATALOADER.NUM_INSTANCE = 4
_C.DATALOADER.NUM_WORKERS = 8
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# AUTOMATIC MIXED PRECISION
_C.SOLVER.AMP_ENABLED = False
# Optimizer
_C.SOLVER.OPT = "Adam"
_C.SOLVER.MAX_ITER = 120
_C.SOLVER.BASE_LR = 3e-4
_C.SOLVER.BIAS_LR_FACTOR = 1.
_C.SOLVER.HEADS_LR_FACTOR = 1.
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.
# Multi-step learning rate options
_C.SOLVER.SCHED = "WarmupMultiStepLR"
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = [30, 55]
# Cosine annealing learning rate options
_C.SOLVER.DELAY_ITERS = 0
_C.SOLVER.ETA_MIN_LR = 3e-7
# Warmup options
_C.SOLVER.WARMUP_FACTOR = 0.1
_C.SOLVER.WARMUP_ITERS = 10
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.FREEZE_ITERS = 0
# SWA options
_C.SOLVER.SWA = CN()
_C.SOLVER.SWA.ENABLED = False
_C.SOLVER.SWA.ITER = 10
_C.SOLVER.SWA.PERIOD = 2
_C.SOLVER.SWA.LR_FACTOR = 10.
_C.SOLVER.SWA.ETA_MIN_LR = 3.5e-6
_C.SOLVER.SWA.LR_SCHED = False
_C.SOLVER.CHECKPOINT_PERIOD = 20
# Number of images per batch across all machines.
# This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will
# see 2 images per batch
_C.SOLVER.IMS_PER_BATCH = 64
_C.TEST = CN()
_C.TEST.EVAL_PERIOD = 20
# Number of images per batch in one process.
_C.TEST.IMS_PER_BATCH = 64
_C.TEST.METRIC = "cosine"
_C.TEST.ROC_ENABLED = False
# Average query expansion
_C.TEST.AQE = CN()
_C.TEST.AQE.ENABLED = False
_C.TEST.AQE.ALPHA = 3.0
_C.TEST.AQE.QE_TIME = 1
_C.TEST.AQE.QE_K = 5
# Re-rank
_C.TEST.RERANK = CN()
_C.TEST.RERANK.ENABLED = False
_C.TEST.RERANK.K1 = 20
_C.TEST.RERANK.K2 = 6
_C.TEST.RERANK.LAMBDA = 0.3
# Precise batchnorm
_C.TEST.PRECISE_BN = CN()
_C.TEST.PRECISE_BN.ENABLED = False
_C.TEST.PRECISE_BN.DATASET = 'Market1501'
_C.TEST.PRECISE_BN.NUM_ITER = 300
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT_DIR = "logs/"
# Benchmark different cudnn algorithms.
# If input images have very different sizes, this option will have large overhead
# for about 10k iterations. It usually hurts total time, but can benefit for certain models.
# If input images have the same or similar sizes, benchmark is often helpful.
_C.CUDNN_BENCHMARK = False
| true
| true
|
1c45191232e6f107bedf746641c84c6c18d003d0
| 13,305
|
py
|
Python
|
CGATPipelines/Pipeline/Cluster.py
|
cdrakesmith/CGATPipelines
|
3c94ae4f9d87d51108255dc405c4b95af7c8b694
|
[
"MIT"
] | null | null | null |
CGATPipelines/Pipeline/Cluster.py
|
cdrakesmith/CGATPipelines
|
3c94ae4f9d87d51108255dc405c4b95af7c8b694
|
[
"MIT"
] | null | null | null |
CGATPipelines/Pipeline/Cluster.py
|
cdrakesmith/CGATPipelines
|
3c94ae4f9d87d51108255dc405c4b95af7c8b694
|
[
"MIT"
] | null | null | null |
'''Cluster.py - cluster utility functions for ruffus pipelines
==============================================================
This module abstracts the DRMAA native specification and provides
convenience functions for running Drmaa jobs.
Reference
---------
'''
import re
import os
import stat
import time
import CGAT.Experiment as E
try:
import drmaa
HAS_DRMAA = True
except (ImportError, RuntimeError):
# the following does not work on Travis
#except ImportError or RuntimeError:
HAS_DRMAA = False
def setupDrmaaJobTemplate(drmaa_session, options, job_name, job_memory):
    '''Sets up a Drmaa job template. Currently SGE, SLURM, Torque and PBSPro are
supported'''
if not job_memory:
raise ValueError("Job memory must be specified when running"
"DRMAA jobs")
jt = drmaa_session.createJobTemplate()
jt.workingDirectory = options["workingdir"]
jt.jobEnvironment = {'BASH_ENV': '~/.bashrc'}
jt.args = []
if not re.match("[a-zA-Z]", job_name[0]):
job_name = "_" + job_name
# queue manager specific configuration options
queue_manager = options["cluster_queue_manager"]
if queue_manager.lower() == "sge":
# see: ? cannot find documentation on the SGE native spec
spec = ["-V",
"-N %s" % job_name]
if options["cluster_priority"]:
spec.append("-p %(cluster_priority)i")
if options["cluster_options"]:
spec.append("%(cluster_options)s")
if not options["cluster_memory_resource"]:
raise ValueError("The cluster memory resource must be specified")
for resource in options["cluster_memory_resource"].split(","):
spec.append("-l %s=%s" % (resource, job_memory))
# if process has multiple threads, use a parallel environment
multithread = 'job_threads' in options and options['job_threads'] > 1
if multithread:
spec.append(
"-pe %(cluster_parallel_environment)s %(job_threads)i -R y")
if "cluster_pe_queue" in options and multithread:
spec.append(
"-q %(cluster_pe_queue)s")
elif len(options['cluster_queue']) > 0:
spec.append("-q %(cluster_queue)s")
elif queue_manager.lower() == "slurm":
# SLURM DOCS:
# http://apps.man.poznan.pl/trac/slurm-drmaa
# https://computing.llnl.gov/linux/slurm/cons_res_share.html
#
# The SLURM Consumable Resource plugin is required
# The "CR_CPU_Memory" resource must be specified
#
# i.e. in slurm.conf:
# SelectType=select/cons_res
# SelectTypeParameters=CR_CPU_Memory
#
# * Note that --cpus-per-task will actually refer to cores
# with the appropriate Node configuration
#
# SLURM-DRMAA DOCS - Note that version 1.2 (SVN) is required
# http://apps.man.poznan.pl/trac/slurm-drmaa
#
# Not implemented:
        # -V: SLURM automatically passes the environment variables
# -p: does not appear to be part of the slurm drmaa native spec
#
# TODO: add "--account" (not sure the best way to fill param).
spec = ["-J %s" % job_name]
if options["cluster_options"]:
spec.append("%(cluster_options)s")
if 'job_threads' in options:
job_threads = options["job_threads"]
else:
job_threads = 1 # probably should come from a config option
spec.append("--cpus-per-task=%s" % job_threads)
# Note the that the specified memory must be per CPU
# for consistency with the implemented SGE approach
if job_memory.endswith("G"):
job_memory_per_cpu = int(job_memory[:-1]) * 1000
elif job_memory.endswith("M"):
job_memory_per_cpu = int(job_memory[:-1])
else:
raise ValueError('job memory unit not recognised for SLURM, '
'must be either "M" (for Mb) or "G" (for Gb),'
' e.g. 1G or 1000M for 1 Gigabyte of memory')
spec.append("--mem-per-cpu=%s" % job_memory_per_cpu)
# set the partition to use (equivalent of SGE queue)
spec.append("--partition=%(cluster_queue)s")
elif queue_manager.lower() == "torque":
        # PBS Torque native specification:
# http://apps.man.poznan.pl/trac/pbs-drmaa
spec = ["-N %s" % job_name,
"-l mem=%s" % job_memory, ]
if options["cluster_options"]:
spec.append("%(cluster_options)s")
# There is no equivalent to sge -V option for pbs-drmaa
# recreating this...
jt.jobEnvironment = os.environ
jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],
'.bashrc')})
elif queue_manager.lower() == "pbspro":
# PBS Pro docs
# http://www.pbsworks.com/PBSProduct.aspx?n=PBS-Professional&c=Overview-and-Capabilities
# http://technion.ac.il/usg/tamnun/PBSProUserGuide12.1.pdf
# DRMAA for PBS Pro is the same as for torque:
# http://apps.man.poznan.pl/trac/pbs-drmaa
# Webpages with some examples:
# https://wiki.galaxyproject.org/Admin/Config/Performance/Cluster#PBS
# https://sites.google.com/a/case.edu/hpc-upgraded-cluster/home/Software-Guide/pbs-drmaa
# https://albertsk.files.wordpress.com/2011/12/pbs.pdf
# PBS Pro has some differences with torque so separating
# Set environment variables in .bashrc:
# PBS_DRMAA_CONF to eg ~/.pbs_drmaa.conf
# DRMAA_LIBRARY_PATH to eg /xxx/libdrmaa.so
# PBSPro only takes the first 15 characters, throws uninformative error if longer.
# mem is maximum amount of RAM used by job; mem_free doesn't seem to be available.
# For qsub job requirements would be passed as e.g.
#PBS -lselect=N:ncpus=X:mem=Ygb
#PBS -lwalltime=HH:00:00
        # 'select=1' determines the number of nodes. Should go in a config file.
# mem is per node and maximum memory
# Site dependent but in general setting '#PBS -l select=NN:ncpus=NN:mem=NN{gb|mb}'
# is sufficient for parallel jobs (OpenMP, MPI).
# Also architecture dependent, jobs could be hanging if resource doesn't exist.
# TO DO: Kill if long waiting time?
nodes = 1 # TO DO: hard coding as unsure of definitions between
# threads, nodes, etc. between programmes for now
# Set up basic requirements for job submission:
# if process has multiple threads, use a parallel environment:
# TO DO: error in fastqc build_report, var referenced before assignment.
# For now adding to workaround:
if 'job_threads' in options:
job_threads = options["job_threads"]
else:
            job_threads = 1
        # also define multithread here so the cluster_pe_queue check below
        # cannot hit an unbound variable
        multithread = job_threads > 1
        spec = ["-N %s" % job_name[0:15],
"-l select=%s:ncpus=%s:mem=%s" % (nodes, job_threads, job_memory)]
# Leaving walltime to be specified by user as difficult to set dynamically and
# depends on site/admin configuration of default values. Likely means setting for
# longest job with trade-off of longer waiting times for resources to be
# available for other jobs.
if options["cluster_options"]:
conds = ('mem' in options["cluster_options"],
'ncpus' in options["cluster_options"],
'select' in options["cluster_options"]
)
if any(conds):
spec = ["-N %s" % job_name[0:15]]
spec.append("%(cluster_options)s")
else:
spec.append("%(cluster_options)s")
if "cluster_pe_queue" in options and multithread:
spec.append("-q %(cluster_pe_queue)s")
elif options['cluster_queue'] != "NONE":
spec.append("-q %(cluster_queue)s")
# TO DO: sort out in Parameters.py to allow none values for configparser:
elif options['cluster_queue'] == "NONE":
pass
# As for torque, there is no equivalent to sge -V option for pbs-drmaa:
jt.jobEnvironment = os.environ
jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],
'.bashrc')})
else:
raise ValueError("Queue manager %s not supported" % queue_manager)
jt.nativeSpecification = " ".join(spec) % options
# keep stdout and stderr separate
jt.joinFiles = False
return jt
def setDrmaaJobPaths(job_template, job_path):
'''Adds the job_path, stdout_path and stderr_paths
to the job_template.
'''
job_path = os.path.abspath(job_path)
os.chmod(job_path, stat.S_IRWXG | stat.S_IRWXU)
stdout_path = job_path + ".stdout"
stderr_path = job_path + ".stderr"
job_template.remoteCommand = job_path
job_template.outputPath = ":" + stdout_path
job_template.errorPath = ":" + stderr_path
return job_template, stdout_path, stderr_path
def expandStatement(statement, ignore_pipe_errors=False):
'''add generic commands before and after statement.
The prefixes and suffixes added are defined in :data:`exec_prefix`
and :data:`exec_suffix`. The main purpose of these prefixs is to
provide error detection code to detect errors at early steps in a
series of unix commands within a pipe.
Arguments
---------
statement : string
Command line statement to expand
ignore_pipe_errors : bool
If False, do not modify statement.
Returns
-------
statement : string
The expanded statement.
'''
_exec_prefix = '''detect_pipe_error_helper()
{
while [ "$#" != 0 ] ; do
# there was an error in at least one program of the pipe
if [ "$1" != 0 ] ; then return 1 ; fi
shift 1
done
return 0
}
detect_pipe_error() {
detect_pipe_error_helper "${PIPESTATUS[@]}"
return $?
}
checkpoint() {
detect_pipe_error;
if [ $? != 0 ]; then exit 1; fi;
}
'''
_exec_suffix = "; detect_pipe_error"
if ignore_pipe_errors:
return statement
else:
return " ".join((_exec_prefix, statement, _exec_suffix))
def collectSingleJobFromCluster(session, job_id,
statement,
stdout_path, stderr_path,
job_path,
ignore_errors=False):
'''runs a single job on the cluster.'''
try:
retval = session.wait(
job_id, drmaa.Session.TIMEOUT_WAIT_FOREVER)
except Exception as msg:
# ignore message 24 in PBS code 24: drmaa: Job
# finished but resource usage information and/or
# termination status could not be provided.":
if not msg.message.startswith("code 24"):
raise
retval = None
stdout, stderr = getStdoutStderr(stdout_path, stderr_path)
if retval and retval.exitStatus != 0 and not ignore_errors:
raise OSError(
"---------------------------------------\n"
"Child was terminated by signal %i: \n"
"The stderr was: \n%s\n%s\n"
"-----------------------------------------" %
(retval.exitStatus,
"".join(stderr), statement))
if ((retval.hasExited is False or retval.wasAborted is True) and not
ignore_errors):
raise OSError(
"-------------------------------------------------\n"
"Cluster job was aborted (%s) and/or failed to exit (%s) "
"while running the following statement:\n"
"\n%s\n"
"(Job may have been cancelled by the user or the scheduler)\n"
"----------------------------------------------------------\n" %
(retval.wasAborted, not retval.hasExited, statement))
try:
os.unlink(job_path)
except OSError:
E.warn(
("temporary job file %s not present for "
"clean-up - ignored") % job_path)
def getStdoutStderr(stdout_path, stderr_path, tries=5):
    '''get stdout/stderr allowing for some lag.
    Try at most *tries* times. If unsuccessful, throw OSError
Removes the files once they are read.
Returns tuple of stdout and stderr.
'''
x = tries
while x >= 0:
if os.path.exists(stdout_path):
break
time.sleep(1)
x -= 1
x = tries
while x >= 0:
if os.path.exists(stderr_path):
break
time.sleep(1)
x -= 1
try:
stdout = open(stdout_path, "r").readlines()
except IOError as msg:
E.warn("could not open stdout: %s" % msg)
stdout = []
try:
stderr = open(stderr_path, "r").readlines()
except IOError as msg:
E.warn("could not open stdout: %s" % msg)
stderr = []
try:
os.unlink(stdout_path)
os.unlink(stderr_path)
except OSError as msg:
pass
return stdout, stderr
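# Illustrative standalone version of the "G"/"M" memory-string conversion used
# in the SLURM branch of setupDrmaaJobTemplate above (a sketch; the helper name
# is hypothetical and not part of the DRMAA interface):
def _job_memory_to_mb(job_memory):
    """Return per-CPU memory in megabytes for strings like '1G' or '1000M'."""
    if job_memory.endswith("G"):
        return int(job_memory[:-1]) * 1000
    if job_memory.endswith("M"):
        return int(job_memory[:-1])
    raise ValueError("job memory must end in 'G' or 'M', e.g. 1G or 1000M")

# Example: _job_memory_to_mb("1G") == 1000; _job_memory_to_mb("500M") == 500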
| 34.115385
| 96
| 0.583991
|
import re
import os
import stat
import time
import CGAT.Experiment as E
try:
import drmaa
HAS_DRMAA = True
except (ImportError, RuntimeError):
HAS_DRMAA = False
def setupDrmaaJobTemplate(drmaa_session, options, job_name, job_memory):
if not job_memory:
raise ValueError("Job memory must be specified when running"
"DRMAA jobs")
jt = drmaa_session.createJobTemplate()
jt.workingDirectory = options["workingdir"]
jt.jobEnvironment = {'BASH_ENV': '~/.bashrc'}
jt.args = []
if not re.match("[a-zA-Z]", job_name[0]):
job_name = "_" + job_name
queue_manager = options["cluster_queue_manager"]
if queue_manager.lower() == "sge":
spec = ["-V",
"-N %s" % job_name]
if options["cluster_priority"]:
spec.append("-p %(cluster_priority)i")
if options["cluster_options"]:
spec.append("%(cluster_options)s")
if not options["cluster_memory_resource"]:
raise ValueError("The cluster memory resource must be specified")
for resource in options["cluster_memory_resource"].split(","):
spec.append("-l %s=%s" % (resource, job_memory))
multithread = 'job_threads' in options and options['job_threads'] > 1
if multithread:
spec.append(
"-pe %(cluster_parallel_environment)s %(job_threads)i -R y")
if "cluster_pe_queue" in options and multithread:
spec.append(
"-q %(cluster_pe_queue)s")
elif len(options['cluster_queue']) > 0:
spec.append("-q %(cluster_queue)s")
elif queue_manager.lower() == "slurm":
spec = ["-J %s" % job_name]
if options["cluster_options"]:
spec.append("%(cluster_options)s")
if 'job_threads' in options:
job_threads = options["job_threads"]
else:
job_threads = 1
spec.append("--cpus-per-task=%s" % job_threads)
if job_memory.endswith("G"):
job_memory_per_cpu = int(job_memory[:-1]) * 1000
elif job_memory.endswith("M"):
job_memory_per_cpu = int(job_memory[:-1])
else:
raise ValueError('job memory unit not recognised for SLURM, '
'must be either "M" (for Mb) or "G" (for Gb),'
' e.g. 1G or 1000M for 1 Gigabyte of memory')
spec.append("--mem-per-cpu=%s" % job_memory_per_cpu)
spec.append("--partition=%(cluster_queue)s")
elif queue_manager.lower() == "torque":
spec = ["-N %s" % job_name,
"-l mem=%s" % job_memory, ]
if options["cluster_options"]:
spec.append("%(cluster_options)s")
jt.jobEnvironment = os.environ
jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],
'.bashrc')})
elif queue_manager.lower() == "pbspro":
# For qsub job requirements would be passed as e.g.
#PBS -lselect=N:ncpus=X:mem=Ygb
#PBS -lwalltime=HH:00:00
        # 'select=1' determines the number of nodes. Should go in a config file.
        # mem is per node and maximum memory
        # Site dependent but in general setting '#PBS -l select=NN:ncpus=NN:mem=NN{gb|mb}'
        # is sufficient for parallel jobs (OpenMP, MPI).
# Also architecture dependent, jobs could be hanging if resource doesn't exist.
nodes = 1
if 'job_threads' in options:
job_threads = options["job_threads"]
else:
            job_threads = 1
        multithread = job_threads > 1
        spec = ["-N %s" % job_name[0:15],
"-l select=%s:ncpus=%s:mem=%s" % (nodes, job_threads, job_memory)]
if options["cluster_options"]:
conds = ('mem' in options["cluster_options"],
'ncpus' in options["cluster_options"],
'select' in options["cluster_options"]
)
if any(conds):
spec = ["-N %s" % job_name[0:15]]
spec.append("%(cluster_options)s")
else:
spec.append("%(cluster_options)s")
if "cluster_pe_queue" in options and multithread:
spec.append("-q %(cluster_pe_queue)s")
elif options['cluster_queue'] != "NONE":
spec.append("-q %(cluster_queue)s")
elif options['cluster_queue'] == "NONE":
pass
jt.jobEnvironment = os.environ
jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],
'.bashrc')})
else:
raise ValueError("Queue manager %s not supported" % queue_manager)
jt.nativeSpecification = " ".join(spec) % options
jt.joinFiles = False
return jt
def setDrmaaJobPaths(job_template, job_path):
job_path = os.path.abspath(job_path)
os.chmod(job_path, stat.S_IRWXG | stat.S_IRWXU)
stdout_path = job_path + ".stdout"
stderr_path = job_path + ".stderr"
job_template.remoteCommand = job_path
job_template.outputPath = ":" + stdout_path
job_template.errorPath = ":" + stderr_path
return job_template, stdout_path, stderr_path
def expandStatement(statement, ignore_pipe_errors=False):
_exec_prefix = '''detect_pipe_error_helper()
{
while [ "$#" != 0 ] ; do
# there was an error in at least one program of the pipe
if [ "$1" != 0 ] ; then return 1 ; fi
shift 1
done
return 0
}
detect_pipe_error() {
detect_pipe_error_helper "${PIPESTATUS[@]}"
return $?
}
checkpoint() {
detect_pipe_error;
if [ $? != 0 ]; then exit 1; fi;
}
'''
_exec_suffix = "; detect_pipe_error"
if ignore_pipe_errors:
return statement
else:
return " ".join((_exec_prefix, statement, _exec_suffix))
def collectSingleJobFromCluster(session, job_id,
statement,
stdout_path, stderr_path,
job_path,
ignore_errors=False):
try:
retval = session.wait(
job_id, drmaa.Session.TIMEOUT_WAIT_FOREVER)
except Exception as msg:
if not msg.message.startswith("code 24"):
raise
retval = None
stdout, stderr = getStdoutStderr(stdout_path, stderr_path)
if retval and retval.exitStatus != 0 and not ignore_errors:
raise OSError(
"---------------------------------------\n"
"Child was terminated by signal %i: \n"
"The stderr was: \n%s\n%s\n"
"-----------------------------------------" %
(retval.exitStatus,
"".join(stderr), statement))
if ((retval.hasExited is False or retval.wasAborted is True) and not
ignore_errors):
raise OSError(
"-------------------------------------------------\n"
"Cluster job was aborted (%s) and/or failed to exit (%s) "
"while running the following statement:\n"
"\n%s\n"
"(Job may have been cancelled by the user or the scheduler)\n"
"----------------------------------------------------------\n" %
(retval.wasAborted, not retval.hasExited, statement))
try:
os.unlink(job_path)
except OSError:
E.warn(
("temporary job file %s not present for "
"clean-up - ignored") % job_path)
def getStdoutStderr(stdout_path, stderr_path, tries=5):
x = tries
while x >= 0:
if os.path.exists(stdout_path):
break
time.sleep(1)
x -= 1
x = tries
while x >= 0:
if os.path.exists(stderr_path):
break
time.sleep(1)
x -= 1
    try:
        with open(stdout_path, "r") as inf:
            stdout = inf.readlines()
    except IOError as msg:
        E.warn("could not open stdout: %s" % msg)
        stdout = []
    try:
        with open(stderr_path, "r") as inf:
            stderr = inf.readlines()
    except IOError as msg:
        E.warn("could not open stderr: %s" % msg)
        stderr = []
try:
os.unlink(stdout_path)
os.unlink(stderr_path)
except OSError as msg:
pass
return stdout, stderr
| true
| true
|
1c451a8e590f9ec729b1bc20c53b683a1db7a13e
| 1,503
|
py
|
Python
|
src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelconnectorBesturingsconnector.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelconnectorBesturingsconnector.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Datatypes/KlVerlichtingstoestelconnectorBesturingsconnector.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerlichtingstoestelconnectorBesturingsconnector(KeuzelijstField):
"""Type van connector verwerkt in de behuizing van het verlichtingstoestel voor de aansluiting van de module voor lokale afstandsbediening en -bewaking."""
naam = 'KlVerlichtingstoestelconnectorBesturingsconnector'
label = 'WV-besturingsconnector'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlVerlichtingstoestelconnectorBesturingsconnector'
definition = 'Type van connector verwerkt in de behuizing van het verlichtingstoestel voor de aansluiting van de module voor lokale afstandsbediening en -bewaking.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerlichtingstoestelconnectorBesturingsconnector'
options = {
'NEMA': KeuzelijstWaarde(invulwaarde='NEMA',
label='NEMA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelconnectorBesturingsconnector/NEMA'),
'SR': KeuzelijstWaarde(invulwaarde='SR',
label='SR',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelconnectorBesturingsconnector/SR')
}
| 65.347826
| 168
| 0.743846
|
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
class KlVerlichtingstoestelconnectorBesturingsconnector(KeuzelijstField):
naam = 'KlVerlichtingstoestelconnectorBesturingsconnector'
label = 'WV-besturingsconnector'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlVerlichtingstoestelconnectorBesturingsconnector'
definition = 'Type van connector verwerkt in de behuizing van het verlichtingstoestel voor de aansluiting van de module voor lokale afstandsbediening en -bewaking.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerlichtingstoestelconnectorBesturingsconnector'
options = {
'NEMA': KeuzelijstWaarde(invulwaarde='NEMA',
label='NEMA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelconnectorBesturingsconnector/NEMA'),
'SR': KeuzelijstWaarde(invulwaarde='SR',
label='SR',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelconnectorBesturingsconnector/SR')
}
| true
| true
|
1c451acb8ba967675446c6c9dcd9a6f243d7c450
| 1,913
|
py
|
Python
|
experiments/comparison/baseline_search.py
|
alcinos/auto_yolo
|
78727596f937b38d4de47dd9f0a7cc8c6104323f
|
[
"MIT"
] | 54
|
2018-12-10T21:08:42.000Z
|
2022-02-18T02:44:19.000Z
|
experiments/comparison/baseline_search.py
|
alcinos/auto_yolo
|
78727596f937b38d4de47dd9f0a7cc8c6104323f
|
[
"MIT"
] | 8
|
2019-04-02T10:31:13.000Z
|
2022-03-31T13:44:25.000Z
|
experiments/comparison/baseline_search.py
|
alcinos/auto_yolo
|
78727596f937b38d4de47dd9f0a7cc8c6104323f
|
[
"MIT"
] | 16
|
2019-04-26T11:45:08.000Z
|
2022-02-09T07:59:25.000Z
|
from auto_yolo import envs
import argparse
import numpy as np
readme = "Searching for baseline threshold."
parser = argparse.ArgumentParser()
parser.add_argument("--n-digits", type=int, default=1)
parser.add_argument("--transfer", action="store_true")
parser.add_argument("--sc", choices="AP count_error count_1norm".split())
args, _ = parser.parse_known_args()
# dist_dict = {
# 3: np.linspace(0, .1, 101),
# 5: np.linspace(0, .1, 101),
# 7: np.linspace(.6599-0.05, .6599+0.05, 101),
# 9: np.linspace(.599-0.05, .599+0.05, 101),
# }
distributions = [dict(cc_threshold=t) for t in np.linspace(0.01, 3.0, 100)]
durations = dict(
oak=dict(
max_hosts=1, ppn=4, cpp=1, gpu_set="0", wall_time="1year",
cleanup_time="1mins", slack_time="1mins", n_repeats=1, kind="parallel", host_pool=":"),
)
def build_net(scope):
from dps.utils.tf import MLP
return MLP(n_units=[10, 10], scope=scope)
config = dict(
curriculum=[dict()],
render_hook=None,
cc_threshold=0.000001,
do_train=False,
build_object_encoder=build_net,
build_object_decoder=build_net
)
if args.sc == "AP":
config.update(stopping_criteria="AP,max", threshold=1.0)
elif args.sc == "count_error":
config.update(stopping_criteria="count_error,min", threshold=0.0)
elif args.sc == "count_1norm":
config.update(stopping_criteria="count_1norm,min", threshold=0.0)
else:
raise Exception()
if args.transfer:
config["min_chars"] = args.n_digits
config["max_chars"] = args.n_digits
config["n_train"] = 25000
task = "scatter"
else:
config["min_digits"] = args.n_digits
config["max_digits"] = args.n_digits
config["n_train"] = 64000
task = "arithmetic"
envs.run_experiment(
"baseline_search_sc={}_n_digits={}".format(args.sc, args.n_digits), config, readme,
distributions=distributions, alg="baseline", durations=durations, task=task
)
| 27.724638
| 95
| 0.68322
|
from auto_yolo import envs
import argparse
import numpy as np
readme = "Searching for baseline threshold."
parser = argparse.ArgumentParser()
parser.add_argument("--n-digits", type=int, default=1)
parser.add_argument("--transfer", action="store_true")
parser.add_argument("--sc", choices="AP count_error count_1norm".split())
args, _ = parser.parse_known_args()
distributions = [dict(cc_threshold=t) for t in np.linspace(0.01, 3.0, 100)]
durations = dict(
oak=dict(
max_hosts=1, ppn=4, cpp=1, gpu_set="0", wall_time="1year",
cleanup_time="1mins", slack_time="1mins", n_repeats=1, kind="parallel", host_pool=":"),
)
def build_net(scope):
from dps.utils.tf import MLP
return MLP(n_units=[10, 10], scope=scope)
config = dict(
curriculum=[dict()],
render_hook=None,
cc_threshold=0.000001,
do_train=False,
build_object_encoder=build_net,
build_object_decoder=build_net
)
if args.sc == "AP":
config.update(stopping_criteria="AP,max", threshold=1.0)
elif args.sc == "count_error":
config.update(stopping_criteria="count_error,min", threshold=0.0)
elif args.sc == "count_1norm":
config.update(stopping_criteria="count_1norm,min", threshold=0.0)
else:
raise Exception()
if args.transfer:
config["min_chars"] = args.n_digits
config["max_chars"] = args.n_digits
config["n_train"] = 25000
task = "scatter"
else:
config["min_digits"] = args.n_digits
config["max_digits"] = args.n_digits
config["n_train"] = 64000
task = "arithmetic"
envs.run_experiment(
"baseline_search_sc={}_n_digits={}".format(args.sc, args.n_digits), config, readme,
distributions=distributions, alg="baseline", durations=durations, task=task
)
| true
| true
|
1c451ace7c8c4a9840e36df73ca94d6221a26439
| 5,729
|
py
|
Python
|
test/client/dev_server.py
|
GeekLiB/unrealcv
|
9acfcb5b52c5b085e72e64a0bb46ea4d0adadcdb
|
[
"MIT"
] | 1
|
2020-06-29T02:33:44.000Z
|
2020-06-29T02:33:44.000Z
|
test/client/dev_server.py
|
GeekLiB/unrealcv
|
9acfcb5b52c5b085e72e64a0bb46ea4d0adadcdb
|
[
"MIT"
] | null | null | null |
test/client/dev_server.py
|
GeekLiB/unrealcv
|
9acfcb5b52c5b085e72e64a0bb46ea4d0adadcdb
|
[
"MIT"
] | 4
|
2017-03-23T14:52:22.000Z
|
2020-06-29T02:33:54.000Z
|
'''
A python server to mimic the behavior of unrealcv server
Useful for development
'''
import threading, logging, sys
if (sys.version_info > (3, 0)):
import socketserver as SocketServer
else:
import SocketServer
# import MySocketServer as SocketServer
SocketServer.ThreadingMixIn.daemon_threads = True
SocketServer.TCPServer.allow_reuse_address = True
# from common_conf import *
import unrealcv
_L = logging.getLogger(__name__)
_L.setLevel(logging.INFO)
_L.addHandler(logging.NullHandler())
class ThreadedServer:
def start(self):
def _():
cur_thread = threading.current_thread()
_L.debug('Start in %s' % cur_thread.name)
self.server.serve_forever()
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
import threading
server_thread = threading.Thread(target = _)
server_thread.setDaemon(1)
server_thread.start() # TODO: stop this thread
# time.sleep(0.1) # Wait for the server started
def shutdown(self):
cur_thread = threading.current_thread()
_L.debug('Shutdown in %s' % cur_thread.name)
self.server.shutdown()
# try:
# self.server.socket.shutdown(socket.SHUT_RDWR)
# except:
# pass
self.server.server_close() # Close socket
_L.debug('Shutdown completed')
class EchoTCPHandler(SocketServer.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self): # Return a socket when a new connection is started
# self.request is the TCP socket connected to the client
# Each handle is running in a seperate thread
cur_thread = threading.current_thread()
# print 'Got data in ', cur_thread.name
while 1: # Need a way to stop the server
data = self.request.recv(1024) # Return whatever it gets
if not data: # The connection is lost
break
# print "{} wrote:".format(self.client_address[0])
# print data
self.request.sendall(data)
# print 'Close data thread ', cur_thread.name
# connected = False
connected_lock = threading.RLock()
class MessageTCPHandler(SocketServer.BaseRequestHandler):
connected = False
socket = None
def handle(self):
thread_name = threading.current_thread().name
_L.debug('Got a new connection from %s in %s' % ( self.request.getpeername(), thread_name))
with connected_lock:
if MessageTCPHandler.connected:
# SocketMessage.WrapAndSendPayload(self.request, "Only accept one connection")
# Close socket, Disconnect request
self.request.close()
# self.request.close()
_L.debug('Reject, only accept one connection')
return
else:
unrealcv.SocketMessage.WrapAndSendPayload(self.request, 'connected to Python Message Server')
_L.debug('Accept new connection')
MessageTCPHandler.connected = True
MessageTCPHandler.socket = self.request
# t = threading.Thread(target = self.ticking_message)
# t.setDaemon(True)
# t.start()
while 1: # Main loop to receive message
_L.debug('Server looping in %s' % thread_name)
message = unrealcv.SocketMessage.ReceivePayload(self.request)
_L.debug('Server looping finished in %s' % thread_name)
if not message:
_L.debug('Server release connection in %s' % thread_name)
MessageTCPHandler.connected = False
MessageTCPHandler.socket = None
break
# SocketMessage.WrapAndSendPayload(self.request, 'reply')
unrealcv.SocketMessage.WrapAndSendPayload(self.request, message)
# SocketMessage.WrapAndSendPayload(self.request, 'got2')
MessageTCPHandler.connected = False
@classmethod
def send(cls, message):
if cls.connected:
unrealcv.SocketMessage.WrapAndSendPayload(cls.socket, message)
class NULLTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
unrealcv.SocketMessage.WrapAndSendPayload(self.request, 'connected to Python Null Server')
while 1:
message = unrealcv.SocketMessage.ReceivePayload(self.request)
if not message:
break
class EchoServer(ThreadedServer):
def __init__(self, endpoint):
self.endpoint = endpoint
# Create the server, binding to localhost on port 9999
# self.server = SocketServer.TCPServer(self.endpoint, EchoServer.MyTCPHandler)
self.server = SocketServer.ThreadingTCPServer(self.endpoint, EchoTCPHandler)
class MessageServer(ThreadedServer):
def __init__(self, endpoint):
self.endpoint = endpoint
self.server = SocketServer.ThreadingTCPServer(self.endpoint, MessageTCPHandler)
def send(self, message):
MessageTCPHandler.send(message)
class NullServer(ThreadedServer):
'''
Message sent to here will get no response
'''
def __init__(self, endpoint):
self.endpoint = endpoint
self.server = SocketServer.ThreadingTCPServer(self.endpoint, NULLTCPHandler)
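# A minimal usage sketch for tests (the port below is an arbitrary assumption):
#
#     EchoServer    - echoes raw bytes back over plain TCP
#     MessageServer - speaks the unrealcv SocketMessage framing, one client at a time
#     NullServer    - accepts SocketMessage payloads but never replies
#
#     server = EchoServer(('localhost', 9001))
#     server.start()
#     ...exercise the client under test...
#     server.shutdown()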
if __name__ == '__main__':
import logging
L = logging.getLogger('unrealcv')
L.setLevel(logging.DEBUG)
logging.basicConfig()
server = MessageServer(('localhost', 9000))
server.start()
while(1):
pass
| 36.259494
| 109
| 0.653168
|
import threading, logging, sys
if (sys.version_info > (3, 0)):
import socketserver as SocketServer
else:
import SocketServer
SocketServer.ThreadingMixIn.daemon_threads = True
SocketServer.TCPServer.allow_reuse_address = True
import unrealcv
_L = logging.getLogger(__name__)
_L.setLevel(logging.INFO)
_L.addHandler(logging.NullHandler())
class ThreadedServer:
def start(self):
def _():
cur_thread = threading.current_thread()
_L.debug('Start in %s' % cur_thread.name)
self.server.serve_forever()
import threading
server_thread = threading.Thread(target = _)
server_thread.setDaemon(1)
server_thread.start()
    def shutdown(self):
        cur_thread = threading.current_thread()
_L.debug('Shutdown in %s' % cur_thread.name)
self.server.shutdown()
self.server.server_close()
_L.debug('Shutdown completed')
class EchoTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
cur_thread = threading.current_thread()
while 1:
data = self.request.recv(1024)
if not data:
break
self.request.sendall(data)
connected_lock = threading.RLock()
class MessageTCPHandler(SocketServer.BaseRequestHandler):
connected = False
socket = None
def handle(self):
thread_name = threading.current_thread().name
_L.debug('Got a new connection from %s in %s' % ( self.request.getpeername(), thread_name))
with connected_lock:
if MessageTCPHandler.connected:
self.request.close()
_L.debug('Reject, only accept one connection')
return
else:
unrealcv.SocketMessage.WrapAndSendPayload(self.request, 'connected to Python Message Server')
_L.debug('Accept new connection')
MessageTCPHandler.connected = True
MessageTCPHandler.socket = self.request
while 1:
_L.debug('Server looping in %s' % thread_name)
message = unrealcv.SocketMessage.ReceivePayload(self.request)
_L.debug('Server looping finished in %s' % thread_name)
if not message:
_L.debug('Server release connection in %s' % thread_name)
MessageTCPHandler.connected = False
MessageTCPHandler.socket = None
break
unrealcv.SocketMessage.WrapAndSendPayload(self.request, message)
MessageTCPHandler.connected = False
@classmethod
def send(cls, message):
if cls.connected:
unrealcv.SocketMessage.WrapAndSendPayload(cls.socket, message)
class NULLTCPHandler(SocketServer.BaseRequestHandler):
def handle(self):
unrealcv.SocketMessage.WrapAndSendPayload(self.request, 'connected to Python Null Server')
while 1:
message = unrealcv.SocketMessage.ReceivePayload(self.request)
if not message:
break
class EchoServer(ThreadedServer):
def __init__(self, endpoint):
self.endpoint = endpoint
self.server = SocketServer.ThreadingTCPServer(self.endpoint, EchoTCPHandler)
class MessageServer(ThreadedServer):
def __init__(self, endpoint):
self.endpoint = endpoint
self.server = SocketServer.ThreadingTCPServer(self.endpoint, MessageTCPHandler)
def send(self, message):
MessageTCPHandler.send(message)
class NullServer(ThreadedServer):
def __init__(self, endpoint):
self.endpoint = endpoint
self.server = SocketServer.ThreadingTCPServer(self.endpoint, NULLTCPHandler)
if __name__ == '__main__':
import logging
L = logging.getLogger('unrealcv')
L.setLevel(logging.DEBUG)
logging.basicConfig()
server = MessageServer(('localhost', 9000))
server.start()
while(1):
pass
| true
| true
|
1c451be187ab0f02d6d4a30d729c850021a93b2f
| 598
|
py
|
Python
|
bdaydict.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
bdaydict.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
bdaydict.py
|
rayjustinhuang/BitesofPy
|
03b694c5259ff607621419d9677c5caff90a6057
|
[
"MIT"
] | null | null | null |
from datetime import date
MSG = 'Hey {}, there are more people with your birthday!'
class BirthdayDict(dict):
"""Override dict to print a message every time a new person is added that has
the same birthday (day+month) as somebody already in the dict"""
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, name, birthday):
for date in self.values():
if birthday.day == date.day and birthday.month == date.month:
print(MSG.format(name))
dict.__setitem__(self, name, birthday)
pass
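if __name__ == "__main__":
    # A minimal illustrative sketch (names and dates are made up): adding a
    # person whose day+month matches an existing entry triggers the MSG print.
    bd = BirthdayDict()
    bd["tim"] = date(1979, 9, 4)
    bd["sara"] = date(1988, 1, 1)
    bd["mike"] = date(1981, 9, 4)  # same day+month as tim -> message printed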
| 33.222222
| 81
| 0.64214
|
from datetime import date
MSG = 'Hey {}, there are more people with your birthday!'
class BirthdayDict(dict):
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, name, birthday):
for date in self.values():
if birthday.day == date.day and birthday.month == date.month:
print(MSG.format(name))
dict.__setitem__(self, name, birthday)
pass
| true
| true
|
1c451c467f4bd7d3914dcec4205aa7681ffb50f0
| 11,533
|
py
|
Python
|
datasets/nclt.py
|
XiaoyongNI/hybrid-inference
|
c268e1ada019e08f62e3f02fc6d5059130ec5358
|
[
"MIT"
] | 16
|
2019-11-22T15:40:32.000Z
|
2022-03-14T14:39:01.000Z
|
datasets/nclt.py
|
XiaoyongNI/hybrid-inference
|
c268e1ada019e08f62e3f02fc6d5059130ec5358
|
[
"MIT"
] | 2
|
2020-02-11T13:36:56.000Z
|
2020-05-18T15:58:21.000Z
|
datasets/nclt.py
|
XiaoyongNI/hybrid-inference
|
c268e1ada019e08f62e3f02fc6d5059130ec5358
|
[
"MIT"
] | 4
|
2020-02-04T16:36:31.000Z
|
2021-11-25T07:26:46.000Z
|
from __future__ import print_function
import sys, os
sys.path.append('../')
import torch.utils.data as data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import pickle
import settings
import time
dates = []
dates.append('2012-01-08')
dates.append('2012-01-15')
dates.append('2012-01-22')
dates.append('2012-02-02')
dates.append('2012-02-04')
dates.append('2012-02-05')
dates.append('2012-02-12')
dates.append('2012-02-18')
dates.append('2012-02-19')
dates.append('2012-03-17')
dates.append('2012-03-25')
dates.append('2012-03-31')
dates.append('2012-04-29')
dates.append('2012-05-11')
dates.append('2012-05-26')
dates.append('2012-06-15')
dates.append('2012-08-04')
dates.append('2012-08-20')
dates.append('2012-09-28')
dates.append('2012-10-28')
dates.append('2012-11-04')
dates.append('2012-11-16')
dates.append('2012-11-17')
dates.append('2012-12-01')
dates.append('2013-01-10')
dates.append('2013-02-23')
dates.append('2013-04-05')
dates = ['2012-01-22']
path_gps = "data/nclt/sensor_data/%s/gps.csv"
path_gps_rtk = "data/nclt/sensor_data/%s/gps_rtk.csv"
path_gps_rtk_err = "data/nclt/sensor_data/%s/gps_rtk_err.csv"
path_gt = "data/nclt/ground_truth/groundtruth_%s.csv"
compact_path = "temp/nclt_%s.pickle"
class NCLT(data.Dataset):
def __init__(self, date, partition='train', ratio=1.0):
self.partition = partition
self.ratio = ratio
if not os.path.exists(compact_path % date):
print("Loading NCLT dataset ...")
self.gps, self.gps_rtk, self.gps_rtk_err, self.gt = self.__load_data(date)
self.__process_data()
self.dump(compact_path % date, [self.gps, self.gps_rtk, self.gps_rtk_err, self.gt])
else:
[self.gps, self.gps_rtk, self.gps_rtk_err, self.gt] = self.load(compact_path % date)
if self.partition == 'train':
indexes = [1, 3]
elif self.partition == 'val':
indexes = [0, 2]
elif self.partition == 'test':
indexes = [4, 5, 6]
else:
raise Exception('Wrong partition')
self.gps = [self.gps[i].astype(np.float32) for i in indexes]
self.gps_rtk = [self.gps_rtk[i].astype(np.float32) for i in indexes]
self.gt = [self.gt[i].astype(np.float32) for i in indexes]
self.cut_data()
print("NCLT %s loaded: %d samples " % (partition, sum([x.shape[0] for x in self.gps_rtk])))
self.operators_b = [self.__buildoperators_sparse(self.gps[i].shape[0]) for i in range(len(self.gps))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (state, meas) where target is index of the target class.
"""
x0, P0 = self.__pos2x0(self.gps_rtk[index][0, 1:].astype(np.float32))
return self.gt[index][:, 0], self.gt[index][:, 1:], self.gps_rtk[index][:, 1:], x0, P0, self.operators_b[index]
def cut_data(self):
self.gps = [cut_array(e, self.ratio) for e in self.gps]
self.gps_rtk = [cut_array(e, self.ratio) for e in self.gps_rtk]
self.gt = [cut_array(e, self.ratio) for e in self.gt]
def __pos2x0(self, pos):
if settings.x0_v.shape[0] == 4:
x0 = np.zeros(4).astype(np.float32)
x0[0] = pos[0]
x0[2] = pos[1]
P0 = np.eye(4)*1
else:
x0 = np.zeros(6).astype(np.float32)
x0[0] = pos[0]
x0[3] = pos[1]
P0 = np.eye(6)*1
return x0, P0
def dump(self, path, object):
if not os.path.exists('temp'):
os.makedirs('temp')
with open(path, 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load(self, path):
with open(path, 'rb') as f:
# The protocol version used is detected automatically, so we do not
# have to specify it.
return pickle.load(f)
def __len__(self):
return len(self.gt)
def total_len(self):
total = 0
for arr in self.gt:
total += arr.shape[0]
return total
def _generate_sample(self, seed):
np.random.seed(seed)
if self.acceleration:
return simulate_system(create_model_parameters_a, K=self.K, x0=self.x0)
else:
return simulate_system(create_model_parameters_v, K=self.K, x0=self.x0)
def __buildoperators_sparse_old(self, nn=20):
# Identity
i = torch.LongTensor([[i, i] for i in range(nn)])
v = torch.FloatTensor([1 for i in range(nn)])
I = torch.sparse.FloatTensor(i.t(), v)
#Message right
i = torch.LongTensor([[i, i+1] for i in range(nn-1)] + [[nn-1, nn-1]])
v = torch.FloatTensor([1 for i in range(nn-1)] + [0])
mr = torch.sparse.FloatTensor(i.t(), v)
#Message left
i = torch.LongTensor([[0, nn-1]] + [[i+1, i] for i in range(nn-1)])
v = torch.FloatTensor([0] + [1 for i in range(nn-1)])
ml = torch.sparse.FloatTensor(i.t(), v)
return [I, mr, ml]
def __buildoperators_sparse(self, nn=20):
# Message right to left
m_left_r = []
m_left_c = []
m_right_r = []
m_right_c = []
m_up_r = []
m_up_c = []
for i in range(nn - 1):
m_left_r.append(i)
m_left_c.append((i + 1))
m_right_r.append(i + 1)
m_right_c.append((i))
for i in range(nn):
m_up_r.append(i)
m_up_c.append(i + nn)
m_left = [torch.LongTensor(m_left_r), torch.LongTensor(m_left_c)]
m_right = [torch.LongTensor(m_right_r), torch.LongTensor(m_right_c)]
m_up = [torch.LongTensor(m_up_r), torch.LongTensor(m_up_c)]
return {"m_left": m_left, "m_right": m_right, "m_up": m_up}
def __load_gps(self, path, date):
df = pd.read_csv(path % date)
df = df.iloc[:, [0, 3, 4]]
return df.values
def __load_gps_err(self, date):
df = pd.read_csv(path_gps % date)
df = df.iloc[:, 6]
return df.values
def __load_gt(self, date):
df = pd.read_csv(path_gt % date)
gt = df.iloc[:, [0, 2, 1]].values
gt_err = df.iloc[:, [5, 4]].values
return gt, gt_err
def __load_gps_rtk_err(self, date):
df = pd.read_csv(path_gps_rtk_err % date)
return df.values
def __compute_gps_err(self, gps, gt):
return np.mean(np.square(gps - gt), axis=1)
def __load_data(self, date):
"We use the timestamp of gps_rtk which has the lowest frequency 1 Hz"
gps = self.__load_gps(path_gps, date)
gps_rtk = self.__load_gps(path_gps_rtk, date)
gps_rtk_err = self.__load_gps_rtk_err(date)
gt, _ = self.__load_gt(date)
self.lat0 = gps_rtk[0, 1]
self.lng0 = gps_rtk[0, 2]
self.bias = [gt[0, 1], gt[0, 2]]
gps_rtk_dec = self.__decompose(gps_rtk, date)
gps_rtk_err_dec = self.__decompose(gps_rtk_err, date)
gps_ar = []
gt_ar = []
gps_rtk_ar, gps_rtk_err_ar = [], []
for gps_rtk_i, gps_rtk_err_i in zip(gps_rtk_dec, gps_rtk_err_dec):
idxs = self.__filer_freq(gps_rtk_i[:, 0], f=1.)
gps_rtk_ar.append(gps_rtk_i[idxs, :])
gps_rtk_err_ar.append(gps_rtk_err_i[idxs, :])
#Matching with GT
idxs_gt = self.__match_tt(gps_rtk_ar[-1][:, 0], gt[:, 0])
gt_ar.append(gt[idxs_gt, :])
#Matching with gps
idxs = self.__match_tt(gps_rtk_ar[-1][:, 0], gps[:, 0])
gps_ar.append(gps[idxs, :])
return gps_ar, gps_rtk_ar, gps_rtk_err_ar, gt_ar
def __decompose(self, data, date):
if date == '2012-01-22':
return [data[100:2054], data[2054:4009], data[4147:6400], data[6400:8890], data[9103:10856], data[11113:12608],
data[12733:13525]]#, [0, 4147, 9103, 11113, 12733]
else:
return data
def concatenate(self, arrays):
return np.concatenate(arrays, axis=0)
def __process_data(self):
'''
lat0 = self.gps_rtk[0][0, 1]
lng0 = self.gps_rtk[0][0, 2]
bias = [self.gt[0][0, 1], self.gt[0][0, 2]]
'''
for i in range(len(self.gps_rtk)):
self.gps_rtk[i][:, 1:] = polar2cartesian(self.gps_rtk[i][:, 1], self.gps_rtk[i][:, 2], self.lat0,
self.lng0)
self.gps[i][:, 1:] = polar2cartesian(self.gps[i][:, 1], self.gps[i][:, 2], self.lat0,
self.lng0)
self.gt[i][:, 1:] = remove_bias(self.gt[i][:, 1:], self.bias)
def __match_tt(self, tt1, tt2):
print("\tMatching gps and gt timestamps")
arr_idx = []
for i, ti in enumerate(tt1):
diff = np.abs(tt2 - ti)
min_idx = np.argmin(diff)
arr_idx.append(min_idx)
return arr_idx
def _match_gt_step1(self, gps, gps_err, gt, margin=5):
gt_aux = gt.copy()
min_err = 1e10
min_x, min_y = 0, 0
for x in np.linspace(-margin, margin, 200):
for y in np.linspace(-margin, margin, 200):
gt_aux[:, 0] = gt[:, 0] + x
gt_aux[:, 1] = gt[:, 1] + y
err = mse(gps, gps_err, gt_aux)
if err < min_err:
min_err = err
min_x = x
min_y = y
#print("x: %.4f \t y:%.4f \t err:%.4f" % (min_x, min_y, err))
print(err)
print("Fixing GT bias x: %.4f \t y:%.4f \t error:%.4f" % (min_x, min_y, min_err))
return (min_x, min_y)
def _match_gt_step2(self, gt, err):
(min_x, min_y) = err
gt[:, 0] = gt[:, 0] + min_x
gt[:, 1] = gt[:, 1] + min_y
return gt
def __filer_freq(self, ts, f=1., window=5):
arr_idx = []
last_id = 0
arr_idx.append(last_id)
check = False
while last_id < len(ts) - window:
rel_j = []
for j in range(1, window):
rel_j.append(np.abs(f - (ts[last_id+j] - ts[last_id])/1000000))
last_id = last_id + 1 + np.argmin(rel_j)
min_val = np.min(rel_j)
if min_val > 0.05:
check = True
arr_idx.append(last_id)
if check:
print("\tWarning: Not all frequencies are %.3fHz" % f)
print("\tFiltering finished!")
return arr_idx
def mse(gps, gps_err, gt, th=2):
error = np.mean(np.square(gps - gt), axis=1)
mapping = (gps_err < th).astype(np.float32)
return np.mean(error*mapping)
def polar2cartesian(lat, lng, lat0, lng0):
dLat = lat - lat0
dLng = lng - lng0
r = 6400000 # approx. radius of earth (m)
x = r * np.cos(lat0) * np.sin(dLng)
y = r * np.sin(dLat)
return np.concatenate((np.expand_dims(x, 1), np.expand_dims(y, 1)), 1)
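# Illustrative note: this is a local tangent-plane approximation around
# (lat0, lng0): x ~ R*cos(lat0)*dLng and y ~ R*dLat for small offsets,
# assuming the latitudes/longitudes are expressed in radians.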
def remove_bias(vector, bias):
for i in range(vector.shape[1]):
vector[:, i] = vector[:, i] - bias[i]
return vector
if __name__ == '__main__':
for date in dates:
dataset = NCLT('2012-01-22', partition='train')
dataset = NCLT('2012-01-22', partition='val')
dataset = NCLT('2012-01-22', partition='test')
def cut_array(array, ratio):
length = len(array)
return array[0:int(round(ratio*length))]
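# A minimal usage sketch, assuming the NCLT csv files are present under
# data/nclt/ as declared by the path_* templates above:
#
#     dataset = NCLT('2012-01-22', partition='train')
#     tt, gt_xy, meas_xy, x0, P0, operators = dataset[0]
#     # tt: timestamps, gt_xy: ground-truth positions, meas_xy: GPS-RTK
#     # measurements, x0/P0: initial state and covariance, operators: the
#     # sparse message-passing index lists built in __buildoperators_sparse.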
| 32.764205
| 123
| 0.562213
|
from __future__ import print_function
import sys, os
sys.path.append('../')
import torch.utils.data as data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import pickle
import settings
import time
dates = []
dates.append('2012-01-08')
dates.append('2012-01-15')
dates.append('2012-01-22')
dates.append('2012-02-02')
dates.append('2012-02-04')
dates.append('2012-02-05')
dates.append('2012-02-12')
dates.append('2012-02-18')
dates.append('2012-02-19')
dates.append('2012-03-17')
dates.append('2012-03-25')
dates.append('2012-03-31')
dates.append('2012-04-29')
dates.append('2012-05-11')
dates.append('2012-05-26')
dates.append('2012-06-15')
dates.append('2012-08-04')
dates.append('2012-08-20')
dates.append('2012-09-28')
dates.append('2012-10-28')
dates.append('2012-11-04')
dates.append('2012-11-16')
dates.append('2012-11-17')
dates.append('2012-12-01')
dates.append('2013-01-10')
dates.append('2013-02-23')
dates.append('2013-04-05')
dates = ['2012-01-22']
path_gps = "data/nclt/sensor_data/%s/gps.csv"
path_gps_rtk = "data/nclt/sensor_data/%s/gps_rtk.csv"
path_gps_rtk_err = "data/nclt/sensor_data/%s/gps_rtk_err.csv"
path_gt = "data/nclt/ground_truth/groundtruth_%s.csv"
compact_path = "temp/nclt_%s.pickle"
class NCLT(data.Dataset):
def __init__(self, date, partition='train', ratio=1.0):
self.partition = partition
self.ratio = ratio
if not os.path.exists(compact_path % date):
print("Loading NCLT dataset ...")
self.gps, self.gps_rtk, self.gps_rtk_err, self.gt = self.__load_data(date)
self.__process_data()
self.dump(compact_path % date, [self.gps, self.gps_rtk, self.gps_rtk_err, self.gt])
else:
[self.gps, self.gps_rtk, self.gps_rtk_err, self.gt] = self.load(compact_path % date)
if self.partition == 'train':
indexes = [1, 3]
elif self.partition == 'val':
indexes = [0, 2]
elif self.partition == 'test':
indexes = [4, 5, 6]
else:
raise Exception('Wrong partition')
self.gps = [self.gps[i].astype(np.float32) for i in indexes]
self.gps_rtk = [self.gps_rtk[i].astype(np.float32) for i in indexes]
self.gt = [self.gt[i].astype(np.float32) for i in indexes]
self.cut_data()
print("NCLT %s loaded: %d samples " % (partition, sum([x.shape[0] for x in self.gps_rtk])))
self.operators_b = [self.__buildoperators_sparse(self.gps[i].shape[0]) for i in range(len(self.gps))]
def __getitem__(self, index):
x0, P0 = self.__pos2x0(self.gps_rtk[index][0, 1:].astype(np.float32))
return self.gt[index][:, 0], self.gt[index][:, 1:], self.gps_rtk[index][:, 1:], x0, P0, self.operators_b[index]
def cut_data(self):
self.gps = [cut_array(e, self.ratio) for e in self.gps]
self.gps_rtk = [cut_array(e, self.ratio) for e in self.gps_rtk]
self.gt = [cut_array(e, self.ratio) for e in self.gt]
def __pos2x0(self, pos):
if settings.x0_v.shape[0] == 4:
x0 = np.zeros(4).astype(np.float32)
x0[0] = pos[0]
x0[2] = pos[1]
P0 = np.eye(4)*1
else:
x0 = np.zeros(6).astype(np.float32)
x0[0] = pos[0]
x0[3] = pos[1]
P0 = np.eye(6)*1
return x0, P0
def dump(self, path, object):
if not os.path.exists('temp'):
os.makedirs('temp')
with open(path, 'wb') as f:
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
def load(self, path):
with open(path, 'rb') as f:
return pickle.load(f)
def __len__(self):
return len(self.gt)
def total_len(self):
total = 0
for arr in self.gt:
total += arr.shape[0]
return total
def _generate_sample(self, seed):
np.random.seed(seed)
if self.acceleration:
return simulate_system(create_model_parameters_a, K=self.K, x0=self.x0)
else:
return simulate_system(create_model_parameters_v, K=self.K, x0=self.x0)
def __buildoperators_sparse_old(self, nn=20):
i = torch.LongTensor([[i, i] for i in range(nn)])
v = torch.FloatTensor([1 for i in range(nn)])
I = torch.sparse.FloatTensor(i.t(), v)
i = torch.LongTensor([[i, i+1] for i in range(nn-1)] + [[nn-1, nn-1]])
v = torch.FloatTensor([1 for i in range(nn-1)] + [0])
mr = torch.sparse.FloatTensor(i.t(), v)
i = torch.LongTensor([[0, nn-1]] + [[i+1, i] for i in range(nn-1)])
v = torch.FloatTensor([0] + [1 for i in range(nn-1)])
ml = torch.sparse.FloatTensor(i.t(), v)
return [I, mr, ml]
def __buildoperators_sparse(self, nn=20):
m_left_r = []
m_left_c = []
m_right_r = []
m_right_c = []
m_up_r = []
m_up_c = []
for i in range(nn - 1):
m_left_r.append(i)
m_left_c.append((i + 1))
m_right_r.append(i + 1)
m_right_c.append((i))
for i in range(nn):
m_up_r.append(i)
m_up_c.append(i + nn)
m_left = [torch.LongTensor(m_left_r), torch.LongTensor(m_left_c)]
m_right = [torch.LongTensor(m_right_r), torch.LongTensor(m_right_c)]
m_up = [torch.LongTensor(m_up_r), torch.LongTensor(m_up_c)]
return {"m_left": m_left, "m_right": m_right, "m_up": m_up}
def __load_gps(self, path, date):
df = pd.read_csv(path % date)
df = df.iloc[:, [0, 3, 4]]
return df.values
def __load_gps_err(self, date):
df = pd.read_csv(path_gps % date)
df = df.iloc[:, 6]
return df.values
def __load_gt(self, date):
df = pd.read_csv(path_gt % date)
gt = df.iloc[:, [0, 2, 1]].values
gt_err = df.iloc[:, [5, 4]].values
return gt, gt_err
def __load_gps_rtk_err(self, date):
df = pd.read_csv(path_gps_rtk_err % date)
return df.values
def __compute_gps_err(self, gps, gt):
return np.mean(np.square(gps - gt), axis=1)
def __load_data(self, date):
gps = self.__load_gps(path_gps, date)
gps_rtk = self.__load_gps(path_gps_rtk, date)
gps_rtk_err = self.__load_gps_rtk_err(date)
gt, _ = self.__load_gt(date)
self.lat0 = gps_rtk[0, 1]
self.lng0 = gps_rtk[0, 2]
self.bias = [gt[0, 1], gt[0, 2]]
gps_rtk_dec = self.__decompose(gps_rtk, date)
gps_rtk_err_dec = self.__decompose(gps_rtk_err, date)
gps_ar = []
gt_ar = []
gps_rtk_ar, gps_rtk_err_ar = [], []
for gps_rtk_i, gps_rtk_err_i in zip(gps_rtk_dec, gps_rtk_err_dec):
idxs = self.__filer_freq(gps_rtk_i[:, 0], f=1.)
gps_rtk_ar.append(gps_rtk_i[idxs, :])
gps_rtk_err_ar.append(gps_rtk_err_i[idxs, :])
idxs_gt = self.__match_tt(gps_rtk_ar[-1][:, 0], gt[:, 0])
gt_ar.append(gt[idxs_gt, :])
idxs = self.__match_tt(gps_rtk_ar[-1][:, 0], gps[:, 0])
gps_ar.append(gps[idxs, :])
return gps_ar, gps_rtk_ar, gps_rtk_err_ar, gt_ar
def __decompose(self, data, date):
if date == '2012-01-22':
return [data[100:2054], data[2054:4009], data[4147:6400], data[6400:8890], data[9103:10856], data[11113:12608],
data[12733:13525]]
else:
return data
def concatenate(self, arrays):
return np.concatenate(arrays, axis=0)
def __process_data(self):
for i in range(len(self.gps_rtk)):
self.gps_rtk[i][:, 1:] = polar2cartesian(self.gps_rtk[i][:, 1], self.gps_rtk[i][:, 2], self.lat0,
self.lng0)
self.gps[i][:, 1:] = polar2cartesian(self.gps[i][:, 1], self.gps[i][:, 2], self.lat0,
self.lng0)
self.gt[i][:, 1:] = remove_bias(self.gt[i][:, 1:], self.bias)
def __match_tt(self, tt1, tt2):
print("\tMatching gps and gt timestamps")
arr_idx = []
for i, ti in enumerate(tt1):
diff = np.abs(tt2 - ti)
min_idx = np.argmin(diff)
arr_idx.append(min_idx)
return arr_idx
def _match_gt_step1(self, gps, gps_err, gt, margin=5):
gt_aux = gt.copy()
min_err = 1e10
min_x, min_y = 0, 0
for x in np.linspace(-margin, margin, 200):
for y in np.linspace(-margin, margin, 200):
gt_aux[:, 0] = gt[:, 0] + x
gt_aux[:, 1] = gt[:, 1] + y
err = mse(gps, gps_err, gt_aux)
if err < min_err:
min_err = err
min_x = x
min_y = y
print(err)
print("Fixing GT bias x: %.4f \t y:%.4f \t error:%.4f" % (min_x, min_y, min_err))
return (min_x, min_y)
def _match_gt_step2(self, gt, err):
(min_x, min_y) = err
gt[:, 0] = gt[:, 0] + min_x
gt[:, 1] = gt[:, 1] + min_y
return gt
def __filer_freq(self, ts, f=1., window=5):
arr_idx = []
last_id = 0
arr_idx.append(last_id)
check = False
while last_id < len(ts) - window:
rel_j = []
for j in range(1, window):
rel_j.append(np.abs(f - (ts[last_id+j] - ts[last_id])/1000000))
last_id = last_id + 1 + np.argmin(rel_j)
min_val = np.min(rel_j)
if min_val > 0.05:
check = True
arr_idx.append(last_id)
if check:
print("\tWarning: Not all frequencies are %.3fHz" % f)
print("\tFiltering finished!")
return arr_idx
def mse(gps, gps_err, gt, th=2):
error = np.mean(np.square(gps - gt), axis=1)
mapping = (gps_err < th).astype(np.float32)
return np.mean(error*mapping)
def polar2cartesian(lat, lng, lat0, lng0):
dLat = lat - lat0
dLng = lng - lng0
r = 6400000
x = r * np.cos(lat0) * np.sin(dLng)
y = r * np.sin(dLat)
return np.concatenate((np.expand_dims(x, 1), np.expand_dims(y, 1)), 1)
def remove_bias(vector, bias):
for i in range(vector.shape[1]):
vector[:, i] = vector[:, i] - bias[i]
return vector
if __name__ == '__main__':
for date in dates:
dataset = NCLT('2012-01-22', partition='train')
dataset = NCLT('2012-01-22', partition='val')
dataset = NCLT('2012-01-22', partition='test')
def cut_array(array, ratio):
length = len(array)
return array[0:int(round(ratio*length))]
| true
| true
|
1c451cce6b7f3b495ac9f7b0e576b3407cde8ba6
| 719
|
py
|
Python
|
lemon/libs/route.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 6
|
2020-11-22T11:42:55.000Z
|
2022-01-09T12:29:30.000Z
|
lemon/libs/route.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 1
|
2020-11-21T00:05:40.000Z
|
2020-11-22T21:58:54.000Z
|
lemon/libs/route.py
|
InsaneMiner/Salt
|
b61c5f931fe4b6fa652e8fbfb59b30dbaaf9ed18
|
[
"MIT"
] | 2
|
2021-06-05T04:19:04.000Z
|
2021-06-05T04:28:08.000Z
|
import app.web
import config.config
import lemon.libs.lemon
import lemon.libs.colors
import lemon.libs.url_validation
import app.urls
def page(object):
correct_url = lemon.libs.url_validation.validate_url(object.url,app.urls.urls)
    if correct_url[0] is not None:
try:
object.url_data = correct_url[1]
data = getattr(app.web, app.urls.urls[correct_url[0]])(object)
except Exception as e:
data = lemon.libs.lemon.error(object,500)
print(e)
return data
else:
try:
data = lemon.libs.lemon.render_static(object,object.url[1:])
return data
except:
return lemon.libs.lemon.error(object,404)
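# Illustrative note: app.urls.urls is expected to map validated URL patterns to
# handler names defined in app.web (e.g. {"/": "index"}, an assumed example).
# For a request whose .url matches, page(request) dispatches to that handler;
# otherwise it falls back to serving the path as a static file, and finally to
# the 404 error page.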
| 29.958333
| 82
| 0.632823
|
import app.web
import config.config
import lemon.libs.lemon
import lemon.libs.colors
import lemon.libs.url_validation
import app.urls
def page(object):
correct_url = lemon.libs.url_validation.validate_url(object.url,app.urls.urls)
    if correct_url[0] is not None:
try:
object.url_data = correct_url[1]
data = getattr(app.web, app.urls.urls[correct_url[0]])(object)
except Exception as e:
data = lemon.libs.lemon.error(object,500)
print(e)
return data
else:
try:
data = lemon.libs.lemon.render_static(object,object.url[1:])
return data
except:
return lemon.libs.lemon.error(object,404)
| true
| true
|
1c451da618026b8bebb5ad5310a8825f0a00e52b
| 3,724
|
py
|
Python
|
osdu/services/search.py
|
eternelpanic/osdupy
|
3b30ceaed7f7f333a6a41d542b9430d4042f77f2
|
[
"MIT"
] | null | null | null |
osdu/services/search.py
|
eternelpanic/osdupy
|
3b30ceaed7f7f333a6a41d542b9430d4042f77f2
|
[
"MIT"
] | 7
|
2020-09-24T03:54:34.000Z
|
2022-03-29T20:16:42.000Z
|
osdu/services/search.py
|
eternelpanic/osdupy
|
3b30ceaed7f7f333a6a41d542b9430d4042f77f2
|
[
"MIT"
] | 3
|
2021-03-10T20:51:50.000Z
|
2021-09-30T08:31:45.000Z
|
""" Provides a simple Python interface to the OSDU Search API.
"""
import requests
from .base import BaseService
class SearchService(BaseService):
def __init__(self, client):
super().__init__(client, 'search', service_version=2)
def query(self, query: dict) -> dict:
"""Executes a query against the OSDU search service.
:param query: dict representing the JSON-style query to be sent to the search API. Must adhere to
            the Lucene syntax supported by OSDU. For more details, see:
https://community.opengroup.org/osdu/documentation/-/wikis/Releases/R2.0/OSDU-Query-Syntax
:returns: dict containing 3 items: aggregations, results, totalCount
- aggregations: dict: returned only if 'aggregateBy' specified in query
            - results:      list: of records resulting from search query
- totalCount: int: the total number of results despite any 'limit' specified in the
query or the 1,000 record limit of the API
"""
url = f'{self._service_url}/query'
response = requests.post(url=url, headers=self._headers(), json=query)
response.raise_for_status()
return response.json()
def query_with_paging(self, query: dict):
"""Executes a query with cursor against the OSDU search service. Returns a generator, which can than be
iterated over to retrieve each page in the result set without having to deal with any cursor.
:param query: dict representing the JSON-style query to be sent to the search API. Must adhere to
            the Lucene syntax supported by OSDU. For more details, see:
https://community.opengroup.org/osdu/documentation/-/wikis/Releases/R2.0/OSDU-Query-Syntax
:returns: iterator of tuple containing 2 items: (results, totalCount)
            - results:      list: one page of records resulting from search query. Default page size
is 10. This can be modified by passing the 'limit' parameter in
query with the maximum allowed being 1000.
- totalCount: int: the total number of results despite any 'limit' specified in the
query or the 1,000 record limit of the API
"""
url = f'{self._service_url}/query_with_cursor'
# Initial cursor can be anything, but using a non-empty string value helps prevent accidents
# in the case of sloppy/implicit boolean tests on the cursor value.
cursor = 'initial'
# Note: The last page does not include a cursor in the response body, so we have to
# unpack the values carefully and use a keyword to break our loop
while cursor != 'none': # Effective do-while loop
# Add cursor to request body for subsequent requests.
if cursor != 'initial':
query['cursor'] = cursor
response = requests.post(
url=url, headers=self._headers(), json=query)
response.raise_for_status()
response_values = response.json()
if 'cursor' not in response_values:
cursor = 'none'
else:
cursor = response_values['cursor']
if 'results' in response_values and 'totalCount' in response_values:
results = response_values['results']
total_count = response_values['totalCount']
yield results, total_count
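# A minimal usage sketch; the client object and query below are illustrative
# assumptions, not part of this module:
#
#     search = SearchService(client)                  # any configured OSDU client
#     query = {"kind": "*:*:*:*", "limit": 1000}      # Lucene-style OSDU query
#     for results, total_count in search.query_with_paging(query):
#         print(len(results), "of", total_count, "records in this page")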
| 51.722222
| 114
| 0.59855
|
import requests
from .base import BaseService
class SearchService(BaseService):
def __init__(self, client):
super().__init__(client, 'search', service_version=2)
def query(self, query: dict) -> dict:
url = f'{self._service_url}/query'
response = requests.post(url=url, headers=self._headers(), json=query)
response.raise_for_status()
return response.json()
def query_with_paging(self, query: dict):
url = f'{self._service_url}/query_with_cursor'
cursor = 'initial'
while cursor != 'none':
if cursor != 'initial':
query['cursor'] = cursor
response = requests.post(
url=url, headers=self._headers(), json=query)
response.raise_for_status()
response_values = response.json()
if 'cursor' not in response_values:
cursor = 'none'
else:
cursor = response_values['cursor']
if 'results' in response_values and 'totalCount' in response_values:
results = response_values['results']
total_count = response_values['totalCount']
yield results, total_count
| true
| true
|
1c451da9965b9c22319a97ee2b115df66aa1b1c4
| 8,190
|
py
|
Python
|
Gladiator/Player.py
|
sergenp/gladoidbot
|
6e450d8b379e2c8238e4cf32b3d71b2e13154034
|
[
"MIT"
] | 1
|
2020-09-04T03:59:27.000Z
|
2020-09-04T03:59:27.000Z
|
Gladiator/Player.py
|
sergenp/gladoidbot
|
6e450d8b379e2c8238e4cf32b3d71b2e13154034
|
[
"MIT"
] | null | null | null |
Gladiator/Player.py
|
sergenp/gladoidbot
|
6e450d8b379e2c8238e4cf32b3d71b2e13154034
|
[
"MIT"
] | 1
|
2020-03-18T13:10:11.000Z
|
2020-03-18T13:10:11.000Z
|
import random
import math
import json
from Gladiator.Stats.GladiatorStats import GladiatorStats
from Gladiator.AttackInformation.GladiatorAttackInformation import GladiatorAttackInformation
from Gladiator.Equipments.GladiatorEquipments import GladiatorEquipments
import urllib.parse
import pathlib
path = pathlib.Path(__file__).parent.absolute()
INITIAL_ATTACK_TYPES_COUNT = 3
class Player:
def __init__(self, stats_path):
self.dead = False
self.debuffs = []
self.permitted_attacks = []
with open(stats_path) as f:
self.json_dict = json.load(f)
self.stats = GladiatorStats(self.json_dict["Stats"])
self.attack_information = GladiatorAttackInformation()
with open(path / "Settings" / "GladiatorGameSettings.json") as f:
self.information = json.load(f)["game_information_texts"]
def take_damage(self, damage, damage_type):
try:
dmg = damage - self.stats[damage_type["armor_type_that_absorbs"]]
except KeyError:
dmg = damage
# check if the damage is blocked
roll = random.randint(0, 100)
if self.stats["Block Chance"] > roll or dmg <= 0:
return self.information["block_damage_text"].format(self)
dmg = round(dmg, 2)
self.stats["Health"] = round(self.stats["Health"] - dmg, 2)
if self.stats["Health"] <= 0:
return self.die()
# return info
return self.information["take_damage_text"].format(self, dmg, self, self.stats['Health'])
def damage_enemy(self, otherPlayer, damage_type_name=""):
inf = ""
# roll to see if attack hit
roll = random.randint(0, 100)
if self.stats["Attack Chance"] < roll:
return self.information["dodge_text"].format(otherPlayer)
dmg_type = self.attack_information.find_damage_type(damage_type_name)
min_dmg = self.stats[dmg_type["min_damage_stat"]]
max_dmg = self.stats[dmg_type["max_damage_stat"]]
# roll for damage
dmg = random.randint(min_dmg, max_dmg)
# roll for critical damage
crit_roll = random.randint(0, 100)
try:
if self.stats["Debuff Chance"] > 0:
# roll for debuff effect to other player
if self.stats["Debuff Chance"] > random.randint(0, 100):
inf += otherPlayer.take_debuff(self.stats["Debuff Type"])
except KeyError:
pass
if self.stats["Critical Damage Chance"] > crit_roll:
crit_dmg = math.ceil(dmg * self.stats["Critical Damage Boost"])
return inf + self.information["critical_hit_text"] + otherPlayer.take_damage(crit_dmg, dmg_type)
return inf + otherPlayer.take_damage(dmg, dmg_type)
def attack(self, otherPlayer, attack_type_name=""):
if not isinstance(otherPlayer, Player):
raise ValueError(
"otherPlayer must be an instance of Player")
# find the attack corresponding the name
attack = self.attack_information.find_attack_type(attack_type_name)
if not attack:
attack = random.choice(self.permitted_attacks)
self.buff(attack["buffs"])
inf = self.damage_enemy(otherPlayer, attack["damage_type_name"])
self.buff(attack["buffs"], buff_type="debuff")
return f"{self} Used {attack['name']} {attack['reaction_emoji']}\n" + inf
def die(self):
self.dead = True
return random.choice(self.information["death_texts"]).format(self)
def buff(self, buff: GladiatorStats or dict, buff_type="buff"):
if buff_type == "buff":
self.stats += buff
elif buff_type == "debuff":
self.stats -= buff
def take_debuff(self, turn_debuff_name: str):
debuff = self.attack_information.find_turn_debuff(turn_debuff_name)
# if the given debuff is already affecting the player,
# make it last more turns
for dbf in self.debuffs:
if dbf["debuff_stats"]["Debuff Type"] == debuff["debuff_stats"]["Debuff Type"]:
dbf["lasts_turn_count"] += 1
break
# if given debuff is not currently affecting the player,
# append it to the current debuffs list
else:
self.debuffs.append(debuff)
return self.information["take_debuff_text"].format(self, debuff["debuff_stats"]["Debuff Type"], debuff["lasts_turn_count"])
def take_damage_per_turn(self):
# if there is any debuffs in the list
if len(self.debuffs) > 0:
inf = ""
for index, debuff in enumerate(self.debuffs):
if debuff["lasts_turn_count"] > 0:
debuff["lasts_turn_count"] -= 1
self.stats['Health'] -= debuff["debuff_stats"]["Debuff Damage"]
inf += self.information["take_damage_per_turn_from_debuffs_text"].format(
self, debuff["debuff_stats"]["Debuff Damage"], debuff["debuff_stats"]["Debuff Type"], self.stats["Health"], debuff["lasts_turn_count"])
if self.stats["Health"] <= 0:
inf += "\n" + self.die()
else:
del self.debuffs[index]
return inf
return ""
class GladiatorPlayer(Player):
def __init__(self, member):
super().__init__(stats_path=path / "UserProfileData" / f"{member.id}.json")
self.member = member
self.equipment_information = GladiatorEquipments()
self.permitted_attacks = self.attack_information.attack_types[:INITIAL_ATTACK_TYPES_COUNT]
def equip_item(self, equipment_name, equipment_slot_name):
slot = self.equipment_information.find_slot(equipment_slot_name)
# if there is an equipment equipped already in the slot,
# do nothing, and return
if slot:
if slot["Equipment"]:
return
equipment = self.equipment_information.find_equipment(equipment_name)
if equipment:
if equipment["type"] == slot["Slot Name"]:
self.equipment_information.update_slot(slot["Slot Name"], equipment)
self.stats += equipment["buffs"]
if equipment["unlock_attack_name"]:
self.unlock_attack_type(equipment["unlock_attack_name"])
debuff = self.attack_information.find_turn_debuff(equipment["debuff_name"])
if debuff:
self.stats += debuff["debuff_stats"]
def unlock_attack_type(self, attack_name):
for i in self.permitted_attacks:
if i["name"] == attack_name:
return
self.permitted_attacks.append(
self.attack_information.find_attack_type(attack_name))
def __repr__(self):
return f"<@{self.member.id}>"
class GladiatorNPC(Player):
def __init__(self, stats_path, **kwargs):
super().__init__(stats_path)
self.name = self.json_dict["Name"]
url_encoded_name = urllib.parse.quote(self.name)
self.image_path = f"https://gladoid.herokuapp.com/npcimage?name={url_encoded_name}"
self.level = random.randint(self.json_dict["Min Level"], self.json_dict["Max Level"])
self.footer_text = self.json_dict.get("FooterText", "")
for attack_name in self.json_dict["Attacks"]:
self.permitted_attacks.append(
self.attack_information.find_attack_type(attack_name))
for k, min_stat in dict(self.json_dict["Stats"]).items():
for l in range(self.level):
min_stat += (l/17)**1.1
min_stat = round(min_stat, 2)
self.stats[k] = min_stat
for debuff_name in self.json_dict["Debuffs"]:
self.stats += self.attack_information.find_turn_debuff(debuff_name)["debuff_stats"]
self.stats += kwargs
def get_random_attack(self):
return random.choice(self.permitted_attacks)
def __repr__(self):
return f"Level {self.level} {self.name} "
| 39.186603
| 159
| 0.617582
|
import random
import math
import json
from Gladiator.Stats.GladiatorStats import GladiatorStats
from Gladiator.AttackInformation.GladiatorAttackInformation import GladiatorAttackInformation
from Gladiator.Equipments.GladiatorEquipments import GladiatorEquipments
import urllib.parse
import pathlib
path = pathlib.Path(__file__).parent.absolute()
INITIAL_ATTACK_TYPES_COUNT = 3
class Player:
def __init__(self, stats_path):
self.dead = False
self.debuffs = []
self.permitted_attacks = []
with open(stats_path) as f:
self.json_dict = json.load(f)
self.stats = GladiatorStats(self.json_dict["Stats"])
self.attack_information = GladiatorAttackInformation()
with open(path / "Settings" / "GladiatorGameSettings.json") as f:
self.information = json.load(f)["game_information_texts"]
def take_damage(self, damage, damage_type):
try:
dmg = damage - self.stats[damage_type["armor_type_that_absorbs"]]
except KeyError:
dmg = damage
roll = random.randint(0, 100)
if self.stats["Block Chance"] > roll or dmg <= 0:
return self.information["block_damage_text"].format(self)
dmg = round(dmg, 2)
self.stats["Health"] = round(self.stats["Health"] - dmg, 2)
if self.stats["Health"] <= 0:
return self.die()
return self.information["take_damage_text"].format(self, dmg, self, self.stats['Health'])
def damage_enemy(self, otherPlayer, damage_type_name=""):
inf = ""
roll = random.randint(0, 100)
if self.stats["Attack Chance"] < roll:
return self.information["dodge_text"].format(otherPlayer)
dmg_type = self.attack_information.find_damage_type(damage_type_name)
min_dmg = self.stats[dmg_type["min_damage_stat"]]
max_dmg = self.stats[dmg_type["max_damage_stat"]]
dmg = random.randint(min_dmg, max_dmg)
crit_roll = random.randint(0, 100)
try:
if self.stats["Debuff Chance"] > 0:
if self.stats["Debuff Chance"] > random.randint(0, 100):
inf += otherPlayer.take_debuff(self.stats["Debuff Type"])
except KeyError:
pass
if self.stats["Critical Damage Chance"] > crit_roll:
crit_dmg = math.ceil(dmg * self.stats["Critical Damage Boost"])
return inf + self.information["critical_hit_text"] + otherPlayer.take_damage(crit_dmg, dmg_type)
return inf + otherPlayer.take_damage(dmg, dmg_type)
def attack(self, otherPlayer, attack_type_name=""):
if not isinstance(otherPlayer, Player):
raise ValueError(
"otherPlayer must be an instance of Player")
attack = self.attack_information.find_attack_type(attack_type_name)
if not attack:
attack = random.choice(self.permitted_attacks)
self.buff(attack["buffs"])
inf = self.damage_enemy(otherPlayer, attack["damage_type_name"])
self.buff(attack["buffs"], buff_type="debuff")
return f"{self} Used {attack['name']} {attack['reaction_emoji']}\n" + inf
def die(self):
self.dead = True
return random.choice(self.information["death_texts"]).format(self)
def buff(self, buff: GladiatorStats or dict, buff_type="buff"):
if buff_type == "buff":
self.stats += buff
elif buff_type == "debuff":
self.stats -= buff
def take_debuff(self, turn_debuff_name: str):
debuff = self.attack_information.find_turn_debuff(turn_debuff_name)
for dbf in self.debuffs:
if dbf["debuff_stats"]["Debuff Type"] == debuff["debuff_stats"]["Debuff Type"]:
dbf["lasts_turn_count"] += 1
break
else:
self.debuffs.append(debuff)
return self.information["take_debuff_text"].format(self, debuff["debuff_stats"]["Debuff Type"], debuff["lasts_turn_count"])
def take_damage_per_turn(self):
if len(self.debuffs) > 0:
inf = ""
for index, debuff in enumerate(self.debuffs):
if debuff["lasts_turn_count"] > 0:
debuff["lasts_turn_count"] -= 1
self.stats['Health'] -= debuff["debuff_stats"]["Debuff Damage"]
inf += self.information["take_damage_per_turn_from_debuffs_text"].format(
self, debuff["debuff_stats"]["Debuff Damage"], debuff["debuff_stats"]["Debuff Type"], self.stats["Health"], debuff["lasts_turn_count"])
if self.stats["Health"] <= 0:
inf += "\n" + self.die()
else:
del self.debuffs[index]
return inf
return ""
class GladiatorPlayer(Player):
def __init__(self, member):
super().__init__(stats_path=path / "UserProfileData" / f"{member.id}.json")
self.member = member
self.equipment_information = GladiatorEquipments()
self.permitted_attacks = self.attack_information.attack_types[:INITIAL_ATTACK_TYPES_COUNT]
def equip_item(self, equipment_name, equipment_slot_name):
slot = self.equipment_information.find_slot(equipment_slot_name)
if slot:
if slot["Equipment"]:
return
equipment = self.equipment_information.find_equipment(equipment_name)
if equipment:
if equipment["type"] == slot["Slot Name"]:
self.equipment_information.update_slot(slot["Slot Name"], equipment)
self.stats += equipment["buffs"]
if equipment["unlock_attack_name"]:
self.unlock_attack_type(equipment["unlock_attack_name"])
debuff = self.attack_information.find_turn_debuff(equipment["debuff_name"])
if debuff:
self.stats += debuff["debuff_stats"]
def unlock_attack_type(self, attack_name):
for i in self.permitted_attacks:
if i["name"] == attack_name:
return
self.permitted_attacks.append(
self.attack_information.find_attack_type(attack_name))
def __repr__(self):
return f"<@{self.member.id}>"
class GladiatorNPC(Player):
def __init__(self, stats_path, **kwargs):
super().__init__(stats_path)
self.name = self.json_dict["Name"]
url_encoded_name = urllib.parse.quote(self.name)
self.image_path = f"https://gladoid.herokuapp.com/npcimage?name={url_encoded_name}"
self.level = random.randint(self.json_dict["Min Level"], self.json_dict["Max Level"])
self.footer_text = self.json_dict.get("FooterText", "")
for attack_name in self.json_dict["Attacks"]:
self.permitted_attacks.append(
self.attack_information.find_attack_type(attack_name))
for k, min_stat in dict(self.json_dict["Stats"]).items():
for l in range(self.level):
min_stat += (l/17)**1.1
min_stat = round(min_stat, 2)
self.stats[k] = min_stat
for debuff_name in self.json_dict["Debuffs"]:
self.stats += self.attack_information.find_turn_debuff(debuff_name)["debuff_stats"]
self.stats += kwargs
def get_random_attack(self):
return random.choice(self.permitted_attacks)
def __repr__(self):
return f"Level {self.level} {self.name} "
| true
| true
|
1c451e591138c58b97abd21b494bd67e7590cc57
| 457
|
py
|
Python
|
1143-longest-common-subsequence/1143-longest-common-subsequence.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | 1
|
2022-02-14T08:03:32.000Z
|
2022-02-14T08:03:32.000Z
|
1143-longest-common-subsequence/1143-longest-common-subsequence.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | null | null | null |
1143-longest-common-subsequence/1143-longest-common-subsequence.py
|
tlylt/LeetCodeAnki
|
9f69504c3762f7895d95c2a592f18ad395199ff4
|
[
"MIT"
] | null | null | null |
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)
n = len(text2)
dp = [[0 for i in range(n+1)] for j in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
if text1[i-1] == text2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[m][n]
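if __name__ == "__main__":
    # A quick sanity check with the classic example: the longest common
    # subsequence of "abcde" and "ace" is "ace", of length 3.
    assert Solution().longestCommonSubsequence("abcde", "ace") == 3
    assert Solution().longestCommonSubsequence("abc", "def") == 0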
| 38.083333
| 70
| 0.428884
|
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)
n = len(text2)
dp = [[0 for i in range(n+1)] for j in range(m+1)]
for i in range(1, m+1):
for j in range(1, n+1):
if text1[i-1] == text2[j-1]:
dp[i][j] = dp[i-1][j-1] + 1
else:
dp[i][j] = max(dp[i-1][j], dp[i][j-1])
return dp[m][n]
| true
| true
|
1c451ea06c8fd0d11137c3e4b9dda843e3fa5e7b
| 44,232
|
py
|
Python
|
python_modules/dagster/dagster/core/definitions/pipeline_definition.py
|
kstennettlull/dagster
|
dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/definitions/pipeline_definition.py
|
kstennettlull/dagster
|
dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/definitions/pipeline_definition.py
|
kstennettlull/dagster
|
dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6
|
[
"Apache-2.0"
] | null | null | null |
from functools import update_wrapper
from typing import TYPE_CHECKING, AbstractSet, Any, Dict, FrozenSet, List, Optional, Set, Union
from dagster import check
from dagster.core.definitions.policy import RetryPolicy
from dagster.core.definitions.resource_definition import ResourceDefinition
from dagster.core.definitions.solid_definition import NodeDefinition
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidSubsetError,
DagsterInvariantViolationError,
)
from dagster.core.storage.output_manager import IOutputManagerDefinition
from dagster.core.storage.root_input_manager import (
IInputManagerDefinition,
RootInputManagerDefinition,
)
from dagster.core.storage.tags import MEMOIZED_RUN_TAG
from dagster.core.types.dagster_type import DagsterType, DagsterTypeKind
from dagster.core.utils import str_format_set
from dagster.utils import frozentags, merge_dicts
from dagster.utils.backcompat import experimental_class_warning
from .dependency import (
DependencyDefinition,
DependencyStructure,
DynamicCollectDependencyDefinition,
IDependencyDefinition,
MultiDependencyDefinition,
Node,
NodeHandle,
NodeInvocation,
SolidInputHandle,
)
from .graph_definition import GraphDefinition, SubselectedGraphDefinition
from .hook_definition import HookDefinition
from .mode import ModeDefinition
from .node_definition import NodeDefinition
from .preset import PresetDefinition
from .utils import validate_tags
from .version_strategy import VersionStrategy
if TYPE_CHECKING:
from dagster.core.definitions.partition import PartitionSetDefinition
from dagster.core.execution.execute_in_process_result import ExecuteInProcessResult
from dagster.core.host_representation import PipelineIndex
from dagster.core.instance import DagsterInstance
from dagster.core.snap import ConfigSchemaSnapshot, PipelineSnapshot
from .run_config_schema import RunConfigSchema
class PipelineDefinition:
"""Defines a Dagster pipeline.
A pipeline is made up of
- Solids, each of which is a single functional unit of data computation.
- Dependencies, which determine how the values produced by solids as their outputs flow from
one solid to another. This tells Dagster how to arrange solids, and potentially multiple
aliased instances of solids, into a directed, acyclic graph (DAG) of compute.
- Modes, which can be used to attach resources, custom loggers, custom system storage
options, and custom executors to a pipeline, and to switch between them.
- Presets, which can be used to ship common combinations of pipeline config options in Python
code, and to switch between them.
Args:
solid_defs (List[SolidDefinition]): The set of solids used in this pipeline.
name (str): The name of the pipeline. Must be unique within any
:py:class:`RepositoryDefinition` containing the pipeline.
description (Optional[str]): A human-readable description of the pipeline.
dependencies (Optional[Dict[Union[str, NodeInvocation], Dict[str, DependencyDefinition]]]):
A structure that declares the dependencies of each solid's inputs on the outputs of
other solids in the pipeline. Keys of the top level dict are either the string names of
solids in the pipeline or, in the case of aliased solids,
:py:class:`NodeInvocations <NodeInvocation>`. Values of the top level dict are
themselves dicts, which map input names belonging to the solid or aliased solid to
:py:class:`DependencyDefinitions <DependencyDefinition>`.
mode_defs (Optional[List[ModeDefinition]]): The set of modes in which this pipeline can
operate. Modes are used to attach resources, custom loggers, custom system storage
options, and custom executors to a pipeline. Modes can be used, e.g., to vary available
resource and logging implementations between local test and production runs.
preset_defs (Optional[List[PresetDefinition]]): A set of preset collections of configuration
options that may be used to execute a pipeline. A preset consists of an environment
dict, an optional subset of solids to execute, and a mode selection. Presets can be used
to ship common combinations of options to pipeline end users in Python code, and can
be selected by tools like Dagit.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution run of the pipeline.
Values that are not strings will be json encoded and must meet the criteria that
`json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag
values provided at invocation time.
hook_defs (Optional[AbstractSet[HookDefinition]]): A set of hook definitions applied to the
pipeline. When a hook is applied to a pipeline, it will be attached to all solid
instances within the pipeline.
solid_retry_policy (Optional[RetryPolicy]): The default retry policy for all solids in
this pipeline. Only used if retry policy is not defined on the solid definition or
solid invocation.
_parent_pipeline_def (INTERNAL ONLY): Used for tracking pipelines created using solid subsets.
Examples:
.. code-block:: python
@solid
def return_one(_):
return 1
@solid(input_defs=[InputDefinition('num')], required_resource_keys={'op'})
def apply_op(context, num):
return context.resources.op(num)
@resource(config_schema=Int)
def adder_resource(init_context):
return lambda x: x + init_context.resource_config
add_mode = ModeDefinition(
name='add_mode',
resource_defs={'op': adder_resource},
description='Mode that adds things',
)
add_three_preset = PresetDefinition(
name='add_three_preset',
run_config={'resources': {'op': {'config': 3}}},
mode='add_mode',
)
pipeline_def = PipelineDefinition(
name='basic',
solid_defs=[return_one, apply_op],
dependencies={'apply_op': {'num': DependencyDefinition('return_one')}},
mode_defs=[add_mode],
preset_defs=[add_three_preset],
)
"""
def __init__(
self,
solid_defs: Optional[List[NodeDefinition]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
dependencies: Optional[
Dict[Union[str, NodeInvocation], Dict[str, IDependencyDefinition]]
] = None,
mode_defs: Optional[List[ModeDefinition]] = None,
preset_defs: Optional[List[PresetDefinition]] = None,
tags: Optional[Dict[str, Any]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
solid_retry_policy: Optional[RetryPolicy] = None,
graph_def=None,
_parent_pipeline_def=None, # https://github.com/dagster-io/dagster/issues/2115
version_strategy: Optional[VersionStrategy] = None,
):
# If a graph is specified directly, use it
if check.opt_inst_param(graph_def, "graph_def", GraphDefinition):
self._graph_def = graph_def
self._name = name or graph_def.name
# Otherwise, fall back to legacy construction
else:
if name is None:
check.failed("name must be set provided")
self._name = name
if solid_defs is None:
check.failed("solid_defs must be provided")
self._graph_def = GraphDefinition(
name=name,
dependencies=dependencies,
node_defs=solid_defs,
input_mappings=None,
output_mappings=None,
config=None,
description=None,
)
# tags and description can exist on graph as well, but since
# same graph may be in multiple pipelines/jobs, keep separate layer
self._description = check.opt_str_param(description, "description")
self._tags = validate_tags(tags)
self._current_level_node_defs = self._graph_def.node_defs
mode_definitions = check.opt_list_param(mode_defs, "mode_defs", of_type=ModeDefinition)
if not mode_definitions:
mode_definitions = [ModeDefinition()]
self._mode_definitions = mode_definitions
seen_modes = set()
for mode_def in mode_definitions:
if mode_def.name in seen_modes:
raise DagsterInvalidDefinitionError(
(
'Two modes seen with the name "{mode_name}" in "{pipeline_name}". '
"Modes must have unique names."
).format(mode_name=mode_def.name, pipeline_name=self.name)
)
seen_modes.add(mode_def.name)
self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)
self._solid_retry_policy = check.opt_inst_param(
solid_retry_policy, "solid_retry_policy", RetryPolicy
)
self._preset_defs = check.opt_list_param(preset_defs, "preset_defs", PresetDefinition)
self._preset_dict: Dict[str, PresetDefinition] = {}
for preset in self._preset_defs:
if preset.name in self._preset_dict:
raise DagsterInvalidDefinitionError(
(
'Two PresetDefinitions seen with the name "{name}" in "{pipeline_name}". '
"PresetDefinitions must have unique names."
).format(name=preset.name, pipeline_name=self.name)
)
if preset.mode not in seen_modes:
raise DagsterInvalidDefinitionError(
(
'PresetDefinition "{name}" in "{pipeline_name}" '
'references mode "{mode}" which is not defined.'
).format(name=preset.name, pipeline_name=self.name, mode=preset.mode)
)
self._preset_dict[preset.name] = preset
self._resource_requirements = {
mode_def.name: _checked_resource_reqs_for_mode(
mode_def,
self._current_level_node_defs,
self._graph_def._dagster_type_dict,
self._graph_def._node_dict,
self._hook_defs,
self._graph_def._dependency_structure,
)
for mode_def in self._mode_definitions
}
# Recursively explore all nodes in this pipeline
self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)
self._parent_pipeline_def = check.opt_inst_param(
_parent_pipeline_def, "_parent_pipeline_def", PipelineDefinition
)
self._cached_run_config_schemas: Dict[str, "RunConfigSchema"] = {}
self._cached_external_pipeline = None
self.version_strategy = check.opt_inst_param(
version_strategy, "version_strategy", VersionStrategy
)
if self.version_strategy is not None:
experimental_class_warning("VersionStrategy")
@property
def name(self):
return self._name
@property
def target_type(self):
return "pipeline"
@property
def is_job(self) -> bool:
return False
def describe_target(self):
return f"{self.target_type} '{self.name}'"
@property
def tags(self):
return frozentags(**merge_dicts(self._graph_def.tags, self._tags))
@property
def description(self):
return self._description
@property
def graph(self):
return self._graph_def
@property
def dependency_structure(self):
return self._graph_def.dependency_structure
@property
def dependencies(self):
return self._graph_def.dependencies
def get_run_config_schema(self, mode: Optional[str] = None) -> "RunConfigSchema":
check.str_param(mode, "mode")
mode_def = self.get_mode_definition(mode)
if mode_def.name in self._cached_run_config_schemas:
return self._cached_run_config_schemas[mode_def.name]
self._cached_run_config_schemas[mode_def.name] = _create_run_config_schema(
self,
mode_def,
self._resource_requirements[mode_def.name],
)
return self._cached_run_config_schemas[mode_def.name]
@property
def mode_definitions(self) -> List[ModeDefinition]:
return self._mode_definitions
@property
def preset_defs(self) -> List[PresetDefinition]:
return self._preset_defs
def _get_mode_definition(self, mode: str) -> Optional[ModeDefinition]:
check.str_param(mode, "mode")
for mode_definition in self._mode_definitions:
if mode_definition.name == mode:
return mode_definition
return None
def get_default_mode(self) -> ModeDefinition:
return self._mode_definitions[0]
@property
def is_single_mode(self) -> bool:
return len(self._mode_definitions) == 1
@property
def is_multi_mode(self) -> bool:
return len(self._mode_definitions) > 1
def is_using_memoization(self, run_tags: Dict[str, str]) -> bool:
tags = merge_dicts(self.tags, run_tags)
# If someone provides a false value for memoized run tag, then they are intentionally
# switching off memoization.
if tags.get(MEMOIZED_RUN_TAG) == "false":
return False
return (
MEMOIZED_RUN_TAG in tags and tags.get(MEMOIZED_RUN_TAG) == "true"
) or self.version_strategy is not None
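# Illustrative note (not part of the original source): with the logic above, and
# assuming the pipeline's own tags set no memoization tag,
# run_tags={MEMOIZED_RUN_TAG: "true"} yields True,
# run_tags={MEMOIZED_RUN_TAG: "false"} yields False even when a VersionStrategy is
# attached, and an empty run_tags dict yields True only if a VersionStrategy is present.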
def has_mode_definition(self, mode: str) -> bool:
check.str_param(mode, "mode")
return bool(self._get_mode_definition(mode))
def get_default_mode_name(self) -> str:
return self._mode_definitions[0].name
def get_mode_definition(self, mode: Optional[str] = None) -> ModeDefinition:
check.opt_str_param(mode, "mode")
if mode is None:
check.invariant(self.is_single_mode)
return self.get_default_mode()
mode_def = self._get_mode_definition(mode)
if mode_def is None:
check.failed(
"Could not find mode {mode} in pipeline {name}".format(mode=mode, name=self.name),
)
return mode_def
@property
def available_modes(self) -> List[str]:
return [mode_def.name for mode_def in self._mode_definitions]
def get_required_resource_defs_for_mode(self, mode: str) -> Dict[str, ResourceDefinition]:
return {
resource_key: resource
for resource_key, resource in self.get_mode_definition(mode).resource_defs.items()
if resource_key in self._resource_requirements[mode]
}
@property
def all_node_defs(self) -> List[NodeDefinition]:
return list(self._all_node_defs.values())
@property
def top_level_solid_defs(self) -> List[NodeDefinition]:
return self._current_level_node_defs
def solid_def_named(self, name: str) -> NodeDefinition:
check.str_param(name, "name")
check.invariant(name in self._all_node_defs, "{} not found".format(name))
return self._all_node_defs[name]
def has_solid_def(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._all_node_defs
def get_solid(self, handle):
return self._graph_def.get_solid(handle)
def has_solid_named(self, name):
return self._graph_def.has_solid_named(name)
def solid_named(self, name):
return self._graph_def.solid_named(name)
@property
def solids(self):
return self._graph_def.solids
@property
def solids_in_topological_order(self):
return self._graph_def.solids_in_topological_order
def all_dagster_types(self):
return self._graph_def.all_dagster_types()
def has_dagster_type(self, name):
return self._graph_def.has_dagster_type(name)
def dagster_type_named(self, name):
return self._graph_def.dagster_type_named(name)
def get_pipeline_subset_def(
self, solids_to_execute: Optional[AbstractSet[str]]
) -> "PipelineDefinition":
return (
self if solids_to_execute is None else _get_pipeline_subset_def(self, solids_to_execute)
)
def has_preset(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._preset_dict
def get_preset(self, name: str) -> PresetDefinition:
check.str_param(name, "name")
if name not in self._preset_dict:
raise DagsterInvariantViolationError(
(
'Could not find preset for "{name}". Available presets '
'for pipeline "{pipeline_name}" are {preset_names}.'
).format(
name=name,
preset_names=list(self._preset_dict.keys()),
pipeline_name=self.name,
)
)
return self._preset_dict[name]
def get_pipeline_snapshot(self) -> "PipelineSnapshot":
return self.get_pipeline_index().pipeline_snapshot
def get_pipeline_snapshot_id(self) -> str:
return self.get_pipeline_index().pipeline_snapshot_id
def get_pipeline_index(self) -> "PipelineIndex":
from dagster.core.host_representation import PipelineIndex
from dagster.core.snap import PipelineSnapshot
return PipelineIndex(
PipelineSnapshot.from_pipeline_def(self), self.get_parent_pipeline_snapshot()
)
def get_config_schema_snapshot(self) -> "ConfigSchemaSnapshot":
return self.get_pipeline_snapshot().config_schema_snapshot
@property
def is_subset_pipeline(self) -> bool:
return False
@property
def parent_pipeline_def(self) -> Optional["PipelineDefinition"]:
return None
def get_parent_pipeline_snapshot(self) -> Optional["PipelineSnapshot"]:
return None
@property
def solids_to_execute(self) -> Optional[FrozenSet[str]]:
return None
@property
def hook_defs(self) -> AbstractSet[HookDefinition]:
return self._hook_defs
def get_all_hooks_for_handle(self, handle: NodeHandle) -> FrozenSet[HookDefinition]:
"""Gather all the hooks for the given solid from all places possibly attached with a hook.
A hook can be attached to any of the following objects
* Solid (solid invocation)
* PipelineDefinition
Args:
handle (NodeHandle): The solid's handle
Returns:
FrozenSet[HookDefinition]
"""
check.inst_param(handle, "handle", NodeHandle)
hook_defs: AbstractSet[HookDefinition] = set()
current = handle
lineage = []
while current:
lineage.append(current.name)
current = current.parent
# hooks on top-level solid
name = lineage.pop()
solid = self._graph_def.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
# hooks on non-top-level solids
while lineage:
name = lineage.pop()
solid = solid.definition.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
# hooks applied to a pipeline definition will run on every solid
hook_defs = hook_defs.union(self.hook_defs)
return frozenset(hook_defs)
def get_retry_policy_for_handle(self, handle: NodeHandle) -> Optional[RetryPolicy]:
solid = self.get_solid(handle)
if solid.retry_policy:
return solid.retry_policy
elif solid.definition.retry_policy:
return solid.definition.retry_policy
# could be expanded to look in composite_solid / graph containers
else:
return self._solid_retry_policy
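# Illustrative note (not in the original source): the precedence implemented above is
# invocation-level retry_policy > definition-level retry_policy > the pipeline-wide
# solid_retry_policy passed to PipelineDefinition.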
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PipelineDefinition":
"""Apply a set of hooks to all solid instances within the pipeline."""
hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
pipeline_def = PipelineDefinition(
name=self.name,
graph_def=self._graph_def,
mode_defs=self.mode_definitions,
preset_defs=self.preset_defs,
tags=self.tags,
hook_defs=hook_defs | self.hook_defs,
description=self._description,
solid_retry_policy=self._solid_retry_policy,
_parent_pipeline_def=self._parent_pipeline_def,
)
update_wrapper(pipeline_def, self, updated=())
return pipeline_def
# make Callable for decorator reference updates
def __call__(self, *args, **kwargs):
if self.is_job:
msg = (
f"Attempted to call job '{self.name}' directly. Jobs should be invoked by "
"using an execution API function (e.g. `job.execute_in_process`)."
)
else:
msg = (
f"Attempted to call pipeline '{self.name}' directly. Pipelines should be invoked by "
"using an execution API function (e.g. `execute_pipeline`)."
)
raise DagsterInvariantViolationError(msg)
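# A hedged usage sketch (illustrative only, not part of this module): executing the
# `pipeline_def` built in the PipelineDefinition docstring example above with its
# preset, via dagster's execution API (`execute_pipeline`, as referenced in the
# error message of `__call__`).
#
#     from dagster import execute_pipeline
#
#     result = execute_pipeline(pipeline_def, preset="add_three_preset")
#     assert result.success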
class PipelineSubsetDefinition(PipelineDefinition):
@property
def solids_to_execute(self):
return frozenset(self._graph_def.node_names())
@property
def solid_selection(self) -> List[str]:
# we currently don't pass the real solid_selection (the solid query list) down here.
# so in the short-term, to make the call sites cleaner, we will convert the solids to execute
# to a list
return self._graph_def.node_names()
@property
def parent_pipeline_def(self) -> PipelineDefinition:
return self._parent_pipeline_def
def get_parent_pipeline_snapshot(self) -> Optional["PipelineSnapshot"]:
return self._parent_pipeline_def.get_pipeline_snapshot()
@property
def is_subset_pipeline(self) -> bool:
return True
def get_pipeline_subset_def(
self, solids_to_execute: Optional[AbstractSet[str]]
) -> "PipelineSubsetDefinition":
raise DagsterInvariantViolationError("Pipeline subsets may not be subset again.")
def _dep_key_of(solid: Node) -> NodeInvocation:
return NodeInvocation(
name=solid.definition.name,
alias=solid.name,
tags=solid.tags,
hook_defs=solid.hook_defs,
retry_policy=solid.retry_policy,
)
def _get_pipeline_subset_def(
pipeline_def: PipelineDefinition,
solids_to_execute: AbstractSet[str],
) -> "PipelineSubsetDefinition":
"""
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solids_to_execute.
"""
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.set_param(solids_to_execute, "solids_to_execute", of_type=str)
graph = pipeline_def.graph
for solid_name in solids_to_execute:
if not graph.has_solid_named(solid_name):
raise DagsterInvalidSubsetError(
"{target_type} {pipeline_name} has no {node_type} named {name}.".format(
target_type=pipeline_def.target_type,
pipeline_name=pipeline_def.name,
name=solid_name,
node_type="ops" if pipeline_def.is_job else "solids",
),
)
# go in topo order to ensure deps dict is ordered
solids = list(
filter(lambda solid: solid.name in solids_to_execute, graph.solids_in_topological_order)
)
deps: Dict[
Union[str, NodeInvocation],
Dict[str, IDependencyDefinition],
] = {_dep_key_of(solid): {} for solid in solids}
for solid in solids:
for input_handle in solid.input_handles():
if graph.dependency_structure.has_direct_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_direct_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
elif graph.dependency_structure.has_dynamic_fan_in_dep(input_handle):
output_handle = graph.dependency_structure.get_dynamic_fan_in_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][
input_handle.input_def.name
] = DynamicCollectDependencyDefinition(
solid_name=output_handle.solid.name,
output_name=output_handle.output_def.name,
)
elif graph.dependency_structure.has_fan_in_deps(input_handle):
output_handles = graph.dependency_structure.get_fan_in_deps(input_handle)
deps[_dep_key_of(solid)][input_handle.input_def.name] = MultiDependencyDefinition(
[
DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
for output_handle in output_handles
if output_handle.solid.name in solids_to_execute
]
)
# else input is unconnected
try:
sub_pipeline_def = PipelineSubsetDefinition(
name=pipeline_def.name, # should we change the name for subsetted pipeline?
solid_defs=list({solid.definition for solid in solids}),
mode_defs=pipeline_def.mode_definitions,
dependencies=deps,
_parent_pipeline_def=pipeline_def,
tags=pipeline_def.tags,
hook_defs=pipeline_def.hook_defs,
)
return sub_pipeline_def
except DagsterInvalidDefinitionError as exc:
# This handles the case when you construct a subset such that an unsatisfied
# input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
# we re-raise a DagsterInvalidSubsetError.
raise DagsterInvalidSubsetError(
f"The attempted subset {str_format_set(solids_to_execute)} for {pipeline_def.target_type} "
f"{pipeline_def.name} results in an invalid {pipeline_def.target_type}"
) from exc
def _checked_resource_reqs_for_mode(
mode_def: ModeDefinition,
node_defs: List[NodeDefinition],
dagster_type_dict: Dict[str, DagsterType],
solid_dict: Dict[str, Node],
pipeline_hook_defs: AbstractSet[HookDefinition],
dependency_structure: DependencyStructure,
) -> Set[str]:
"""
Calculate the resource requirements for the pipeline in this mode and ensure they are
provided by the mode.
We combine these operations into one traversal to allow for raising exceptions that provide
as much context as possible about where the unsatisfied resource requirement came from.
"""
resource_reqs: Set[str] = set()
mode_output_managers = set(
key
for key, resource_def in mode_def.resource_defs.items()
if isinstance(resource_def, IOutputManagerDefinition)
)
mode_resources = set(mode_def.resource_defs.keys())
for node_def in node_defs:
for solid_def in node_def.iterate_solid_defs():
for required_resource in solid_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=solid_def.describe_node(),
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for output_def in solid_def.output_defs:
resource_reqs.add(output_def.io_manager_key)
if output_def.io_manager_key not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="IO manager",
resource_key=output_def.io_manager_key,
descriptor=f"output '{output_def.name}' of {solid_def.describe_node()}",
mode_def=mode_def,
resource_defs_of_type=mode_output_managers,
)
raise DagsterInvalidDefinitionError(error_msg)
resource_reqs.update(
_checked_type_resource_reqs_for_mode(
mode_def,
dagster_type_dict,
)
)
# Validate unsatisfied inputs can be materialized from config
resource_reqs.update(
_checked_input_resource_reqs_for_mode(dependency_structure, solid_dict, mode_def)
)
for solid in solid_dict.values():
for hook_def in solid.hook_defs:
for required_resource in hook_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"hook '{hook_def.name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for hook_def in pipeline_hook_defs:
for required_resource in hook_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"hook '{hook_def.name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for resource_key, resource in mode_def.resource_defs.items():
for required_resource in resource.required_resource_keys:
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"resource at key '{resource_key}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
# Finally, recursively add any resources that the set of required resources require
while True:
new_resources: Set[str] = set()
for resource_key in resource_reqs:
resource = mode_def.resource_defs[resource_key]
new_resources.update(resource.required_resource_keys - resource_reqs)
if not len(new_resources):
break
resource_reqs.update(new_resources)
return resource_reqs
def _checked_type_resource_reqs_for_mode(
mode_def: ModeDefinition,
dagster_type_dict: Dict[str, DagsterType],
) -> Set[str]:
"""
Calculate all the resource requirements related to DagsterTypes for this mode and ensure the
mode provides those resources.
"""
resource_reqs = set()
mode_resources = set(mode_def.resource_defs.keys())
for dagster_type in dagster_type_dict.values():
for required_resource in dagster_type.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
if dagster_type.loader:
for required_resource in dagster_type.loader.required_resource_keys():
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"the loader on type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
if dagster_type.materializer:
for required_resource in dagster_type.materializer.required_resource_keys():
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"the materializer on type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
return resource_reqs
def _checked_input_resource_reqs_for_mode(
dependency_structure: DependencyStructure,
node_dict: Dict[str, Node],
mode_def: ModeDefinition,
outer_dependency_structures: Optional[List[DependencyStructure]] = None,
outer_solids: Optional[List[Node]] = None,
) -> Set[str]:
outer_dependency_structures = check.opt_list_param(
outer_dependency_structures, "outer_dependency_structures", DependencyStructure
)
outer_solids = check.opt_list_param(outer_solids, "outer_solids", Node)
resource_reqs = set()
mode_root_input_managers = set(
key
for key, resource_def in mode_def.resource_defs.items()
if isinstance(resource_def, RootInputManagerDefinition)
)
for node in node_dict.values():
if node.is_graph:
graph_def = node.definition.ensure_graph_def()
# check inner solids
resource_reqs.update(
_checked_input_resource_reqs_for_mode(
dependency_structure=graph_def.dependency_structure,
node_dict=graph_def.node_dict,
mode_def=mode_def,
outer_dependency_structures=outer_dependency_structures
+ [dependency_structure],
outer_solids=outer_solids + [node],
)
)
for handle in node.input_handles():
source_output_handles = None
if dependency_structure.has_deps(handle):
# input is connected to outputs from the same dependency structure
source_output_handles = dependency_structure.get_deps_list(handle)
else:
# input is connected to outputs from outer dependency structure, e.g. first solids
# in a composite
curr_node = node
curr_handle = handle
curr_index = len(outer_solids) - 1
# Checks to see if input is mapped to an outer dependency structure
while curr_index >= 0 and curr_node.container_maps_input(curr_handle.input_name):
curr_handle = SolidInputHandle(
solid=outer_solids[curr_index],
input_def=curr_node.container_mapped_input(
curr_handle.input_name
).definition,
)
if outer_dependency_structures[curr_index].has_deps(curr_handle):
source_output_handles = outer_dependency_structures[
curr_index
].get_deps_list(curr_handle)
break
curr_node = outer_solids[curr_index]
curr_index -= 1
if source_output_handles:
# input is connected to source output handles within the graph
for source_output_handle in source_output_handles:
output_manager_key = source_output_handle.output_def.io_manager_key
output_manager_def = mode_def.resource_defs[output_manager_key]
if not isinstance(output_manager_def, IInputManagerDefinition):
raise DagsterInvalidDefinitionError(
f'Input "{handle.input_def.name}" of {node.describe_node()} is '
f'connected to output "{source_output_handle.output_def.name}" '
f"of {source_output_handle.solid.describe_node()}. That output does not "
"have an output "
f"manager that knows how to load inputs, so we don't know how "
f"to load the input. To address this, assign an IOManager to "
f"the upstream output."
)
else:
# input is unconnected
input_def = handle.input_def
if (
not input_def.dagster_type.loader
and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING
and not input_def.root_manager_key
and not input_def.has_default_value
):
raise DagsterInvalidDefinitionError(
"Input '{input_name}' in {described_node} is not connected to "
"the output of a previous node and can not be loaded from configuration, "
"making it impossible to execute. "
"Possible solutions are:\n"
" * add a dagster_type_loader for the type '{dagster_type}'\n"
" * connect '{input_name}' to the output of another node\n".format(
described_node=node.describe_node(),
input_name=input_def.name,
dagster_type=input_def.dagster_type.display_name,
)
)
# If a root manager is provided, it's always used. I.e. it has priority over
# the other ways of loading unsatisfied inputs - dagster type loaders and
# default values.
if input_def.root_manager_key:
resource_reqs.add(input_def.root_manager_key)
if input_def.root_manager_key not in mode_def.resource_defs:
error_msg = _get_missing_resource_error_msg(
resource_type="root input manager",
resource_key=input_def.root_manager_key,
descriptor=f"unsatisfied input '{input_def.name}' of {node.describe_node()}",
mode_def=mode_def,
resource_defs_of_type=mode_root_input_managers,
)
raise DagsterInvalidDefinitionError(error_msg)
return resource_reqs
def _get_missing_resource_error_msg(
resource_type, resource_key, descriptor, mode_def, resource_defs_of_type
):
if mode_def.name == "default":
return (
f"{resource_type} key '{resource_key}' is required by "
f"{descriptor}, but is not provided. Provide a {resource_type} for key '{resource_key}', "
f"or change '{resource_key}' to one of the provided {resource_type} keys: "
f"{sorted(resource_defs_of_type)}."
)
else:
return (
f"{resource_type} key '{resource_key}' is required by "
f"{descriptor}, but is not provided by mode '{mode_def.name}'. "
f"In mode '{mode_def.name}', provide a {resource_type} for key '{resource_key}', "
f"or change '{resource_key}' to one of the provided root input managers keys: {sorted(resource_defs_of_type)}."
)
def _build_all_node_defs(node_defs: List[NodeDefinition]) -> Dict[str, NodeDefinition]:
all_defs: Dict[str, NodeDefinition] = {}
for current_level_node_def in node_defs:
for node_def in current_level_node_def.iterate_node_defs():
if node_def.name in all_defs:
if all_defs[node_def.name] != node_def:
raise DagsterInvalidDefinitionError(
'Detected conflicting node definitions with the same name "{name}"'.format(
name=node_def.name
)
)
else:
all_defs[node_def.name] = node_def
return all_defs
def _create_run_config_schema(
pipeline_def: PipelineDefinition,
mode_definition: ModeDefinition,
required_resources: Set[str],
) -> "RunConfigSchema":
from .run_config import (
RunConfigSchemaCreationData,
construct_config_type_dictionary,
define_run_config_schema_type,
)
from .run_config_schema import RunConfigSchema
# When executing with a subset pipeline, include the missing solids
# from the original pipeline as ignored to allow execution with
# run config that is valid for the original
if isinstance(pipeline_def.graph, SubselectedGraphDefinition):
ignored_solids = pipeline_def.graph.get_top_level_omitted_nodes()
elif pipeline_def.is_subset_pipeline:
if pipeline_def.parent_pipeline_def is None:
check.failed("Unexpected subset pipeline state")
ignored_solids = [
solid
for solid in pipeline_def.parent_pipeline_def.graph.solids
if not pipeline_def.has_solid_named(solid.name)
]
else:
ignored_solids = []
run_config_schema_type = define_run_config_schema_type(
RunConfigSchemaCreationData(
pipeline_name=pipeline_def.name,
solids=pipeline_def.graph.solids,
graph_def=pipeline_def.graph,
dependency_structure=pipeline_def.graph.dependency_structure,
mode_definition=mode_definition,
logger_defs=mode_definition.loggers,
ignored_solids=ignored_solids,
required_resources=required_resources,
is_using_graph_job_op_apis=pipeline_def.is_job,
)
)
if mode_definition.config_mapping:
outer_config_type = mode_definition.config_mapping.config_schema.config_type
else:
outer_config_type = run_config_schema_type
if outer_config_type is None:
check.failed("Unexpected outer_config_type value of None")
config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(
pipeline_def.all_node_defs,
outer_config_type,
)
return RunConfigSchema(
run_config_schema_type=run_config_schema_type,
config_type_dict_by_name=config_type_dict_by_name,
config_type_dict_by_key=config_type_dict_by_key,
config_mapping=mode_definition.config_mapping,
)
| 41.03154
| 123
| 0.639107
|
from functools import update_wrapper
from typing import TYPE_CHECKING, AbstractSet, Any, Dict, FrozenSet, List, Optional, Set, Union
from dagster import check
from dagster.core.definitions.policy import RetryPolicy
from dagster.core.definitions.resource_definition import ResourceDefinition
from dagster.core.definitions.solid_definition import NodeDefinition
from dagster.core.errors import (
DagsterInvalidDefinitionError,
DagsterInvalidSubsetError,
DagsterInvariantViolationError,
)
from dagster.core.storage.output_manager import IOutputManagerDefinition
from dagster.core.storage.root_input_manager import (
IInputManagerDefinition,
RootInputManagerDefinition,
)
from dagster.core.storage.tags import MEMOIZED_RUN_TAG
from dagster.core.types.dagster_type import DagsterType, DagsterTypeKind
from dagster.core.utils import str_format_set
from dagster.utils import frozentags, merge_dicts
from dagster.utils.backcompat import experimental_class_warning
from .dependency import (
DependencyDefinition,
DependencyStructure,
DynamicCollectDependencyDefinition,
IDependencyDefinition,
MultiDependencyDefinition,
Node,
NodeHandle,
NodeInvocation,
SolidInputHandle,
)
from .graph_definition import GraphDefinition, SubselectedGraphDefinition
from .hook_definition import HookDefinition
from .mode import ModeDefinition
from .node_definition import NodeDefinition
from .preset import PresetDefinition
from .utils import validate_tags
from .version_strategy import VersionStrategy
if TYPE_CHECKING:
from dagster.core.definitions.partition import PartitionSetDefinition
from dagster.core.execution.execute_in_process_result import ExecuteInProcessResult
from dagster.core.host_representation import PipelineIndex
from dagster.core.instance import DagsterInstance
from dagster.core.snap import ConfigSchemaSnapshot, PipelineSnapshot
from .run_config_schema import RunConfigSchema
class PipelineDefinition:
def __init__(
self,
solid_defs: Optional[List[NodeDefinition]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
dependencies: Optional[
Dict[Union[str, NodeInvocation], Dict[str, IDependencyDefinition]]
] = None,
mode_defs: Optional[List[ModeDefinition]] = None,
preset_defs: Optional[List[PresetDefinition]] = None,
tags: Optional[Dict[str, Any]] = None,
hook_defs: Optional[AbstractSet[HookDefinition]] = None,
solid_retry_policy: Optional[RetryPolicy] = None,
graph_def=None,
_parent_pipeline_def=None,
version_strategy: Optional[VersionStrategy] = None,
):
if check.opt_inst_param(graph_def, "graph_def", GraphDefinition):
self._graph_def = graph_def
self._name = name or graph_def.name
else:
if name is None:
check.failed("name must be set provided")
self._name = name
if solid_defs is None:
check.failed("solid_defs must be provided")
self._graph_def = GraphDefinition(
name=name,
dependencies=dependencies,
node_defs=solid_defs,
input_mappings=None,
output_mappings=None,
config=None,
description=None,
)
self._description = check.opt_str_param(description, "description")
self._tags = validate_tags(tags)
self._current_level_node_defs = self._graph_def.node_defs
mode_definitions = check.opt_list_param(mode_defs, "mode_defs", of_type=ModeDefinition)
if not mode_definitions:
mode_definitions = [ModeDefinition()]
self._mode_definitions = mode_definitions
seen_modes = set()
for mode_def in mode_definitions:
if mode_def.name in seen_modes:
raise DagsterInvalidDefinitionError(
(
'Two modes seen with the name "{mode_name}" in "{pipeline_name}". '
"Modes must have unique names."
).format(mode_name=mode_def.name, pipeline_name=self.name)
)
seen_modes.add(mode_def.name)
self._hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)
self._solid_retry_policy = check.opt_inst_param(
solid_retry_policy, "solid_retry_policy", RetryPolicy
)
self._preset_defs = check.opt_list_param(preset_defs, "preset_defs", PresetDefinition)
self._preset_dict: Dict[str, PresetDefinition] = {}
for preset in self._preset_defs:
if preset.name in self._preset_dict:
raise DagsterInvalidDefinitionError(
(
'Two PresetDefinitions seen with the name "{name}" in "{pipeline_name}". '
"PresetDefinitions must have unique names."
).format(name=preset.name, pipeline_name=self.name)
)
if preset.mode not in seen_modes:
raise DagsterInvalidDefinitionError(
(
'PresetDefinition "{name}" in "{pipeline_name}" '
'references mode "{mode}" which is not defined.'
).format(name=preset.name, pipeline_name=self.name, mode=preset.mode)
)
self._preset_dict[preset.name] = preset
self._resource_requirements = {
mode_def.name: _checked_resource_reqs_for_mode(
mode_def,
self._current_level_node_defs,
self._graph_def._dagster_type_dict,
self._graph_def._node_dict,
self._hook_defs,
self._graph_def._dependency_structure,
)
for mode_def in self._mode_definitions
}
self._all_node_defs = _build_all_node_defs(self._current_level_node_defs)
self._parent_pipeline_def = check.opt_inst_param(
_parent_pipeline_def, "_parent_pipeline_def", PipelineDefinition
)
self._cached_run_config_schemas: Dict[str, "RunConfigSchema"] = {}
self._cached_external_pipeline = None
self.version_strategy = check.opt_inst_param(
version_strategy, "version_strategy", VersionStrategy
)
if self.version_strategy is not None:
experimental_class_warning("VersionStrategy")
@property
def name(self):
return self._name
@property
def target_type(self):
return "pipeline"
@property
def is_job(self) -> bool:
return False
def describe_target(self):
return f"{self.target_type} '{self.name}'"
@property
def tags(self):
return frozentags(**merge_dicts(self._graph_def.tags, self._tags))
@property
def description(self):
return self._description
@property
def graph(self):
return self._graph_def
@property
def dependency_structure(self):
return self._graph_def.dependency_structure
@property
def dependencies(self):
return self._graph_def.dependencies
def get_run_config_schema(self, mode: Optional[str] = None) -> "RunConfigSchema":
check.str_param(mode, "mode")
mode_def = self.get_mode_definition(mode)
if mode_def.name in self._cached_run_config_schemas:
return self._cached_run_config_schemas[mode_def.name]
self._cached_run_config_schemas[mode_def.name] = _create_run_config_schema(
self,
mode_def,
self._resource_requirements[mode_def.name],
)
return self._cached_run_config_schemas[mode_def.name]
@property
def mode_definitions(self) -> List[ModeDefinition]:
return self._mode_definitions
@property
def preset_defs(self) -> List[PresetDefinition]:
return self._preset_defs
def _get_mode_definition(self, mode: str) -> Optional[ModeDefinition]:
check.str_param(mode, "mode")
for mode_definition in self._mode_definitions:
if mode_definition.name == mode:
return mode_definition
return None
def get_default_mode(self) -> ModeDefinition:
return self._mode_definitions[0]
@property
def is_single_mode(self) -> bool:
return len(self._mode_definitions) == 1
@property
def is_multi_mode(self) -> bool:
return len(self._mode_definitions) > 1
def is_using_memoization(self, run_tags: Dict[str, str]) -> bool:
tags = merge_dicts(self.tags, run_tags)
if tags.get(MEMOIZED_RUN_TAG) == "false":
return False
return (
MEMOIZED_RUN_TAG in tags and tags.get(MEMOIZED_RUN_TAG) == "true"
) or self.version_strategy is not None
def has_mode_definition(self, mode: str) -> bool:
check.str_param(mode, "mode")
return bool(self._get_mode_definition(mode))
def get_default_mode_name(self) -> str:
return self._mode_definitions[0].name
def get_mode_definition(self, mode: Optional[str] = None) -> ModeDefinition:
check.opt_str_param(mode, "mode")
if mode is None:
check.invariant(self.is_single_mode)
return self.get_default_mode()
mode_def = self._get_mode_definition(mode)
if mode_def is None:
check.failed(
"Could not find mode {mode} in pipeline {name}".format(mode=mode, name=self.name),
)
return mode_def
@property
def available_modes(self) -> List[str]:
return [mode_def.name for mode_def in self._mode_definitions]
def get_required_resource_defs_for_mode(self, mode: str) -> Dict[str, ResourceDefinition]:
return {
resource_key: resource
for resource_key, resource in self.get_mode_definition(mode).resource_defs.items()
if resource_key in self._resource_requirements[mode]
}
@property
def all_node_defs(self) -> List[NodeDefinition]:
return list(self._all_node_defs.values())
@property
def top_level_solid_defs(self) -> List[NodeDefinition]:
return self._current_level_node_defs
def solid_def_named(self, name: str) -> NodeDefinition:
check.str_param(name, "name")
check.invariant(name in self._all_node_defs, "{} not found".format(name))
return self._all_node_defs[name]
def has_solid_def(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._all_node_defs
def get_solid(self, handle):
return self._graph_def.get_solid(handle)
def has_solid_named(self, name):
return self._graph_def.has_solid_named(name)
def solid_named(self, name):
return self._graph_def.solid_named(name)
@property
def solids(self):
return self._graph_def.solids
@property
def solids_in_topological_order(self):
return self._graph_def.solids_in_topological_order
def all_dagster_types(self):
return self._graph_def.all_dagster_types()
def has_dagster_type(self, name):
return self._graph_def.has_dagster_type(name)
def dagster_type_named(self, name):
return self._graph_def.dagster_type_named(name)
def get_pipeline_subset_def(
self, solids_to_execute: Optional[AbstractSet[str]]
) -> "PipelineDefinition":
return (
self if solids_to_execute is None else _get_pipeline_subset_def(self, solids_to_execute)
)
def has_preset(self, name: str) -> bool:
check.str_param(name, "name")
return name in self._preset_dict
def get_preset(self, name: str) -> PresetDefinition:
check.str_param(name, "name")
if name not in self._preset_dict:
raise DagsterInvariantViolationError(
(
'Could not find preset for "{name}". Available presets '
'for pipeline "{pipeline_name}" are {preset_names}.'
).format(
name=name,
preset_names=list(self._preset_dict.keys()),
pipeline_name=self.name,
)
)
return self._preset_dict[name]
def get_pipeline_snapshot(self) -> "PipelineSnapshot":
return self.get_pipeline_index().pipeline_snapshot
def get_pipeline_snapshot_id(self) -> str:
return self.get_pipeline_index().pipeline_snapshot_id
def get_pipeline_index(self) -> "PipelineIndex":
from dagster.core.host_representation import PipelineIndex
from dagster.core.snap import PipelineSnapshot
return PipelineIndex(
PipelineSnapshot.from_pipeline_def(self), self.get_parent_pipeline_snapshot()
)
def get_config_schema_snapshot(self) -> "ConfigSchemaSnapshot":
return self.get_pipeline_snapshot().config_schema_snapshot
@property
def is_subset_pipeline(self) -> bool:
return False
@property
def parent_pipeline_def(self) -> Optional["PipelineDefinition"]:
return None
def get_parent_pipeline_snapshot(self) -> Optional["PipelineSnapshot"]:
return None
@property
def solids_to_execute(self) -> Optional[FrozenSet[str]]:
return None
@property
def hook_defs(self) -> AbstractSet[HookDefinition]:
return self._hook_defs
def get_all_hooks_for_handle(self, handle: NodeHandle) -> FrozenSet[HookDefinition]:
check.inst_param(handle, "handle", NodeHandle)
hook_defs: AbstractSet[HookDefinition] = set()
current = handle
lineage = []
while current:
lineage.append(current.name)
current = current.parent
name = lineage.pop()
solid = self._graph_def.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
while lineage:
name = lineage.pop()
solid = solid.definition.solid_named(name)
hook_defs = hook_defs.union(solid.hook_defs)
hook_defs = hook_defs.union(self.hook_defs)
return frozenset(hook_defs)
def get_retry_policy_for_handle(self, handle: NodeHandle) -> Optional[RetryPolicy]:
solid = self.get_solid(handle)
if solid.retry_policy:
return solid.retry_policy
elif solid.definition.retry_policy:
return solid.definition.retry_policy
else:
return self._solid_retry_policy
def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> "PipelineDefinition":
hook_defs = check.set_param(hook_defs, "hook_defs", of_type=HookDefinition)
pipeline_def = PipelineDefinition(
name=self.name,
graph_def=self._graph_def,
mode_defs=self.mode_definitions,
preset_defs=self.preset_defs,
tags=self.tags,
hook_defs=hook_defs | self.hook_defs,
description=self._description,
solid_retry_policy=self._solid_retry_policy,
_parent_pipeline_def=self._parent_pipeline_def,
)
update_wrapper(pipeline_def, self, updated=())
return pipeline_def
def __call__(self, *args, **kwargs):
if self.is_job:
msg = (
f"Attempted to call job '{self.name}' directly. Jobs should be invoked by "
"using an execution API function (e.g. `job.execute_in_process`)."
)
else:
msg = (
f"Attempted to call pipeline '{self.name}' directly. Pipelines should be invoked by "
"using an execution API function (e.g. `execute_pipeline`)."
)
raise DagsterInvariantViolationError(msg)
class PipelineSubsetDefinition(PipelineDefinition):
@property
def solids_to_execute(self):
return frozenset(self._graph_def.node_names())
@property
def solid_selection(self) -> List[str]:
# so in the short-term, to make the call sites cleaner, we will convert the solids to execute
# to a list
return self._graph_def.node_names()
@property
def parent_pipeline_def(self) -> PipelineDefinition:
return self._parent_pipeline_def
def get_parent_pipeline_snapshot(self) -> Optional["PipelineSnapshot"]:
return self._parent_pipeline_def.get_pipeline_snapshot()
@property
def is_subset_pipeline(self) -> bool:
return True
def get_pipeline_subset_def(
self, solids_to_execute: Optional[AbstractSet[str]]
) -> "PipelineSubsetDefinition":
raise DagsterInvariantViolationError("Pipeline subsets may not be subset again.")
def _dep_key_of(solid: Node) -> NodeInvocation:
return NodeInvocation(
name=solid.definition.name,
alias=solid.name,
tags=solid.tags,
hook_defs=solid.hook_defs,
retry_policy=solid.retry_policy,
)
def _get_pipeline_subset_def(
pipeline_def: PipelineDefinition,
solids_to_execute: AbstractSet[str],
) -> "PipelineSubsetDefinition":
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.set_param(solids_to_execute, "solids_to_execute", of_type=str)
graph = pipeline_def.graph
for solid_name in solids_to_execute:
if not graph.has_solid_named(solid_name):
raise DagsterInvalidSubsetError(
"{target_type} {pipeline_name} has no {node_type} named {name}.".format(
target_type=pipeline_def.target_type,
pipeline_name=pipeline_def.name,
name=solid_name,
node_type="ops" if pipeline_def.is_job else "solids",
),
)
# go in topo order to ensure deps dict is ordered
solids = list(
filter(lambda solid: solid.name in solids_to_execute, graph.solids_in_topological_order)
)
deps: Dict[
Union[str, NodeInvocation],
Dict[str, IDependencyDefinition],
] = {_dep_key_of(solid): {} for solid in solids}
for solid in solids:
for input_handle in solid.input_handles():
if graph.dependency_structure.has_direct_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_direct_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
elif graph.dependency_structure.has_dynamic_fan_in_dep(input_handle):
output_handle = graph.dependency_structure.get_dynamic_fan_in_dep(input_handle)
if output_handle.solid.name in solids_to_execute:
deps[_dep_key_of(solid)][
input_handle.input_def.name
] = DynamicCollectDependencyDefinition(
solid_name=output_handle.solid.name,
output_name=output_handle.output_def.name,
)
elif graph.dependency_structure.has_fan_in_deps(input_handle):
output_handles = graph.dependency_structure.get_fan_in_deps(input_handle)
deps[_dep_key_of(solid)][input_handle.input_def.name] = MultiDependencyDefinition(
[
DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
for output_handle in output_handles
if output_handle.solid.name in solids_to_execute
]
)
# else input is unconnected
try:
sub_pipeline_def = PipelineSubsetDefinition(
name=pipeline_def.name, # should we change the name for subsetted pipeline?
solid_defs=list({solid.definition for solid in solids}),
mode_defs=pipeline_def.mode_definitions,
dependencies=deps,
_parent_pipeline_def=pipeline_def,
tags=pipeline_def.tags,
hook_defs=pipeline_def.hook_defs,
)
return sub_pipeline_def
except DagsterInvalidDefinitionError as exc:
# This handles the case when you construct a subset such that an unsatisfied
# input cannot be loaded from config. Instead of throwing a DagsterInvalidDefinitionError,
# we re-raise a DagsterInvalidSubsetError.
raise DagsterInvalidSubsetError(
f"The attempted subset {str_format_set(solids_to_execute)} for {pipeline_def.target_type} "
f"{pipeline_def.name} results in an invalid {pipeline_def.target_type}"
) from exc
def _checked_resource_reqs_for_mode(
mode_def: ModeDefinition,
node_defs: List[NodeDefinition],
dagster_type_dict: Dict[str, DagsterType],
solid_dict: Dict[str, Node],
pipeline_hook_defs: AbstractSet[HookDefinition],
dependency_structure: DependencyStructure,
) -> Set[str]:
resource_reqs: Set[str] = set()
mode_output_managers = set(
key
for key, resource_def in mode_def.resource_defs.items()
if isinstance(resource_def, IOutputManagerDefinition)
)
mode_resources = set(mode_def.resource_defs.keys())
for node_def in node_defs:
for solid_def in node_def.iterate_solid_defs():
for required_resource in solid_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=solid_def.describe_node(),
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for output_def in solid_def.output_defs:
resource_reqs.add(output_def.io_manager_key)
if output_def.io_manager_key not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="IO manager",
resource_key=output_def.io_manager_key,
descriptor=f"output '{output_def.name}' of {solid_def.describe_node()}",
mode_def=mode_def,
resource_defs_of_type=mode_output_managers,
)
raise DagsterInvalidDefinitionError(error_msg)
resource_reqs.update(
_checked_type_resource_reqs_for_mode(
mode_def,
dagster_type_dict,
)
)
# Validate unsatisfied inputs can be materialized from config
resource_reqs.update(
_checked_input_resource_reqs_for_mode(dependency_structure, solid_dict, mode_def)
)
for solid in solid_dict.values():
for hook_def in solid.hook_defs:
for required_resource in hook_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"hook '{hook_def.name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for hook_def in pipeline_hook_defs:
for required_resource in hook_def.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"hook '{hook_def.name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
for resource_key, resource in mode_def.resource_defs.items():
for required_resource in resource.required_resource_keys:
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"resource at key '{resource_key}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
# Finally, recursively add any resources that the set of required resources require
while True:
new_resources: Set[str] = set()
for resource_key in resource_reqs:
resource = mode_def.resource_defs[resource_key]
new_resources.update(resource.required_resource_keys - resource_reqs)
if not len(new_resources):
break
resource_reqs.update(new_resources)
return resource_reqs
def _checked_type_resource_reqs_for_mode(
mode_def: ModeDefinition,
dagster_type_dict: Dict[str, DagsterType],
) -> Set[str]:
resource_reqs = set()
mode_resources = set(mode_def.resource_defs.keys())
for dagster_type in dagster_type_dict.values():
for required_resource in dagster_type.required_resource_keys:
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
if dagster_type.loader:
for required_resource in dagster_type.loader.required_resource_keys():
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"the loader on type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
if dagster_type.materializer:
for required_resource in dagster_type.materializer.required_resource_keys():
resource_reqs.add(required_resource)
if required_resource not in mode_resources:
error_msg = _get_missing_resource_error_msg(
resource_type="resource",
resource_key=required_resource,
descriptor=f"the materializer on type '{dagster_type.display_name}'",
mode_def=mode_def,
resource_defs_of_type=mode_resources,
)
raise DagsterInvalidDefinitionError(error_msg)
return resource_reqs
def _checked_input_resource_reqs_for_mode(
dependency_structure: DependencyStructure,
node_dict: Dict[str, Node],
mode_def: ModeDefinition,
outer_dependency_structures: Optional[List[DependencyStructure]] = None,
outer_solids: Optional[List[Node]] = None,
) -> Set[str]:
outer_dependency_structures = check.opt_list_param(
outer_dependency_structures, "outer_dependency_structures", DependencyStructure
)
outer_solids = check.opt_list_param(outer_solids, "outer_solids", Node)
resource_reqs = set()
mode_root_input_managers = set(
key
for key, resource_def in mode_def.resource_defs.items()
if isinstance(resource_def, RootInputManagerDefinition)
)
for node in node_dict.values():
if node.is_graph:
graph_def = node.definition.ensure_graph_def()
# check inner solids
resource_reqs.update(
_checked_input_resource_reqs_for_mode(
dependency_structure=graph_def.dependency_structure,
node_dict=graph_def.node_dict,
mode_def=mode_def,
outer_dependency_structures=outer_dependency_structures
+ [dependency_structure],
outer_solids=outer_solids + [node],
)
)
for handle in node.input_handles():
source_output_handles = None
if dependency_structure.has_deps(handle):
# input is connected to outputs from the same dependency structure
source_output_handles = dependency_structure.get_deps_list(handle)
else:
# input is connected to outputs from outer dependency structure, e.g. first solids
# in a composite
curr_node = node
curr_handle = handle
curr_index = len(outer_solids) - 1
# Checks to see if input is mapped to an outer dependency structure
while curr_index >= 0 and curr_node.container_maps_input(curr_handle.input_name):
curr_handle = SolidInputHandle(
solid=outer_solids[curr_index],
input_def=curr_node.container_mapped_input(
curr_handle.input_name
).definition,
)
if outer_dependency_structures[curr_index].has_deps(curr_handle):
source_output_handles = outer_dependency_structures[
curr_index
].get_deps_list(curr_handle)
break
curr_node = outer_solids[curr_index]
curr_index -= 1
if source_output_handles:
# input is connected to source output handles within the graph
for source_output_handle in source_output_handles:
output_manager_key = source_output_handle.output_def.io_manager_key
output_manager_def = mode_def.resource_defs[output_manager_key]
if not isinstance(output_manager_def, IInputManagerDefinition):
raise DagsterInvalidDefinitionError(
f'Input "{handle.input_def.name}" of {node.describe_node()} is '
f'connected to output "{source_output_handle.output_def.name}" '
f"of {source_output_handle.solid.describe_node()}. That output does not "
"have an output "
f"manager that knows how to load inputs, so we don't know how "
f"to load the input. To address this, assign an IOManager to "
f"the upstream output."
)
else:
input_def = handle.input_def
if (
not input_def.dagster_type.loader
and not input_def.dagster_type.kind == DagsterTypeKind.NOTHING
and not input_def.root_manager_key
and not input_def.has_default_value
):
raise DagsterInvalidDefinitionError(
"Input '{input_name}' in {described_node} is not connected to "
"the output of a previous node and can not be loaded from configuration, "
"making it impossible to execute. "
"Possible solutions are:\n"
" * add a dagster_type_loader for the type '{dagster_type}'\n"
" * connect '{input_name}' to the output of another node\n".format(
described_node=node.describe_node(),
input_name=input_def.name,
dagster_type=input_def.dagster_type.display_name,
)
)
                # If a root manager is provided, it's always used. I.e. it has priority over
                # the other ways of loading unsatisfied inputs - dagster type loaders and
                # default values.
if input_def.root_manager_key:
resource_reqs.add(input_def.root_manager_key)
if input_def.root_manager_key not in mode_def.resource_defs:
error_msg = _get_missing_resource_error_msg(
resource_type="root input manager",
resource_key=input_def.root_manager_key,
descriptor=f"unsatisfied input '{input_def.name}' of {node.describe_node()}",
mode_def=mode_def,
resource_defs_of_type=mode_root_input_managers,
)
raise DagsterInvalidDefinitionError(error_msg)
return resource_reqs
def _get_missing_resource_error_msg(
resource_type, resource_key, descriptor, mode_def, resource_defs_of_type
):
if mode_def.name == "default":
return (
f"{resource_type} key '{resource_key}' is required by "
f"{descriptor}, but is not provided. Provide a {resource_type} for key '{resource_key}', "
f"or change '{resource_key}' to one of the provided {resource_type} keys: "
f"{sorted(resource_defs_of_type)}."
)
else:
        return (
            f"{resource_type} key '{resource_key}' is required by "
            f"{descriptor}, but is not provided by mode '{mode_def.name}'. "
            f"In mode '{mode_def.name}', provide a {resource_type} for key '{resource_key}', "
            f"or change '{resource_key}' to one of the provided {resource_type} keys: "
            f"{sorted(resource_defs_of_type)}."
        )
def _build_all_node_defs(node_defs: List[NodeDefinition]) -> Dict[str, NodeDefinition]:
all_defs: Dict[str, NodeDefinition] = {}
for current_level_node_def in node_defs:
for node_def in current_level_node_def.iterate_node_defs():
if node_def.name in all_defs:
if all_defs[node_def.name] != node_def:
raise DagsterInvalidDefinitionError(
'Detected conflicting node definitions with the same name "{name}"'.format(
name=node_def.name
)
)
else:
all_defs[node_def.name] = node_def
return all_defs
def _create_run_config_schema(
pipeline_def: PipelineDefinition,
mode_definition: ModeDefinition,
required_resources: Set[str],
) -> "RunConfigSchema":
from .run_config import (
RunConfigSchemaCreationData,
construct_config_type_dictionary,
define_run_config_schema_type,
)
from .run_config_schema import RunConfigSchema
# When executing with a subset pipeline, include the missing solids
# from the original pipeline as ignored to allow execution with
# run config that is valid for the original
if isinstance(pipeline_def.graph, SubselectedGraphDefinition):
ignored_solids = pipeline_def.graph.get_top_level_omitted_nodes()
elif pipeline_def.is_subset_pipeline:
if pipeline_def.parent_pipeline_def is None:
check.failed("Unexpected subset pipeline state")
ignored_solids = [
solid
for solid in pipeline_def.parent_pipeline_def.graph.solids
if not pipeline_def.has_solid_named(solid.name)
]
else:
ignored_solids = []
run_config_schema_type = define_run_config_schema_type(
RunConfigSchemaCreationData(
pipeline_name=pipeline_def.name,
solids=pipeline_def.graph.solids,
graph_def=pipeline_def.graph,
dependency_structure=pipeline_def.graph.dependency_structure,
mode_definition=mode_definition,
logger_defs=mode_definition.loggers,
ignored_solids=ignored_solids,
required_resources=required_resources,
is_using_graph_job_op_apis=pipeline_def.is_job,
)
)
if mode_definition.config_mapping:
outer_config_type = mode_definition.config_mapping.config_schema.config_type
else:
outer_config_type = run_config_schema_type
if outer_config_type is None:
check.failed("Unexpected outer_config_type value of None")
config_type_dict_by_name, config_type_dict_by_key = construct_config_type_dictionary(
pipeline_def.all_node_defs,
outer_config_type,
)
return RunConfigSchema(
run_config_schema_type=run_config_schema_type,
config_type_dict_by_name=config_type_dict_by_name,
config_type_dict_by_key=config_type_dict_by_key,
config_mapping=mode_definition.config_mapping,
)
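# Illustrative-only sketch (not part of the module above): how the resource-requirement
# checks above and _get_missing_resource_error_msg surface to users of the legacy
# @solid / @pipeline / ModeDefinition APIs. The resource and solid names are hypothetical.
from dagster import (
    DagsterInvalidDefinitionError,
    ModeDefinition,
    pipeline,
    resource,
    solid,
)
@resource
def my_resource(_init_context):
    return "some-value"
@solid(required_resource_keys={"my_resource"})
def needs_resource(context):
    return context.resources.my_resource
# Providing the resource in the mode satisfies the requirement check.
@pipeline(mode_defs=[ModeDefinition(resource_defs={"my_resource": my_resource})])
def ok_pipeline():
    needs_resource()
# Omitting it raises DagsterInvalidDefinitionError at pipeline-definition time,
# with the message built by _get_missing_resource_error_msg.
try:
    @pipeline(mode_defs=[ModeDefinition()])
    def broken_pipeline():
        needs_resource()
except DagsterInvalidDefinitionError as exc:
    print(exc)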
| true
| true
|
1c451f5fc59e92b0a8345779653aacf61ab487e0
| 4,150
|
py
|
Python
|
tcconfig/_common.py
|
Mnkras/tcconfig
|
2173ffc4fa4e23fa0a2b89c1185e9e44350d5aad
|
[
"MIT"
] | 1
|
2020-07-23T07:07:47.000Z
|
2020-07-23T07:07:47.000Z
|
tcconfig/_common.py
|
RinaisSuper/tcconfig
|
d45efa64a589c6f0fb75059414bf629683b920dc
|
[
"MIT"
] | null | null | null |
tcconfig/_common.py
|
RinaisSuper/tcconfig
|
d45efa64a589c6f0fb75059414bf629683b920dc
|
[
"MIT"
] | null | null | null |
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import contextlib
import errno
import os
import re
import sys
import msgfy
import subprocrunner as spr
import typepy
from humanreadable import ParameterError
from path import Path
from simplesqlite import SimpleSQLite
from ._const import IPV6_OPTION_ERROR_MSG_FORMAT, TcCommandOutput
from ._logger import logger, set_log_level
_bin_path_cache = {}
@contextlib.contextmanager
def logging_context(name):
logger.debug("|---- {:s}: {:s} -----".format("start", name))
try:
yield
finally:
logger.debug("----- {:s}: {:s} ----|".format("complete", name))
def find_bin_path(command):
def _to_regular_bin_path(file_path):
path_obj = Path(file_path)
if path_obj.islink():
return path_obj.readlinkabs()
return file_path
if command in _bin_path_cache:
return _bin_path_cache.get(command)
bin_path = spr.Which(command, follow_symlinks=True)
if bin_path.is_exist():
_bin_path_cache[command] = bin_path.abspath()
return _bin_path_cache[command]
for sbin_path in ("/sbin/{:s}".format(command), "/usr/sbin/{:s}".format(command)):
if os.path.isfile(sbin_path):
_bin_path_cache[command] = _to_regular_bin_path(sbin_path)
return _bin_path_cache[command]
# return the command as it is when binary file not found
return command
def check_command_installation(command):
if find_bin_path(command):
return
logger.error("command not found: {}".format(command))
sys.exit(errno.ENOENT)
def initialize_cli(options):
set_log_level(options.log_level)
spr.SubprocessRunner.is_save_history = True
if options.is_output_stacktrace:
spr.SubprocessRunner.is_output_stacktrace = options.is_output_stacktrace
SimpleSQLite.global_debug_query = options.debug_query
def is_execute_tc_command(tc_command_output):
return tc_command_output == TcCommandOutput.NOT_SET
def validate_within_min_max(param_name, value, min_value, max_value, unit):
from dataproperty import DataProperty
if value is None:
return
if unit is None:
unit = ""
else:
unit = "[{:s}]".format(unit)
if value > max_value:
raise ParameterError(
"'{:s}' is too high".format(param_name),
expected="<={:s}{:s}".format(DataProperty(max_value).to_str(), unit),
value="{:s}{:s}".format(DataProperty(value).to_str(), unit),
)
if value < min_value:
raise ParameterError(
"'{:s}' is too low".format(param_name),
expected=">={:s}{:s}".format(DataProperty(min_value).to_str(), unit),
value="{:s}{:s}".format(DataProperty(value).to_str(), unit),
)
def normalize_tc_value(tc_obj):
import ipaddress
try:
tc_obj.sanitize()
except ipaddress.AddressValueError as e:
logger.error(IPV6_OPTION_ERROR_MSG_FORMAT.format(e))
sys.exit(errno.EINVAL)
except ValueError as e:
logger.error(msgfy.to_error_message(e))
sys.exit(errno.EINVAL)
def run_command_helper(command, ignore_error_msg_regexp, notice_msg, exception_class=None):
proc = spr.SubprocessRunner(command, error_log_level="QUIET")
proc.run()
if proc.returncode == 0:
return 0
if ignore_error_msg_regexp:
match = ignore_error_msg_regexp.search(proc.stderr)
if match is None:
error_msg = "\n".join(
[
"command execution failed",
" command={}".format(command),
" stderr={}".format(proc.stderr),
]
)
if re.search("RTNETLINK answers: Operation not permitted", proc.stderr):
logger.error(error_msg)
sys.exit(proc.returncode)
logger.error(error_msg)
return proc.returncode
if typepy.is_not_null_string(notice_msg):
logger.warning(notice_msg)
if exception_class is not None:
raise exception_class(command)
return proc.returncode
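# Illustrative-only usage sketch of the helpers above (hypothetical caller code,
# not part of tcconfig's public API). Running it for real needs the `tc` binary
# and, for the qdisc command, root privileges; "eth0" is a placeholder.
if __name__ == "__main__":
    check_command_installation("tc")  # exits with ENOENT when `tc` is missing
    print(find_bin_path("tc"))  # e.g. /sbin/tc (result is cached per command)
    try:
        validate_within_min_max("packet loss", 150, 0, 100, "%")  # 150 > 100 -> error
    except ParameterError as e:
        print(e)
    # Run a command while treating a known benign error message as a notice.
    exit_code = run_command_helper(
        "tc qdisc del dev eth0 root",
        ignore_error_msg_regexp=re.compile("RTNETLINK answers: No such file or directory"),
        notice_msg="no existing qdisc to delete",
    )
    print(exit_code)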
| 26.948052
| 91
| 0.650602
|
import contextlib
import errno
import os
import re
import sys
import msgfy
import subprocrunner as spr
import typepy
from humanreadable import ParameterError
from path import Path
from simplesqlite import SimpleSQLite
from ._const import IPV6_OPTION_ERROR_MSG_FORMAT, TcCommandOutput
from ._logger import logger, set_log_level
_bin_path_cache = {}
@contextlib.contextmanager
def logging_context(name):
logger.debug("|---- {:s}: {:s} -----".format("start", name))
try:
yield
finally:
logger.debug("----- {:s}: {:s} ----|".format("complete", name))
def find_bin_path(command):
def _to_regular_bin_path(file_path):
path_obj = Path(file_path)
if path_obj.islink():
return path_obj.readlinkabs()
return file_path
if command in _bin_path_cache:
return _bin_path_cache.get(command)
bin_path = spr.Which(command, follow_symlinks=True)
if bin_path.is_exist():
_bin_path_cache[command] = bin_path.abspath()
return _bin_path_cache[command]
for sbin_path in ("/sbin/{:s}".format(command), "/usr/sbin/{:s}".format(command)):
if os.path.isfile(sbin_path):
_bin_path_cache[command] = _to_regular_bin_path(sbin_path)
return _bin_path_cache[command]
return command
def check_command_installation(command):
if find_bin_path(command):
return
logger.error("command not found: {}".format(command))
sys.exit(errno.ENOENT)
def initialize_cli(options):
set_log_level(options.log_level)
spr.SubprocessRunner.is_save_history = True
if options.is_output_stacktrace:
spr.SubprocessRunner.is_output_stacktrace = options.is_output_stacktrace
SimpleSQLite.global_debug_query = options.debug_query
def is_execute_tc_command(tc_command_output):
return tc_command_output == TcCommandOutput.NOT_SET
def validate_within_min_max(param_name, value, min_value, max_value, unit):
from dataproperty import DataProperty
if value is None:
return
if unit is None:
unit = ""
else:
unit = "[{:s}]".format(unit)
if value > max_value:
raise ParameterError(
"'{:s}' is too high".format(param_name),
expected="<={:s}{:s}".format(DataProperty(max_value).to_str(), unit),
value="{:s}{:s}".format(DataProperty(value).to_str(), unit),
)
if value < min_value:
raise ParameterError(
"'{:s}' is too low".format(param_name),
expected=">={:s}{:s}".format(DataProperty(min_value).to_str(), unit),
value="{:s}{:s}".format(DataProperty(value).to_str(), unit),
)
def normalize_tc_value(tc_obj):
import ipaddress
try:
tc_obj.sanitize()
except ipaddress.AddressValueError as e:
logger.error(IPV6_OPTION_ERROR_MSG_FORMAT.format(e))
sys.exit(errno.EINVAL)
except ValueError as e:
logger.error(msgfy.to_error_message(e))
sys.exit(errno.EINVAL)
def run_command_helper(command, ignore_error_msg_regexp, notice_msg, exception_class=None):
proc = spr.SubprocessRunner(command, error_log_level="QUIET")
proc.run()
if proc.returncode == 0:
return 0
if ignore_error_msg_regexp:
match = ignore_error_msg_regexp.search(proc.stderr)
if match is None:
error_msg = "\n".join(
[
"command execution failed",
" command={}".format(command),
" stderr={}".format(proc.stderr),
]
)
if re.search("RTNETLINK answers: Operation not permitted", proc.stderr):
logger.error(error_msg)
sys.exit(proc.returncode)
logger.error(error_msg)
return proc.returncode
if typepy.is_not_null_string(notice_msg):
logger.warning(notice_msg)
if exception_class is not None:
raise exception_class(command)
return proc.returncode
| true
| true
|
1c451fd9da10cf900c3dbc0db1934d2f21680917
| 11,336
|
py
|
Python
|
sdk/lusid_drive/models/lusid_validation_problem_details.py
|
finbourne/drive-sdk-python-preview
|
24d218e09c45efa378ba2e5b9da00a3b84258fa1
|
[
"MIT"
] | null | null | null |
sdk/lusid_drive/models/lusid_validation_problem_details.py
|
finbourne/drive-sdk-python-preview
|
24d218e09c45efa378ba2e5b9da00a3b84258fa1
|
[
"MIT"
] | null | null | null |
sdk/lusid_drive/models/lusid_validation_problem_details.py
|
finbourne/drive-sdk-python-preview
|
24d218e09c45efa378ba2e5b9da00a3b84258fa1
|
[
"MIT"
] | 1
|
2021-03-01T02:27:02.000Z
|
2021-03-01T02:27:02.000Z
|
# coding: utf-8
"""
FINBOURNE Drive API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.1.274
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_drive.configuration import Configuration
class LusidValidationProblemDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'name': 'str',
'error_details': 'list[dict(str, str)]',
'code': 'int',
'errors': 'dict(str, list[str])',
'type': 'str',
'title': 'str',
'status': 'int',
'detail': 'str',
'instance': 'str',
'extensions': 'dict(str, object)'
}
attribute_map = {
'name': 'name',
'error_details': 'errorDetails',
'code': 'code',
'errors': 'errors',
'type': 'type',
'title': 'title',
'status': 'status',
'detail': 'detail',
'instance': 'instance',
'extensions': 'extensions'
}
required_map = {
'name': 'required',
'error_details': 'optional',
'code': 'required',
'errors': 'optional',
'type': 'optional',
'title': 'optional',
'status': 'optional',
'detail': 'optional',
'instance': 'optional',
'extensions': 'optional'
}
def __init__(self, name=None, error_details=None, code=None, errors=None, type=None, title=None, status=None, detail=None, instance=None, extensions=None, local_vars_configuration=None): # noqa: E501
"""LusidValidationProblemDetails - a model defined in OpenAPI"
:param name: (required)
:type name: str
:param error_details:
:type error_details: list[dict(str, str)]
:param code: (required)
:type code: int
:param errors:
:type errors: dict(str, list[str])
:param type:
:type type: str
:param title:
:type title: str
:param status:
:type status: int
:param detail:
:type detail: str
:param instance:
:type instance: str
:param extensions:
:type extensions: dict(str, object)
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._error_details = None
self._code = None
self._errors = None
self._type = None
self._title = None
self._status = None
self._detail = None
self._instance = None
self._extensions = None
self.discriminator = None
self.name = name
self.error_details = error_details
self.code = code
self.errors = errors
self.type = type
self.title = title
self.status = status
self.detail = detail
self.instance = instance
self.extensions = extensions
@property
def name(self):
"""Gets the name of this LusidValidationProblemDetails. # noqa: E501
:return: The name of this LusidValidationProblemDetails. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this LusidValidationProblemDetails.
:param name: The name of this LusidValidationProblemDetails. # noqa: E501
:type name: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def error_details(self):
"""Gets the error_details of this LusidValidationProblemDetails. # noqa: E501
:return: The error_details of this LusidValidationProblemDetails. # noqa: E501
:rtype: list[dict(str, str)]
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this LusidValidationProblemDetails.
:param error_details: The error_details of this LusidValidationProblemDetails. # noqa: E501
:type error_details: list[dict(str, str)]
"""
self._error_details = error_details
@property
def code(self):
"""Gets the code of this LusidValidationProblemDetails. # noqa: E501
:return: The code of this LusidValidationProblemDetails. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this LusidValidationProblemDetails.
:param code: The code of this LusidValidationProblemDetails. # noqa: E501
:type code: int
"""
if self.local_vars_configuration.client_side_validation and code is None: # noqa: E501
raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501
self._code = code
@property
def errors(self):
"""Gets the errors of this LusidValidationProblemDetails. # noqa: E501
:return: The errors of this LusidValidationProblemDetails. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this LusidValidationProblemDetails.
:param errors: The errors of this LusidValidationProblemDetails. # noqa: E501
:type errors: dict(str, list[str])
"""
self._errors = errors
@property
def type(self):
"""Gets the type of this LusidValidationProblemDetails. # noqa: E501
:return: The type of this LusidValidationProblemDetails. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this LusidValidationProblemDetails.
:param type: The type of this LusidValidationProblemDetails. # noqa: E501
:type type: str
"""
self._type = type
@property
def title(self):
"""Gets the title of this LusidValidationProblemDetails. # noqa: E501
:return: The title of this LusidValidationProblemDetails. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this LusidValidationProblemDetails.
:param title: The title of this LusidValidationProblemDetails. # noqa: E501
:type title: str
"""
self._title = title
@property
def status(self):
"""Gets the status of this LusidValidationProblemDetails. # noqa: E501
:return: The status of this LusidValidationProblemDetails. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this LusidValidationProblemDetails.
:param status: The status of this LusidValidationProblemDetails. # noqa: E501
:type status: int
"""
self._status = status
@property
def detail(self):
"""Gets the detail of this LusidValidationProblemDetails. # noqa: E501
:return: The detail of this LusidValidationProblemDetails. # noqa: E501
:rtype: str
"""
return self._detail
@detail.setter
def detail(self, detail):
"""Sets the detail of this LusidValidationProblemDetails.
:param detail: The detail of this LusidValidationProblemDetails. # noqa: E501
:type detail: str
"""
self._detail = detail
@property
def instance(self):
"""Gets the instance of this LusidValidationProblemDetails. # noqa: E501
:return: The instance of this LusidValidationProblemDetails. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this LusidValidationProblemDetails.
:param instance: The instance of this LusidValidationProblemDetails. # noqa: E501
:type instance: str
"""
self._instance = instance
@property
def extensions(self):
"""Gets the extensions of this LusidValidationProblemDetails. # noqa: E501
:return: The extensions of this LusidValidationProblemDetails. # noqa: E501
:rtype: dict(str, object)
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""Sets the extensions of this LusidValidationProblemDetails.
:param extensions: The extensions of this LusidValidationProblemDetails. # noqa: E501
:type extensions: dict(str, object)
"""
self._extensions = extensions
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LusidValidationProblemDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, LusidValidationProblemDetails):
return True
return self.to_dict() != other.to_dict()
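# Illustrative-only usage of the generated model (all field values are made up).
if __name__ == "__main__":
    details = LusidValidationProblemDetails(
        name="ValidationError",
        code=400,
        errors={"name": ["must not be empty"]},
        title="One or more validation errors occurred.",
        status=400,
    )
    print(details.to_dict())  # python attribute names, e.g. error_details
    print(details.to_dict(serialize=True))  # JSON attribute names, e.g. errorDetails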
| 28.411028
| 204
| 0.593772
|
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re
import six
from lusid_drive.configuration import Configuration
class LusidValidationProblemDetails(object):
openapi_types = {
'name': 'str',
'error_details': 'list[dict(str, str)]',
'code': 'int',
'errors': 'dict(str, list[str])',
'type': 'str',
'title': 'str',
'status': 'int',
'detail': 'str',
'instance': 'str',
'extensions': 'dict(str, object)'
}
attribute_map = {
'name': 'name',
'error_details': 'errorDetails',
'code': 'code',
'errors': 'errors',
'type': 'type',
'title': 'title',
'status': 'status',
'detail': 'detail',
'instance': 'instance',
'extensions': 'extensions'
}
required_map = {
'name': 'required',
'error_details': 'optional',
'code': 'required',
'errors': 'optional',
'type': 'optional',
'title': 'optional',
'status': 'optional',
'detail': 'optional',
'instance': 'optional',
'extensions': 'optional'
}
def __init__(self, name=None, error_details=None, code=None, errors=None, type=None, title=None, status=None, detail=None, instance=None, extensions=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._error_details = None
self._code = None
self._errors = None
self._type = None
self._title = None
self._status = None
self._detail = None
self._instance = None
self._extensions = None
self.discriminator = None
self.name = name
self.error_details = error_details
self.code = code
self.errors = errors
self.type = type
self.title = title
self.status = status
self.detail = detail
self.instance = instance
self.extensions = extensions
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if self.local_vars_configuration.client_side_validation and name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def error_details(self):
return self._error_details
@error_details.setter
def error_details(self, error_details):
self._error_details = error_details
@property
def code(self):
return self._code
@code.setter
def code(self, code):
if self.local_vars_configuration.client_side_validation and code is None:
raise ValueError("Invalid value for `code`, must not be `None`")
self._code = code
@property
def errors(self):
return self._errors
@errors.setter
def errors(self, errors):
self._errors = errors
@property
def type(self):
return self._type
@type.setter
def type(self, type):
self._type = type
@property
def title(self):
return self._title
@title.setter
def title(self, title):
self._title = title
@property
def status(self):
return self._status
@status.setter
def status(self, status):
self._status = status
@property
def detail(self):
return self._detail
@detail.setter
def detail(self, detail):
self._detail = detail
@property
def instance(self):
return self._instance
@instance.setter
def instance(self, instance):
self._instance = instance
@property
def extensions(self):
return self._extensions
@extensions.setter
def extensions(self, extensions):
self._extensions = extensions
def to_dict(self, serialize=False):
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, LusidValidationProblemDetails):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, LusidValidationProblemDetails):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
1c45209729e1d21c4b3a6f31d130e2310e6cba86
| 392
|
py
|
Python
|
tests/test_entities/test_lead_source.py
|
stas12312/aioalfacrm
|
1501634fa5ef4591936be2e6147827565e4a0b36
|
[
"MIT"
] | null | null | null |
tests/test_entities/test_lead_source.py
|
stas12312/aioalfacrm
|
1501634fa5ef4591936be2e6147827565e4a0b36
|
[
"MIT"
] | 49
|
2021-11-11T16:00:40.000Z
|
2021-11-24T15:37:34.000Z
|
tests/test_entities/test_lead_source.py
|
stas12312/aioalfacrm
|
1501634fa5ef4591936be2e6147827565e4a0b36
|
[
"MIT"
] | null | null | null |
from aioalfacrm.entities import LeadSource
def test_init_lead_source():
lead_source = LeadSource(
id=1,
code='123',
name='name',
is_enabled=True,
weight=1,
)
assert lead_source.id == 1
assert lead_source.code == '123'
assert lead_source.name == 'name'
assert lead_source.is_enabled is True
assert lead_source.weight == 1
| 21.777778
| 42
| 0.632653
|
from aioalfacrm.entities import LeadSource
def test_init_lead_source():
lead_source = LeadSource(
id=1,
code='123',
name='name',
is_enabled=True,
weight=1,
)
assert lead_source.id == 1
assert lead_source.code == '123'
assert lead_source.name == 'name'
assert lead_source.is_enabled is True
assert lead_source.weight == 1
| true
| true
|
1c4521fc9177fa6a313e6d050feee3d74b820b75
| 877
|
py
|
Python
|
Projetos-Python/Aula 4/Driver.py
|
gfjallais/Projetos-Python
|
17e67dd020246c244dcd0c4891eefbc7f3fc7ed2
|
[
"MIT"
] | null | null | null |
Projetos-Python/Aula 4/Driver.py
|
gfjallais/Projetos-Python
|
17e67dd020246c244dcd0c4891eefbc7f3fc7ed2
|
[
"MIT"
] | null | null | null |
Projetos-Python/Aula 4/Driver.py
|
gfjallais/Projetos-Python
|
17e67dd020246c244dcd0c4891eefbc7f3fc7ed2
|
[
"MIT"
] | null | null | null |
import sys
import VPL_mSort
def test(case, args):
if case == 0:
print(VPL_mSort.ll2py(VPL_mSort.py2ll(args)))
elif case == 1:
print(VPL_mSort.size(VPL_mSort.py2ll(args)))
elif case == 2:
print(VPL_mSort.sorted(VPL_mSort.py2ll(args)))
elif case == 3:
print(VPL_mSort.sorted(VPL_mSort.py2ll(args)))
elif case == 4:
print(VPL_mSort.sum(VPL_mSort.py2ll(args)))
elif case == 5:
print(VPL_mSort.ll2py(VPL_mSort.mSort(VPL_mSort.py2ll(args))))
elif case == 6:
print(VPL_mSort.max(VPL_mSort.py2ll(args)))
elif case == 7:
print(VPL_mSort.get(VPL_mSort.py2ll(args[1:]), args[0]))
else:
print("Unknown case: ", case)
for line in sys.stdin:
inps = [int(x) for x in list(line.split(" "))]
case = inps[0]
args = inps[1:]
test(case, args)
| 30.241379
| 71
| 0.59065
|
import sys
import VPL_mSort
def test(case, args):
if case == 0:
print(VPL_mSort.ll2py(VPL_mSort.py2ll(args)))
elif case == 1:
print(VPL_mSort.size(VPL_mSort.py2ll(args)))
elif case == 2:
print(VPL_mSort.sorted(VPL_mSort.py2ll(args)))
elif case == 3:
print(VPL_mSort.sorted(VPL_mSort.py2ll(args)))
elif case == 4:
print(VPL_mSort.sum(VPL_mSort.py2ll(args)))
elif case == 5:
print(VPL_mSort.ll2py(VPL_mSort.mSort(VPL_mSort.py2ll(args))))
elif case == 6:
print(VPL_mSort.max(VPL_mSort.py2ll(args)))
elif case == 7:
print(VPL_mSort.get(VPL_mSort.py2ll(args[1:]), args[0]))
else:
print("Unknown case: ", case)
for line in sys.stdin:
inps = [int(x) for x in list(line.split(" "))]
case = inps[0]
args = inps[1:]
test(case, args)
| true
| true
|
1c4522290fc38b60b333c6de255cbf07d0f9cc5a
| 3,371
|
py
|
Python
|
code/auto_download/auto-download-usc.py
|
altymis/covid19-forecast-hub-europe
|
1a413439d0a4800356cfed8129ea943d14e37f8e
|
[
"MIT"
] | 31
|
2020-05-20T15:38:57.000Z
|
2022-02-13T01:31:33.000Z
|
code/auto_download/auto-download-usc.py
|
altymis/covid19-forecast-hub-europe
|
1a413439d0a4800356cfed8129ea943d14e37f8e
|
[
"MIT"
] | 777
|
2020-05-18T14:55:53.000Z
|
2022-03-29T20:43:17.000Z
|
code/auto_download/auto-download-usc.py
|
altymis/covid19-forecast-hub-europe
|
1a413439d0a4800356cfed8129ea943d14e37f8e
|
[
"MIT"
] | 65
|
2020-05-20T07:42:36.000Z
|
2021-11-20T21:25:23.000Z
|
# Auto-download forecasts of USC (SCC-USC team)
# Jakob Ketterer, November 2020
import re
import os
import urllib.request
from dateutil.parser import parse
from datetime import datetime, timedelta
def get_filenames(date, root, format_str):
'''get available csv files for dir specified by root link and date'''
# open directory url
dirpath = root + date
url = urllib.request.urlopen(dirpath)
str = url.read().decode('utf-8')
# get filenames from html
pattern = re.compile('/' + date + '/.*.csv"')
finds = pattern.findall(str)
filenames = [f.rstrip('"').replace("/" + date + "/","") for f in finds]
# print(filenames)
return filenames
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
:param string: str, string to check for date
:param fuzzy: bool, ignore unknown tokens in string if True
"""
try:
parse(string)
return True
except ValueError:
return False
if __name__ == "__main__":
# most current date in raw
format_str = "%Y-%m-%d"
data_raw_dir = "./data-raw/USC"
files = os.listdir(data_raw_dir)
    # sort so the last entry is the most recent date (os.listdir order is arbitrary)
    dates = sorted(filter(is_date, files))
    latest_date = datetime.strptime(dates[-1], format_str)
# determine date up to which files should be downloaded
today = datetime.today()
weekday = today.weekday()
    if weekday == 0:  # weekday() returns an int; 0 is Monday
        download_up_to_date = today
    else: # if not Monday, only download until Monday
        download_up_to_date = today - timedelta(weekday)
    assert download_up_to_date > latest_date, "Required forecasts already exist in the repo!"
# generate lists of dates to download
date_list = [latest_date + timedelta(days=x) for x in range(1, (download_up_to_date-latest_date).days+1)]
if date_list:
print("Trying to download forecasts for the following dates: \n", ["".join(str(d.date())) for d in date_list])
else:
print("Nothing to update. Repo either contains latest forecasts (do nothing) or empty date folders (delete folders). ")
crawl_root = "https://github.com/scc-usc/ReCOVER-COVID-19/tree/master/results/historical_forecasts/"
download_root = "https://raw.githubusercontent.com/scc-usc/ReCOVER-COVID-19/master/results/historical_forecasts/"
for date in date_list:
# get available csv files for date dir
date_str = date.strftime(format_str)
filenames = get_filenames(date_str, crawl_root, format_str)
urls = [download_root + date_str + "/" + name for name in filenames]
date_dir = os.path.join(data_raw_dir, date_str)
dir_names = [os.path.join(date_dir, name) for name in filenames]
# create new folder if not already exists
if not os.path.exists(date_dir):
os.makedirs(date_dir)
print("Created directory:", date_dir)
# download and save files
for url, dir_name in zip(urls, dir_names):
urllib.request.urlretrieve(url, dir_name)
print("Downloaded and saved forecast to", dir_name)
# catch URL Errors:
# try:
# urllib.request.urlretrieve(url, dir_name)
# print("Downloaded and saved forecast to", dir_name)
# except:
# print("Download failed for", url)
| 37.455556
| 127
| 0.652625
|
import re
import os
import urllib.request
from dateutil.parser import parse
from datetime import datetime, timedelta
def get_filenames(date, root, format_str):
dirpath = root + date
url = urllib.request.urlopen(dirpath)
str = url.read().decode('utf-8')
pattern = re.compile('/' + date + '/.*.csv"')
finds = pattern.findall(str)
filenames = [f.rstrip('"').replace("/" + date + "/","") for f in finds]
return filenames
def is_date(string, fuzzy=False):
try:
parse(string)
return True
except ValueError:
return False
if __name__ == "__main__":
format_str = "%Y-%m-%d"
data_raw_dir = "./data-raw/USC"
files = os.listdir(data_raw_dir)
    dates = sorted(filter(is_date, files))
    latest_date = datetime.strptime(dates[-1], format_str)
today = datetime.today()
weekday = today.weekday()
    if weekday == 0:
        download_up_to_date = today
    else:
        download_up_to_date = today - timedelta(weekday)
    assert download_up_to_date > latest_date, "Required forecasts already exist in the repo!"
date_list = [latest_date + timedelta(days=x) for x in range(1, (download_up_to_date-latest_date).days+1)]
if date_list:
print("Trying to download forecasts for the following dates: \n", ["".join(str(d.date())) for d in date_list])
else:
print("Nothing to update. Repo either contains latest forecasts (do nothing) or empty date folders (delete folders). ")
crawl_root = "https://github.com/scc-usc/ReCOVER-COVID-19/tree/master/results/historical_forecasts/"
download_root = "https://raw.githubusercontent.com/scc-usc/ReCOVER-COVID-19/master/results/historical_forecasts/"
for date in date_list:
date_str = date.strftime(format_str)
filenames = get_filenames(date_str, crawl_root, format_str)
urls = [download_root + date_str + "/" + name for name in filenames]
date_dir = os.path.join(data_raw_dir, date_str)
dir_names = [os.path.join(date_dir, name) for name in filenames]
if not os.path.exists(date_dir):
os.makedirs(date_dir)
print("Created directory:", date_dir)
for url, dir_name in zip(urls, dir_names):
urllib.request.urlretrieve(url, dir_name)
print("Downloaded and saved forecast to", dir_name)
| true
| true
|
1c45243b3347721b169c75fea7b987a1e3a1f73d
| 79
|
py
|
Python
|
Chapter5_module_package_program/Section5.3_module_and_import/weatherman.py
|
skatsuta/introducing-python
|
945fc84ba58aaa2602e454890c8c6f26e403660e
|
[
"MIT"
] | null | null | null |
Chapter5_module_package_program/Section5.3_module_and_import/weatherman.py
|
skatsuta/introducing-python
|
945fc84ba58aaa2602e454890c8c6f26e403660e
|
[
"MIT"
] | null | null | null |
Chapter5_module_package_program/Section5.3_module_and_import/weatherman.py
|
skatsuta/introducing-python
|
945fc84ba58aaa2602e454890c8c6f26e403660e
|
[
"MIT"
] | null | null | null |
import report
desc = report.get_description()
print("Today's weather:", desc)
| 15.8
| 31
| 0.746835
|
import report
desc = report.get_description()
print("Today's weather:", desc)
| true
| true
|
1c4525585f1c8640b6f463f98969dc51236fc7ed
| 2,259
|
py
|
Python
|
esp32/tools/lora/actility/actility.py
|
nevercast/pycom-micropython-sigfox
|
d1c5ea900b94fb62890742b54fa0b249b93c9f96
|
[
"MIT"
] | 1
|
2019-03-28T10:37:35.000Z
|
2019-03-28T10:37:35.000Z
|
esp32/tools/lora/actility/actility.py
|
nevercast/pycom-micropython-sigfox
|
d1c5ea900b94fb62890742b54fa0b249b93c9f96
|
[
"MIT"
] | null | null | null |
esp32/tools/lora/actility/actility.py
|
nevercast/pycom-micropython-sigfox
|
d1c5ea900b94fb62890742b54fa0b249b93c9f96
|
[
"MIT"
] | 1
|
2019-09-22T01:28:52.000Z
|
2019-09-22T01:28:52.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
from network import LoRa
from machine import ADC
import time
import binascii
import socket
import struct
DEV_EUI = '1A 2B 3C 4D 01 02 03'
APP_EUI = 'AD A4 DA E3 AC 12 67 6B'
APP_KEY = '11 B0 28 2A 18 9B 75 B0 B4 D2 D8 C7 FA 38 54 8B'
DEV_ADDR = '00 00 00 0A'
NWK_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
APP_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
class Actility:
def __init__(self, activation=LoRa.OTAA, adr=False):
self.lora = LoRa(mode=LoRa.LORAWAN, adr=adr)
self.activation = activation
self._join()
self.s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
self.s.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)
self.s.setsockopt(socket.SOL_LORA, socket.SO_CONFIRMED, False)
self.s.setblocking(False)
self.adc = ADC()
self.adc_c = self.adc.channel(pin='P13')
def _join(self):
if self.activation == LoRa.OTAA:
dev_eui = binascii.unhexlify(DEV_EUI.replace(' ',''))
app_eui = binascii.unhexlify(APP_EUI.replace(' ',''))
app_key = binascii.unhexlify(APP_KEY.replace(' ',''))
self.lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)
else:
dev_addr = struct.unpack(">l", binascii.unhexlify(DEV_ADDR.replace(' ','')))[0]
nwk_swkey = binascii.unhexlify(NWK_SWKEY.replace(' ',''))
app_swkey = binascii.unhexlify(APP_SWKEY.replace(' ',''))
self.lora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey))
# wait until the module has joined the network
while not self.lora.has_joined():
time.sleep(5)
print("Joining...")
print("Network joined!")
def run(self):
while True:
time.sleep(10)
tx_data = '%d' % self.adc_c()
print('Sending', tx_data)
self.s.send(tx_data)
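# Illustrative-only usage sketch: on a LoRa-capable Pycom board, with real keys
# filled into the constants above, the class is driven like this.
if __name__ == "__main__":
    node = Actility(activation=LoRa.OTAA, adr=False)  # OTAA join via DEV_EUI/APP_EUI/APP_KEY
    # node = Actility(activation=LoRa.ABP)  # ABP join via DEV_ADDR/NWK_SWKEY/APP_SWKEY
    node.run()  # blocks, sending the P13 ADC reading every 10 seconds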
| 33.220588
| 93
| 0.633023
|
from network import LoRa
from machine import ADC
import time
import binascii
import socket
import struct
DEV_EUI = '1A 2B 3C 4D 01 02 03'
APP_EUI = 'AD A4 DA E3 AC 12 67 6B'
APP_KEY = '11 B0 28 2A 18 9B 75 B0 B4 D2 D8 C7 FA 38 54 8B'
DEV_ADDR = '00 00 00 0A'
NWK_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
APP_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
class Actility:
def __init__(self, activation=LoRa.OTAA, adr=False):
self.lora = LoRa(mode=LoRa.LORAWAN, adr=adr)
self.activation = activation
self._join()
self.s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
self.s.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)
self.s.setsockopt(socket.SOL_LORA, socket.SO_CONFIRMED, False)
self.s.setblocking(False)
self.adc = ADC()
self.adc_c = self.adc.channel(pin='P13')
def _join(self):
if self.activation == LoRa.OTAA:
dev_eui = binascii.unhexlify(DEV_EUI.replace(' ',''))
app_eui = binascii.unhexlify(APP_EUI.replace(' ',''))
app_key = binascii.unhexlify(APP_KEY.replace(' ',''))
self.lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)
else:
dev_addr = struct.unpack(">l", binascii.unhexlify(DEV_ADDR.replace(' ','')))[0]
nwk_swkey = binascii.unhexlify(NWK_SWKEY.replace(' ',''))
app_swkey = binascii.unhexlify(APP_SWKEY.replace(' ',''))
self.lora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey))
while not self.lora.has_joined():
time.sleep(5)
print("Joining...")
print("Network joined!")
def run(self):
while True:
time.sleep(10)
tx_data = '%d' % self.adc_c()
print('Sending', tx_data)
self.s.send(tx_data)
| true
| true
|
1c4526eff1ed90273050f64a4dd975e16e39aea8
| 7,835
|
py
|
Python
|
airflow/providers/apache/kylin/operators/kylin_cube.py
|
DavisWang-LR/airflow
|
60b10ef9248ec59fecaa7628c07c76950005a35d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/providers/apache/kylin/operators/kylin_cube.py
|
DavisWang-LR/airflow
|
60b10ef9248ec59fecaa7628c07c76950005a35d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
airflow/providers/apache/kylin/operators/kylin_cube.py
|
DavisWang-LR/airflow
|
60b10ef9248ec59fecaa7628c07c76950005a35d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from datetime import datetime
from typing import Optional
from kylinpy import kylinpy
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class KylinCubeOperator(BaseOperator):
"""
    This operator is used to submit requests for Kylin build/refresh/merge operations,
    and can track job status, so users can build Kylin jobs more easily.
    For more detailed information, see
`Apache Kylin <http://kylin.apache.org/>`_
:param kylin_conn_id: The connection id as configured in Airflow administration.
:type kylin_conn_id: str
:param project: kylin project name, this param will overwrite the project in kylin_conn_id:
:type project: str
:param cube: kylin cube name
:type cube: str
    :param dsn: dsn url of the kylin connection, which will overwrite kylin_conn_id
        (for example: kylin://ADMIN:KYLIN@sandbox/learn_kylin?timeout=60&is_debug=1)
:type dsn: str
:param command: (kylin command include 'build', 'merge', 'refresh', 'delete',
'build_streaming', 'merge_streaming', 'refresh_streaming', 'disable', 'enable',
'purge', 'clone', 'drop'.
build - use /kylin/api/cubes/{cubeName}/build rest api,and buildType is ‘BUILD’,
and you should give start_time and end_time
refresh - use build rest api,and buildType is ‘REFRESH’
merge - use build rest api,and buildType is ‘MERGE’
build_streaming - use /kylin/api/cubes/{cubeName}/build2 rest api,and buildType is ‘BUILD’
and you should give offset_start and offset_end
refresh_streaming - use build2 rest api,and buildType is ‘REFRESH’
merge_streaming - use build2 rest api,and buildType is ‘MERGE’
delete - delete segment, and you should give segment_name value
disable - disable cube
enable - enable cube
purge - purge cube
clone - clone cube,new cube name is {cube_name}_clone
drop - drop cube)
:type command: str
:param start_time: build segment start time
:type start_time: Optional[str]
:param end_time: build segment end time
:type end_time: Optional[str]
:param offset_start: streaming build segment start time
:type offset_start: Optional[str]
:param offset_end: streaming build segment end time
:type offset_end: Optional[str]
:param segment_name: segment name
:type segment_name: str
:param is_track_job: (whether to track job status. if value is True,will track job until
job status is in("FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL",
"STOPPED") or timeout)
:type is_track_job: bool
:param interval: track job status,default value is 60s
:type interval: int
:param timeout: timeout value,default value is 1 day,60 * 60 * 24 s
:type timeout: int
:param eager_error_status: (jobs error status,if job status in this list ,this task will be error.
default value is tuple(["ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"]))
:type eager_error_status: tuple
"""
template_fields = (
'project',
'cube',
'dsn',
'command',
'start_time',
'end_time',
'segment_name',
'offset_start',
'offset_end',
)
ui_color = '#E79C46'
build_command = {
'fullbuild',
'build',
'merge',
'refresh',
'build_streaming',
'merge_streaming',
'refresh_streaming',
}
jobs_end_status = {"FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"}
# pylint: disable=too-many-arguments,inconsistent-return-statements
@apply_defaults
def __init__(
self,
*,
kylin_conn_id: Optional[str] = 'kylin_default',
project: Optional[str] = None,
cube: Optional[str] = None,
dsn: Optional[str] = None,
command: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
offset_start: Optional[str] = None,
offset_end: Optional[str] = None,
segment_name: Optional[str] = None,
is_track_job: bool = False,
interval: int = 60,
timeout: int = 60 * 60 * 24,
eager_error_status=("ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"),
**kwargs,
):
super().__init__(**kwargs)
self.kylin_conn_id = kylin_conn_id
self.project = project
self.cube = cube
self.dsn = dsn
self.command = command
self.start_time = start_time
self.end_time = end_time
self.segment_name = segment_name
self.offset_start = offset_start
self.offset_end = offset_end
self.is_track_job = is_track_job
self.interval = interval
self.timeout = timeout
self.eager_error_status = eager_error_status
self.jobs_error_status = [stat.upper() for stat in eager_error_status]
def execute(self, context):
_hook = KylinHook(kylin_conn_id=self.kylin_conn_id, project=self.project, dsn=self.dsn)
_support_invoke_command = kylinpy.CubeSource.support_invoke_command
if self.command.lower() not in _support_invoke_command:
raise AirflowException(
'Kylin:Command {} can not match kylin command list {}'.format(
self.command, _support_invoke_command
)
)
kylinpy_params = {
'start': datetime.fromtimestamp(int(self.start_time) / 1000) if self.start_time else None,
'end': datetime.fromtimestamp(int(self.end_time) / 1000) if self.end_time else None,
'name': self.segment_name,
'offset_start': int(self.offset_start) if self.offset_start else None,
'offset_end': int(self.offset_end) if self.offset_end else None,
}
rsp_data = _hook.cube_run(self.cube, self.command.lower(), **kylinpy_params)
if self.is_track_job and self.command.lower() in self.build_command:
started_at = timezone.utcnow()
job_id = rsp_data.get("uuid")
if job_id is None:
raise AirflowException("kylin job id is None")
self.log.info("kylin job id: %s", job_id)
job_status = None
while job_status not in self.jobs_end_status:
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
raise AirflowException('kylin job {} timeout'.format(job_id))
time.sleep(self.interval)
job_status = _hook.get_job_status(job_id)
self.log.info('Kylin job status is %s ', job_status)
if job_status in self.jobs_error_status:
raise AirflowException('Kylin job {} status {} is error '.format(job_id, job_status))
if self.do_xcom_push:
return rsp_data
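# Illustrative-only sketch (hypothetical DAG, not part of the provider API): wiring
# the operator into a DAG. Project/cube names, connection id and the epoch-millisecond
# timestamps are placeholders.
if __name__ == "__main__":
    from airflow import DAG
    with DAG(
        dag_id="example_kylin_build",
        start_date=datetime(2021, 1, 1),
        schedule_interval=None,
        catchup=False,
    ) as dag:
        build_segment = KylinCubeOperator(
            task_id="kylin_build",
            kylin_conn_id="kylin_default",
            project="learn_kylin",
            cube="kylin_sales_cube",
            command="build",
            start_time="1325347200000",  # per the docstring, epoch milliseconds
            end_time="1325433600000",
            is_track_job=True,
        )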
| 41.020942
| 105
| 0.657817
|
import time
from datetime import datetime
from typing import Optional
from kylinpy import kylinpy
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class KylinCubeOperator(BaseOperator):
template_fields = (
'project',
'cube',
'dsn',
'command',
'start_time',
'end_time',
'segment_name',
'offset_start',
'offset_end',
)
ui_color = '#E79C46'
build_command = {
'fullbuild',
'build',
'merge',
'refresh',
'build_streaming',
'merge_streaming',
'refresh_streaming',
}
jobs_end_status = {"FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"}
@apply_defaults
def __init__(
self,
*,
kylin_conn_id: Optional[str] = 'kylin_default',
project: Optional[str] = None,
cube: Optional[str] = None,
dsn: Optional[str] = None,
command: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
offset_start: Optional[str] = None,
offset_end: Optional[str] = None,
segment_name: Optional[str] = None,
is_track_job: bool = False,
interval: int = 60,
timeout: int = 60 * 60 * 24,
eager_error_status=("ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"),
**kwargs,
):
super().__init__(**kwargs)
self.kylin_conn_id = kylin_conn_id
self.project = project
self.cube = cube
self.dsn = dsn
self.command = command
self.start_time = start_time
self.end_time = end_time
self.segment_name = segment_name
self.offset_start = offset_start
self.offset_end = offset_end
self.is_track_job = is_track_job
self.interval = interval
self.timeout = timeout
self.eager_error_status = eager_error_status
self.jobs_error_status = [stat.upper() for stat in eager_error_status]
def execute(self, context):
_hook = KylinHook(kylin_conn_id=self.kylin_conn_id, project=self.project, dsn=self.dsn)
_support_invoke_command = kylinpy.CubeSource.support_invoke_command
if self.command.lower() not in _support_invoke_command:
raise AirflowException(
'Kylin:Command {} can not match kylin command list {}'.format(
self.command, _support_invoke_command
)
)
kylinpy_params = {
'start': datetime.fromtimestamp(int(self.start_time) / 1000) if self.start_time else None,
'end': datetime.fromtimestamp(int(self.end_time) / 1000) if self.end_time else None,
'name': self.segment_name,
'offset_start': int(self.offset_start) if self.offset_start else None,
'offset_end': int(self.offset_end) if self.offset_end else None,
}
rsp_data = _hook.cube_run(self.cube, self.command.lower(), **kylinpy_params)
if self.is_track_job and self.command.lower() in self.build_command:
started_at = timezone.utcnow()
job_id = rsp_data.get("uuid")
if job_id is None:
raise AirflowException("kylin job id is None")
self.log.info("kylin job id: %s", job_id)
job_status = None
while job_status not in self.jobs_end_status:
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
raise AirflowException('kylin job {} timeout'.format(job_id))
time.sleep(self.interval)
job_status = _hook.get_job_status(job_id)
self.log.info('Kylin job status is %s ', job_status)
if job_status in self.jobs_error_status:
raise AirflowException('Kylin job {} status {} is error '.format(job_id, job_status))
if self.do_xcom_push:
return rsp_data
| true
| true
|
1c4527dedfe7c3af42d455407bac0356cec37b01
| 937
|
py
|
Python
|
tests/test_scraper.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
tests/test_scraper.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
tests/test_scraper.py
|
yasen-m/dosage
|
81fe088621ad335cac2a53fcbc7b9b37f49ddce2
|
[
"MIT"
] | null | null | null |
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2013-2014 Bastian Kleineidam
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
"""Test scraper module functions."""
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
| 34.703704
| 72
| 0.692636
|
from unittest import TestCase
from dosagelib import scraper
class ScraperTester(TestCase):
def test_get_scraperclasses(self):
for scraperclass in scraper.get_scraperclasses():
scraperobj = scraperclass()
scraperobj = scraperclass(indexes=["bla"])
self.assertTrue(scraperobj.url,
"missing url in %s" % scraperobj.getName())
def test_find_scraperclasses_single(self):
result = scraper.find_scraperclasses("CalvinAndHobbes")
self.assertEqual(len(result), 1)
def test_find_scraperclasses_multi(self):
result = scraper.find_scraperclasses("a", multiple_allowed=True)
self.assertTrue(len(result) > 1)
def test_find_scraperclasses_error(self):
self.assertRaises(ValueError, scraper.find_scraperclasses, "")
| true
| true
|
1c4527ebc8a4e4ee7a6fe10a1481392fa1695e4a
| 438
|
py
|
Python
|
plotly/validators/contour/_ncontours.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/contour/_ncontours.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | null | null | null |
plotly/validators/contour/_ncontours.py
|
faezs/plotly.py
|
6009b5b9c746e5d2a2849ad255a4eb234b551ed7
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class NcontoursValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name='ncontours', parent_name='contour', **kwargs
):
super(NcontoursValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
min=1,
role='style',
**kwargs
)
| 25.764706
| 72
| 0.614155
|
import _plotly_utils.basevalidators
class NcontoursValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name='ncontours', parent_name='contour', **kwargs
):
super(NcontoursValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
min=1,
role='style',
**kwargs
)
| true
| true
|
1c452813948fb86477b8078254ef466e67e018db
| 29,372
|
py
|
Python
|
notebook/home/.jupyter/jupyter_notebook_config.py
|
cj-lin/docker-hadoop-workbench
|
d2a74f28c4fd5cdcf38c080efae89edcfcf4d0b9
|
[
"MIT"
] | null | null | null |
notebook/home/.jupyter/jupyter_notebook_config.py
|
cj-lin/docker-hadoop-workbench
|
d2a74f28c4fd5cdcf38c080efae89edcfcf4d0b9
|
[
"MIT"
] | null | null | null |
notebook/home/.jupyter/jupyter_notebook_config.py
|
cj-lin/docker-hadoop-workbench
|
d2a74f28c4fd5cdcf38c080efae89edcfcf4d0b9
|
[
"MIT"
] | null | null | null |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the user the
# opportunity to enter a new password at the same time, which will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
#c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
# get_secure_cookie docs for details.
#c.NotebookApp.get_secure_cookie_kwargs = {}
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '0.0.0.0'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Sets the maximum allowed size of the client request body, specified in the
# Content-Length request header field. If the size in a request exceeds the
# configured value, a malformed HTTP message is returned to the client.
#
# Note: max_body_size is applied even in streaming mode.
#c.NotebookApp.max_body_size = 536870912
## Gets or sets the maximum amount of memory, in bytes, that is allocated for
# use by the buffer manager.
#c.NotebookApp.max_buffer_size = 536870912
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = '/home/user/devel'
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
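## Example (illustrative sketch only, not part of the generated defaults): the
# hash produced by notebook.auth.passwd() can be pasted here. The passphrase
# below is a hypothetical placeholder.
#
#   from notebook.auth import passwd
#   c.NotebookApp.password = passwd('replace-with-a-real-passphrase')
#
# In practice, prefer generating the 'type:salt:hashed-password' string once in
# a shell and pasting the literal result, so the passphrase itself is not stored
# in this file.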
## Forces users to use a password for the Notebook server. This is useful in a
# multi-user environment, for instance when everybody on the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
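## Example (illustrative sketch only): combining the automatic shutdown above
# with idle-kernel culling, as described in the comment. Both values below are
# hypothetical and would normally be tuned per deployment.
#
#   c.MappingKernelManager.cull_idle_timeout = 1800
#   c.NotebookApp.shutdown_no_activity_timeout = 3600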
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
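## Example (illustrative sketch only): a pre-save hook that strips code-cell
# outputs before saving, following the keyword-argument signature described
# above. The function name is arbitrary; uncomment and assign it to
# c.ContentsManager.pre_save_hook to enable it.
#
#   def scrub_output_pre_save(model, **kwargs):
#       """Remove outputs and execution counts from code cells before saving."""
#       if model['type'] != 'notebook':
#           return
#       for cell in model['content'].get('cells', []):
#           if cell.get('cell_type') == 'code':
#               cell['outputs'] = []
#               cell['execution_count'] = None
#
#   c.ContentsManager.pre_save_hook = scrub_output_pre_save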
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
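## Example (illustrative sketch only): a post-save hook that also exports the
# saved notebook as a .py script via nbconvert, as suggested in the comment
# above. Assumes nbconvert is installed; the function name is arbitrary.
# Uncomment and assign it to c.FileContentsManager.post_save_hook to enable it.
#
#   import os
#   from nbconvert.exporters.script import ScriptExporter
#
#   def export_script_post_save(model, os_path, contents_manager, **kwargs):
#       """Write a sibling .py file next to every saved notebook."""
#       if model['type'] != 'notebook':
#           return
#       script, _ = ScriptExporter().from_filename(os_path)
#       base, _ = os.path.splitext(os_path)
#       with open(base + '.py', 'w', encoding='utf-8') as f:
#           f.write(script)
#
#   c.FileContentsManager.post_save_hook = export_script_post_save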
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| 38.344648
| 103
| 0.703663
|
enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Extra keyword arguments to pass to `get_secure_cookie`. See tornado's
s or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
on.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
| true
| true
|
1c452a490eeb077cc003533ae2228ef6439afa07
| 150
|
py
|
Python
|
cra_helper/context_processors.py
|
squidsoup/django-cra-helper
|
ba50c643c181a18b80ee9bbdbea74b58abd6daad
|
[
"MIT"
] | 54
|
2017-04-03T20:20:16.000Z
|
2022-01-29T21:12:05.000Z
|
cra_helper/context_processors.py
|
squidsoup/django-cra-helper
|
ba50c643c181a18b80ee9bbdbea74b58abd6daad
|
[
"MIT"
] | 23
|
2018-07-19T13:19:35.000Z
|
2021-09-22T19:25:39.000Z
|
cra_helper/context_processors.py
|
squidsoup/django-cra-helper
|
ba50c643c181a18b80ee9bbdbea74b58abd6daad
|
[
"MIT"
] | 9
|
2019-03-21T20:24:14.000Z
|
2022-01-29T21:12:16.000Z
|
from cra_helper import STATIC_ASSET_MANIFEST
def static(request):
if STATIC_ASSET_MANIFEST:
return STATIC_ASSET_MANIFEST
return {}
| 16.666667
| 44
| 0.753333
|
from cra_helper import STATIC_ASSET_MANIFEST
def static(request):
if STATIC_ASSET_MANIFEST:
return STATIC_ASSET_MANIFEST
return {}
| true
| true
|
1c452b463aa824b02cb38ecbe8f981d73b33f2d7
| 169
|
py
|
Python
|
apps/account/urls.py
|
8area8/p8_pure_beurre
|
9e930f52a5f2c4c6c25a0a52b247f7b61fc7ffe8
|
[
"MIT"
] | null | null | null |
apps/account/urls.py
|
8area8/p8_pure_beurre
|
9e930f52a5f2c4c6c25a0a52b247f7b61fc7ffe8
|
[
"MIT"
] | 3
|
2020-06-05T19:09:18.000Z
|
2022-02-10T13:20:38.000Z
|
apps/account/urls.py
|
8area8/p8_pure_beurre
|
9e930f52a5f2c4c6c25a0a52b247f7b61fc7ffe8
|
[
"MIT"
] | null | null | null |
"""account urls."""
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.account, name='account'),
]
| 14.083333
| 44
| 0.680473
|
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.account, name='account'),
]
| true
| true
|
1c452b77744be37b8ba91f4297cc5bee8a543b0b
| 6,704
|
py
|
Python
|
build/driver/depth_camera/image_transport_plugins/compressed_depth_image_transport/catkin_generated/pkg.installspace.context.pc.py
|
lty1994/atuolabor
|
42b8c52eac93a2e48fbd64275c7dd426a988000c
|
[
"Apache-2.0"
] | null | null | null |
build/driver/depth_camera/image_transport_plugins/compressed_depth_image_transport/catkin_generated/pkg.installspace.context.pc.py
|
lty1994/atuolabor
|
42b8c52eac93a2e48fbd64275c7dd426a988000c
|
[
"Apache-2.0"
] | null | null | null |
build/driver/depth_camera/image_transport_plugins/compressed_depth_image_transport/catkin_generated/pkg.installspace.context.pc.py
|
lty1994/atuolabor
|
42b8c52eac93a2e48fbd64275c7dd426a988000c
|
[
"Apache-2.0"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lty/catkin_ws/install/include;/opt/ros/kinetic/include/opencv-3.3.1-dev;/opt/ros/kinetic/include/opencv-3.3.1-dev/opencv".split(';') if "/home/lty/catkin_ws/install/include;/opt/ros/kinetic/include/opencv-3.3.1-dev;/opt/ros/kinetic/include/opencv-3.3.1-dev/opencv" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;dynamic_reconfigure;image_transport".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcompressed_depth_image_transport;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_calib3d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_core3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dnn3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_features2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_flann3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_highgui3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgcodecs3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ml3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_objdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_photo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_shape3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stitching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_superres3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_video3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videoio3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videostab3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_viz3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_aruco3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bgsegm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bioinspired3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ccalib3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_cvv3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_datasets3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dpm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_face3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_fuzzy3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_hdf3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_img_hash3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_line_descriptor3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_optflow3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_phase_unwrapping3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_plot3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_reg3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_rgbd3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_saliency3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stereo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_structured_light3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_surface_matching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_text3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_tracking3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xfeatures2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ximgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xobjdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xphoto3.so.3.3.1".split(';') if 
"-lcompressed_depth_image_transport;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_calib3d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_core3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dnn3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_features2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_flann3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_highgui3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgcodecs3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ml3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_objdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_photo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_shape3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stitching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_superres3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_video3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videoio3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videostab3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_viz3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_aruco3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bgsegm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bioinspired3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ccalib3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_cvv3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_datasets3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dpm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_face3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_fuzzy3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_hdf3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_img_hash3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_line_descriptor3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_optflow3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_phase_unwrapping3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_plot3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_reg3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_rgbd3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_saliency3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stereo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_structured_light3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_surface_matching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_text3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_tracking3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xfeatures2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ximgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xobjdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xphoto3.so.3.3.1" != "" else []
PROJECT_NAME = "compressed_depth_image_transport"
PROJECT_SPACE_DIR = "/home/lty/catkin_ws/install"
PROJECT_VERSION = "1.9.5"
| 744.888889
| 6,082
| 0.800716
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/lty/catkin_ws/install/include;/opt/ros/kinetic/include/opencv-3.3.1-dev;/opt/ros/kinetic/include/opencv-3.3.1-dev/opencv".split(';') if "/home/lty/catkin_ws/install/include;/opt/ros/kinetic/include/opencv-3.3.1-dev;/opt/ros/kinetic/include/opencv-3.3.1-dev/opencv" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;dynamic_reconfigure;image_transport".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcompressed_depth_image_transport;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_calib3d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_core3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dnn3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_features2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_flann3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_highgui3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgcodecs3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ml3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_objdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_photo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_shape3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stitching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_superres3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_video3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videoio3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videostab3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_viz3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_aruco3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bgsegm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bioinspired3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ccalib3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_cvv3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_datasets3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dpm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_face3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_fuzzy3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_hdf3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_img_hash3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_line_descriptor3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_optflow3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_phase_unwrapping3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_plot3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_reg3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_rgbd3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_saliency3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stereo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_structured_light3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_surface_matching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_text3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_tracking3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xfeatures2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ximgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xobjdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xphoto3.so.3.3.1".split(';') if 
"-lcompressed_depth_image_transport;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_calib3d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_core3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dnn3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_features2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_flann3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_highgui3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgcodecs3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_imgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ml3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_objdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_photo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_shape3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stitching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_superres3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_video3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videoio3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_videostab3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_viz3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_aruco3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bgsegm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_bioinspired3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ccalib3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_cvv3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_datasets3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_dpm3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_face3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_fuzzy3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_hdf3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_img_hash3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_line_descriptor3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_optflow3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_phase_unwrapping3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_plot3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_reg3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_rgbd3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_saliency3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_stereo3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_structured_light3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_surface_matching3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_text3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_tracking3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xfeatures2d3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_ximgproc3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xobjdetect3.so.3.3.1;/opt/ros/kinetic/lib/x86_64-linux-gnu/libopencv_xphoto3.so.3.3.1" != "" else []
PROJECT_NAME = "compressed_depth_image_transport"
PROJECT_SPACE_DIR = "/home/lty/catkin_ws/install"
PROJECT_VERSION = "1.9.5"
| true
| true
|