hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
631273e5c0c4cb70ebc0fef77057e89e9ffce526 | 844 | py | Python | Lintcode/Ladder_11_15_A/209. First Unique Character in a String.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/Ladder_11_15_A/209. First Unique Character in a String.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/Ladder_11_15_A/209. First Unique Character in a String.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | class Solution:
"""
@param str: str: the given string
@return: char: the first unique character in a given string
"""
def firstUniqChar(self, str):
counter = {}
for c in str:
counter[c] = counter.get(c, 0) + 1
for c in str:
if counter[c] == 1:
return c
class Solution:
    """
    @param str: str: the given string
    @return: char: the first unique character in a given string
    """
    def firstUniqChar(self, str):
        """Return the first character of ``str`` appearing exactly once.

        Falls back to the empty string when no character is unique.
        """
        counts = {}
        for ch in str:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in str:
            if counts[ch] == 1:
                return ch
        return ''
359fb4588a81da23d3102b4731186fe0a3dd8efa | 10,699 | py | Python | ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | 5 | 2018-06-03T05:19:40.000Z | 2021-04-16T17:10:49.000Z | ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | null | null | null | ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py | wbear2/ambari | a1891193984da47015cd5483b5b95e040677d7df | [
"Apache-2.0"
] | 6 | 2019-05-07T13:24:39.000Z | 2021-02-15T14:12:37.000Z | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
class TestHiveMetastore(RMFTestCase):
def test_configure_default(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "configure",
config_file="../../2.1/configs/default.json"
)
self.assert_configure_default()
def test_start_default(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "start",
config_file="../../2.1/configs/default.json"
)
self.assert_configure_default()
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
user = 'hive'
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'], tries=5, try_sleep=10
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "stop",
config_file="../../2.1/configs/default.json"
)
self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid',
not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1)'
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "configure",
config_file="../../2.1/configs/secured.json"
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "start",
config_file="../../2.1/configs/secured.json"
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
user = 'hive'
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true hive asd com.mysql.jdbc.Driver',
path=['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'], tries=5, try_sleep=10
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
classname = "HiveMetastore",
command = "stop",
config_file="../../2.1/configs/secured.json"
)
self.assertResourceCalled('Execute', 'kill `cat /var/run/hive/hive.pid` >/dev/null 2>&1 && rm -f /var/run/hive/hive.pid',
not_if = '! (ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1)'
)
self.assertNoMoreResources()
def assert_configure_default(self):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Directory', '/etc/hive/conf.server',
owner = 'hive',
group = 'hadoop',
recursive = True,
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'hive',
group = 'hadoop',
mode = 0600,
conf_dir = '/etc/hive/conf.server',
configurations = self.getConfig()['configurations']['mapred-site'],
)
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
owner = 'hive',
group = 'hadoop',
mode = 0600,
conf_dir = '/etc/hive/conf.server',
configurations = self.getConfig()['configurations']['hive-site'],
)
self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
not_if = '[ -f DBConnectionVerification.jar]',
environment = {'no_proxy': 'c6401.ambari.apache.org'},
)
self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
owner = 'hive',
group = 'hadoop',
)
self.assertResourceCalled('File', '/tmp/start_metastore_script',
content = StaticFile('startMetastore.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', "export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/lib/hive/bin/schematool -initSchema -dbType postgres -userName hive -passWord asd",
not_if = 'export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/lib/hive/bin/schematool -info -dbType postgres -userName hive -passWord asd',
)
self.assertResourceCalled('Directory', '/var/run/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('Directory', '/var/log/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('Directory', '/var/lib/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
owner = 'hive',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
owner = 'hive',
group = 'hadoop',
)
def assert_configure_secured(self):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/HDP-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Directory', '/etc/hive/conf.server',
owner = 'hive',
group = 'hadoop',
recursive = True,
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'hive',
group = 'hadoop',
mode = 0600,
conf_dir = '/etc/hive/conf.server',
configurations = self.getConfig()['configurations']['mapred-site'],
)
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
owner = 'hive',
group = 'hadoop',
mode = 0600,
conf_dir = '/etc/hive/conf.server',
configurations = self.getConfig()['configurations']['hive-site'],
)
self.assertResourceCalled('Execute', "/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar'",
not_if = '[ -f DBConnectionVerification.jar]',
environment = {'no_proxy': 'c6401.ambari.apache.org'},
)
self.assertResourceCalled('File', '/etc/hive/conf.server/hive-env.sh',
content = Template('hive-env.sh.j2', conf_dir="/etc/hive/conf.server"),
owner = 'hive',
group = 'hadoop',
)
self.assertResourceCalled('File', '/tmp/start_metastore_script',
content = StaticFile('startMetastore.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', "export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/lib/hive/bin/schematool -initSchema -dbType postgres -userName hive -passWord asd",
not_if = 'export HIVE_CONF_DIR=/etc/hive/conf.server ; /usr/lib/hive/bin/schematool -info -dbType postgres -userName hive -passWord asd',
)
self.assertResourceCalled('Directory', '/var/run/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('Directory', '/var/log/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('Directory', '/var/lib/hive',
owner = 'hive',
group = 'hadoop',
mode = 0755,
recursive = True,
)
self.assertResourceCalled('File', '/etc/hive/conf/hive-default.xml.template',
owner = 'hive',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hive/conf/hive-env.sh.template',
owner = 'hive',
group = 'hadoop',
)
| 44.394191 | 324 | 0.635013 |
83b4294479bb03fa58fea8bbff50705d931b3816 | 7,292 | py | Python | jabs/ilf/gen_numbers.py | hertogp/jabs | ed419caa448075dcf327d2af561952a385115228 | [
"MIT"
] | 1 | 2021-05-14T03:17:48.000Z | 2021-05-14T03:17:48.000Z | jabs/ilf/gen_numbers.py | hertogp/jabs | ed419caa448075dcf327d2af561952a385115228 | [
"MIT"
] | null | null | null | jabs/ilf/gen_numbers.py | hertogp/jabs | ed419caa448075dcf327d2af561952a385115228 | [
"MIT"
] | 1 | 2017-10-31T02:04:52.000Z | 2017-10-31T02:04:52.000Z | #!/usr/bin/env python3
'''
Helper script:
- reads IANA IPv4 proto numbers & services
- writes numbers.py
'''
import sys
import argparse
import logging
import pandas as pd
import numpy as np
__version__ = '0.1'
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
URL_BASE = 'https://www.iana.org/assignments'
URL_PROTOCOLS = '{}/protocol-numbers/protocol-numbers-1.csv'.format(URL_BASE)
# URL_SERVICES = '{}/service-names-port-numbers/service-names-port-numbers.csv'.format(URL_BASE)
URL_SERVICES = '{0}/{1}/{1}.csv'.format(URL_BASE, 'service-names-port-numbers')
PY_OUTFILE = 'numbers.py'
def console_logging(log_level):
    'attach a stderr handler to the module logger at the requested level'
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.set_name('console')
    handler.setFormatter(
        logging.Formatter('%(funcName)s %(levelname)s: %(message)s'))
    handler.setLevel(log_level)
    log.setLevel(log_level)
    log.addHandler(handler)
def load_csv(url):
    'load a csv into a df and normalize column names somewhat'
    df = pd.read_csv(url)
    # Normalize headers: lowercase, whitespace runs become underscores.
    df.columns = df.columns.str.lower()
    # regex=True is explicit: newer pandas defaults str.replace to literal
    # matching, which would silently leave the whitespace untouched.
    df.columns = df.columns.str.replace(r'\s+', '_', regex=True)
    log.info('done reading url')
    return df
def load_protocols(url):
    'load protocol numbers from iana'
    try:
        df = load_csv(url)
        cols = 'decimal keyword protocol'.split()
        df = df[cols]
    except KeyError:
        raise Exception('Unexpected/different data, wrong url {}?'.format(url))

    # clean up values (regex=True keeps behaviour stable on newer pandas)
    log.info('cleaning up strings')
    df['protocol'] = df['protocol'].str.replace(r'\s+', ' ', regex=True)
    df['keyword'] = df['keyword'].str.strip()
    df['keyword'] = df['keyword'].str.replace(r'\s.*$', '', regex=True)  # 1st word
    df['keyword'] = df['keyword'].str.lower()
    df['decimal'] = df['decimal'].astype(str)  # ensure they're all strings!
    df['decimal'] = df['decimal'].str.replace(r'\s+', '', regex=True)
    df = df.drop_duplicates(subset='decimal', keep='first')  # drop dups

    # eliminate protocol-ranges ('start-stop') by making them explicit
    log.info('making protocol ranges explicit')
    rows = []
    for idx, row in df[df['decimal'].str.contains('-')].iterrows():
        parts = row['decimal'].split('-')
        start = int(parts[0])
        stop = int(parts[-1])
        proto = row['protocol']
        orgkey = row['keyword']
        for num in range(start, stop + 1):
            keyw = 'ip{}'.format(num) if pd.isnull(orgkey) else orgkey
            rows.append({'decimal': str(num),
                         'keyword': keyw,
                         'protocol': proto})
    # DataFrame.append was removed in pandas 2.0; concat is the replacement.
    if rows:
        df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
    df = df[~df['decimal'].str.contains('-')]  # drop the 'start-max' entries

    # set any remaining NaN keywords to ip<nr> and NaN protocols to keyword
    log.info('filling empty strings (if any) with sane defaults')
    df['keyword'] = np.where(df['keyword'].isnull(),
                             'ip' + df['decimal'],
                             df['keyword'])
    df['protocol'] = np.where(df['protocol'].isnull(),
                              df['keyword'],
                              df['protocol'])
    return df
def load_services(url):
    'load ip4 services from iana'
    cols = 'port_number transport_protocol service_name'.split()
    # BUG FIX: the url parameter was previously ignored in favour of the
    # module-level URL_SERVICES constant.
    df = load_csv(url)
    log.info('keep only columns {!r}'.format(cols))
    df = df[cols]
    df = df.dropna()  # if any field is nan, drop the row

    log.info('cleaning up strings')
    for col in cols:
        df[col] = df[col].astype(str)  # ensure strings
        df[col] = df[col].str.lower()  # lowercase
        df[col] = df[col].str.replace(r'\s.*$', '', regex=True)  # 1st word only
        df[col] = df[col].str.replace('_', '-', regex=False)  # aliased names -/_

    # eliminate port-ranges ('start-stop') by making them explicit
    log.info('make port-ranges explicit')
    rows = []
    for idx, row in df[df['port_number'].str.contains('-')].iterrows():
        parts = row['port_number'].split('-')
        start = int(parts[0])
        stop = int(parts[-1])
        proto = row['transport_protocol']
        if not proto:
            continue
        service = row['service_name']
        for num in range(start, stop + 1):
            srv = service if service else 'p-{}'.format(num)
            rows.append(dict(zip(cols, [str(num), proto, srv])))
    # DataFrame.append was removed in pandas 2.0; concat is the replacement.
    if rows:
        df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
    df = df[~df['port_number'].str.contains('-')]
    log.info('{} entries after clean up'.format(len(df.index)))
    return df
def protocol_topy(df, fh):
    'write the IP4PROTOCOLS dict as python source to fh'
    # note: intentionally mutates df['decimal'] in place, like before
    df['decimal'] = df['decimal'].astype('int64')
    indexed = df.set_index('decimal').drop_duplicates()
    mapping = dict(zip(indexed.index,
                       zip(indexed['keyword'], indexed['protocol'])))
    print("", file=fh)
    print('IP4PROTOCOLS = {', file=fh)
    for num, names in sorted(mapping.items()):
        print(' {}: {},'.format(num, names), file=fh)
    print('}', file=fh)
    log.info('wrote {} protocol numbers to {}'.format(len(mapping), fh.name))
def services_topy(df, fh):
    'write the IP4SERVICES dict as python source to fh'
    work = df.copy()
    key_cols = 'port_number transport_protocol'.split()
    # key is 'port/proto', e.g. '80/tcp'
    work['port'] = work[key_cols].apply(lambda g: '/'.join(x for x in g), axis=1)
    mapping = dict(zip(work['port'], work['service_name']))
    print("", file=fh)
    print('IP4SERVICES = {', file=fh)
    for port, service in sorted(mapping.items()):
        print(' {!r}: {!r},'.format(port, service), file=fh)
    print('}', file=fh)
    log.info('wrote {} service entries to {}'.format(len(mapping), fh.name))
def parse_args(argv):
    'parse command line arguments'
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_const',
                        dest='log_level', const=logging.INFO,
                        default=logging.WARNING,
                        help='show informational messages')
    parser.add_argument('-d', '--debug', action='store_const',
                        dest='log_level', const=logging.DEBUG,
                        help='show debug messages')
    parser.add_argument('-V', '--Version', action='version',
                        version='{} {}'.format(argv[0], __version__))
    args = parser.parse_args(argv[1:])
    args.prog = argv[0]
    return args
def main():
    # Generate PY_OUTFILE: a header docstring followed by the IP4PROTOCOLS
    # and IP4SERVICES lookup dicts scraped from the IANA csv's.
    with open(PY_OUTFILE, 'w') as outf:
        # file header, written as a module docstring of the generated file
        print("'''", file=outf)
        print('This file is generated by ' + __file__, file=outf)
        print('Donot edit, override entries via objects:', file=outf)
        print(' - ilf.IP4Protocols', file=outf)
        print(' - ilf.IP4Services', file=outf)
        print('Data retrieved from:', file=outf)
        print(' - {}'.format(URL_PROTOCOLS), file=outf)
        print(' - {}'.format(URL_SERVICES), file=outf)
        print("'''", file=outf)

        # protocol numbers
        log.info('retrieving protocols, url {}'.format(URL_PROTOCOLS))
        dfp = load_protocols(URL_PROTOCOLS)
        protocol_topy(dfp, outf)

        # ip4 services
        log.info('retrieving services, url {}'.format(URL_SERVICES))
        dfs = load_services(URL_SERVICES)
        services_topy(dfs, outf)

    log.info('done!')
if __name__ == '__main__':
    # parse cli, configure console logging, then run the generator
    args = parse_args(sys.argv)
    console_logging(args.log_level)
    sys.exit(main())
| 35.227053 | 96 | 0.603264 |
c8f7db74bdd3a7223446f2777623f449887d031b | 2,981 | py | Python | tests/sub_test.py | admire93/linky | 8cbe326ece49d9811dca4b298fb4b141717c538b | [
"MIT"
] | null | null | null | tests/sub_test.py | admire93/linky | 8cbe326ece49d9811dca4b298fb4b141717c538b | [
"MIT"
] | null | null | null | tests/sub_test.py | admire93/linky | 8cbe326ece49d9811dca4b298fb4b141717c538b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from linkfy.sub import linky, url_schema
def fullmatch(pattern, s):
    """True-ish iff compiled regex `pattern` matches ALL of `s`.

    Uses the stdlib re.Pattern.fullmatch (Python 3.4+) instead of the
    manual match-then-compare-group emulation; callers only test truthiness.
    """
    return pattern.fullmatch(s)
def test_url_scheme_more():
    """Exercise url_schema against the public url-regex corpus:
    every `correct` url must fully match, every `wrong` one must not."""
    # test case in https://mathiasbynens.be/demo/url-regex
    correct = [
        u'http://foo.com/blah_blah',
        u'http://foo.com/blah_blah/',
        u'http://foo.com/blah_blah_(wikipedia)',
        u'http://foo.com/blah_blah_(wikipedia)_(again)',
        u'http://www.example.com/wpstyle/?p=364',
        u'https://www.example.com/foo/?bar=baz&inga=42&quux',
        u'http://✪df.ws/123',
        u'http://142.42.1.1/',
        u'http://142.42.1.1:8080/',
        u'http://➡.ws/䨹',
        u'http://⌘.ws',
        u'http://⌘.ws/',
        u'http://foo.com/blah_(wikipedia)#cite-1',
        u'http://foo.com/blah_(wikipedia)_blah#cite-1',
        u'http://foo.com/unicode_(✪)_in_parens',
        u'http://foo.com/(something)?after=parens',
        u'http://☺.damowmow.com/',
        u'http://code.google.com/events/#&product=browser',
        u'http://j.mp',
        u'http://foo.bar/?q=test%20url-encoded%20stuff',
        u'http://مثال.إختبار',
        u'http://例子.测试',
        u'http://उदाहरण.परीक्षा',
        u'http://a.b-c.de',
    ]
    for c in correct:
        assert fullmatch(url_schema, c), c

    # malformed urls: empty hosts, spaces, bare scheme, bad schemes, ...
    wrong = [
        'http://',
        'http://.',
        'http://..',
        'http://../',
        'http://?',
        'http://??',
        'http://??/',
        'http://#',
        'http://##',
        'http://##/',
        'http://foo.bar?q=Spaces should be encoded',
        '//',
        '//a',
        '///a',
        '///',
        'http:///a',
        'rdar://1234',
        'h://test',
        'http:// shouldfail.com',
        ':// should fail',
        'http://foo.bar/foo(bar)baz quux',
        'ftps://foo.bar/',
        'http://3628126748',
        'http://.www.foo.bar/',
        'http://.www.foo.bar./',
    ]
    for w in wrong:
        assert not fullmatch(url_schema, w), w
def test_url_scheme(fx_url):
    # every fixture-provided url must match the url schema in full
    assert fullmatch(url_schema, fx_url), fx_url
def test_linky_escape():
    # escape=True html-escapes angle brackets; escape=False passes them through
    assert linky('hello<>', escape=True) == 'hello&lt;&gt;'
    assert linky('hello<>', escape=False) == 'hello<>'
def test_linky_substitude(fx_url):
    # every url occurrence is wrapped in an anchor tag, surrounding text kept
    assert linky('hello {}'.format(fx_url)) == \
        'hello <a href="{0}">{0}</a>'.format(fx_url)
    assert linky('hello {} world'.format(fx_url)) == \
        'hello <a href="{0}">{0}</a> world'.format(fx_url)
    # repeated urls are each linked
    assert linky('hello {0} world {0}'.format(fx_url)) == \
        'hello <a href="{0}">{0}</a> world ' \
        '<a href="{0}">{0}</a>'.format(fx_url)
    # bare domains (no scheme) are linked as-is
    assert linky('hello {0} world google.com'.format(fx_url)) == \
        'hello <a href="{0}">{0}</a> world ' \
        '<a href="google.com">google.com</a>'.format(fx_url)
    # NOTE(review): the 'https' test is redundant — any string starting with
    # 'https' also startswith 'http'.
    if fx_url.startswith('http') or fx_url.startswith('https'):
        # scheme-prefixed urls are linked even without a leading space
        assert linky(u'hello{} world'.format(fx_url)) == \
            u'hello<a href="{0}">{0}</a> world'.format(fx_url)
| 31.712766 | 66 | 0.511238 |
fb1767661437f9a5fe743f8804c45bd49cc6f0f9 | 7,180 | py | Python | vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py | lstyles/nsgflowlogsbeat | 06aa15a7eaaf24cf70dd2520ed7d2186f4135c09 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:31:15.000Z | 2021-05-17T01:31:15.000Z | vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py | lstyles/nsgflowlogsbeat | 06aa15a7eaaf24cf70dd2520ed7d2186f4135c09 | [
"Apache-2.0"
] | null | null | null | vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py | lstyles/nsgflowlogsbeat | 06aa15a7eaaf24cf70dd2520ed7d2186f4135c09 | [
"Apache-2.0"
] | null | null | null | from heartbeat import BaseTest
from parameterized import parameterized
import os
from nose.plugins.skip import SkipTest
import nose.tools
class Test(BaseTest):
    """System tests for heartbeat's HTTP and TCP monitors.

    Each test spins up a local test server, renders a heartbeat config
    pointing at it, runs the beat, and inspects the emitted events.
    """

    @parameterized.expand([
        "200", "404"
    ])
    def test_http(self, status_code):
        """
        Test http server
        """
        status_code = int(status_code)
        server = self.start_server("hello world", status_code)
        self.render_http_config(
            ["localhost:{}".format(server.server_port)])

        proc = self.start_beat()
        self.wait_until(lambda: self.log_contains("heartbeat is running"))
        self.wait_until(
            lambda: self.output_has(lines=1))
        proc.check_kill_and_wait()
        server.shutdown()

        # the emitted event must carry the server's actual status code
        output = self.read_output()
        assert status_code == output[0]["http.response.status_code"]

        if os.name == "nt":
            # Currently skipped on Windows as fields.yml not generated
            raise SkipTest

        self.assert_fields_are_documented(output[0])

    @parameterized.expand([
        "200", "404"
    ])
    def test_http_with_hosts_config(self, status_code):
        """
        Test http server
        """
        # same as test_http, but configures the monitor via 'hosts'
        # instead of the legacy 'urls' key
        status_code = int(status_code)
        server = self.start_server("hello world", status_code)
        self.render_http_config_with_hosts(
            ["localhost:{}".format(server.server_port)])

        proc = self.start_beat()
        self.wait_until(lambda: self.log_contains("heartbeat is running"))
        self.wait_until(
            lambda: self.output_has(lines=1))
        proc.check_kill_and_wait()
        server.shutdown()

        output = self.read_output()
        assert status_code == output[0]["http.response.status_code"]

        if os.name == "nt":
            # Currently skipped on Windows as fields.yml not generated
            raise SkipTest

        self.assert_fields_are_documented(output[0])

    def test_http_delayed(self):
        """
        Ensure that the HTTP monitor consumes the whole body.
        We do this by ensuring that a slow HTTP body write's time is reflected
        in the beats metrics.
        """
        try:
            delay = 1.0
            server = self.start_server("sloooow body", 200, write_delay=delay)
            self.render_http_config(
                ["http://localhost:{}".format(server.server_port)])
            try:
                proc = self.start_beat()
                self.wait_until(lambda: self.output_has(lines=1))
                # NOTE(review): 'http.rtt.total.us' looks like microseconds
                # while `delay` is seconds, so this assertion holds almost
                # trivially — confirm whether delay should be scaled to µs.
                nose.tools.assert_greater_equal(
                    self.last_output_line()['http.rtt.total.us'], delay)
            finally:
                proc.check_kill_and_wait()
        finally:
            server.shutdown()

    @parameterized.expand([
        ("up", '{"foo": {"baz": "bar"}}'),
        ("down", '{"foo": "unexpected"}'),
        ("down", 'notjson'),
    ])
    def test_http_json(self, expected_status, body):
        """
        Test JSON response checks
        """
        server = self.start_server(body, 200)
        try:
            self.render_config_template(
                monitors=[{
                    "type": "http",
                    "urls": ["http://localhost:{}".format(server.server_port)],
                    "check_response_json": [{
                        "description": "foo equals bar",
                        "condition": {
                            "equals": {"foo": {"baz": "bar"}}
                        }
                    }]
                }]
            )
            try:
                proc = self.start_beat()
                self.wait_until(lambda: self.log_contains("heartbeat is running"))
                self.wait_until(
                    lambda: self.output_has(lines=1))
            finally:
                proc.check_kill_and_wait()

            self.assert_last_status(expected_status)
            if expected_status == "down":
                # failed checks include the offending body for debugging
                nose.tools.eq_(self.last_output_line()["http.response.body.content"], body)
            else:
                # successful checks must not leak the body into the event
                assert "http.response.body.content" not in self.last_output_line()
        finally:
            server.shutdown()

    @parameterized.expand([
        ('{"foo": "bar"}', {"foo": "bar"}),
        ('{"foo": true}', {"foo": True},),
        ('{"foo": 3}', {"foo": 3},),
    ])
    def test_json_simple_comparisons(self, body, comparison):
        """
        Test JSON response with simple straight-forward comparisons
        """
        server = self.start_server(body, 200)
        try:
            self.render_config_template(
                monitors=[{
                    "type": "http",
                    "urls": ["http://localhost:{}".format(server.server_port)],
                    "check_response_json": [{
                        "description": body,
                        "condition": {
                            "equals": comparison
                        }
                    }]
                }]
            )
            try:
                proc = self.start_beat()
                self.wait_until(lambda: self.log_contains("heartbeat is running"))
                self.wait_until(
                    lambda: self.output_has(lines=1))
            finally:
                proc.check_kill_and_wait()

            self.assert_last_status("up")
        finally:
            server.shutdown()

    @parameterized.expand([
        (lambda server: "localhost:{}".format(server.server_port), "up"),
        # This IP is reserved in IPv4
        (lambda server: "203.0.113.1:1233", "down"),
    ])
    def test_tcp(self, url, status):
        """
        Test tcp server
        """
        server = self.start_server("hello world", 200)
        try:
            self.render_config_template(
                monitors=[{
                    "type": "tcp",
                    "hosts": [url(server)],
                    "timeout": "3s"
                }],
            )
            proc = self.start_beat()
            try:
                self.wait_until(lambda: self.log_contains(
                    "heartbeat is running"))
                self.wait_until(
                    lambda: self.output_has(lines=1))
            finally:
                proc.check_kill_and_wait()

            output = self.read_output()
            self.assert_last_status(status)

            if os.name == "nt":
                # Currently skipped on Windows as fields.yml not generated
                raise SkipTest

            self.assert_fields_are_documented(output[0])
        finally:
            server.shutdown()

    def render_http_config(self, urls):
        # render a minimal http monitor config using the legacy 'urls' key
        self.render_config_template(
            monitors=[{
                "type": "http",
                "urls": urls,
            }]
        )

    def render_http_config_with_hosts(self, urls):
        # same monitor config, expressed with the newer 'hosts' key
        self.render_config_template(
            monitors=[{
                "type": "http",
                "hosts": urls,
            }]
        )
| 32.053571 | 92 | 0.493454 |
99cdc8203e658d81b2baec41b3352995d6ea75ee | 4,661 | py | Python | packages/python/plotly/plotly/graph_objs/barpolar/_legendgrouptitle.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/barpolar/_legendgrouptitle.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/barpolar/_legendgrouptitle.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
    """Generated plotly graph object: legend-group title of a barpolar trace."""

    # class properties
    # --------------------
    _parent_path_str = "barpolar"
    _path_str = "barpolar.legendgrouptitle"
    _valid_props = {"font", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of
            :class:`plotly.graph_objs.barpolar.legendgrouptitle.Font`
          - A dict of string/value properties (color, family, size)
            that will be passed to the Font constructor

        Returns
        -------
        plotly.graph_objs.barpolar.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # plain-text summary used when composing validation error messages
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.barpolar.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super(Legendgrouptitle, self).__init__("legendgrouptitle")

        # internal construction path: adopt the parent and skip validation
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.barpolar.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.Legendgrouptitle`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over entries in arg
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| 30.070968 | 84 | 0.524995 |
48b65d46719d5a9c5a8ecbc957cd4feca91a8cc9 | 3,895 | py | Python | setup.py | fakecoinbase/4emslashhummingbot1 | 74b0ef943770ad141382a3a1851f55998facdac0 | [
"Apache-2.0"
] | null | null | null | setup.py | fakecoinbase/4emslashhummingbot1 | 74b0ef943770ad141382a3a1851f55998facdac0 | [
"Apache-2.0"
] | null | null | null | setup.py | fakecoinbase/4emslashhummingbot1 | 74b0ef943770ad141382a3a1851f55998facdac0 | [
"Apache-2.0"
] | 1 | 2020-05-15T03:37:03.000Z | 2020-05-15T03:37:03.000Z | #!/usr/bin/env python
from setuptools import setup
from Cython.Build import cythonize
import numpy as np
import os
import subprocess
import sys
# Build-time compiler configuration for the Cython C++ extensions.
is_posix = (os.name == "posix")

if is_posix:
    # Detect the POSIX flavor; `uname` prints e.g. "Darwin" on macOS.
    os_name = subprocess.check_output("uname").decode("utf8")
    if "Darwin" in os_name:
        # macOS clang requires libc++ to be selected explicitly for C++11.
        os.environ["CFLAGS"] = "-stdlib=libc++ -std=c++11"
    else:
        os.environ["CFLAGS"] = "-std=c++11"
def main():
    """Configure and run setuptools for the hummingbot package.

    Declares the package list, data files, and runtime dependencies, then
    cythonizes every ``.pyx`` module into C++ extensions. Behavior is
    influenced by the ``DEV_MODE`` environment variable (ships sources and
    marks the version as ``.dev1``) and, on POSIX, parallelizes both the
    cythonize step and ``build_ext``.
    """
    # Fall back to 8 workers when the CPU count cannot be determined.
    cpu_count = os.cpu_count() or 8
    version = "20200424"
    packages = [
        "hummingbot",
        "hummingbot.client",
        "hummingbot.client.command",
        "hummingbot.client.config",
        "hummingbot.client.ui",
        "hummingbot.core",
        "hummingbot.core.data_type",
        "hummingbot.core.event",
        "hummingbot.core.management",
        "hummingbot.core.utils",
        "hummingbot.data_feed",
        "hummingbot.logger",
        "hummingbot.market",
        "hummingbot.market.bamboo_relay",
        "hummingbot.market.binance",
        "hummingbot.market.bittrex",
        "hummingbot.market.coinbase_pro",
        "hummingbot.market.huobi",
        "hummingbot.market.radar_relay",
        "hummingbot.market.kraken",
        "hummingbot.strategy",
        "hummingbot.strategy.arbitrage",
        "hummingbot.strategy.cross_exchange_market_making",
        "hummingbot.strategy.pure_market_making",
        "hummingbot.templates",
        "hummingbot.wallet",
        "hummingbot.wallet.ethereum",
        "hummingbot.wallet.ethereum.uniswap",
        "hummingbot.wallet.ethereum.watcher",
        "hummingbot.wallet.ethereum.zero_ex",
    ]
    # Non-Python files that must ship inside the installed package.
    package_data = {
        "hummingbot": [
            "core/cpp/*",
            "wallet/ethereum/zero_ex/*.json",
            "wallet/ethereum/token_abi/*.json",
            "wallet/ethereum/erc20_tokens.json",
            "VERSION",
            "templates/*TEMPLATE.yml"
        ],
    }
    install_requires = [
        "aioconsole",
        "aiokafka",
        "attrdict",
        "cytoolz",
        "eth-abi",
        "eth-account",
        "eth-bloom",
        "eth-hash",
        "eth-keyfile",
        "eth-keys",
        "eth-rlp",
        "eth-utils",
        "hexbytes",
        "kafka-python",
        "lru-dict",
        "parsimonious",
        "pycryptodome",
        "requests",
        "rlp",
        "toolz",
        "tzlocal",
        "urllib3",
        "web3",
        "websockets",
        "aiohttp",
        "async-timeout",
        "attrs",
        "certifi",
        "chardet",
        "cython==0.29.15",
        "idna",
        "idna_ssl",
        "multidict",
        "numpy",
        "pandas",
        "pytz",
        "pyyaml",
        "python-binance==0.7.1",
        "sqlalchemy",
        "ujson",
        "yarl",
    ]
    cython_kwargs = {
        "language": "c++",
        "language_level": 3,
    }
    if is_posix:
        # Parallel cythonization is only supported on POSIX platforms.
        cython_kwargs["nthreads"] = cpu_count

    if "DEV_MODE" in os.environ:
        # Development builds ship the Cython sources and generated C++ so
        # the extensions can be rebuilt from an installed package.
        version += ".dev1"
        package_data[""] = [
            "*.pxd", "*.pyx", "*.h"
        ]
        package_data["hummingbot"].append("core/cpp/*.cpp")

    if len(sys.argv) > 1 and sys.argv[1] == "build_ext" and is_posix:
        # Compile the extension modules in parallel across all cores.
        sys.argv.append(f"--parallel={cpu_count}")

    setup(name="hummingbot",
          version=version,
          description="Hummingbot",
          url="https://github.com/CoinAlpha/hummingbot",
          author="CoinAlpha, Inc.",
          author_email="dev@hummingbot.io",
          license="Apache 2.0",
          packages=packages,
          package_data=package_data,
          install_requires=install_requires,
          ext_modules=cythonize(["hummingbot/**/*.pyx"], **cython_kwargs),
          include_dirs=[
              np.get_include()
          ],
          scripts=[
              "bin/hummingbot.py",
              "bin/hummingbot_quickstart.py"
          ],
          )


if __name__ == "__main__":
    main()
| 25.966667 | 74 | 0.534018 |
5c7d5592a39c3ad78d862c43322edf7aae811b6a | 11,302 | py | Python | sdk/tests/utilities/test_api_client_builder.py | bogdanLicaFinbourne/lusid-sdk-python-preview | f0f91f992e0417733c4c8abd2674d080a52b6890 | [
"MIT"
] | 6 | 2018-06-19T15:50:17.000Z | 2022-03-26T22:53:16.000Z | sdk/tests/utilities/test_api_client_builder.py | bogdanLicaFinbourne/lusid-sdk-python-preview | f0f91f992e0417733c4c8abd2674d080a52b6890 | [
"MIT"
] | 98 | 2020-04-15T06:05:43.000Z | 2022-03-01T10:25:25.000Z | sdk/tests/utilities/test_api_client_builder.py | bogdanLicaFinbourne/lusid-sdk-python-preview | f0f91f992e0417733c4c8abd2674d080a52b6890 | [
"MIT"
] | 9 | 2019-09-30T11:19:25.000Z | 2021-11-17T19:49:59.000Z | import unittest
from unittest.mock import patch
from parameterized import parameterized
from lusid import ApiClient
from lusid.utilities import ApiClientBuilder, ApiConfiguration
from lusid.utilities.proxy_config import ProxyConfig
from utilities import CredentialsSource
from utilities.temp_file_manager import TempFileManager
# Module-level test fixtures: the credentials used to build configurations and
# the mapping from credential names to their config-file / env-var keys.
source_config_details, config_keys = CredentialsSource.fetch_credentials(), CredentialsSource.fetch_config_keys()
class ApiClientBuilderTests(unittest.TestCase):
    """Tests for ApiClientBuilder covering configuration sources, token
    handling, Okta response handlers, and correlation-id propagation."""

    # Test creation with configuration inc proxy settings
    @parameterized.expand(
        [
            [
                "Missing api_url when using an explicit token",
                ["api_url"],
                "example_token"
            ],
            [
                "Missing username and password when generating a token",
                ["password", "username"],
                None
            ]
        ], testcase_func_name=CredentialsSource.custom_name_func
    )
    def test_missing_from_config_file_throws(self, _, missing_attributes, token):
        """
        Tests that if some required fields are missing from the ApiConfiguration an error is thrown
        :return:
        """
        # Create an ApiConfiguration with all values populated
        proxy_config = ProxyConfig(**{
            key.replace("proxy_", ""): value for key, value in source_config_details.items() if
            value is not None and "proxy" in key
        }) if source_config_details["proxy_address"] is not None else None

        api_config_kwargs = {key: value for key, value in source_config_details.items() if
                             value is not None and "proxy" not in key}
        api_config_kwargs["proxy_config"] = proxy_config

        api_configuration = ApiConfiguration(**api_config_kwargs)

        # Pop off the missing attributes
        # (plain loop: a comprehension for side effects hides the intent)
        for missing_attribute in missing_attributes:
            setattr(api_configuration, missing_attribute, None)

        # Ensure that there are no environment variables which can be used to fill the missing Api Url
        with patch.dict('os.environ', clear=True), self.assertRaises(ValueError) as ex:
            ApiClientBuilder.build(api_configuration=api_configuration, token=token)

        self.assertEqual(
            ex.exception.args[0], f"The fields {str(missing_attributes)} on the ApiConfiguration are set to None, "
            f"please ensure that you have provided them directly, via a secrets file or environment "
            f"variables")

    def test_build_client_no_token_provided_config_takes_precedence(self):
        """
        This test builds an ApiClient from a provided secrets.json file. The call to generate the token is mocked here.
        """
        # Secrets file deliberately contains dummy values: the explicit
        # ApiConfiguration must win over the file contents.
        secrets = {
            "api": {
                config_keys[key]["config"]: "DUMMYVALUE" for key in source_config_details.keys() if
                "proxy" not in key
            }
        }

        env_vars = {}

        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })

        # Use a temporary file and no environment variables to generate the API Client
        with patch.dict('os.environ', env_vars, clear=True), patch("requests.post") as mock_requests:
            mock_requests.return_value.status_code = 200
            mock_requests.return_value.json.return_value = {
                "access_token": "mock_access_token",
                "refresh_token": "mock_refresh_token",
                "expires_in": 60
            }

            secrets_file = TempFileManager.create_temp_file(secrets)
            client = ApiClientBuilder.build(
                api_secrets_filename=secrets_file.name,
                api_configuration=api_configuration)
            TempFileManager.delete_temp_file(secrets_file)

        self.assertEqual(client.configuration.access_token, "mock_access_token")
        self.assertEqual(client.configuration.host, source_config_details["api_url"])
        self.assertIsInstance(client, ApiClient)

    def test_build_client_no_token_provided_file_only(self):
        """
        This test builds an ApiClient from a provided secrets.json file. The call to generate the token is mocked here.
        """
        secrets = {
            "api": {
                config_keys[key]["config"]: value for key, value in source_config_details.items() if
                value is not None and "proxy" not in key
            }
        }

        env_vars = {}

        api_configuration = None

        # Use a temporary file and no environment variables to generate the API Client
        with patch.dict('os.environ', env_vars, clear=True), patch("requests.post") as mock_requests:
            mock_requests.return_value.status_code = 200
            mock_requests.return_value.json.return_value = {
                "access_token": "mock_access_token",
                "refresh_token": "mock_refresh_token",
                "expires_in": 60
            }

            secrets_file = TempFileManager.create_temp_file(secrets)
            client = ApiClientBuilder.build(
                api_secrets_filename=secrets_file.name,
                api_configuration=api_configuration)
            TempFileManager.delete_temp_file(secrets_file)

        self.assertEqual(client.configuration.access_token, "mock_access_token")
        self.assertEqual(client.configuration.host, source_config_details["api_url"])
        self.assertIsInstance(client, ApiClient)

    @parameterized.expand(
        [
            [
                "Build client from token ONLY",
                ["api"],
                {},
                ApiConfiguration(
                    api_url=source_config_details["api_url"]),
                "abc42123.e423klfkel.sdlj53kl23423"
            ],
            [
                "Test that when token and secrets file are provided the token is used",
                [],
                {},
                None,
                "abc42123.e423klfkel.sdlj53kl23423"
            ],
        ], testcase_func_name=CredentialsSource.custom_name_func
    )
    def test_build_client_with_token_provided(self, _, config_to_remove, env_vars, api_configuration, token):
        """
        This test builds an ApiClient from a provided token.
        """
        secrets = {
            "api": {
                config_keys[key]["config"]: value for key, value in source_config_details.items() if
                value is not None and "proxy" not in key
            }
        }

        # (plain loop: a comprehension for side effects hides the intent)
        for config in config_to_remove:
            secrets.pop(config)

        # Use a temporary file and no environment variables to generate the API Client
        with patch.dict('os.environ', env_vars, clear=True):
            secrets_file = TempFileManager.create_temp_file(secrets)
            client = ApiClientBuilder.build(
                api_secrets_filename=secrets_file.name,
                api_configuration=api_configuration,
                token=token)
            TempFileManager.delete_temp_file(secrets_file)

        self.assertEqual(client.configuration.host, source_config_details["api_url"])
        self.assertEqual(client.configuration.access_token, token)
        self.assertIsInstance(client, ApiClient)

    def test_use_okta_response_handler(self):
        """A custom id-provider response handler is invoked and its errors propagate."""
        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })
        api_configuration.certificate_filename = None
        env_vars = {}

        def response_handler(okta_response):
            # The mocked Okta response below omits the refresh token, so this raises.
            if not okta_response.json.return_value.get("refresh_token", False):
                raise ValueError("Refresh token missing from config")

        with patch.dict('os.environ', env_vars, clear=True), patch("requests.post") as mock_requests, self.assertRaises(
                ValueError):
            mock_requests.return_value.status_code = 200
            mock_requests.return_value.json.return_value = {
                "access_token": "mock_access_token",
                "expires_in": 60
            }

            client = ApiClientBuilder.build(
                api_configuration=api_configuration,
                id_provider_response_handler=response_handler)

            # Force evaluation of the access token so that it is retrieved
            repr(client.configuration.access_token)

    def test_set_correlation_id_from_env_var(self):
        """The FBN_CORRELATION_ID env var populates the CorrelationId header."""
        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })

        env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}
        env_vars["FBN_CORRELATION_ID"] = "env-correlation-id"

        with patch.dict('os.environ', env_vars, clear=True):
            client = ApiClientBuilder.build(api_configuration=api_configuration)

        self.assertTrue("CorrelationId" in client.default_headers, msg="CorrelationId not found in headers")
        # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
        self.assertEqual(client.default_headers["CorrelationId"], "env-correlation-id")

    def test_set_correlation_id_from_param(self):
        """An explicit correlation_id parameter populates the CorrelationId header."""
        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })

        env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}

        with patch.dict('os.environ', env_vars, clear=True):
            client = ApiClientBuilder.build(api_configuration=api_configuration, correlation_id="param-correlation-id")

        self.assertTrue("CorrelationId" in client.default_headers, msg="CorrelationId not found in headers")
        self.assertEqual(client.default_headers["CorrelationId"], "param-correlation-id")

    def test_no_correlation_id_is_set_when_no_env_var_is_set(self):
        """No CorrelationId header is added when neither env var nor parameter is given."""
        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })

        env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}

        with patch.dict('os.environ', env_vars, clear=True):
            client = ApiClientBuilder.build(api_configuration=api_configuration)

        self.assertFalse("CorrelationId" in client.default_headers, msg="Unexpected CorrelationId found in headers")

    def test_use_explicit_correlation_id_when_env_var_exists(self):
        """The explicit correlation_id parameter takes precedence over the env var."""
        api_configuration = ApiConfiguration(**{
            key: value for key, value in source_config_details.items() if "proxy" not in key
        })

        env_vars = {config_keys[key]["env"]: value for key, value in source_config_details.items() if value is not None}
        env_vars["FBN_CORRELATION_ID"] = "env-correlation-id"

        with patch.dict('os.environ', env_vars, clear=True):
            client = ApiClientBuilder.build(api_configuration=api_configuration, correlation_id="param-correlation-id")

        self.assertTrue("CorrelationId" in client.default_headers, msg="CorrelationId not found in headers")
        self.assertEqual(client.default_headers["CorrelationId"], "param-correlation-id")
| 42.329588 | 123 | 0.650858 |
853fb0d479a4c9a19636aa58f590316a6a93832e | 3,427 | py | Python | homeassistant/components/ios/notify.py | mikan-megane/core | 837220cce40890e296920d33a623adbc11bd15a6 | [
"Apache-2.0"
] | 5 | 2018-10-23T14:15:05.000Z | 2021-11-26T06:38:44.000Z | homeassistant/components/ios/notify.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 79 | 2020-07-23T07:13:37.000Z | 2022-03-22T06:02:37.000Z | homeassistant/components/ios/notify.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """Support for iOS push notifications."""
import logging
import requests
from homeassistant.components import ios
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import HTTP_CREATED, HTTP_TOO_MANY_REQUESTS
import homeassistant.util.dt as dt_util
# Module-level logger for this notify platform.
_LOGGER = logging.getLogger(__name__)

# Endpoint of the hosted push relay that forwards notifications to APNS.
PUSH_URL = "https://ios-push.home-assistant.io/push"
# pylint: disable=invalid-name
def log_rate_limits(hass, target, resp, level=logging.INFO):
    """Output rate limit log line at given level.

    Args:
        hass: Home Assistant instance, used to resolve the device name.
        target: Push id of the device the notification was sent to.
        resp: Parsed JSON response from the push gateway; must contain a
            ``rateLimits`` object with ``resetsAt``, ``successful``,
            ``maximum`` and ``errors`` keys.
        level: Logging level for the message; defaults to ``logging.INFO``
            (numerically 20, same as the previous magic-number default).
    """
    rate_limits = resp["rateLimits"]
    # NOTE(review): parse_datetime returns None for unparsable input, which
    # would raise below — assumes the gateway always sends a valid timestamp.
    resets_at = dt_util.parse_datetime(rate_limits["resetsAt"])
    time_until_reset = resets_at - dt_util.utcnow()
    rate_limit_msg = (
        "iOS push notification rate limits for %s: "
        "%d sent, %d allowed, %d errors, "
        "resets in %s"
    )
    _LOGGER.log(
        level,
        rate_limit_msg,
        ios.device_name_for_push_id(hass, target),
        rate_limits["successful"],
        rate_limits["maximum"],
        rate_limits["errors"],
        # Drop the sub-second fraction from the timedelta for readability.
        str(time_until_reset).split(".")[0],
    )
def get_service(hass, config, discovery_info=None):
    """Return the iOS notification service, or None if no push devices are registered."""
    components = hass.config.components
    if "notify.ios" not in components:
        # Need this to enable requirements checking in the app.
        components.add("notify.ios")

    has_push_devices = ios.devices_with_push(hass)
    return iOSNotificationService() if has_push_devices else None
class iOSNotificationService(BaseNotificationService):
    """Implement the notification service for iOS."""

    def __init__(self):
        """Initialize the service."""
        # No per-instance state; the docstring is the (empty) body.

    @property
    def targets(self):
        """Return a dictionary of registered targets."""
        return ios.devices_with_push(self.hass)

    def send_message(self, message="", **kwargs):
        """Send a message to the Lambda APNS gateway.

        Posts one request per target to PUSH_URL. On non-201 responses the
        gateway's error message is logged; 429 additionally logs rate limits.
        """
        data = {ATTR_MESSAGE: message}

        # Remove default title from notifications.
        if (
            kwargs.get(ATTR_TITLE) is not None
            and kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT
        ):
            data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)

        targets = kwargs.get(ATTR_TARGET)

        # With no explicit targets, broadcast to every enabled push device.
        if not targets:
            targets = ios.enabled_push_ids(self.hass)

        if kwargs.get(ATTR_DATA) is not None:
            data[ATTR_DATA] = kwargs.get(ATTR_DATA)

        for target in targets:
            # An unknown target aborts the whole send, not just this iteration.
            if target not in ios.enabled_push_ids(self.hass):
                _LOGGER.error("The target (%s) does not exist in .ios.conf", targets)
                return

            data[ATTR_TARGET] = target

            req = requests.post(PUSH_URL, json=data, timeout=10)

            if req.status_code != HTTP_CREATED:
                fallback_error = req.json().get("errorMessage", "Unknown error")
                fallback_message = (
                    f"Internal server error, please try again later: {fallback_error}"
                )
                # Note: rebinds the `message` parameter with the gateway's error text.
                message = req.json().get("message", fallback_message)
                if req.status_code == HTTP_TOO_MANY_REQUESTS:
                    _LOGGER.warning(message)
                    # 30 == logging.WARNING for the rate-limit log line.
                    log_rate_limits(self.hass, target, req.json(), 30)
                else:
                    _LOGGER.error(message)
            else:
                log_rate_limits(self.hass, target, req.json())
| 31.440367 | 86 | 0.625912 |
08404fc566bec02d00aeb90b2415d16a163846d5 | 1,807 | py | Python | tests/test_js_jstypes.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | tests/test_js_jstypes.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | tests/test_js_jstypes.py | andymckay/amo-validator | d13e3644eb657e56666ee40d91a9c67382cfa725 | [
"BSD-3-Clause"
] | null | null | null | import validator.testcases.javascript.jstypes as jstypes
from js_helper import _do_test_raw
def test_jsarray_output():
"""Test that the output function for JSArray doesn't bork."""
ja = jstypes.JSArray()
ja.elements = [None, None]
ja.output() # Used to throw tracebacks.
ja.get_literal_value() # Also used to throw tracebacks.
def test_jsobject_output():
"""Test that the output function for JSObject doesn't bork."""
jso = jstypes.JSObject()
jso.data = {"first": None}
jso.output() # Used to throw tracebacks
def test_jsobject_recursion():
"""Test that circular references don't cause recursion errors."""
jso = jstypes.JSObject()
jso2 = jstypes.JSObject()
jso.data = {"first": jstypes.JSWrapper(jso2)}
jso2.data = {"second": jstypes.JSWrapper(jso)}
print jso.output()
assert "(recursion)" in jso.output()
def test_jsarray_recursion():
"""Test that circular references don't cause recursion errors."""
ja = jstypes.JSArray()
ja2 = jstypes.JSArray()
ja.elements = [jstypes.JSWrapper(ja2)]
ja2.elements = [jstypes.JSWrapper(ja)]
print ja.output()
assert "(recursion)" in ja.output()
print ja.get_literal_value()
assert "(recursion)" in ja.get_literal_value()
def test_jsliteral_regex():
"""
Test that there aren't tracebacks from JSLiterals that perform raw binary
operations.
"""
assert not _do_test_raw("""
var x = /foo/gi;
var y = x + " ";
var z = /bar/i + 0;
""").failed()
def test_jsarray_contsructor():
"""
Test for tracebacks that were caused by JSArray not calling it's parent's
constructor.
"""
assert not _do_test_raw("""
var x = [];
x.foo = "bar";
x["zap"] = "foo";
baz("zap" in x);
""").failed()
| 24.418919 | 77 | 0.645822 |
3f5806cba0e24d96ced6822b3e8a8e8facfae643 | 20,003 | py | Python | chia/full_node/coin_store.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 2 | 2021-07-10T12:51:05.000Z | 2021-07-26T04:33:31.000Z | chia/full_node/coin_store.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 33 | 2021-09-28T10:17:59.000Z | 2022-03-29T10:13:18.000Z | chia/full_node/coin_store.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional, Set, Dict, Any, Tuple
import aiosqlite
from chia.protocols.wallet_protocol import CoinState
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32, uint64
from chia.util.lru_cache import LRUCache
from time import time
import logging
# Module-level logger for the coin store.
log = logging.getLogger(__name__)
class CoinStore:
    """
    This object handles CoinRecords in DB.
    A cache is maintained for quicker access to recent coins.
    """

    coin_record_db: aiosqlite.Connection   # shared connection owned by db_wrapper
    coin_record_cache: LRUCache            # name -> CoinRecord, most recently touched coins
    cache_size: uint32                     # capacity of coin_record_cache
    db_wrapper: DBWrapper                  # carries db_version / allow_upgrades flags

    @classmethod
    async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
        """Create the store, ensuring the coin_record table and indices exist.

        The schema differs by db_version: v2 stores binary blobs and derives
        spent-ness from spent_index, while v1 stores hex text and keeps a
        separate `spent` column.
        """
        self = cls()
        self.cache_size = cache_size
        self.db_wrapper = db_wrapper
        self.coin_record_db = db_wrapper.db
        if self.db_wrapper.db_version == 2:
            # the coin_name is unique in this table because the CoinStore always
            # only represent a single peak
            await self.coin_record_db.execute(
                "CREATE TABLE IF NOT EXISTS coin_record("
                "coin_name blob PRIMARY KEY,"
                " confirmed_index bigint,"
                " spent_index bigint,"  # if this is zero, it means the coin has not been spent
                " coinbase int,"
                " puzzle_hash blob,"
                " coin_parent blob,"
                " amount blob,"  # we use a blob of 8 bytes to store uint64
                " timestamp bigint)"
            )
        else:
            # the coin_name is unique in this table because the CoinStore always
            # only represent a single peak
            await self.coin_record_db.execute(
                (
                    "CREATE TABLE IF NOT EXISTS coin_record("
                    "coin_name text PRIMARY KEY,"
                    " confirmed_index bigint,"
                    " spent_index bigint,"
                    " spent int,"
                    " coinbase int,"
                    " puzzle_hash text,"
                    " coin_parent text,"
                    " amount blob,"
                    " timestamp bigint)"
                )
            )

        # Useful for reorg lookups
        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
        )
        await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")

        if self.db_wrapper.allow_upgrades:
            # Legacy index on the v1 `spent` column, superseded by coin_spent_index.
            await self.coin_record_db.execute("DROP INDEX IF EXISTS coin_spent")

        await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
        await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)")
        await self.coin_record_db.commit()
        self.coin_record_cache = LRUCache(cache_size)
        return self

    def maybe_from_hex(self, field: Any) -> bytes:
        """Convert a DB field to bytes: v2 stores blobs, v1 stores hex text."""
        if self.db_wrapper.db_version == 2:
            return field
        else:
            return bytes.fromhex(field)

    def maybe_to_hex(self, field: bytes) -> Any:
        """Convert bytes to the DB's storage form: raw for v2, hex text for v1."""
        if self.db_wrapper.db_version == 2:
            return field
        else:
            return field.hex()

    async def new_block(
        self,
        height: uint32,
        timestamp: uint64,
        included_reward_coins: Set[Coin],
        tx_additions: List[Coin],
        tx_removals: List[bytes32],
    ) -> List[CoinRecord]:
        """
        Only called for blocks which are blocks (and thus have rewards and transactions)
        Returns a list of the CoinRecords that were added by this block
        """
        start = time()

        additions = []

        for coin in tx_additions:
            record: CoinRecord = CoinRecord(
                coin,
                height,
                uint32(0),
                False,
                timestamp,
            )
            additions.append(record)

        # Genesis has no reward coins; every other block has at least the
        # farmer and pool rewards.
        if height == 0:
            assert len(included_reward_coins) == 0
        else:
            assert len(included_reward_coins) >= 2

        for coin in included_reward_coins:
            reward_coin_r: CoinRecord = CoinRecord(
                coin,
                height,
                uint32(0),
                True,
                timestamp,
            )
            additions.append(reward_coin_r)

        await self._add_coin_records(additions)
        await self._set_spent(tx_removals, height)

        end = time()
        # Warn (rather than debug-log) when applying a block takes over 10s.
        log.log(
            logging.WARNING if end - start > 10 else logging.DEBUG,
            f"It took {end - start:0.2f}s to apply {len(tx_additions)} additions and "
            + f"{len(tx_removals)} removals to the coin store. Make sure "
            + "blockchain database is on a fast drive",
        )

        return additions

    # Checks DB and DiffStores for CoinRecord with coin_name and returns it
    async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
        """Return the record for coin_name (cache first, then DB), or None."""
        cached = self.coin_record_cache.get(coin_name)
        if cached is not None:
            return cached

        async with self.coin_record_db.execute(
            "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            "coin_parent, amount, timestamp FROM coin_record WHERE coin_name=?",
            (self.maybe_to_hex(coin_name),),
        ) as cursor:
            row = await cursor.fetchone()
            if row is not None:
                coin = self.row_to_coin(row)
                record = CoinRecord(coin, row[0], row[1], row[2], row[6])
                # Populate the cache on a DB hit.
                self.coin_record_cache.put(record.coin.name(), record)
                return record
        return None

    async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
        """Return all coin records confirmed exactly at `height`."""
        async with self.coin_record_db.execute(
            "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            "coin_parent, amount, timestamp FROM coin_record WHERE confirmed_index=?",
            (height,),
        ) as cursor:
            rows = await cursor.fetchall()
            coins = []
            for row in rows:
                coin = self.row_to_coin(row)
                coins.append(CoinRecord(coin, row[0], row[1], row[2], row[6]))
            return coins

    async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
        """Return all coin records spent exactly at `height`."""
        # Special case to avoid querying all unspent coins (spent_index=0)
        if height == 0:
            return []
        async with self.coin_record_db.execute(
            "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            "coin_parent, amount, timestamp FROM coin_record WHERE spent_index=?",
            (height,),
        ) as cursor:
            coins = []
            for row in await cursor.fetchall():
                # spent_index != 0 means the coin has actually been spent.
                if row[1] != 0:
                    coin = self.row_to_coin(row)
                    coin_record = CoinRecord(coin, row[0], row[1], row[2], row[6])
                    coins.append(coin_record)
            return coins

    # Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
    async def get_coin_records_by_puzzle_hash(
        self,
        include_spent_coins: bool,
        puzzle_hash: bytes32,
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinRecord]:
        """Return records for one puzzle hash confirmed in [start_height, end_height)."""

        coins = set()

        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash WHERE puzzle_hash=? "
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            (self.maybe_to_hex(puzzle_hash), start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6]))
            return list(coins)

    async def get_coin_records_by_puzzle_hashes(
        self,
        include_spent_coins: bool,
        puzzle_hashes: List[bytes32],
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinRecord]:
        """Return records for several puzzle hashes confirmed in [start_height, end_height)."""
        if len(puzzle_hashes) == 0:
            return []

        coins = set()
        puzzle_hashes_db: Tuple[Any, ...]
        if self.db_wrapper.db_version == 2:
            puzzle_hashes_db = tuple(puzzle_hashes)
        else:
            puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash "
            f'WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            puzzle_hashes_db + (start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6]))
            return list(coins)

    async def get_coin_records_by_names(
        self,
        include_spent_coins: bool,
        names: List[bytes32],
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinRecord]:
        """Return records for the given coin names confirmed in [start_height, end_height)."""
        if len(names) == 0:
            return []

        coins = set()
        names_db: Tuple[Any, ...]
        if self.db_wrapper.db_version == 2:
            names_db = tuple(names)
        else:
            names_db = tuple([name.hex() for name in names])
        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f'coin_parent, amount, timestamp FROM coin_record WHERE coin_name in ({"?," * (len(names) - 1)}?) '
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            names_db + (start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6]))

        return list(coins)

    def row_to_coin(self, row) -> Coin:
        """Build a Coin from a SELECTed row (parent, puzzle_hash, amount columns)."""
        return Coin(
            bytes32(self.maybe_from_hex(row[4])), bytes32(self.maybe_from_hex(row[3])), uint64.from_bytes(row[5])
        )

    def row_to_coin_state(self, row) -> CoinState:
        """Build a wallet-protocol CoinState from a SELECTed row."""
        coin = self.row_to_coin(row)
        spent_h = None
        # spent_index of 0 encodes "not spent"; report None in that case.
        if row[1] != 0:
            spent_h = row[1]
        return CoinState(coin, spent_h, row[0])

    async def get_coin_states_by_puzzle_hashes(
        self,
        include_spent_coins: bool,
        puzzle_hashes: List[bytes32],
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinState]:
        """Return CoinStates for the given puzzle hashes in [start_height, end_height)."""
        if len(puzzle_hashes) == 0:
            return []

        coins = set()
        puzzle_hashes_db: Tuple[Any, ...]
        if self.db_wrapper.db_version == 2:
            puzzle_hashes_db = tuple(puzzle_hashes)
        else:
            puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f"coin_parent, amount, timestamp FROM coin_record INDEXED BY coin_puzzle_hash "
            f'WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            puzzle_hashes_db + (start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coins.add(self.row_to_coin_state(row))

            return list(coins)

    async def get_coin_records_by_parent_ids(
        self,
        include_spent_coins: bool,
        parent_ids: List[bytes32],
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinRecord]:
        """Return records whose parent coin id is in parent_ids, within the height window."""
        if len(parent_ids) == 0:
            return []

        coins = set()
        parent_ids_db: Tuple[Any, ...]
        if self.db_wrapper.db_version == 2:
            parent_ids_db = tuple(parent_ids)
        else:
            parent_ids_db = tuple([pid.hex() for pid in parent_ids])
        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f'coin_parent, amount, timestamp FROM coin_record WHERE coin_parent in ({"?," * (len(parent_ids) - 1)}?) '
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            parent_ids_db + (start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                coins.add(CoinRecord(coin, row[0], row[1], row[2], row[6]))

        return list(coins)

    async def get_coin_state_by_ids(
        self,
        include_spent_coins: bool,
        coin_ids: List[bytes32],
        start_height: uint32 = uint32(0),
        end_height: uint32 = uint32((2 ** 32) - 1),
    ) -> List[CoinState]:
        """Return CoinStates for the given coin ids, within the height window."""
        if len(coin_ids) == 0:
            return []

        coins = set()
        coin_ids_db: Tuple[Any, ...]
        if self.db_wrapper.db_version == 2:
            coin_ids_db = tuple(coin_ids)
        else:
            coin_ids_db = tuple([pid.hex() for pid in coin_ids])
        async with self.coin_record_db.execute(
            f"SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            f'coin_parent, amount, timestamp FROM coin_record WHERE coin_name in ({"?," * (len(coin_ids) - 1)}?) '
            f"AND confirmed_index>=? AND confirmed_index<? "
            f"{'' if include_spent_coins else 'AND spent_index=0'}",
            coin_ids_db + (start_height, end_height),
        ) as cursor:

            for row in await cursor.fetchall():
                coins.add(self.row_to_coin_state(row))
        return list(coins)

    async def rollback_to_block(self, block_index: int) -> List[CoinRecord]:
        """
        Note that block_index can be negative, in which case everything is rolled back
        Returns the list of coin records that have been modified
        """
        # Update memory cache
        delete_queue: List[bytes32] = []
        for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
            if int(coin_record.spent_block_index) > block_index:
                # Spent above the rollback point: keep the coin, un-spend it.
                new_record = CoinRecord(
                    coin_record.coin,
                    coin_record.confirmed_block_index,
                    uint32(0),
                    coin_record.coinbase,
                    coin_record.timestamp,
                )
                self.coin_record_cache.put(coin_record.coin.name(), new_record)
            if int(coin_record.confirmed_block_index) > block_index:
                # Confirmed above the rollback point: the coin never existed.
                delete_queue.append(coin_name)
        for coin_name in delete_queue:
            self.coin_record_cache.remove(coin_name)

        # Records confirmed above the rollback point are reported with zeroed
        # confirmation data before being deleted from the DB.
        coin_changes: Dict[bytes32, CoinRecord] = {}
        async with self.coin_record_db.execute(
            "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            "coin_parent, amount, timestamp FROM coin_record WHERE confirmed_index>?",
            (block_index,),
        ) as cursor:
            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                record = CoinRecord(coin, uint32(0), row[1], row[2], uint64(0))
                coin_changes[record.name] = record

        # Delete from storage
        await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))

        # Records spent above the rollback point survive but become unspent.
        async with self.coin_record_db.execute(
            "SELECT confirmed_index, spent_index, coinbase, puzzle_hash, "
            "coin_parent, amount, timestamp FROM coin_record WHERE spent_index>?",
            (block_index,),
        ) as cursor:
            for row in await cursor.fetchall():
                coin = self.row_to_coin(row)
                record = CoinRecord(coin, row[0], uint32(0), row[2], row[6])
                # Deleted records take precedence over un-spent ones.
                if record.name not in coin_changes:
                    coin_changes[record.name] = record

        if self.db_wrapper.db_version == 2:
            await self.coin_record_db.execute(
                "UPDATE coin_record SET spent_index=0 WHERE spent_index>?", (block_index,)
            )
        else:
            # v1 keeps a redundant `spent` flag that must be cleared too.
            await self.coin_record_db.execute(
                "UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?", (block_index,)
            )
        return list(coin_changes.values())

    # Store CoinRecord in DB and ram cache
    async def _add_coin_records(self, records: List[CoinRecord]) -> None:
        """Insert records into the DB (schema depends on db_version) and cache them."""
        if self.db_wrapper.db_version == 2:
            values2 = []
            for record in records:
                self.coin_record_cache.put(record.coin.name(), record)
                values2.append(
                    (
                        record.coin.name(),
                        record.confirmed_block_index,
                        record.spent_block_index,
                        int(record.coinbase),
                        record.coin.puzzle_hash,
                        record.coin.parent_coin_info,
                        bytes(record.coin.amount),
                        record.timestamp,
                    )
                )
            await self.coin_record_db.executemany(
                "INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
                values2,
            )
        else:
            values = []
            for record in records:
                self.coin_record_cache.put(record.coin.name(), record)
                values.append(
                    (
                        record.coin.name().hex(),
                        record.confirmed_block_index,
                        record.spent_block_index,
                        int(record.spent),
                        int(record.coinbase),
                        record.coin.puzzle_hash.hex(),
                        record.coin.parent_coin_info.hex(),
                        bytes(record.coin.amount),
                        record.timestamp,
                    )
                )
            await self.coin_record_db.executemany(
                "INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
                values,
            )

    # Update coin_record to be spent in DB
    async def _set_spent(self, coin_names: List[bytes32], index: uint32):
        """Mark the given coins as spent at block `index` in the DB and the cache."""
        # Spends can only happen at a positive block height.
        assert len(coin_names) == 0 or index > 0

        # if this coin is in the cache, mark it as spent in there
        updates = []
        for coin_name in coin_names:
            r = self.coin_record_cache.get(coin_name)
            if r is not None:
                self.coin_record_cache.put(
                    r.name, CoinRecord(r.coin, r.confirmed_block_index, index, r.coinbase, r.timestamp)
                )
            updates.append((index, self.maybe_to_hex(coin_name)))

        if self.db_wrapper.db_version == 2:
            # OR FAIL aborts if a coin name does not exist in the table.
            await self.coin_record_db.executemany(
                "UPDATE OR FAIL coin_record SET spent_index=? WHERE coin_name=?", updates
            )
        else:
            await self.coin_record_db.executemany(
                "UPDATE OR FAIL coin_record SET spent=1,spent_index=? WHERE coin_name=?", updates
            )
| 39.068359 | 118 | 0.572414 |
faa941dbccf86aa7e031dbc940e6a43f87f482d0 | 412 | py | Python | src/api/apps.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | src/api/apps.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | src/api/apps.py | mp5maker/djangoblog | bc23ea0bc6975673d0fc374f10bfd61cae7255fb | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class ApiConfig(AppConfig):
    """Django app config for the ``api`` app; wires up post_save signal handlers."""
    name = 'api'
    def ready(self):
        """Connect signal handlers once the app registry is fully loaded."""
        # Importing the signals module is all the decorator-based handlers need:
        # their registration happens as an import side effect.
        from .signals import print_post, print_post_alternative
        # Non-decorator alternative: connect the handler explicitly.
        from django.db.models.signals import post_save
        from .models import Post
        post_save.connect(print_post, sender=Post)
| 29.428571 | 63 | 0.691748 |
65a70302b1d609f5738eea3c34aec7402425bd50 | 4,677 | py | Python | src/MediaPlayer/Torrents/DHT/Socket.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | 2 | 2018-02-26T15:57:04.000Z | 2019-03-11T15:21:38.000Z | src/MediaPlayer/Torrents/DHT/Socket.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | 1 | 2018-07-25T16:36:11.000Z | 2018-07-25T16:36:11.000Z | src/MediaPlayer/Torrents/DHT/Socket.py | JKorf/MediaPi | 4b21bb9cfa692534d0098ad947dd99beb7b0c1ed | [
"MIT"
] | null | null | null | import time
from socket import socket, AF_INET, SOCK_DGRAM
from MediaPlayer.Torrents.DHT.Messages import NodeMessage, BaseDHTMessage, QueryMessage, ErrorDHTMessage, QueryDHTMessage, ResponseDHTMessage
from Shared.Logger import Logger, LogVerbosity
from Shared.Threading import CustomThread
from Shared.Util import current_time
class Socket:
    """UDP transport for DHT messages.

    A single background thread loops over receive/send/check: incoming
    datagrams are parsed into DHT messages, queued outgoing messages are
    flushed, responses are matched to pending queries by transaction id,
    and queries left unanswered for 10 seconds are timed out.
    """
    def __init__(self, port, on_node_seen, on_node_timeout, on_query):
        self.port = port
        self.socket = socket(AF_INET, SOCK_DGRAM)
        self.socket.settimeout(0.1)  # keep recvfrom from blocking the loop for long
        self.message_thread = CustomThread(self.message_thread_action, "DHT message", [])
        self.running = False
        self.node_seen_handler = on_node_seen  # called (ip, port, node_id) for every parsed packet
        self.node_timeout_handler = on_node_timeout  # called (ip, port) when a query gets no answer
        self.query_handler = on_query  # called (ip, port, message) for incoming queries
        self.last_send = 0
        self.received_messages = []  # parsed inbound messages awaiting dispatch in check()
        self.to_send_messages = []  # outbound messages queued by send_response()/send_query()
        self.awaiting_messages = []  # sent queries still waiting for their response
    def start(self):
        """Bind the UDP socket and start the background message thread."""
        self.socket.bind(('0.0.0.0', self.port))
        self.running = True
        self.message_thread.start()
    def send_response(self, msg, ip, port):
        """Queue a response for delivery to ip:port (no reply is expected)."""
        self.to_send_messages.append(NodeMessage(ip, port, msg))
    def send_query(self, msg, ip, port, on_response, on_timeout):
        """Queue a query for ip:port; on_response/on_timeout fire later from the message thread."""
        self.to_send_messages.append(QueryMessage(NodeMessage(ip, port, msg), 0, on_response, on_timeout))
    def message_thread_action(self):
        """Main loop of the background thread: receive, send, check, short sleep."""
        Logger().write(LogVerbosity.Debug, "Starting DHT socket")
        while self.running:
            self.receive()
            self.send()
            self.check()
            time.sleep(0.005)
    def receive(self):
        """Drain the socket, parse datagrams and queue valid messages for check()."""
        try:
            while True:
                data, sender = self.socket.recvfrom(2048)
                msg_object = BaseDHTMessage.from_bytes(data)
                if msg_object is None:
                    # Unparseable datagram: stop draining for this pass.
                    return
                if isinstance(msg_object, ErrorDHTMessage):
                    Logger().write(LogVerbosity.Debug, "DHT error message: " + str(msg_object.errorcode) + " " + str(msg_object.errormsg))
                    continue
                else:
                    self.node_seen_handler(sender[0], sender[1], msg_object.id)
                msg = NodeMessage(sender[0], sender[1], msg_object)
                self.received_messages.append(msg)
                Logger().write(LogVerbosity.All, "Received DHT message")
        except OSError as e:
            # Includes socket.timeout (0.1s): nothing more to read right now.
            return
    def send(self):
        """Flush queued outgoing messages; queries move to the awaiting list."""
        for pending in list(self.to_send_messages):
            try:
                if not isinstance(pending, QueryMessage):
                    # Plain response: fire and forget.
                    data = pending.message.to_bytes()
                    self.socket.sendto(data, (pending.ip, pending.port))
                    self.to_send_messages.remove(pending)
                    Logger().write(LogVerbosity.All, "Sent DHT response")
                else:
                    # Query: record the send time so check() can match or expire it.
                    data = pending.message.message.to_bytes()
                    self.socket.sendto(data, (pending.message.ip, pending.message.port))
                    pending.send_at = current_time()
                    self.awaiting_messages.append(pending)
                    self.to_send_messages.remove(pending)
                    Logger().write(LogVerbosity.All, "Sent DHT query")
            except OSError as e:
                Logger().write(LogVerbosity.All, "Failed to send: " + str(e))
    def check(self):
        """Time out stale queries and dispatch received queries/responses."""
        for pending in list(self.awaiting_messages):
            if current_time() - pending.send_at > 10000:  # 10 second query timeout (ms)
                Logger().write(LogVerbosity.All, "DHT message timeout")
                self.node_timeout_handler(pending.message.ip, pending.message.port)
                pending.on_timeout()
                self.awaiting_messages.remove(pending)
        for received in list(self.received_messages):
            if isinstance(received.message, QueryDHTMessage):
                # Incoming query from another node: hand it to the DHT layer.
                self.query_handler(received.ip, received.port, received.message)
                self.received_messages.remove(received)
                continue
            elif isinstance(received.message, ResponseDHTMessage):
                # Match the response to a pending query by transaction id.
                pending = [x for x in self.awaiting_messages if x.message.message.transaction_id == received.message.transaction_id]
                if len(pending) == 0:
                    Logger().write(LogVerbosity.All, "DHT response for no request (timed out?)")
                    self.received_messages.remove(received)
                    continue
                Logger().write(LogVerbosity.All, "DHT message response")
                pending[0].on_response(received.message) # answer to request
                self.received_messages.remove(received)
                self.awaiting_messages.remove(pending[0])
e6a2d030f735e80e478dc3b953769f38ead84814 | 1,105 | py | Python | social/app/unit_tests/common_tests.py | milesbelli/social-event-store | 5272b81dbb7cc926cee6732c90025fe2758d0e32 | [
"MIT"
] | null | null | null | social/app/unit_tests/common_tests.py | milesbelli/social-event-store | 5272b81dbb7cc926cee6732c90025fe2758d0e32 | [
"MIT"
] | 32 | 2020-08-01T03:30:17.000Z | 2022-02-26T19:38:54.000Z | social/app/unit_tests/common_tests.py | milesbelli/social-event-store | 5272b81dbb7cc926cee6732c90025fe2758d0e32 | [
"MIT"
] | null | null | null | import unittest
import common as c
import datetime as dt
class MyTestCase(unittest.TestCase):
    """Unit tests for the common.eventObject constructor."""

    def test_fitbit_eventobj_init(self):
        """A valid fitbit-sleep event keeps its date, time and body fields."""
        tstmp = dt.datetime.now()
        strt = dt.datetime(2012, 1, 1, 18, 30, 49)
        end = dt.datetime(2012, 1, 2, 6, 50, 49)
        myobj = c.eventObject(tstmp.date(), dt.timedelta(0, 12345), 'fitbit-sleep', 123456789, sleep_time='285000', rest_mins=50,
                              start_time=strt, end_time=end)
        self.assertIsInstance(myobj, c.eventObject)
        self.assertEqual(myobj.type, "fitbit-sleep")
        self.assertEqual(myobj.date, tstmp.date())
        self.assertEqual(myobj.time, dt.timedelta(0, 12345))
        # type() equality is intentionally strict here: datetime is a subclass
        # of date and would pass an assertIsInstance check.
        self.assertEqual(type(myobj.date), dt.date)
        self.assertEqual(type(myobj.time), dt.timedelta)
        self.assertGreater(len(myobj.body), 1)

    def test_bad_eventjob_init(self):
        """An unknown event type must raise ValueError."""
        def make_bad_event():
            # Return value deliberately discarded; only the raise matters.
            c.eventObject(dt.date(2001, 1, 1), dt.timedelta(0, 2000), "bad-type", 1001)
        self.assertRaises(ValueError, make_bad_event)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 36.833333 | 129 | 0.645249 |
bd36ec25a1dde7c85d77946040f56ebf3104d7f6 | 2,050 | py | Python | bfs/surround-regions.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | bfs/surround-regions.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | bfs/surround-regions.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | import queue
class Solution(object):
    def solve(self, board):
        """
        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.

        Capture every region of 'O' cells fully surrounded by 'X': any 'O'
        4-connected to the border stays 'O'; every other 'O' becomes 'X'.
        BFS is seeded from the border cells.
        """
        # Local import: deque gives O(1) pops without queue.Queue's locking
        # overhead (this code is single-threaded).
        from collections import deque

        # Guard both an empty board and a board of empty rows ([[]] used to
        # raise IndexError in the border-marking loop).
        if not board or not board[0]:
            return
        h = len(board)
        w = len(board[0])
        # visited[x][y] is True once a cell is known to be border-connected
        # (all border cells are marked up front so they are never flipped).
        visited = [[False] * w for _ in range(h)]

        bfs = deque()
        for x in range(h):
            for y in (0, w - 1):
                visited[x][y] = True
                if board[x][y] == "O":
                    bfs.append((x, y))
        for y in range(w):
            for x in (0, h - 1):
                visited[x][y] = True
                if board[x][y] == "O":
                    bfs.append((x, y))

        # Flood-fill: every 'O' reachable from the border is safe.
        while bfs:
            x, y = bfs.popleft()
            for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
                if 0 <= nx < h and 0 <= ny < w and not visited[nx][ny] and board[nx][ny] == "O":
                    visited[nx][ny] = True
                    bfs.append((nx, ny))

        # Any 'O' never reached from the border is surrounded: capture it.
        for i in range(h):
            for j in range(w):
                if not visited[i][j] and board[i][j] == "O":
                    board[i][j] = "X"
        print(board)
s = Solution()
s.solve([["X","X","X","X"],["X","O","O","X"],["X","X","O","X"],["X","O","X","X"]]) | 31.538462 | 90 | 0.417073 |
287d4fa29028ef777540d8714d94446d6c0804ae | 3,154 | py | Python | tasking/task.py | jmskinner/pii_firewall | a9079bd431151c3228f1195993b44443b8ac2f02 | [
"MIT"
] | null | null | null | tasking/task.py | jmskinner/pii_firewall | a9079bd431151c3228f1195993b44443b8ac2f02 | [
"MIT"
] | null | null | null | tasking/task.py | jmskinner/pii_firewall | a9079bd431151c3228f1195993b44443b8ac2f02 | [
"MIT"
] | null | null | null | from datetime import datetime
class Task:
    """A unit of work flowing through the PII firewall.

    Records where the input comes from, where output and profiling results
    go, and carries the (initially empty) payload plus a profile dict that
    is seeded with the task's routing metadata.
    """

    def __init__(self, domain, task_type, in_endpoint, out_endpoint, profile_endpoint, task_config):
        self.task_type = task_type
        self.data = None  # payload is attached later by a fetch step
        self.config = task_config
        self.profile = {}
        self.in_endpoint = in_endpoint
        self.out_endpoint = out_endpoint
        self.profile_endpoint = profile_endpoint
        self.domain = domain
        # An endpoint is remote when it contains an http(s) URL anywhere in
        # the string; 'http' alone suffices since it is a prefix of 'https'
        # (the original checked both, redundantly).
        self.in_is_local = 'http' not in in_endpoint
        self.out_is_local = 'http' not in out_endpoint
        self.start_time = datetime.now()
        self._make_baseline_profile()

    def has_data(self):
        """Return True once a payload has been attached to this task."""
        return self.data is not None

    def _make_baseline_profile(self):
        """Seed the profile dict with the task's routing metadata."""
        self.profile['in_endpoint'] = self.in_endpoint
        self.profile['out_endpoint'] = self.out_endpoint
        self.profile['profile_endpoint'] = self.profile_endpoint
        self.profile['domain'] = self.domain
        self.profile['task_type'] = self.task_type
#
#
# class CSVTask(Task):
#
# def fetch(self):
# try:
# if "https://" in self.in_endpoint or "http://" in self.in_endpoint:
# self.is_local = False
# self.data = [pd.read_csv(self.in_endpoint, index_col=0)]
# except Exception:
# print(f'Error reading file from source: {self.in_endpoint}')
#
#
# class TextTask(Task):
#
# def fetch(self):
# text_lines = []
# try:
# if "https://" in self.in_endpoint or "http://" in self.in_endpoint:
# self.is_local = False
# for line in urlopen(self.in_endpoint):
# text_lines.append(line.decode('utf-8'))
# else:
# with open(self.in_endpoint) as local_file:
# for line in locals():
# text_lines.append(line)
# self.data = text_lines
# except Exception:
# print(f'Error reading file from source: {self.in_endpoint}')
#
#
# class PDFTask(Task):
# # must be local
# def fetch(self):
# try:
# images = convert_from_path(self.in_endpoint)
# except Exception:
# print(f'Error reading file from source: {self.in_endpoint}')
#
#
# class TextTask(Task):
#
# def read_in_chunks(file_object, chunk_size=1024):
# """Lazy function (generator) to read a file piece by piece.
# Default chunk size: 1k."""
# while True:
# data = file_object.read(chunk_size)
# if not data:
# break
# yield data
#
# def fetch(self):
# text_lines = []
# try:
# if "https://" in self.endpoint or "http://" in self.endpoint:
# self.is_local = False
# data = urlopen(self.endpoint)
# self.data =
# else:
# with open("log.txt") as infile:
# for line in infile:
#
# self.data = image[:, :, ::-1]
# except Exception:
# print(f'Error reading file from source: {self.endpoint}')
| 30.921569 | 94 | 0.557387 |
597612dd63dcca6e0722b2605c76c19186dd4f59 | 16,111 | py | Python | PYTHON/ReadNetCDF.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | null | null | null | PYTHON/ReadNetCDF.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | null | null | null | PYTHON/ReadNetCDF.py | Kate-Willett/Climate_Explorer | d49e65a6caa9beb1a859e16d3827022442bad324 | [
"CC0-1.0"
] | 1 | 2021-06-29T12:05:46.000Z | 2021-06-29T12:05:46.000Z | #!/usr/local/sci/bin/python
# PYTHON3
#
# Author: Kate Willett
# Created: 16 October 2015
# Last update: 10 April 2019
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/Climate_Explorer/tree/master/PYTHON/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This reads in a netCDF file and outputs numpy arrays or lists of numpy arrays:
# GetGrid: of a time,lat,lon gridded dataset
# GetGrid4: of a time,lat,lon gridded dataset using netCDF4
# GetGrid4Slice: of a time,lat,lon gridded dataset slice using netCDF4
# GetField: of a lat,lon gridded field of e.g trends
# GetTS: of a time, point/station dataset
#
# -----------------------
# LIST OF MODULES
# -----------------------
# Inbuilt:
# import numpy as np
# import scipy.stats
# import itertools
# from scipy.io import netcdf
## from netCDF4 import Dataset
# import netCDF4 as nc4
# import pdb # pdb.set_trace() or c
#
# Kate's:
#
# -----------------------
# DATA
# -----------------------
# The code requires a netCDF file
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# GetGrid:
# INPUTS:
# # A scalar string containing the filepath+filename
# FileName = '/MyDir/MyFile.nc'
# # A LIST of strings with the names of each variable to read in
# ReadInfo = ['t_anoms'[ # ['t_anoms','t_abs']
# OPTIONAL LatInfo and LonInfo lists to build LatList and LonList if read in isn't going to work
# Default reads in lat and lon variables assuming LatInfo=['latitude'],LonInfo=['longitude']
# Option 1: just provide the variable names to read in:
# LatInfo = ['latitude'] # variable name, number of latitudes, start latitude
# LonInfo = ['longitude'] # variable name, number of longitudes, start longitude
# Option 2: provide number of boxes and start lat/lon:
# LatInfo = [36,-87.5] # number of latitudes, start latitude
# LonInfo = [72,-177.5] # number of longitudes, start longitude
#
# module load scitools/default-current
# python
# from ReadNetCDF import GetGrid
#
# # Premaking of arrays not strictly necessary:
# TmpData = np.reshape(np.repeat(mdi,NMons*NLats*NLons),(NMons,NLats,NLons)) # entire field of data read in
# LatList = [] # list of latitude gridbox centres
# LonList = [] # list of longitude gridbox centres
# # Multiple grid read in case at little more complicated, and still not necessary but nice?:
# import itertools
# TmpData=list(itertools.repeat(np.reshape(np.repeat(mdi,NMons*NLats*NLons),(NMons,NLats,NLons)),len(ReadInfo)))
#
# TmpData,LatList,LonList = ReadNetCDF.GetGrid(FileName,ReadInfo,LatInfo,LonInfo)
# # Multiple grid read in case requires unpacking of the list of np.arrays
# TmpData1=TmpData[0] # etc.
#
# GetField:
#
# GetTS:
#
# -----------------------
# OUTPUT
# -----------------------
# GetGrid:
# TmpData: a numpy array of the 3D gridded field as time, lat, lon or a list of numpy arrays to be unpacked
# LatList: a numpy array of latitude gridbox centres
# LonList: a numpy array of longitude gridbox centre
#
# GetField:
#
# GetTS:
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 3 (10th April 2019)
# ---------
#
# Enhancements
# Now python 3
#
# Changes
# Now python 3
#
# Bug fixes
#
# Version 2 (28th February 2018)
# ---------
#
# Enhancements
# GetGrid4Slice can now pull out a slice (time, lat or lon)
#
# Changes
# I've added a new GetGrid4 which does the same as GetGrid but works using netCDF4.
# This 'should' then mean that it can work with netCDF4 format and also automatically scale/offset data on read in
# Later I may make it such that you can pull out the variable entirely rather than just the data array so that
# you can access all attributes
#
# Bug fixes
#
#
# Version 1 (16th October 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# Functions
#************************************************************************
# GETGRID
def GetGrid(FileName,
            ReadInfo,
            LatInfo = ['latitude'],
            LonInfo = ['longitude']):
    ''' Open a NetCDF file with scipy.io.netcdf and read one or more gridded
    (time, lat, lon) fields.
    Latitudes and longitudes are either read from file variables or derived
    from a gridbox count and a start value.
    INPUTS:
    FileName: string containing filepath/name
    ReadInfo: a LIST of string variable names (1+) for the grid to read in
    LatInfo: either a 1-element list with the latitude variable name, or a
             2-element list [n_lats, start_lat]:
        LatInfo=['latitude'] # DEFAULT
        LatInfo=['lat'] # alternative variable name
        LatInfo=[36,-87.5] # number of lats, start latitude
    LonInfo: either a 1-element list with the longitude variable name, or a
             2-element list [n_lons, start_lon]:
        LonInfo=['longitude'] # DEFAULT
        LonInfo=['lon'] # alternative variable name
        LonInfo=[72,-177.5] # number of lons, start longitude
    OUTPUTS:
    TheData:
        1 variable: a numpy array of time,lat,lon
        2+ variables: a list of numpy arrays of time,lat,lon to be unpacked
    TheLatList: a numpy array of latitude gridbox centres
    TheLonList: a numpy array of longitude gridbox centres '''

    # Set up python imports
    import numpy as np
    import scipy.stats
    from scipy.io import netcdf
    import pdb # pdb.set_trace() or c

#    print(FileName,ReadInfo,LatInfo,LonInfo)

    # Open the netCDF file
    ncf=netcdf.netcdf_file(FileName,'r')
    # ncf.variables this lists the variable names
    # If LatInfo is only 1 element long then read in the variable, else calculate using given nlats, start_lat
    if (len(LatInfo) == 1):
        var = ncf.variables[LatInfo[0]]
        TheLatList=np.array(np.copy(var.data))
    else:
        # Ascending grid for a southern (negative) start latitude, descending otherwise.
        if (LatInfo[1] < 0):
            TheLatList=np.arange(LatInfo[1], LatInfo[1]+180.,(180./LatInfo[0]))
        else:
            TheLatList=np.arange(LatInfo[1], LatInfo[1]-180.,-(180./LatInfo[0]))
    # If LonInfo is only 1 element long then read in the variable, else calculate using given nlons, start_lon
    if (len(LonInfo) == 1):
        var = ncf.variables[LonInfo[0]]
        TheLonList=np.array(np.copy(var.data))
    else:
        # NOTE(review): ascending case is taken for any start longitude < 10
        # (covers both -177.5 and 0. starts) - confirm this is the intent.
        if (LonInfo[1] < 10):
            TheLonList=np.arange(LonInfo[1], LonInfo[1]+360.,(360./LonInfo[0]))
        else:
            TheLonList=np.arange(LonInfo[1], LonInfo[1]-360.,-(360./LonInfo[0]))
    # If ReadInfo is only 1 element long then read into a numpy array, else make a list of arrays and then read in all
    if (len(ReadInfo) == 1):
        var = ncf.variables[ReadInfo[0]]
        TheData=np.array(np.copy(var.data)) # times, lats, lons
        #pdb.set_trace()
    else:
        # Initialise TheData as a list
        TheData=[]
        for loo in range(len(ReadInfo)):
            var = ncf.variables[ReadInfo[loo]]
            #pdb.set_trace()
            #TmpData = np.array(var.data) # times, lats, lons
            TheData.append(np.copy(np.array(var.data)))
#    # Maybe I've done something wrong but its reading it transposed
#    TheData=np.transpose(TheData)
    var = '' # data must be copied and then original object released before being able to close the netCDF file!!!
    ncf.close()
    return TheData,TheLatList,TheLonList # GetGrid
#************************************************************************
#************************************************************************
# GETGRID4
def GetGrid4(FileName,
             ReadInfo,
             LatInfo = ['latitude'],
             LonInfo = ['longitude']):
    ''' Open a NetCDF file with netCDF4 and read one or more gridded
    (time, lat, lon) fields. Unlike GetGrid, netCDF4 applies any
    scale_factor/add_offset automatically on read.
    Latitudes and longitudes are either read from file variables or derived
    from a gridbox count and a start value.
    INPUTS:
    FileName: string containing filepath/name
    ReadInfo: a LIST of string variable names (1+) for the grid to read in
    LatInfo: either a 1-element list with the latitude variable name, or a
             2-element list [n_lats, start_lat]:
        LatInfo=['latitude'] # DEFAULT
        LatInfo=['lat'] # alternative variable name
        LatInfo=[36,-87.5] # number of lats, start latitude
    LonInfo: either a 1-element list with the longitude variable name, or a
             2-element list [n_lons, start_lon]:
        LonInfo=['longitude'] # DEFAULT
        LonInfo=['lon'] # alternative variable name
        LonInfo=[72,-177.5] # number of lons, start longitude
    OUTPUTS:
    TheData:
        1 variable: a numpy array of time,lat,lon
        2+ variables: a list of numpy arrays of time,lat,lon to be unpacked
    TheLatList: a numpy array of latitude gridbox centres
    TheLonList: a numpy array of longitude gridbox centres '''

    # Set up python imports
    import numpy as np
    import scipy.stats
    from scipy.io import netcdf
    #from netCDF4 import Dataset
    import netCDF4 as nc4
    import pdb # pdb.set_trace() or c

#    print(FileName,ReadInfo,LatInfo,LonInfo)

    # Open the netCDF file
    ncf=nc4.Dataset(FileName,'r')
    # ncf.variables this lists the variable names
    # If LatInfo is only 1 element long then read in the variable, else calculate using given nlats, start_lat
    if (len(LatInfo) == 1):
        var = ncf.variables[LatInfo[0]] # loads the data and attributes
        TheLatList = np.copy(var[:]) # just pulls out the data as a numpy array
        #var.ncattrs() # prints the attributes
    else:
        if (LatInfo[1] < 0):
            TheLatList=np.arange(LatInfo[1], LatInfo[1]+180.,(180./LatInfo[0]))
        else:
            TheLatList=np.arange(LatInfo[1], LatInfo[1]-180.,-(180./LatInfo[0]))
    # If LonInfo is only 1 element long then read in the variable, else calculate using given nlons, start_lon
    if (len(LonInfo) == 1):
        var = ncf.variables[LonInfo[0]]
        TheLonList = np.copy(var[:])
    else:
        # NOTE(review): ascending case is taken for any start longitude < 10 - confirm intent.
        if (LonInfo[1] < 10):
            TheLonList=np.arange(LonInfo[1], LonInfo[1]+360.,(360./LonInfo[0]))
        else:
            TheLonList=np.arange(LonInfo[1], LonInfo[1]-360.,-(360./LonInfo[0]))
    # If ReadInfo is only 1 element long then read into a numpy array, else make a list of arrays and then read in all
    if (len(ReadInfo) == 1):
        var = ncf.variables[ReadInfo[0]]
        TheData = np.copy(var[:]) # times, lats, lons - THIS AUTOMATICALLY APPLIES SCALE AND OFFSET!!!
        #var.ncattrs() # prints the attributes
        #pdb.set_trace()
    else:
        # Initialise TheData as a list
        TheData = []
        for loo in range(len(ReadInfo)):
            var = ncf.variables[ReadInfo[loo]]
            #var.ncattrs() # prints the attributes
            #var.add_offset # prints the add_offset attribute
            #pdb.set_trace()
            TmpData = np.copy(var[:]) # times, lats, lons - THIS AUTOMATICALLY APPLIES SCALE AND OFFSET!!!
            TheData.append(TmpData)
#    # Maybe I've done something wrong but its reading it transposed
#    TheData=np.transpose(TheData)
    var = '' # pointer to netCDF data must be released to close file
    ncf.close()
    return TheData,TheLatList,TheLonList # GetGrid4
#*********************************************************************************
#************************************************************************
# GETGRID4SLICE
def GetGrid4Slice(FileName,
                  ReadInfo,
                  SliceInfo,
                  LatInfo = ['latitude'],
                  LonInfo = ['longitude']):
    ''' Open a NetCDF file with netCDF4 and read a (time, lat, lon) slice of
    one or more gridded fields. netCDF4 applies any scale_factor/add_offset
    automatically on read.
    INPUTS:
    FileName: string containing filepath/name
    ReadInfo: a LIST of string variable names (1+) for the grid to read in
    SliceInfo: a dictionary of TimeSlice, LatSlice and LonSlice;
        each element is a 2-element list of start and stop+1 of the slice:
        SliceInfo = dict([('TimeSlice',[0,12]), # e.g. 12 months of a year
                          ('LatSlice',[0,180]), # all lats at 1deg res
                          ('LonSlice',[0,360])]) # all lons at 1deg res
    LatInfo: either a 1-element list with the latitude variable name, or a
             2-element list [n_lats, start_lat]:
        LatInfo=['latitude'] # DEFAULT
        LatInfo=['lat'] # alternative variable name
        LatInfo=[36,-87.5] # number of lats, start latitude
        NOTE: THIS HAS TO BE ALL LATS EVEN IF YOU'RE PULLING OUT A SLICE;
        LATS WILL BE SUBSET TO THE SLICE
    LonInfo: either a 1-element list with the longitude variable name, or a
             2-element list [n_lons, start_lon]:
        LonInfo=['longitude'] # DEFAULT
        LonInfo=['lon'] # alternative variable name
        LonInfo=[72,-177.5] # number of lons, start longitude
        NOTE: THIS HAS TO BE ALL LONS EVEN IF YOU'RE PULLING OUT A SLICE;
        LONS WILL BE SUBSET TO THE SLICE
    OUTPUTS:
    TheData:
        1 variable: a numpy array of time,lat,lon
        2+ variables: a list of numpy arrays of time,lat,lon to be unpacked
        IF MORE THAN ONE VARIABLE, THE SLICES MUST BE THE SAME
    TheLatList: a numpy array of latitude gridbox centres (subset to the slice)
    TheLonList: a numpy array of longitude gridbox centres (subset to the slice) '''

    # Set up python imports
    import numpy as np
    import scipy.stats
    from scipy.io import netcdf
    #from netCDF4 import Dataset
    import netCDF4 as nc4
    import pdb # pdb.set_trace() or c

#    print(FileName,ReadInfo,LatInfo,LonInfo)

    # Open the netCDF file
    ncf=nc4.Dataset(FileName,'r')
    # ncf.variables this lists the variable names
    # If LatInfo is only 1 element long then read in the variable, else calculate using given nlats, start_lat
    if (len(LatInfo) == 1):
        var = ncf.variables[LatInfo[0]] # loads the data and attributes
        TheLatList = np.copy(var[:]) # just pulls out the data as a numpy array
        #var.ncattrs() # prints the attributes
    else:
        if (LatInfo[1] < 0):
            TheLatList=np.arange(LatInfo[1], LatInfo[1]+180.,(180./LatInfo[0]))
        else:
            TheLatList=np.arange(LatInfo[1], LatInfo[1]-180.,-(180./LatInfo[0]))
    # If LonInfo is only 1 element long then read in the variable, else calculate using given nlons, start_lon
    if (len(LonInfo) == 1):
        var = ncf.variables[LonInfo[0]]
        TheLonList = np.copy(var[:])
    else:
        # NOTE(review): ascending case is taken for any start longitude < 10 - confirm intent.
        if (LonInfo[1] < 10):
            TheLonList=np.arange(LonInfo[1], LonInfo[1]+360.,(360./LonInfo[0]))
        else:
            TheLonList=np.arange(LonInfo[1], LonInfo[1]-360.,-(360./LonInfo[0]))
    # If ReadInfo is only 1 element long then read into a numpy array, else make a list of arrays and then read in all
    if (len(ReadInfo) == 1):
        var = ncf.variables[ReadInfo[0]]
        TheData = np.copy(var[SliceInfo['TimeSlice'][0]:SliceInfo['TimeSlice'][1],
                              SliceInfo['LatSlice'][0]:SliceInfo['LatSlice'][1],
                              SliceInfo['LonSlice'][0]:SliceInfo['LonSlice'][1]]) # times, lats, lons - THIS AUTOMATICALLY APPLIES SCALE AND OFFSET!!!
        #var.ncattrs() # prints the attributes
        #pdb.set_trace()
    else:
        # Initialise TheData as a list
        TheData = []
        for loo in range(len(ReadInfo)):
            var = ncf.variables[ReadInfo[loo]]
            #var.ncattrs() # prints the attributes
            #var.add_offset # prints the add_offset attribute
            #pdb.set_trace()
            TmpData = np.copy(var[SliceInfo['TimeSlice'][0]:SliceInfo['TimeSlice'][1],
                              SliceInfo['LatSlice'][0]:SliceInfo['LatSlice'][1],
                              SliceInfo['LonSlice'][0]:SliceInfo['LonSlice'][1]]) # times, lats, lons - THIS AUTOMATICALLY APPLIES SCALE AND OFFSET!!!
            TheData.append(TmpData)
#    # Maybe I've done something wrong but its reading it transposed
#    TheData=np.transpose(TheData)
    var = ''
    ncf.close()
    # Sort out the lons and lats to slice if necessary
    if ((SliceInfo['LatSlice'][1] - SliceInfo['LatSlice'][0]) != len(TheLatList)):
        TheLatList = TheLatList[SliceInfo['LatSlice'][0]:SliceInfo['LatSlice'][1]]
    if ((SliceInfo['LonSlice'][1] - SliceInfo['LonSlice'][0]) != len(TheLonList)):
        TheLonList = TheLonList[SliceInfo['LonSlice'][0]:SliceInfo['LonSlice'][1]]
    return TheData,TheLatList,TheLonList # GetGrid4
#*********************************************************************************
| 38.635492 | 135 | 0.618522 |
f33b3f65b714ed8c00ab4f51c4a14e5627b79afd | 59,631 | py | Python | sklearn/cluster/k_means_.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | 1 | 2021-11-26T12:22:13.000Z | 2021-11-26T12:22:13.000Z | sklearn/cluster/k_means_.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | null | null | null | sklearn/cluster/k_means_.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | null | null | null | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++

    Parameters
    -----------
    X : array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters : integer
        The number of seeds to choose

    x_squared_norms : array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state : numpy.RandomState
        The generator used to initialize the centers.

    n_local_trials : integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Returns
    -------
    centers : array, shape (n_clusters, n_features)
        The initial cluster centers (rows copied out of X).

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape

    centers = np.empty((n_clusters, n_features), dtype=X.dtype)

    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        # stable_cumsum gives a numerically stable CDF of the distances to
        # invert via searchsorted.
        candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
                                        rand_vals)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()

            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            algorithm="auto", return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter : int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    # Validate hyper-parameters up front, before any expensive work.
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    # Copy only when the caller asked us to preserve X (copy_x=True);
    # otherwise X is centered in place below and restored on exit.
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # Validate init array
    if hasattr(init, '__array__'):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            # With a fixed explicit init, every restart would converge to
            # the same solution, so running more than one init is wasted work.
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X):
        X_mean = X.mean(axis=0)
        # The copy was already done above
        X -= X_mean
        if hasattr(init, '__array__'):
            init -= X_mean
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_clusters == 1:
        # elkan doesn't make sense for a single cluster, full will produce
        # the right result.
        algorithm = "full"
    if algorithm == "auto":
        algorithm = "full" if sp.issparse(X) else 'elkan'
    if algorithm == "full":
        kmeans_single = _kmeans_single_lloyd
    elif algorithm == "elkan":
        kmeans_single = _kmeans_single_elkan
    else:
        raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
                         " %s" % str(algorithm))
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
                                   verbose=verbose, tol=tol,
                                   precompute_distances=precompute_distances,
                                   x_squared_norms=x_squared_norms,
                                   # Change seed to ensure variety
                                   random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    if not sp.issparse(X):
        if not copy_x:
            # X was centered in place above; restore the caller's data (may
            # introduce tiny floating-point differences).
            X += X_mean
        # Shift centers back into the original (uncentered) coordinates.
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means using Elkan's triangle-inequality algorithm.

    Dense input only; returns ``(labels, inertia, centers, n_iter)`` like
    ``_kmeans_single_lloyd``. ``precompute_distances`` is accepted for
    interface compatibility but unused by this variant.
    """
    if sp.issparse(X):
        raise ValueError("algorithm='elkan' not supported for sparse input X")
    # The cython implementation requires C-contiguous float data.
    X = check_array(X, order="C")
    random_state = check_random_state(random_state)
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    # Seed the centers, then hand off the full iteration to the cython core.
    initial = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    initial = np.ascontiguousarray(initial)
    if verbose:
        print('Initialization complete')
    centers, labels, n_iter = k_means_elkan(X, n_clusters, initial, tol=tol,
                                            max_iter=max_iter,
                                            verbose=verbose)
    # Elkan's core does not report inertia, so compute it from the final fit.
    inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
    return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X : array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode
    x_squared_norms : array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        # Keep the previous centers to measure how far they moved.
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best (lowest-inertia) state seen across iterations.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Convergence test: total squared movement of centers vs tolerance.
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign labels and compute inertia via a full distance computation.

    This will overwrite the 'distances' array in-place.

    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X (unused here; kept for interface
        compatibility with the non-precomputed code path).
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    # pairwise_distances_argmin_min internally batches the nearest-neighbor
    # search to avoid a memory blowup for many samples/clusters.
    # TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
    labels, mindist = pairwise_distances_argmin_min(
        X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
    # The cython k-means helpers expect int32 label arrays.
    labels = labels.astype(np.int32)
    if distances.shape[0] == X.shape[0]:
        # Fill the caller-supplied buffer in place.
        distances[:] = mindist
    return labels, mindist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Compute the labels and the inertia of the given samples and centers.
    This will compute the distances in-place.

    Parameters
    ----------
    X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms : array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers : float array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances : float array, shape (n_samples,), optional
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels : int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # Pre-fill labels with -1 so an unassigned sample is easy to detect.
    labels = -np.ones(n_samples, np.int32)
    if distances is None:
        # Zero-length buffer tells the cython helpers to skip storing them.
        distances = np.zeros(shape=(0,), dtype=X.dtype)
    # distances will be changed in-place
    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
        return labels, inertia
    if precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    inertia = _k_means._assign_labels_array(
        X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids for k-means.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        Input data.
    k : int
        Number of centroids.
    init : {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms : array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it
        at hand already to avoid it being recomputed here. Default: None.
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy). Must be
        larger than k.

    Returns
    -------
    centers : array, shape (k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]

    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)

    if init_size is not None and init_size < n_samples:
        # Initialize from a random subsample for speed.
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        subset = random_state.randint(0, n_samples, init_size)
        X = X[subset]
        x_squared_norms = x_squared_norms[subset]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))

    if isinstance(init, string_types) and init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif isinstance(init, string_types) and init == 'random':
        # Draw k distinct observations uniformly at random.
        chosen = random_state.permutation(n_samples)[:k]
        centers = X[chosen]
    elif hasattr(init, '__array__'):
        # Ensure the centers have the same dtype as X; this is a
        # requirement of the fused types used in the cython helpers.
        centers = np.array(init, dtype=X.dtype)
    elif callable(init):
        centers = init(X, k, random_state=random_state)
        centers = np.asarray(centers, dtype=X.dtype)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))

    if sp.issparse(centers):
        centers = centers.toarray()

    _validate_center_shape(X, k, centers)
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering

    Read more in the :ref:`User Guide <k_means>`.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.

    Examples
    --------
    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
    >>> kmeans.labels_
    array([0, 0, 0, 1, 1, 1], dtype=int32)
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([0, 1], dtype=int32)
    >>> kmeans.cluster_centers_
    array([[ 1.,  2.],
           [ 4.,  2.]])

    See also
    --------
    MiniBatchKMeans
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.

    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=1, algorithm='auto'):
        # Per scikit-learn convention, parameters are stored unmodified and
        # unvalidated here; all validation happens in fit (via k_means).
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.algorithm = algorithm
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        # Validate that prediction/transform input matches the feature count
        # seen at fit time.
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # All the real work is delegated to the module-level k_means function.
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs, algorithm=self.algorithm,
                return_n_iter=True)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.

        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # [0] selects the labels from the (labels, inertia) pair.
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.

        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negated so that "greater is better", per the estimator score API.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            # keep the highest-count centers among those flagged
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                # copy the selected sparse rows into the dense centers array
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            # Note: numpy >= 1.10 does not support '/=' for the following
            # expression for a mixture of int and float (see numpy issue #6464)
            centers[center_idx] = centers[center_idx] / counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering
    Read more in the :ref:`User Guide <mini_batch_kmeans>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.
    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.
    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).
    batch_size : int, optional, default: 100
        Size of the mini batches.
    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.
    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.
    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : boolean, optional
        Verbosity mode.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point (if compute_labels is set to True).
    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.
    See also
    --------
    KMeans
        The classic implementation of the clustering method based on the
        Lloyd's algorithm. It consumes the whole set of input data at each
        iteration.
    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """
    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Common parameters are handled by the KMeans base class; only the
        # minibatch-specific ones are stored here.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio
    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Training instances to cluster.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C',
                        dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")
        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            # Explicit centers were passed: only a single init makes sense.
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
        x_squared_norms = row_norms(X, squared=True)
        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated before which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, dtype=X.dtype)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, dtype=X.dtype)
        distances = np.zeros(self.batch_size, dtype=X.dtype)
        # One pass over the full dataset corresponds to n_batches minibatch
        # updates; max_iter is expressed in full passes.
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)
        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size
        # Fixed random validation subset shared by every init so that their
        # inertias are comparable.
        validation_indices = random_state.randint(0, n_samples, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]
        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)
            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.
            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)
            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)
            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia
        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}
        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.randint(
                0, n_samples, self.batch_size)
            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)
            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break
        self.n_iter_ = iteration_idx + 1
        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
        return self
    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but preventes
        memory errors / segfaults.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels : array, shap (n_samples,)
            Cluster labels for each point.
        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        # Per-batch labels are concatenated; per-batch inertias are additive.
        return np.hstack(labels), np.sum(inertia)
    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        # NOTE(review): unlike fit(), dtype/order are not enforced here —
        # confirm callers always pass float data.
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
        if n_samples == 0:
            return self
        x_squared_norms = row_norms(X, squared=True)
        # Persist the RNG across successive partial_fit calls.
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)
            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=X.dtype)
        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, dtype=X.dtype), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)
        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        # Reuse the batched helper; discard the inertia it also returns.
        return self._labels_inertia_minibatch(X)[0]
| 38.596117 | 81 | 0.631953 |
140265869850aab6d5780ac4bb428d882b18fdf1 | 1,773 | py | Python | resources/library/pycontrol/src/soccer_pycontrol/plotter.py | utra-robosoccer/Bez_IsaacGym | 10d6fae08949d9c131fa1d3c08d6fd257ec22869 | [
"BSD-3-Clause"
] | 56 | 2016-12-25T22:29:00.000Z | 2022-01-06T04:42:00.000Z | soccer_pycontrol/src/soccer_pycontrol/plotter.py | utra-robosoccer/soccerbot | f5e95b00356e42cdd143ab26f67f23c9cd8afd5a | [
"BSD-3-Clause"
] | 244 | 2021-04-05T03:22:25.000Z | 2022-03-31T16:47:36.000Z | soccer_pycontrol/src/soccer_pycontrol/plotter.py | sadmanca/soccerbot | 5e60eacb51ff1b063ae8c1caf7eb01053add43eb | [
"BSD-3-Clause"
] | 7 | 2017-01-24T23:38:07.000Z | 2022-01-19T16:58:08.000Z | import multiprocessing as mp
import time
import matplotlib.pyplot as plt
import numpy as np
# Sample code from:
# https://matplotlib.org/3.3.3/gallery/misc/multiprocess_sgskip.html#sphx-glr-gallery-misc-multiprocess-sgskip-py
class ProcessPlotter:
    """Plotting callback meant to run inside a dedicated process.

    The parent process sends (x, y) pairs through a pipe; a GUI timer in
    this process drains the pipe and updates the figure.
    """

    def __init__(self):
        # Accumulated coordinates of every point received so far.
        self.x = []
        self.y = []

    def terminate(self):
        """Close all figures, which ends the blocking plt.show() loop."""
        plt.close('all')

    def call_back(self):
        """Timer callback: drain the pipe, plot points, redraw.

        Returns False (which stops the timer) once the None sentinel
        is received from the parent process.
        """
        while self.pipe.poll():
            command = self.pipe.recv()
            if command is None:
                # Sentinel from the parent: shut the plot down.
                self.terminate()
                return False
            self.x.append(command[0])
            self.y.append(command[1])
            self.ax.plot(self.x, self.y, 'ro')
        self.fig.canvas.draw()
        return True

    def __call__(self, pipe):
        """Entry point executed in the child process."""
        print('starting plotter...')
        self.pipe = pipe
        self.fig, self.ax = plt.subplots()
        # Poll the pipe once per second from the GUI event loop.
        timer = self.fig.canvas.new_timer(interval=1000)
        timer.add_callback(self.call_back)
        timer.start()
        print('...done')
        plt.show()
class NBPlot:
    """Non-blocking plot: drives a ProcessPlotter in a daemon subprocess."""

    def __init__(self):
        # One end of the pipe stays here; the other goes to the child.
        self.plot_pipe, plotter_pipe = mp.Pipe()
        self.plotter = ProcessPlotter()
        self.plot_process = mp.Process(
            target=self.plotter, args=(plotter_pipe,), daemon=True)
        self.plot_process.start()

    def plot(self, finished=False):
        """Send one random 2-D point, or the None sentinel when finished."""
        if finished:
            self.plot_pipe.send(None)
        else:
            self.plot_pipe.send(np.random.random(2))
def main():
    """Stream ten random points to the plotter, then signal completion."""
    plotter = NBPlot()
    for _ in range(10):
        plotter.plot()
        time.sleep(0.5)
    plotter.plot(finished=True)
if __name__ == '__main__':
    # The default start method does not play well with the MacOSX GUI
    # backend, so switch to "forkserver" before any process is spawned.
    if plt.get_backend() == "MacOSX":
        mp.set_start_method("forkserver")
    main()
0857d0a70d0b47f5dbb69ba19bf2fc6d3a4c9122 | 391 | py | Python | pmedapp/asgi.py | ibadkureshi/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | 1 | 2021-02-07T10:37:52.000Z | 2021-02-07T10:37:52.000Z | pmedapp/asgi.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | null | null | null | pmedapp/asgi.py | panosprotopapas/tnk-locationallocation | b06abcb7bf8675b13e4c2e4fe419afb5ee11018f | [
"MIT"
] | 2 | 2020-10-23T13:14:53.000Z | 2020-11-13T12:01:44.000Z | """
ASGI config for pmedapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pmedapp.settings')
# Module-level ASGI callable that servers (e.g. uvicorn/daphne) import.
application = get_asgi_application()
| 23 | 78 | 0.785166 |
2ff066a6c5fab469ea1202e0bcd88cd1c9698e4f | 311 | py | Python | code/tags.py | isleofmull/knausj_talon | 0608869ae6a2cfd5f4fb505a477e755ab69294a1 | [
"Unlicense"
] | null | null | null | code/tags.py | isleofmull/knausj_talon | 0608869ae6a2cfd5f4fb505a477e755ab69294a1 | [
"Unlicense"
] | null | null | null | code/tags.py | isleofmull/knausj_talon | 0608869ae6a2cfd5f4fb505a477e755ab69294a1 | [
"Unlicense"
] | null | null | null | from talon import Context, Module
mod = Module()
tagList = ["firefox", "gdb", "tmux", "tabs"]
modes = {
"gdb": "a way to force gdb commands to be loaded",
}
for entry in tagList:
mod.tag(entry, f"tag to load {entry} and/or related plugins ")
for key, value in modes.items():
mod.mode(key, value)
| 22.214286 | 66 | 0.649518 |
22bd44bf4e92e459c1d15f2e0e3e0789435a3c69 | 3,878 | py | Python | misc/enum_type_perfect_hash.py | bqqbarbhg/ufbx | b67f34ecd5579fe002b7bd5e36b7ca8ba12e72ea | [
"Unlicense"
] | 128 | 2020-10-25T14:20:23.000Z | 2022-03-30T22:16:29.000Z | misc/enum_type_perfect_hash.py | bqqbarbhg/ufbx | b67f34ecd5579fe002b7bd5e36b7ca8ba12e72ea | [
"Unlicense"
] | 33 | 2019-06-11T10:23:51.000Z | 2022-03-31T19:56:22.000Z | misc/enum_type_perfect_hash.py | bqqbarbhg/ufbx | b67f34ecd5579fe002b7bd5e36b7ca8ba12e72ea | [
"Unlicense"
] | 7 | 2020-05-25T08:12:28.000Z | 2022-03-22T10:23:56.000Z | from collections import namedtuple
# Record tying a name to its 32-bit hash and the suffix of the C enum it maps to.
NameEnum = namedtuple("NameEnum", "name hash enum")
def str_hash(s):
    """Return the 32-bit FNV-1a hash of *s*, remapping 0 to 1.

    Zero is reserved as the "empty slot" marker in the generated tables,
    so it is never returned for a real name.
    """
    acc = 0x811c9dc5  # FNV-1a 32-bit offset basis
    for ch in s:
        # XOR the byte in, multiply by the FNV prime, wrap to 32 bits.
        acc = ((acc ^ ord(ch)) * 0x01000193) & 0xffffffff
    return acc if acc != 0 else 1
def nameEnum(name, enum):
    """Build a NameEnum record, deriving the hash field from *name*."""
    return NameEnum(name=name, hash=str_hash(name), enum=enum)
# FBX property type names -> ufbx property-type enum suffixes.
prop_types = [
    nameEnum("Boolean", "BOOLEAN"),
    nameEnum("bool", "BOOLEAN"),
    nameEnum("Integer", "INTEGER"),
    nameEnum("int", "INTEGER"),
    nameEnum("enum", "INTEGER"),
    nameEnum("Number", "NUMBER"),
    nameEnum("double", "NUMBER"),
    nameEnum("Vector", "VECTOR"),
    nameEnum("Vector3D", "VECTOR"),
    nameEnum("Color", "COLOR"),
    nameEnum("ColorRGB", "COLOR"),
    nameEnum("String", "STRING"),
    nameEnum("KString", "STRING"),
    nameEnum("DateTime", "DATE_TIME"),
    nameEnum("Lcl Translation", "TRANSLATION"),
    nameEnum("Lcl Rotation", "ROTATION"),
    nameEnum("Lcl Scaling", "SCALING"),
]
# FBX object/node type names -> ufbx node-type enum suffixes.
node_types = [
    nameEnum("Model", "MODEL"),
    nameEnum("Geometry", "MESH"),
    nameEnum("Material", "MATERIAL"),
    nameEnum("Texture", "TEXTURE"),
    nameEnum("AnimationCurveNode", "ANIMATION"),
    nameEnum("AnimationCurve", "ANIMATION_CURVE"),
    nameEnum("AnimationLayer", "ANIMATION_LAYER"),
    nameEnum("NodeAttribute", "ATTRIBUTE"),
]
# Layer-element mapping mode names -> mapping enum suffixes.
element_mapping_types = [
    nameEnum("ByVertex", "VERTEX"),
    nameEnum("ByVertice", "VERTEX"),
    nameEnum("ByPolygon", "POLYGON"),
    nameEnum("ByPolygonVertex", "POLYGON_VERTEX"),
    nameEnum("ByEdge", "EDGE"),
    nameEnum("AllSame", "ALL_SAME"),
]
# Layer-element type names -> element enum suffixes.
element_types = [
    nameEnum("Normal", "NORMAL"),
    nameEnum("Binormal", "BINORMAL"),
    nameEnum("Tangent", "TANGENT"),
    nameEnum("Color", "VERTEX_COLOR"),
    nameEnum("UV", "UV"),
    nameEnum("EdgeCrease", "EDGE_CREASE"),
    nameEnum("Material", "FACE_MATERIAL"),
]
def find_params(names, map_size, max_k, max_s):
    """Search for perfect-hash parameters (k, s) over a table of *map_size*.

    A candidate maps each item to slot ((item.hash * k) >> s) % map_size;
    (k, s) is accepted when no two items collide. Returns (k, s, table)
    where table holds the items at their slots (None for empty slots).
    Raises ValueError when no candidate works within the search bounds.
    """
    for k in range(max_k):
        for s in range(max_s):
            slots = [None] * map_size
            collided = False
            for item in names:
                idx = (item.hash * k >> s) % map_size
                if slots[idx]:
                    collided = True
                    break
                slots[idx] = item
            if not collided:
                return k, s, slots
    raise ValueError("Could not find params")
# Output accumulators filled by gen_table: C declarations and unit-test lines.
decl = []
test = []
def gen_table(names, type_name, enum_name, test_extra=""):
    """Emit the C perfect-hash table and tests for one name->enum mapping.

    Appends the generated lines to the module-level `decl` and `test`
    accumulators.
    """
    global decl
    global test
    # Start with the smallest power-of-two table that can hold every name.
    map_size = 1
    while map_size < len(names):
        map_size *= 2
    # Grow the table until perfect-hash parameters exist for it.
    params = None
    while params is None:
        try:
            params = find_params(names, map_size, 10000, 24)
        except:
            map_size *= 2
    k, s, arr = params
    decl.append("#define ufbxi_{0}_permute_hash(h) ((((h) * {1}) >> {2}) % {3})".format(type_name, k, s, map_size))
    decl.append("static const ufbxi_{0}_map_entry ufbxi_{0}_map[{1}] = {{".format(type_name, map_size))
    for entry in arr:
        if entry:
            decl.append("\t{{ 0x{0:08x}u, {{ \"{1}\", {2} }}, {3}_{4} }},".format(entry.hash, entry.name, len(entry.name), enum_name, entry.enum))
        else:
            decl.append("\t{{ 0u, {{ 0,0 }}, {0}_UNKNOWN }},".format(enum_name))
    decl.append("};")
    test.append("")
    test.append("UFBXT_TEST(table_{0}_map_values)".format(type_name))
    test.append("#if UFBXT_IMPL")
    test.append("{")
    for entry in names:
        test.append("\tufbxt_assert(ufbxi_get_{0}(make_str(\"{1}\"){2}) == {3}_{4});".format(type_name, entry.name, test_extra, enum_name, entry.enum))
    test.append("}")
    test.append("#endif")
# Generate one table per mapping, then dump declarations and tests to stdout.
gen_table(prop_types, "prop_type", "UFBX_PROP", "")
gen_table(node_types, "node_type", "UFBX_NODE", ", ufbx_empty_string")
gen_table(element_mapping_types, "element_mapping", "UFBXI_ELEMENT_BY", "")
gen_table(element_types, "element_type", "UFBXI_ELEMENT", ", UFBXI_ELEMENT_BY_UNKNOWN")
print("\n".join(decl))
print()
print()
print("\n".join(test))
print()
| 30.296875 | 143 | 0.591542 |
da1365fbcdc8d71f1b601c6dd6696041b63f8f96 | 26,455 | py | Python | lib/stores/mysql_datastore_adapter.py | vermavis/cbtool | c3ddf66a5928035f7deec780af23eea54a6b68b7 | [
"Apache-2.0"
] | null | null | null | lib/stores/mysql_datastore_adapter.py | vermavis/cbtool | c3ddf66a5928035f7deec780af23eea54a6b68b7 | [
"Apache-2.0"
] | null | null | null | lib/stores/mysql_datastore_adapter.py | vermavis/cbtool | c3ddf66a5928035f7deec780af23eea54a6b68b7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#/*******************************************************************************
# Copyright (c) 2020 DigitalOcean, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#/*******************************************************************************
'''
Created on June 16th, 2020
Mysql data management operations library
@author: Michael Galaxy
'''
import json
import threading
import mysql.connector
from lib.auxiliary.code_instrumentation import trace, cbdebug, cberr, cbwarn, cbinfo, cbcrit
from lib.stores.common_datastore_adapter import MetricStoreMgdConn, MetricStoreMgdConnException
class MysqlMgdConn(MetricStoreMgdConn) :
@trace
def __init__(self, parameters) :
MetricStoreMgdConn.__init__(self, parameters)
self.username = self.mysql_username
self.port = self.mysql_port
self.version = mysql.connector.__version__.split('.')[0]
self.lastrow_mutex = threading.Lock()
self.conn_mutex = threading.Lock()
self.update_mutex = threading.Lock()
@trace
def connect(self, tout) :
try:
#if tout and tout > 0:
# MysqlMgdConn.conn.set_connection_timeout(tout)
if not MysqlMgdConn.catalogs.cbtool["conn"] :
cbdebug("Opening to: " + self.database)
MysqlMgdConn.catalogs.cbtool["conn"] = mysql.connector.connect(host = self.host, port = self.port, user = self.username, password = self.password)
cursor = MysqlMgdConn.catalogs.cbtool["conn"].cursor()
try :
cursor.execute("use " + self.database)
MysqlMgdConn.catalogs.cbtool["database"] = True
except mysql.connector.Error as err :
if err.errno == mysql.connector.errorcode.ER_BAD_DB_ERROR:
cbwarn("Database not found. Will create later.")
cursor.close()
_msg = "A connection to MySQL running on host "
_msg += self.host + ", port " + str(self.port) + ", database"
_msg += ' ' + str(MysqlMgdConn.catalogs.cbtool["database"]) + ", with a timeout of "
_msg += str(tout) + "s was established."
cbdebug(_msg)
except mysql.connector.Error as err :
if err.errno == mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR:
_msg = "Something is wrong with your MySQL user name or password."
cberr(_msg)
raise MetricStoreMgdConnException(str(_msg), 1)
else:
_msg = "Unknown MySQL error: " + str(err)
cberr(_msg)
raise MetricStoreMgdConnException(str(_msg), 2)
@trace
def disconnect(self) :
try:
if "disconnect" in dir(MysqlMgdConn.catalogs.cbtool["conn"]) :
MysqlMgdConn.catalogs.cbtool["conn"].disconnect()
MysqlMgdConn.catalogs.cbtool["conn"] = False
_msg = "A connection to MySQL running on host "
_msg += self.host + ", port " + str(self.port) + ", database"
_msg += ' ' + str(MysqlMgdConn.catalogs.cbtool["database"]) + ", was terminated."
cbdebug(_msg)
except mysql.connector.Error as err :
_msg = "Unable to terminate a connection with MySQL "
_msg += "server on host " + self.host + " port "
_msg += str(self.port) + "database " + str(MysqlMgdConn.catalogs.cbtool["database"]) + ": "
_msg += str(err)
cberr(_msg)
raise MetricStoreMgdConnException(str(_msg), 3)
    @trace
    def conn_check(self, hostov = False, dbov = False, tout = False) :
        """
        Ensure this thread has a live connection and return a fresh cursor.

        Lazily creates the thread-local state dictionary, (re)connects when
        the stored connection is absent or no longer alive, and optionally
        overrides the host, database flag, or timeout for the new connection.
        The whole check runs under conn_mutex; the lock is released on every
        exit path.
        """
        self.conn_mutex.acquire()
        try :
            # catalogs is thread-local storage; the attribute does not exist
            # the first time a given thread gets here.
            getattr(MysqlMgdConn.catalogs, "cbtool")
        except AttributeError as e :
            cbdebug("Initializing thread local connection: ")
            MysqlMgdConn.catalogs.cbtool = {}
        if "database" not in MysqlMgdConn.catalogs.cbtool :
            MysqlMgdConn.catalogs.cbtool["database"] = False
        if "conn" not in MysqlMgdConn.catalogs.cbtool :
            MysqlMgdConn.catalogs.cbtool["conn"] = False
        if not MysqlMgdConn.catalogs.cbtool["conn"] or not MysqlMgdConn.catalogs.cbtool["conn"].is_connected() :
            # Stale or missing connection: reset and reconnect below.
            MysqlMgdConn.catalogs.cbtool["conn"] = False
            if hostov :
                self.host = hostov
            if dbov :
                MysqlMgdConn.catalogs.cbtool["database"] = dbov
            if tout :
                self.timeout = tout
            try :
                self.connect(self.timeout)
            except MetricStoreMgdConnException as obj :
                self.conn_mutex.release()
                raise MetricStoreMgdConnException(obj.msg, 2)
            except Exception as e :
                self.conn_mutex.release()
                raise(e)
        assert(MysqlMgdConn.catalogs.cbtool["conn"])
        assert(MysqlMgdConn.catalogs.cbtool["conn"].is_connected())
        cursor = MysqlMgdConn.catalogs.cbtool["conn"].cursor()
        self.conn_mutex.release()
        return cursor
    @trace
    def initialize_metric_store(self, username) :
        """
        Create the database (if needed) and all per-user metric tables.

        Each table stores one JSON document per row, with generated columns
        extracting the fields commonly used in queries; the high-volume
        sample tables additionally get indexes on those columns. Returns
        True on success.

        @raise MetricStoreMgdConnException: code 4 on any connector error.
        """
        # MySQL identifiers cannot contain '-', so usernames are mangled the
        # same way in every method of this class.
        username = username.replace('-',"dash")
        try :
            cursor = self.conn_check()
            if not MysqlMgdConn.catalogs.cbtool["database"] :
                cursor.execute("create database " + self.database)
                cursor.execute("use " + self.database)
                MysqlMgdConn.catalogs.cbtool["database"] = True
            # "latest_*" and "reported_*" tables hold only current state and
            # stay small, so they are not indexed.
            _latest_tables = [ \
                "latest_management_VM_" + username, \
                "latest_management_HOST_" + username, \
                "latest_runtime_os_VM_" + username, \
                "latest_runtime_os_HOST_" + username, \
                "latest_runtime_app_VM_" + username, \
                "reported_management_VM_metric_names_" + username, \
                "reported_runtime_app_VM_metric_names_" + username, \
                "reported_runtime_os_HOST_metric_names_" + username, \
                "reported_runtime_os_VM_metric_names_" + username \
                ]
            _indexed_tables = [ "trace_" + username, \
                                "management_HOST_" + username, \
                                "management_VM_" + username, \
                                "runtime_os_VM_" + username, \
                                "runtime_app_VM_" + username, \
                                "runtime_os_HOST_" + username ]
            cursor.execute("show tables")
            _tables_found = []
            for x in cursor:
                _tables_found.append(x[0])
            for _table in (_latest_tables + _indexed_tables) :
                if _table not in _tables_found :
                    # Generated columns mirror well-known document keys so
                    # they can be filtered/indexed without JSON parsing.
                    statement = "create table " + _table + "(" + \
                                "id int auto_increment primary key," + \
                                "document json NOT NULL," + \
                                "`expid` VARCHAR(255) GENERATED ALWAYS AS (`document` ->> '$.expid')," + \
                                "`_id` VARCHAR(255) GENERATED ALWAYS AS (`document` ->> '$._id')," + \
                                "`time` VARCHAR(255) GENERATED ALWAYS AS (`document` ->> '$.time')," + \
                                "`uuid` VARCHAR(255) GENERATED ALWAYS AS (`document` ->> '$.uuid')," + \
                                "`dashboard_polled` VARCHAR(255) GENERATED ALWAYS AS (`document` ->> '$.dashboard_polled')" + \
                                ")"
                    cursor.execute(statement)
                    if _table in _indexed_tables :
                        cursor.execute("CREATE INDEX `expid_idx` ON `" + _table + "`(`expid`)")
                        cursor.execute("CREATE INDEX `time_idx` ON `" + _table + "`(`time`)")
                        cursor.execute("CREATE INDEX `uuid_idx` ON `" + _table + "`(`uuid`)")
                        cursor.execute("CREATE INDEX `dashboard_polled_idx` ON `" + _table + "`(`dashboard_polled`)")
            cursor.close()
            MysqlMgdConn.catalogs.cbtool["conn"].commit()
            self.disconnect()
            return True
        except mysql.connector.Error as err :
            self.disconnect()
            _msg = "Unable to complete database initialization: "
            _msg += str(err)
            cberr(_msg)
            raise MetricStoreMgdConnException(str(_msg), 4)
    def make_restrictions(self, criteria, join = "and", level = 0) :
        """
        Translate a Mongo-style criteria dict into a SQL restriction clause.

        Supported criteria values: scalars (equality against the JSON
        field), {"$exists": bool}, and "$and"/"$or" keys holding a list of
        sub-criteria dicts (handled recursively). Returns " where ..." at
        the top level, the bare condition string when recursing, or "" when
        there is nothing to restrict on.

        NOTE(review): scalar values are interpolated directly into the SQL
        text; a value containing a quote character would break the statement
        (potential SQL injection). Confirm all callers pass trusted values.

        @raise MetricStoreMgdConnException: code 41 for unsupported criteria.
        """
        full_list = ""
        restrictions = []
        for _key in criteria.keys() :
            _value = criteria[_key]
            if isinstance(_value, set) :
                _msg = "1) We cannot yet handle this criteria: " + str(criteria)
                cberr(_msg)
                raise MetricStoreMgdConnException(_msg, 41)
            elif isinstance(_value, dict) :
                for subkey in _value.keys() :
                    if subkey.lower() == "$exists" :
                        if not isinstance(_value[subkey], bool) :
                            _msg = "2) We cannot yet handle this criteria: " + str(_value)
                            cberr(_msg)
                            raise MetricStoreMgdConnException(_msg, 41)
                        # A missing JSON key surfaces as SQL NULL through the
                        # ->> extraction operator.
                        if _value[subkey] :
                            restrictions.append("document->>'$." + _key + "' IS NOT NULL")
                        else :
                            restrictions.append("document->>'$." + _key + "' IS NULL")
                    else :
                        _msg = "3) We cannot yet handle this criteria: " + str(subkey)
                        cberr(_msg)
                        raise MetricStoreMgdConnException(_msg, 41)
            elif isinstance(_value, list) :
                # Handle this group below
                continue
            else :
                _newvalue = _value
                if isinstance(_value, bytes) :
                    _newvalue = _value.decode("utf-8")
                restrictions.append("document->>'$." + _key + "' = '" + str(_newvalue) + "'")
        if len(restrictions) :
            full_list += (" " + join + " ").join(restrictions)
        # Second pass: expand $and/$or groups into parenthesized sub-clauses.
        for _key in criteria.keys() :
            if _key.lower() == "$or" or _key.lower() == "$and" :
                _value = criteria[_key]
                if isinstance(_value, list) :
                    subdict = {}
                    for subitem in _value :
                        if not isinstance(subitem, dict) :
                            _msg = "4) We cannot yet handle this criteria: " + str(subitem)
                            cberr(_msg)
                            raise MetricStoreMgdConnException(_msg, 41)
                        subdict.update(subitem)
                    # "$or"[1:] / "$and"[1:] becomes the SQL join keyword.
                    sub_restrictions = self.make_restrictions(subdict, join = _key[1:], level = level + 1)
                    if sub_restrictions.strip() != "" :
                        full_list += " and (" + sub_restrictions + ")"
                else :
                    _msg = "5) We cannot yet handle this criteria: " + str(_value)
                    cberr(_msg)
                    raise MetricStoreMgdConnException(_msg, 41)
        if full_list.strip() != "" :
            if level == 0 :
                return " where " + full_list
            else :
                return full_list
        return ""
    def flush_metric_store(self, username, partial = False, criteria = {}) :
        """
        Delete rows from every per-user metric table.

        With partial=True and a non-empty criteria dict, only matching rows
        are removed (see make_restrictions); otherwise each table is fully
        emptied. Returns True on success.

        NOTE(review): criteria={} is a mutable default argument; it is only
        read here, so this is harmless, but avoid mutating it.

        @raise MetricStoreMgdConnException: code 5 on any connector error.
        """
        username = username.replace('-',"dash")
        try :
            cursor = self.conn_check()
            _tables = ["latest_management_VM_" + username, \
                       "latest_management_HOST_" + username, \
                       "latest_runtime_os_VM_" + username, \
                       "latest_runtime_os_HOST_" + username, \
                       "latest_runtime_app_VM_" + username, \
                       "trace_" + username, \
                       "management_HOST_" + username, \
                       "management_VM_" + username, \
                       "runtime_os_VM_" + username, \
                       "runtime_app_VM_" + username, \
                       "runtime_os_HOST_" + username, \
                       "reported_management_VM_metric_names_" + username, \
                       "reported_runtime_app_VM_metric_names_" + username, \
                       "reported_runtime_os_HOST_metric_names_" + username, \
                       "reported_runtime_os_VM_metric_names_" + username ]
            for _table in _tables :
                if partial and len(criteria) :
                    statement = "delete from " + _table + self.make_restrictions(criteria)
                    cursor.execute(statement)
                else :
                    cursor.execute("delete from " + _table)
            cursor.close()
            MysqlMgdConn.catalogs.cbtool["conn"].commit()
            self.disconnect()
            return True
        except mysql.connector.Error as err :
            self.disconnect()
            _msg = "Unable to flush metric store: " + str(err)
            cberr(_msg)
            raise MetricStoreMgdConnException(str(_msg), 5)
@trace
def add_document(self, table, document, disconnect_finish = False) :
table = table.replace('-',"dash")
self.lastrow_mutex.acquire()
lastrowid = -1
try :
cursor = self.conn_check()
if "_id" in document and isinstance(document["_id"], bytes) :
document["_id"] = document["_id"].decode("utf-8")
statement = "insert into " + table + " (document) values ('" + json.dumps(document) + "')"
result = cursor.execute(statement)
if cursor.rowcount != 1 :
MysqlMgdConn.catalogs.cbtool["conn"].rollback()
raise MetricStoreMgdConnException("Add failed w/ statement: " + statement, 65)
cursor.close()
MysqlMgdConn.catalogs.cbtool["conn"].commit()
lastrowid = cursor.lastrowid
if disconnect_finish :
self.disconnect()
except mysql.connector.Error as err :
self.lastrow_mutex.release()
_msg = "Unable to insert document into table \"" + table + "\": "
_msg += str(err)
cberr(_msg)
raise MetricStoreMgdConnException(str(_msg), 6)
except Exception as e :
self.lastrow_mutex.release()
raise MetricStoreMgdConnException(str(e), 64)
self.lastrow_mutex.release()
return lastrowid
    @trace
    def find_document(self, table, criteria, allmatches = False, \
                      sortkeypairs = None, limitdocuments = 0, \
                      documentfields = None, disconnect_finish = False) :
        """
        Retrieve document(s) from *table* matching *criteria*.

        When documentfields is given, only those JSON fields are selected
        and a dict with exactly those keys is returned per row; otherwise
        the full JSON document is returned. Every returned document gains
        an "original_mysql_id" key holding the row's primary key. Returns
        a list when allmatches is True, else the first match or None.

        NOTE(review): disconnect_finish is accepted but never acted upon
        here — confirm whether that is intentional.

        @raise MetricStoreMgdConnException: code 7 on any connector error.
        """
        table = table.replace('-',"dash")
        try :
            cursor = self.conn_check()
            statement = "select "
            if documentfields is not None :
                # Project individual JSON fields instead of the whole blob.
                convertedfields = []
                for field in documentfields :
                    convertedfields.append("document->>'$." + field + "'")
                statement += ",".join(["id"] + convertedfields)
            else :
                statement += " id,document "
            statement += " from " + table + " " + self.make_restrictions(criteria)
            if sortkeypairs :
                keylist = []
                for keypair in sortkeypairs :
                    # FIXME: I'm unsure of how to have different directional sort criteria for multiple
                    # sorted keys. Will have to look into that later, so for the time being,
                    # I'm dropping the direction.
                    keylist.append("document->>'$." + keypair[0] + "'")
                statement += " order by " + ",".join(keylist)
            if not allmatches or limitdocuments :
                if limitdocuments > 0 :
                    statement += " limit " + str(limitdocuments)
                else :
                    statement += " limit 1"
            _results = []
            # FIXME: We need to figure out how to safely allow iterators over
            # the live connection. But for now, let's just extract all the results
            result = cursor.execute(statement)
            while True :
                rows = cursor.fetchmany(4)
                if not len(rows) :
                    break
                for resultset in rows :
                    original_mysql_id = resultset[0]
                    document = False
                    if documentfields is not None :
                        document = {}
                        for idx in range(1, len(resultset)) :
                            # assumes projected fields come back as bytes —
                            # TODO confirm for this connector configuration.
                            document[documentfields[idx - 1]] = resultset[idx].decode()
                    else :
                        # Depending on connector version the JSON column may
                        # arrive as a str or an already-parsed dict.
                        if isinstance(resultset[1], str) :
                            document = json.loads(resultset[1])
                        else :
                            assert(isinstance(resultset[1], dict))
                            document = resultset[1]
                    document["original_mysql_id"] = original_mysql_id
                    _results.append(document)
            cursor.close()
            if allmatches :
                return _results
            else :
                if len(_results) >= 1 :
                    return _results[0]
                return None
        except mysql.connector.Error as err :
            _msg = "Unable to retrieve documents from the table \""
            _msg += table + ": " + str(err)
            cberr(_msg)
            raise MetricStoreMgdConnException(str(_msg), 7)
@trace
def update_document(self, table, document, disconnect_finish = False) :
    '''
    Persist `document` (a dict) back into `table`.

    The row is located through document["original_mysql_id"]; when absent,
    an attempt is made to resolve it from document["_id"], and as a last
    resort the document is inserted via add_document() instead.

    Serialized with self.update_mutex so concurrent updates cannot
    interleave on the shared connection.

    :raises MetricStoreMgdConnException: code 8 on a MySQL error,
                                         code 67 on any other error.
    '''
    table = table.replace('-',"dash")
    self.update_mutex.acquire()
    try :
        cursor = self.conn_check()
        # Normalize "_id" to str so it can be embedded in the SQL below.
        if "_id" in document and isinstance(document["_id"], bytes) :
            document["_id"] = document["_id"].decode("utf-8")
        if "original_mysql_id" not in document :
            if "_id" in document :
                # Attempt to find the original ID first
                # NOTE(review): built by string concatenation; an "_id"
                # containing a quote breaks/abuses the query -- consider a
                # parameterized statement.
                statement = "select id from " + table + " where _id = '" + document["_id"] + "'"
                cursor.execute(statement)
                while True :
                    rows = cursor.fetchmany(1)
                    if not len(rows) :
                        break
                    for (original_mysql_id,) in rows :
                        document["original_mysql_id"] = original_mysql_id
            if "original_mysql_id" not in document :
                cursor.close()
                cbwarn("This document does not have a pre-existing identifier. Cannot update. Will insert first")
                document["original_mysql_id"] = self.add_document(table, document, disconnect_finish = disconnect_finish)
                self.update_mutex.release()
                return
        statement = "update " + table + " set document = '" + json.dumps(document) + "' where id = " + str(document["original_mysql_id"])
        result = cursor.execute(statement)
        cursor.close()
        MysqlMgdConn.catalogs.cbtool["conn"].commit()
        if disconnect_finish :
            self.disconnect()
    except mysql.connector.Error as err :
        self.update_mutex.release()
        _msg = "Unable to update documents from the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 8)
    except Exception as e :
        self.update_mutex.release()
        # BUGFIX: this branch previously used `_msg` without ever assigning
        # it on this path, so any non-mysql error was masked by a NameError.
        # Build the message from the caught exception instead.
        _msg = "Unable to update documents from the table \""
        _msg += table + ": " + str(e)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 67)
    self.update_mutex.release()
@trace
def delete_document(self, table, criteria, disconnect_finish = False) :
    '''
    Remove every document in `table` matching `criteria` and commit.

    :param disconnect_finish: close the connection after the commit.
    :raises MetricStoreMgdConnException: (code 9) on a MySQL error.
    '''
    table = table.replace('-',"dash")
    try :
        db_cursor = self.conn_check()
        # WHERE clause comes from the shared restriction builder.
        db_cursor.execute("delete from " + table + self.make_restrictions(criteria))
        db_cursor.close()
        # Commit on the shared cbtool connection so the deletion takes effect.
        MysqlMgdConn.catalogs.cbtool["conn"].commit()
        if disconnect_finish :
            self.disconnect()
    except mysql.connector.Error as err :
        _msg = "Unable to remove document from the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 9)
# FIXME: I am unable to find any callers of this function
@trace
def cleanup_collection(self, table, disconnect_finish = False) :
    '''
    Delete ALL documents from `table` (the table itself is kept) and commit.

    :returns: True on success.
    :raises MetricStoreMgdConnException: (code 10) on a MySQL error.
    '''
    table = table.replace('-',"dash")
    try :
        db_cursor = self.conn_check()
        # Unrestricted DELETE: wipes every row of the table.
        db_cursor.execute("delete from " + table)
        db_cursor.close()
        MysqlMgdConn.catalogs.cbtool["conn"].commit()
        if disconnect_finish :
            self.disconnect()
        return True
    except mysql.connector.Error as err :
        _msg = "Unable to drop all documents from the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 10)
@trace
def count_document(self, table, criteria, disconnect_finish = False) :
    '''
    Return the number of documents in `table` matching `criteria`.

    :raises MetricStoreMgdConnException: (code 11) on a MySQL error.
    '''
    table = table.replace('-',"dash")
    try :
        cursor = self.conn_check()
        # BUGFIX: the previous code did "cursor.execute(statement).rowcount",
        # but mysql.connector's MySQLCursor.execute() returns None, so this
        # always raised AttributeError (and rowcount is -1 on an unbuffered
        # cursor until rows are fetched anyway). Count on the server side
        # instead of selecting every row.
        statement = "select count(*) from " + table + self.make_restrictions(criteria)
        cursor.execute(statement)
        count = cursor.fetchone()[0]
        cursor.close()
        if disconnect_finish :
            self.disconnect()
        return count
    except mysql.connector.Error as err :
        _msg = "Unable to count documents on the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 11)
def get_reported_objects(self, table, disconnect_finish = False) :
    '''
    Scan every document in `table` and collect the distinct values of a
    fixed attribute set ("vm_name" is stored in documents under "name").

    :returns: dict mapping "<attribute>s" -> list of unique values.
    :raises MetricStoreMgdConnException: (code 12) on a MySQL error.
    '''
    table = table.replace('-',"dash")
    try :
        cursor = self.conn_check()
        _result = {}
        _attributes = [ "vm_name", "role", "ai_name", "type", "aidrs_name", "pattern" ]
        for _attribute in _attributes :
            # Result keys are pluralized attribute names.
            _result[_attribute + 's'] = []
        statement = "select id, document from " + table
        cursor.execute(statement)
        while True :
            # Stream rows in small batches rather than fetchall().
            rows = cursor.fetchmany(4)
            if not len(rows) :
                break
            for (original_mysql_id, _document) in rows :
                for _attribute in _attributes :
                    # In the stored documents the VM name lives under "name".
                    if _attribute == "vm_name" :
                        _attribute_r = "name"
                    else :
                        _attribute_r = _attribute
                    # NOTE(review): unlike find_document(), _document is never
                    # json.loads()'d here; if the column comes back as a str,
                    # the "in" test below is a substring match rather than a
                    # key lookup -- confirm the connector returns a dict.
                    if _attribute_r in _document :
                        # Deduplicate: only append values not already present.
                        if not _result[_attribute + 's'].count(_document[_attribute_r]) :
                            _result[_attribute + 's'].append(_document[_attribute_r])
        cursor.close()
        if disconnect_finish :
            self.disconnect()
        return _result
    except mysql.connector.Error as err :
        _msg = "Unable to get reported attributes on the table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 12)
# I could not find any code that uses this function
#def get_time_boundaries(self, table, disconnect_finish = False) :
def get_experiment_list(self, table, disconnect_finish = False) :
    '''
    Return the list of distinct, non-NULL experiment ids ("expid") present
    in `table`.

    :raises MetricStoreMgdConnException: (code 14) on a MySQL error.
    '''
    table = table.replace('-',"dash")
    _experiment_list = []
    try :
        cursor = self.conn_check()
        statement = "select distinct(expid) from " + table + " where expid is not NULL"
        cursor.execute(statement)
        while True :
            rows = cursor.fetchmany(4)
            if not len(rows) :
                break
            # BUGFIX: "for (expid) in rows" did NOT unpack the row -- the
            # parentheses are redundant, so the whole 1-element tuple was
            # appended. "(expid,)" unpacks the single column value.
            for (expid,) in rows :
                _experiment_list.append(expid)
        cursor.close()
        if disconnect_finish :
            self.disconnect()
        return _experiment_list
    except mysql.connector.Error as err :
        _msg = "Unable to get time experiment list for table \""
        _msg += table + ": " + str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 14)
@trace
def get_info(self) :
    '''
    Return a list of [label, value] pairs describing the backend: the
    server's "version" variable and the total data size in MB (summed over
    all tables in information_schema).
    '''
    try :
        _output = []
        cursor = self.conn_check()
        cursor.execute("show variables")
        while True :
            rows = cursor.fetchmany(4)
            if not len(rows) :
                break
            for row in rows :
                # Of all server variables, only "version" is reported.
                if row[0] in ["version"] :
                    _output.append([row[0], row[1]])
        cursor.execute("select sum(data_length + index_length)/1024/1024 'size' FROM information_schema.TABLES")
        while True :
            rows = cursor.fetchmany(4)
            if not len(rows) :
                break
            for row in rows :
                _output.append(["Data Size (MB)", str(float(row[0]))])
        cursor.close()
        return _output
    except mysql.connector.Error as err :
        _msg = "Unable to get info for database " + self.database + ": "
        _msg += str(err)
        cberr(_msg)
        raise MetricStoreMgdConnException(str(_msg), 15)
    except Exception as e :
        # NOTE(review): non-mysql errors are only logged at debug level and
        # the function then implicitly returns None -- confirm callers cope
        # with a None result.
        cbdebug("No workey: " + str(e))
| 41.595912 | 162 | 0.516273 |
870e9a6ff6db8fcf94bfec69892ce369d26df9e5 | 505 | py | Python | examples/untitled0.py | Pradyumn-Patil/Gender-input-verification- | c2668d31f0c3b1d1e219eab5b09b7b45f0e5441e | [
"MIT"
] | 1 | 2021-11-22T20:22:26.000Z | 2021-11-22T20:22:26.000Z | examples/untitled0.py | Pradyumn-Patil/Gender-input-verification- | c2668d31f0c3b1d1e219eab5b09b7b45f0e5441e | [
"MIT"
] | null | null | null | examples/untitled0.py | Pradyumn-Patil/Gender-input-verification- | c2668d31f0c3b1d1e219eab5b09b7b45f0e5441e | [
"MIT"
] | null | null | null | import pandas as pd
import glob
# All files ending with .txt with depth of 2 folder
# (the pattern below actually matches files with ANY extension, not just .txt)
data = glob.glob(r"data/*/*.*")
print(data)
# NOTE(review): this literal is NOT a raw string, so '\100' is parsed as an
# octal escape and the path is silently corrupted; presumably
# r'data\1008\DEN21501433_P.jpg' was intended -- TODO confirm.
ini_string = 'data\1008\DEN21501433_P.jpg'
# Skip the first 7 characters, then keep an 11-character slice of the rest.
sstring_end = ini_string[7:]
sstring_strt = sstring_end[:11]
print(sstring_strt)
# Wrap the globbed paths in a DataFrame; 'file' drops the last 15 characters
# of each path (the filename portion).
df = pd.DataFrame(data, columns= ['file_name'])
df['file'] = df['file_name'].str[:-15]
# NOTE(review): contains() treats "_P.jpg" as a regex, so '.' matches any
# character here -- use regex=False (or escape) if a literal match is meant.
df_files =df[df['file_name'].str.contains("_P.jpg")]
df1 = df.head(10)
for index, row in df1.iterrows():
print (row['file'], ' - ', row['file_name']) | 21.956522 | 52 | 0.673267 |
a4b7adfdb8624f316ee5e0ef616a7b2a056cd07e | 148 | py | Python | polls/admin.py | nickmwangemi/polls | a31221d87251ce337c2b78c2678943d9f840e0ca | [
"MIT"
] | null | null | null | polls/admin.py | nickmwangemi/polls | a31221d87251ce337c2b78c2678943d9f840e0ca | [
"MIT"
] | null | null | null | polls/admin.py | nickmwangemi/polls | a31221d87251ce337c2b78c2678943d9f840e0ca | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Question
from .models import Choice
# Expose the Question model in the Django admin interface.
admin.site.register(Question)
admin.site.register(Choice) | 21.142857 | 32 | 0.824324 |
7148433121456306884b789637c7a102e08379a0 | 9,853 | py | Python | third_party/blink/tools/blinkpy/common/system/executive_unittest.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | third_party/blink/tools/blinkpy/common/system/executive_unittest.py | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/blink/tools/blinkpy/common/system/executive_unittest.py | DamieFC/chromium | 54ce2d3c77723697efd22cfdb02aea38f9dfa25c | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
import unittest
# Since we execute this script directly as part of the unit tests, we need to
# ensure that blink/tools is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
from blinkpy.common.system.executive import Executive, ScriptError
class ScriptErrorTest(unittest.TestCase):
    """Checks the formatting produced by ScriptError.message_with_output()."""

    def test_message_with_output(self):
        # A message with no captured output is returned verbatim.
        err = ScriptError('My custom message!', '', -1)
        self.assertEqual(err.message_with_output(), 'My custom message!')

        # Captured output is appended after the message.
        err = ScriptError('My custom message!', '', -1, 'My output.')
        self.assertEqual(err.message_with_output(),
                         'My custom message!\n\noutput: My output.')

        # Without an explicit message, a summary of the failed command
        # (including exit code and cwd) is synthesized.
        err = ScriptError('', 'my_command!', -1, 'My output.',
                          '/Users/username/blah')
        self.assertEqual(
            err.message_with_output(),
            'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.'
        )

        # Long output is truncated to its last 500 characters.
        err = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
        self.assertEqual(
            err.message_with_output(),
            'Failed to run "\'my_command!\'" exit_code: -1\n\noutput: Last 500 characters of output:\nb'
            + '1' * 499)

    def test_message_with_tuple(self):
        # Tuple commands are rendered (via repr) just like list commands.
        err = ScriptError('', ('my', 'command'), -1, 'My output.',
                          '/Users/username/blah')
        self.assertEqual(
            err.message_with_output(),
            'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.'
        )
def never_ending_command():
    """Return argv for a command that will never terminate on its own.

    Useful for testing process killing. The command is chosen so it is
    unlikely to already be running, because all instances will be killed.
    """
    return ['wmic'] if sys.platform == 'win32' else ['yes']
def command_line(cmd, *args):
    """Build an argv that re-invokes this script with '--<cmd>' plus args."""
    invocation = [sys.executable, __file__, '--' + cmd]
    invocation.extend(args)
    return invocation
class ExecutiveTest(unittest.TestCase):
    """Integration-style tests for Executive: they spawn real subprocesses,
    mostly re-invocations of this script itself built by command_line()."""

    def test_run_command_with_bad_command(self):
        # A nonexistent binary must surface OSError, even when the caller
        # suppresses errors and asks for the exit code.
        def run_bad_command():
            Executive().run_command(['foo_bar_command_blah'],
                                    error_handler=Executive.ignore_error,
                                    return_exit_code=True)
        with self.assertRaises(OSError):
            run_bad_command()

    def test_run_command_args_type(self):
        # run_command accepts only list/tuple argv, never a bare string.
        executive = Executive()
        with self.assertRaises(AssertionError):
            executive.run_command('echo')
        with self.assertRaises(AssertionError):
            executive.run_command(u'echo')
        executive.run_command(command_line('echo', 'foo'))
        executive.run_command(tuple(command_line('echo', 'foo')))

    def test_auto_stringify_args(self):
        # Non-string argv elements (here: int 1) are stringified implicitly.
        executive = Executive()
        executive.run_command(command_line('echo', 1))
        executive.popen(command_line('echo', 1), stdout=executive.PIPE).wait()
        self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))

    def test_print_command_unicode(self):
        executive = Executive()
        # The expected result is different on Windows because the unicode arg
        # first gets encoded using 'mbcs'. This encoding makes it unnecessary to
        # escape any unicode characters in the arg.
        # Elsewhere, the 'mbcs' encoding is skipped, but then we must escape any
        # non-ascii unicode characters by encoding with 'unicode_escape'. This
        # results in an extra \ on non-Win platforms.
        if sys.platform == 'win32':
            expected_result = u'echo 1 a\xac'
        else:
            expected_result = u'echo 1 a\\xac'
        self.assertEqual(expected_result,
                         executive.command_for_printing(['echo', 1, u'a\xac']))

    def test_popen_args(self):
        executive = Executive()
        # Explicitly naming the 'args' argument should not throw an exception.
        executive.popen(
            args=command_line('echo', 1), stdout=executive.PIPE).wait()

    def test_run_command_with_unicode(self):
        """Validate that it is safe to pass unicode() objects
        to Executive.run* methods, and they will return unicode()
        objects by default unless decode_output=False
        """
        unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
        if sys.platform == 'win32':
            encoding = 'mbcs'
        else:
            encoding = 'utf-8'
        encoded_tor = unicode_tor_input.encode(encoding)
        # On Windows, we expect the unicode->mbcs->unicode roundtrip to be
        # lossy. On other platforms, we expect a lossless roundtrip.
        if sys.platform == 'win32':
            unicode_tor_output = encoded_tor.decode(encoding)
        else:
            unicode_tor_output = unicode_tor_input

        executive = Executive()

        # Round-trip unicode through the helper subprocess via stdin/stdout.
        output = executive.run_command(
            command_line('cat'), input=unicode_tor_input)
        self.assertEqual(output, unicode_tor_output)

        output = executive.run_command(command_line('echo', unicode_tor_input))
        self.assertEqual(output, unicode_tor_output)

        output = executive.run_command(
            command_line('echo', unicode_tor_input), decode_output=False)
        self.assertEqual(output, encoded_tor)

        # Make sure that str() input also works.
        output = executive.run_command(
            command_line('cat'), input=encoded_tor, decode_output=False)
        self.assertEqual(output, encoded_tor)

    def test_kill_process(self):
        executive = Executive()
        if sys.platform == 'win32':
            process = subprocess.Popen(
                never_ending_command(), stdout=subprocess.PIPE)
        else:
            # Detach the child into its own process group (see preexec_fn).
            process = subprocess.Popen(
                never_ending_command(),
                stdout=subprocess.PIPE,
                preexec_fn=lambda: os.setpgid(0, 0))
        self.assertEqual(process.poll(), None)  # Process is running
        executive.kill_process(process.pid)
        # Killing again should fail silently.
        executive.kill_process(process.pid)

    def test_timeout_exceeded(self):
        executive = Executive()

        def timeout():
            executive.run_command(
                command_line('sleep', 'infinity'), timeout_seconds=0.01)
        with self.assertRaises(ScriptError):
            timeout()

    def test_timeout_exceeded_exit_code(self):
        executive = Executive()
        exit_code = executive.run_command(
            command_line('sleep', 'infinity'),
            timeout_seconds=0.01,
            return_exit_code=True)
        self.assertNotEqual(exit_code, 0)

    def test_timeout_satisfied(self):
        executive = Executive()
        executive.run_command(command_line('sleep', '0'), timeout_seconds=1000)

    def test_check_running_pid(self):
        executive = Executive()
        self.assertTrue(executive.check_running_pid(os.getpid()))
        # According to the proc(5) man page, on 64-bit linux systems,
        # pid_max can be set to any value up to 2^22 (approximately 4 million).
        self.assertFalse(executive.check_running_pid(5000000))

    def test_running_pids(self):
        executive = Executive()
        pids = executive.running_pids()
        self.assertIn(os.getpid(), pids)

    def test_run_in_parallel_assert_nonempty(self):
        # An empty work list is a programming error and must assert.
        with self.assertRaises(AssertionError):
            Executive().run_in_parallel([])
def main(platform, stdin, stdout, cmd, args):
    """Subprocess helper implementing the '--cat' and '--echo' commands.

    --cat copies stdin to stdout; --echo writes the remaining arguments
    joined by single spaces. Unknown commands are a no-op. Returns 0.
    """
    if platform == 'win32' and hasattr(stdout, 'fileno'):
        # Put stdout into binary mode on Windows (os.O_BINARY).
        import msvcrt
        msvcrt.setmode(stdout.fileno(), os.O_BINARY)
    handlers = {
        '--cat': lambda: stdout.write(stdin.read()),
        '--echo': lambda: stdout.write(' '.join(args)),
    }
    action = handlers.get(cmd)
    if action is not None:
        action()
    return 0
# When invoked with --cat/--echo (i.e. by command_line() from a test), act as
# the helper subprocess and exit with main()'s status instead of running the
# unit tests.
if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] in ('--cat',
                                                                    '--echo'):
    sys.exit(
        main(sys.platform, sys.stdin, sys.stdout, sys.argv[1], sys.argv[2:]))
| 40.883817 | 113 | 0.650665 |
3405c87030f58cbc3a3b2db0a1ebe2385902ba6d | 2,034 | py | Python | run/logging-manual/main.py | maciekgawron/python-docs-samples | 3fa7bb1f97f1db55423ff9f98a53d6d3d1e9b281 | [
"Apache-2.0"
] | 1 | 2021-09-03T14:48:52.000Z | 2021-09-03T14:48:52.000Z | run/logging-manual/main.py | maciekgawron/python-docs-samples | 3fa7bb1f97f1db55423ff9f98a53d6d3d1e9b281 | [
"Apache-2.0"
] | null | null | null | run/logging-manual/main.py | maciekgawron/python-docs-samples | 3fa7bb1f97f1db55423ff9f98a53d6d3d1e9b281 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from flask import Flask, request
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Handle GET / by emitting one structured log entry and greeting."""
    # NOTE(review): raises KeyError if GOOGLE_CLOUD_PROJECT is unset --
    # confirm the environment always provides it.
    PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']

    # [START cloudrun_manual_logging]
    # [START run_manual_logging]
    # Uncomment and populate this variable in your code:
    # PROJECT = 'The project ID of your Cloud Run service';

    # Build structured log messages as an object.
    global_log_fields = {}

    # Add log correlation to nest all log messages.
    trace_header = request.headers.get('X-Cloud-Trace-Context')

    if trace_header and PROJECT:
        # The header is "TRACE_ID/SPAN_ID;o=OPTIONS"; only the trace id part
        # (before the first '/') is used to build the trace resource name.
        trace = trace_header.split('/')
        global_log_fields['logging.googleapis.com/trace'] = (
            f"projects/{PROJECT}/traces/{trace[0]}")

    # Complete a structured log entry.
    entry = dict(severity='NOTICE',
                 message='This is the default display field.',
                 # Log viewer accesses 'component' as jsonPayload.component'.
                 component='arbitrary-property',
                 **global_log_fields)

    print(json.dumps(entry))
    # [END run_manual_logging]
    # [END cloudrun_manual_logging]

    return 'Hello Logger!'
if __name__ == '__main__':
    # Honor the PORT environment variable, defaulting to 8080.
    PORT = int(os.getenv('PORT')) if os.getenv('PORT') else 8080

    # This is used when running locally. Gunicorn is used to run the
    # application on Cloud Run. See entrypoint in Dockerfile.
    app.run(host='127.0.0.1', port=PORT, debug=True)
| 31.78125 | 77 | 0.685349 |
42267565e96449d6cdfcc9b646c00d55f06f9712 | 516 | py | Python | 이재호/day2/practice4.py | edangros/SDDataExpertProgram2021 | 67b44365233994a41449497c34ebf7badaf82d92 | [
"CC-BY-4.0"
] | null | null | null | 이재호/day2/practice4.py | edangros/SDDataExpertProgram2021 | 67b44365233994a41449497c34ebf7badaf82d92 | [
"CC-BY-4.0"
] | null | null | null | 이재호/day2/practice4.py | edangros/SDDataExpertProgram2021 | 67b44365233994a41449497c34ebf7badaf82d92 | [
"CC-BY-4.0"
] | null | null | null | #practice 4
#googoodan
#print the first line
# Header row for the 1..9 multiplication ("gugudan") table: one
# "#  n단  #" cell per column.
for n in range(1,10):
    # The triple-quoted literal below is a runtime no-op used as a block
    # comment (Korean): it explains the format widths -- '#' left-aligned
    # in 3 cells, the "n단" label in 2 (Hangul glyphs occupy 2 display
    # cells, so effectively 3), '#' right-aligned in 3.
    '''
    #을 왼쪽정렬, 3칸
    n단 글씨는 2칸(한글이 2칸 자리를 먹으므로 결과적으로 3칸)
    #을 오른쪽정렬, 3칸
    '''
    print ("%-3s%-2s%3s"%("#",str(n)+"단","#"), end=" ")
#insert line
print("")
#print the rest lines
# Body: row n1 holds the n1-th product of every column n2 (n2 X n1 = n2*n1).
for n1 in range(1,10):#row
    for n2 in range(1,10):#column
        # Korean no-op literal: widths are 2+'X', 2+'=', ' '+2, i.e.
        # three 3-cell fields, keeping the columns aligned.
        '''
        이제 2칸+'X'
        2칸+'='
        ' ' + 2칸
        따라서 3칸 3칸 3칸
        줄맞춤 완-벽
        '''
        print ("%-2dX%2d= %2d"%(n2, n1, n2*n1), end=" ")
0601a052f3d875ed9c3c859401edc070b3b2345f | 362 | py | Python | openprocurement/auction/interfaces.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 23 | 2015-07-09T17:07:39.000Z | 2020-11-14T11:23:39.000Z | openprocurement/auction/interfaces.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 23 | 2015-01-14T22:33:58.000Z | 2018-02-08T16:31:20.000Z | openprocurement/auction/interfaces.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 27 | 2015-02-17T10:22:32.000Z | 2021-06-08T06:50:45.000Z | from zope.interface import Interface
class IComponents(Interface):
    """Marker interface; declares no methods or attributes."""


class IAuctionDatabridge(Interface):
    """Marker interface; declares no methods or attributes."""


class IFeedItem(Interface):
    """Marker interface; declares no methods or attributes."""


class IAuctionsManager(Interface):
    """Marker interface; declares no methods or attributes."""


class IAuctionType(Interface):
    """Marker interface; declares no methods or attributes."""


class IAuctionsChronograph(Interface):
    """Marker interface; declares no methods or attributes."""


class IAuctionsServer(Interface):
    """Marker interface; declares no methods or attributes."""
185255439cc264358ba3d562dfd9f136f870779a | 14,908 | py | Python | python/paddle/fluid/tests/unittests/test_lstm_op.py | xxworkspace/Paddle | 1f6f2235e0751248ee1a684667023b78834f9d1f | [
"Apache-2.0"
] | 3 | 2017-05-11T11:10:13.000Z | 2017-10-23T09:13:14.000Z | python/paddle/fluid/tests/unittests/test_lstm_op.py | betterpig/paddle_npu | 74ad4b6a700795d5edce8dd49d6c2df6f15e8935 | [
"Apache-2.0"
] | 9 | 2021-08-03T11:39:03.000Z | 2021-09-16T08:03:58.000Z | python/paddle/fluid/tests/unittests/test_lstm_op.py | betterpig/paddle_npu | 74ad4b6a700795d5edce8dd49d6c2df6f15e8935 | [
"Apache-2.0"
] | 2 | 2021-02-19T06:42:29.000Z | 2021-02-26T12:16:05.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle import fluid
from paddle.fluid.layers import lstm as LSTM
from paddle.fluid.layers import fill_constant
from paddle.fluid.framework import program_guard, Program
# Clipping bounds for the reference activations below: keeping the exp()
# argument within these ranges avoids floating-point overflow.
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0


def identity(x):
    """Identity activation: returns its input unchanged."""
    return x


def sigmoid(x):
    """Numerically-safe logistic sigmoid, 1 / (1 + exp(-x)).

    The argument is clipped to [SIGMOID_THRESHOLD_MIN, SIGMOID_THRESHOLD_MAX]
    before exponentiation. Using np.clip (instead of the original
    boolean-mask assignment) additionally allows plain scalars as input,
    while producing bit-identical results for ndarray input.
    """
    y = np.clip(x, SIGMOID_THRESHOLD_MIN, SIGMOID_THRESHOLD_MAX)
    return 1. / (1. + np.exp(-y))


def tanh(x):
    """tanh computed as 2 / (1 + exp(-2x)) - 1 with overflow protection.

    The exponent (-2x) is capped at EXP_MAX_INPUT. np.minimum (instead of
    the original mask assignment) also accepts scalars, with identical
    results for ndarray input.
    """
    y = np.minimum(-2. * x, EXP_MAX_INPUT)
    return (2. / (1. + np.exp(y))) - 1.


def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return np.maximum(x, 0)


# Name -> callable registry used by the tests to select activations
# by their string attribute values.
ACTIVATION = {
    'identity': identity,
    'sigmoid': sigmoid,
    'tanh': tanh,
    'relu': relu
}
def lstm(
        input,  # T x 4D
        lod,  # 1 x N
        h0=None,  # N x D
        c0=None,  # N x D
        w_h=None,  # D x 4D
        w_b=None,  # 1 x 4D
        w_c=None,  # 1 x 3D
        is_reverse=False,
        act_gate=None,
        act_cell=None,
        act_cand=None):
    """Numpy reference implementation of the LoD-based LSTM forward pass.

    `input` holds the per-step 4D gate pre-activations for all T time steps
    of all N sequences concatenated; `lod` carries the per-sequence lengths.
    `w_c`, when given, supplies the 3*D peephole weights.
    Returns (hidden, cell), each of shape (T, D).
    """

    def _step(x, w_h, w_c, h_pre, c_pre, act_gate, act_cell, act_cand):
        # One LSTM time step for a single element of one sequence.
        g = np.dot(h_pre, w_h)  # 1 x 4D
        g = g + x
        g = np.reshape(g, (1, g.size))
        # Gate layout in the packed 4D vector: candidate, input, forget, output.
        c, g_i, g_f, g_o = np.split(g, 4, axis=1)
        if w_c is None:
            g_i = act_gate(g_i)  # 1 x D
            g_f = act_gate(g_f)  # 1 x D
        else:
            # Peephole connections: input/forget gates also see the
            # PREVIOUS cell state.
            w_ic, w_fc, w_oc = np.split(w_c, 3, axis=1)
            g_i = act_gate(g_i + w_ic * c_pre)  # 1 x D
            g_f = act_gate(g_f + w_fc * c_pre)  # 1 x D
        c = g_f * c_pre + g_i * act_cand(c)  # 1 x D

        if w_c is None:
            g_o = act_gate(g_o)  # 1 x D
        else:
            # The output gate peeks at the NEW cell state.
            _, _, w_oc = np.split(w_c, 3, axis=1)
            g_o = act_gate(g_o + w_oc * c)  # 1 x D
        h = g_o * act_cell(c)
        return h, c

    def _reverse(x, offset):
        # Flip each sequence (delimited by `offset`) along the time axis.
        y = np.zeros_like(x)
        for i in range(len(offset) - 1):
            b, e = offset[i], offset[i + 1]
            y[b:e, :] = np.flip(x[b:e, :], 0)
        return y

    # Convert per-sequence lengths into absolute start offsets.
    offset = [0]
    for l in lod[0]:
        offset.append(offset[-1] + l)

    batch_size = len(lod[0])
    hidden = []
    cell = []
    input = _reverse(input, offset) if is_reverse else input
    if w_b is not None:
        # Broadcast the bias over all T rows.
        input = input + np.tile(w_b, (offset[-1], 1))
    for i in range(batch_size):
        # compute one sequence
        seq_len = lod[0][i]
        x = input[offset[i]:offset[i + 1], :]
        h_pre = h0[i]  # 1 x D
        c_pre = c0[i]  # 1 x D
        for j in range(seq_len):
            # compute one step
            h_pre, c_pre = _step(x[j], w_h, w_c, h_pre, c_pre, act_gate,
                                 act_cell, act_cand)
            hidden.append(h_pre.flatten())
            cell.append(c_pre.flatten())

    hidden = np.array(hidden).astype('float64')
    cell = np.array(cell).astype('float64')

    # Undo the time reversal so outputs align with the original ordering.
    hidden = _reverse(hidden, offset) if is_reverse else hidden
    cell = _reverse(cell, offset) if is_reverse else cell

    assert hidden.shape == (input.shape[0], input.shape[1] / 4)
    assert cell.shape == (input.shape[0], input.shape[1] / 4)
    return hidden, cell
class LstmUnitTestError(unittest.TestCase):
    """Checks that fluid.layers.lstm raises TypeError for non-Variable
    inputs and for Variables of an unsupported dtype."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            batch_size = 20
            seq_len = 100
            dropout_prob = 0.2
            hidden_size = 150
            num_layers = 1
            input = fluid.data(
                name='input',
                shape=[batch_size, seq_len, hidden_size],
                dtype='float32')
            pre_hidden = fill_constant([num_layers, batch_size, hidden_size],
                                       'float32', 0.0)
            pre_cell = fill_constant([num_layers, batch_size, hidden_size],
                                     'float32', 0.0)

            # Raw numpy arrays (not fluid Variables) -- must be rejected.
            np_input = np.random.uniform(
                -0.1, 0.1, (batch_size, seq_len, hidden_size)).astype('float64')
            np_pre_hidden = np.random.uniform(
                -0.1, 0.1,
                (num_layers, batch_size, hidden_size)).astype('float64')
            np_pre_cell = np.random.uniform(
                -0.1, 0.1,
                (num_layers, batch_size, hidden_size)).astype('float64')

            def test_input_Variable():
                LSTM(np_input, pre_hidden, pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_input_Variable)

            def test_pre_hidden_Variable():
                LSTM(np_input, np_pre_hidden, pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_pre_hidden_Variable)

            def test_pre_cell_Variable():
                LSTM(np_input, pre_hidden, np_pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_pre_cell_Variable)

            # Variables with dtype int32 -- must also be rejected.
            def test_input_type():
                error_input = fluid.data(
                    name='error_input',
                    shape=[None, hidden_size * 3],
                    dtype='int32')
                LSTM(error_input, pre_hidden, pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_input_type)

            def test_pre_hidden_type():
                error_pre_hidden = fluid.data(
                    name='error_pre_hidden',
                    shape=[None, hidden_size],
                    dtype='int32')
                LSTM(input, error_pre_hidden, pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_pre_hidden_type)

            def test_pre_cell_type():
                error_pre_cell = fluid.data(
                    name='error_pre_cell',
                    shape=[None, hidden_size],
                    dtype='int32')
                LSTM(input, pre_hidden, error_pre_cell, \
                     seq_len, hidden_size, num_layers, \
                     dropout_prob=dropout_prob)

            self.assertRaises(TypeError, test_pre_cell_type)
class TestLstmOp(OpTest):
    """Correctness test for the 'lstm' op: builds random inputs, computes
    the expected outputs with the numpy reference lstm() above, and checks
    the op's forward output and gradients against them."""

    def set_lod(self):
        # One LoD level with three sequences of lengths 2, 3 and 2.
        self.lod = [[2, 3, 2]]

    def set_argument(self):
        self.set_lod()
        self.D = 16
        self.act_gate = 'sigmoid'
        self.act_cell = 'tanh'
        self.act_cand = 'tanh'
        self.has_initial_state = False
        self.is_reverse = False
        self.use_peepholes = True

    def setUp(self):
        self.set_argument()
        self.op_type = 'lstm'

        # T: total time steps across all sequences; N: number of sequences.
        T = sum(self.lod[0])
        N = len(self.lod[0])

        x = np.random.normal(size=(T, 4 * self.D)).astype('float64')
        if self.has_initial_state:
            h0 = np.random.normal(size=(N, self.D)).astype('float64')
            c0 = np.random.normal(size=(N, self.D)).astype('float64')
        else:
            h0 = np.zeros((N, self.D)).astype('float64')
            c0 = np.zeros((N, self.D)).astype('float64')
        w = np.random.normal(size=(self.D, 4 * self.D)).astype('float64')
        if self.use_peepholes:
            # With peepholes the bias row also carries the 3*D peephole
            # weights after the 4*D gate biases.
            b = np.random.normal(size=(1, 7 * self.D)).astype('float64')
        else:
            b = np.random.normal(size=(1, 4 * self.D)).astype('float64')

        w_b = b[:, 0:4 * self.D]
        w_c = b[:, 4 * self.D:] if self.use_peepholes else None
        h, c = lstm(x, self.lod, h0, c0, w, w_b, w_c, self.is_reverse,
                    ACTIVATION[self.act_gate], ACTIVATION[self.act_cell],
                    ACTIVATION[self.act_cand])

        self.inputs = {'Input': (x, self.lod), 'Weight': w}

        self.inputs['Bias'] = b

        if self.has_initial_state:
            self.inputs['H0'] = h0
            self.inputs['C0'] = c0

        self.outputs = {
            'Hidden': (h, self.lod),
            'Cell': (c, self.lod),
        }
        self.attrs = {
            'use_peepholes': self.use_peepholes,
            'is_reverse': self.is_reverse,
            'gate_activation': self.act_gate,
            'cell_activation': self.act_cell,
            'candidate_activation': self.act_cand
        }

    def test_check_output(self):
        self.check_output(atol=1e-8, check_dygraph=False)

    def test_check_grad(self):
        # TODO(qingqing) remove folowing lines after the check_grad is refined.
        N = len(self.lod[0])
        self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
        self.outputs['BatchCellPreAct'] = np.zeros(
            (N, self.D)).astype('float64')
        self.check_grad(
            ['Input', 'Weight', 'Bias'], ['Hidden'],
            max_relative_error=5e-4,
            check_dygraph=False)
class TestLstmOpCase1(TestLstmOp):
    # LoD variant: the first sequence is empty.
    def set_lod(self):
        self.lod = [[0, 3, 2]]


class TestLstmOpCase2(TestLstmOp):
    # LoD variant: the first and last sequences are empty.
    def set_lod(self):
        self.lod = [[0, 3, 0]]


class TestLstmOpCase3(TestLstmOp):
    # LoD variant: the middle sequence is empty.
    def set_lod(self):
        self.lod = [[2, 0, 4]]
class TestLstmOpError(unittest.TestCase):
    """Checks that fluid.layers.dynamic_lstm raises TypeError for an
    ndarray input and for h_0/c_0 Variables of dtype int32."""

    def test_errors(self):
        with program_guard(Program(), Program()):

            def test_Variable():
                # Plain ndarray (not a fluid Variable) must be rejected.
                input_data = np.random.random((1, 2048)).astype("float32")
                fluid.layers.dynamic_lstm(
                    input=input_data, size=2048, use_peepholes=False)

            self.assertRaises(TypeError, test_Variable)

            def test_h_0():
                # h_0 of dtype int32 must be rejected.
                in_data = fluid.data(
                    name="input", shape=[None, 2048], dtype="float32")
                h = fluid.data(name="h", shape=[None, 512], dtype="int32")
                c = fluid.data(name="c", shape=[None, 512], dtype="float32")
                fluid.layers.dynamic_lstm(
                    input=in_data, size=2048, use_peepholes=False, h_0=h, c_0=c)

            self.assertRaises(TypeError, test_h_0)

            def test_c_0():
                # c_0 of dtype int32 must be rejected.
                in_data_ = fluid.data(
                    name="input_", shape=[None, 2048], dtype="float32")
                h_ = fluid.data(name="h_", shape=[None, 512], dtype="float32")
                c_ = fluid.data(name="c_", shape=[None, 512], dtype="int32")
                fluid.layers.dynamic_lstm(
                    input=in_data_,
                    size=2048,
                    use_peepholes=False,
                    h_0=h_,
                    c_0=c_)

            self.assertRaises(TypeError, test_c_0)
# class TestLstmOpHasInitial(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = True
# self.is_reverse = True
# self.use_peepholes = True
# def test_check_grad(self):
# # TODO(qingqing) remove folowing lines after the check_grad is refined.
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'],
# max_relative_error=5e-4)
# def test_check_grad_ingore_bias(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Bias'))
# def test_check_grad_ingore_weight(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Bias'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Weight'))
# def test_check_grad_ingore_input(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Weight', 'Bias'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('Input'))
# def test_check_grad_ingore_h0(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('H0'))
# def test_check_grad_ingore_c0(self):
# N = len(self.lod[0])
# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64')
# self.outputs['BatchCellPreAct'] = np.zeros(
# (N, self.D)).astype('float64')
# self.check_grad(
# ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'],
# max_relative_error=5e-4,
# no_grad_set=set('C0'))
# class TestLstmOpRerverse(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = False
# self.is_reverse = True
# self.use_peepholes = True
# class TestLstmOpNotUsePeepholes(TestLstmOp):
# def set_argument(self):
# self.lod = [[2, 3, 2]]
# self.D = 16
# self.act_gate = 'sigmoid'
# self.act_cell = 'tanh'
# self.act_cand = 'tanh'
# self.has_initial_state = False
# self.is_reverse = True
# self.use_peepholes = False
if __name__ == '__main__':
    # Discover and run every TestCase defined in this module.
    unittest.main()
| 33.728507 | 81 | 0.544272 |
d0ce8dcff95aacb3803028726aa92221dd89f9fa | 5,856 | py | Python | airflow/contrib/operators/bigquery_check_operator.py | eepstein/airflow | dee7e67aa59537b66660f227fa3ff4c2af9d7808 | [
"Apache-2.0"
] | 1 | 2020-06-16T17:26:28.000Z | 2020-06-16T17:26:28.000Z | airflow/contrib/operators/bigquery_check_operator.py | sonal-raj/airflow | c63ddccf8de2b702d796dc5ccaf398c8062295f6 | [
"Apache-2.0"
] | 1 | 2019-04-18T09:48:17.000Z | 2019-04-18T09:48:17.000Z | airflow/contrib/operators/bigquery_check_operator.py | peking1987/airflow | a71d4b861302db6101a399db9ef562d716f3c26e | [
"Apache-2.0"
] | 1 | 2020-01-17T00:46:50.000Z | 2020-01-17T00:46:50.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.bigquery_hook import BigQueryHook
from airflow.operators.check_operator import \
CheckOperator, ValueCheckOperator, IntervalCheckOperator
from airflow.utils.decorators import apply_defaults
class BigQueryCheckOperator(CheckOperator):
"""
Performs checks against BigQuery. The ``BigQueryCheckOperator`` expects
a sql query that will return a single row. Each value on that
first row is evaluated using python ``bool`` casting. If any of the
values return ``False`` the check is failed and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
the count ``== 0``. You can craft much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
than 3 standard deviation for the 7 day average.
This operator can be used as a data quality check in your pipeline, and
depending on where you put it in your DAG, you have the choice to
stop the critical path, preventing from
publishing dubious data, or on the side and receive email alerts
without stopping the progress of the DAG.
:param sql: the sql to be executed
:type sql: str
:param bigquery_conn_id: reference to the BigQuery database
:type bigquery_conn_id: str
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('sql',)
template_ext = ('.sql', )
@apply_defaults
def __init__(self,
sql,
bigquery_conn_id='google_cloud_default',
use_legacy_sql=True,
*args, **kwargs):
super(BigQueryCheckOperator, self).__init__(sql=sql, *args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.sql = sql
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
class BigQueryValueCheckOperator(ValueCheckOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed
:type sql: str
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('sql',)
template_ext = ('.sql', )
@apply_defaults
def __init__(self, sql,
pass_value,
tolerance=None,
bigquery_conn_id='google_cloud_default',
use_legacy_sql=True,
*args, **kwargs):
super(BigQueryValueCheckOperator, self).__init__(
sql=sql, pass_value=pass_value, tolerance=tolerance,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
class BigQueryIntervalCheckOperator(IntervalCheckOperator):
"""
Checks that the values of metrics given as SQL expressions are within
a certain tolerance of the ones from days_back before.
This method constructs a query like so ::
SELECT {metrics_threshold_dict_key} FROM {table}
WHERE {date_filter_column}=<date>
:param table: the table name
:type table: str
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:type days_back: int
:param metrics_threshold: a dictionary of ratios indexed by metrics, for
example 'COUNT(*)': 1.5 would require a 50 percent or less difference
between the current day, and the prior days_back.
:type metrics_threshold: dict
:param use_legacy_sql: Whether to use legacy SQL (true)
or standard SQL (false).
:type use_legacy_sql: bool
"""
template_fields = ('table',)
@apply_defaults
def __init__(self, table, metrics_thresholds, date_filter_column='ds',
days_back=-7, bigquery_conn_id='google_cloud_default',
use_legacy_sql=True, *args, **kwargs):
super(BigQueryIntervalCheckOperator, self).__init__(
table=table, metrics_thresholds=metrics_thresholds,
date_filter_column=date_filter_column, days_back=days_back,
*args, **kwargs)
self.bigquery_conn_id = bigquery_conn_id
self.use_legacy_sql = use_legacy_sql
def get_db_hook(self):
return BigQueryHook(bigquery_conn_id=self.bigquery_conn_id,
use_legacy_sql=self.use_legacy_sql)
| 38.025974 | 77 | 0.683402 |
a9e1065ee04948b0c0588627353d939922b3e261 | 4,706 | py | Python | infoblox_netmri/api/remote/models/switch_port_fwd_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/switch_port_fwd_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/switch_port_fwd_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SwitchPortFwdRemote(RemoteModel):
"""
The switch forwarding table entries per device, per switch port.
| ``SwitchPortFwdID:`` The internal NetMRI identifier for this switch port forwarding entry.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device from which this switch port forwarding entry was collected.
| ``attribute type:`` number
| ``SwitchPortNumber:`` The switch port number for the port on which this switch forwarding entry was found. This is as reported by the SNMP BRIDGE MIB, and is not the same as the SNMP interface index.
| ``attribute type:`` string
| ``InterfaceID:`` The internal NetMRI identifier for the interface on which this switch forwarding entry was found.
| ``attribute type:`` number
| ``SwitchPortFwdStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``SwitchPortFwdEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``SwitchPortFwdChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``SwitchPortFwdTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``SwitchPortFwdMAC:`` The MAC address that is being forwarded.
| ``attribute type:`` string
| ``SwitchPortFwdStatus:`` The status of this entry; indicates how the entry was entered in the switch forwarding table.
| ``attribute type:`` string
| ``SwitchPortFwdVlanIndex:`` The VLAN number for which this MAC address is forwarded.
| ``attribute type:`` number
| ``SwitchPortFwdVlanID:`` The internal NetMRI identifier for the VLAN for which this MAC address is forwarded.
| ``attribute type:`` number
| ``SwitchPortFwdInterfaceID:`` The internal NetMRI identifier of the interface to which the MAC address corresponds (that is, the destination interface).
| ``attribute type:`` number
| ``SwitchPortFwdDeviceID:`` The internal NetMRI identifier of the device to which the MAC address corresponds (that is, the destination device).
| ``attribute type:`` number
"""
properties = ("SwitchPortFwdID",
"DataSourceID",
"DeviceID",
"SwitchPortNumber",
"InterfaceID",
"SwitchPortFwdStartTime",
"SwitchPortFwdEndTime",
"SwitchPortFwdChangedCols",
"SwitchPortFwdTimestamp",
"SwitchPortFwdMAC",
"SwitchPortFwdStatus",
"SwitchPortFwdVlanIndex",
"SwitchPortFwdVlanID",
"SwitchPortFwdInterfaceID",
"SwitchPortFwdDeviceID",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"SwitchPortFwdID": self.SwitchPortFwdID })
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"SwitchPortFwdID": self.SwitchPortFwdID })
@property
@check_api_availability
def interface(self):
"""
The interface on which the switch port forwarding entry was found.
``attribute type:`` model
"""
return self.broker.interface(**{"SwitchPortFwdID": self.SwitchPortFwdID })
@property
@check_api_availability
def vlan(self):
"""
The VLAN on which the switch port forwarding entry was found.
``attribute type:`` model
"""
return self.broker.vlan(**{"SwitchPortFwdID": self.SwitchPortFwdID })
@property
@check_api_availability
def infradevice(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.infradevice(**{"SwitchPortFwdID": self.SwitchPortFwdID })
| 33.856115 | 206 | 0.621972 |
4fac9bf9cbea091ff196118876cb35444d6dfc84 | 8,890 | py | Python | vqa/models/att.py | crockwell/sg2im | 662fd6b802d12258f7f7586dfb91920e82f2f7a5 | [
"Apache-2.0"
] | null | null | null | vqa/models/att.py | crockwell/sg2im | 662fd6b802d12258f7f7586dfb91920e82f2f7a5 | [
"Apache-2.0"
] | null | null | null | vqa/models/att.py | crockwell/sg2im | 662fd6b802d12258f7f7586dfb91920e82f2f7a5 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from vqa.lib import utils
from vqa.models import seq2vec
from vqa.models import fusion
class AbstractAtt(nn.Module):
def __init__(self, opt={}, vocab_words=[], vocab_answers=[]):
super(AbstractAtt, self).__init__()
self.opt = opt
self.vocab_words = vocab_words
self.vocab_answers = vocab_answers
self.num_classes = len(self.vocab_answers)
# Modules
self.seq2vec = seq2vec.factory(self.vocab_words, self.opt['seq2vec'])
# Modules for attention
self.conv_v_att = nn.Conv2d(self.opt['dim_v'],
self.opt['attention']['dim_v'], 1, 1)
self.linear_q_att = nn.Linear(self.opt['dim_q'],
self.opt['attention']['dim_q'])
self.conv_att = nn.Conv2d(self.opt['attention']['dim_mm'],
self.opt['attention']['nb_glimpses'], 1, 1)
# Modules for classification
self.list_linear_v_fusion = None
self.linear_q_fusion = None
self.linear_classif = None
def _fusion_att(self, x_v, x_q):
raise NotImplementedError
def _fusion_classif(self, x_v, x_q):
raise NotImplementedError
def _attention(self, input_v, x_q_vec):
batch_size = input_v.size(0)
width = input_v.size(2)
height = input_v.size(3)
# Process visual before fusion
#x_v = input_v.view(batch_size*width*height, dim_features)
x_v = input_v
x_v = F.dropout(x_v,
p=self.opt['attention']['dropout_v'],
training=self.training)
x_v = self.conv_v_att(x_v)
if 'activation_v' in self.opt['attention']:
x_v = getattr(F, self.opt['attention']['activation_v'])(x_v)
x_v = x_v.view(batch_size,
self.opt['attention']['dim_v'],
width * height)
x_v = x_v.transpose(1,2)
# Process question before fusion
x_q = F.dropout(x_q_vec, p=self.opt['attention']['dropout_q'],
training=self.training)
x_q = self.linear_q_att(x_q)
if 'activation_q' in self.opt['attention']:
x_q = getattr(F, self.opt['attention']['activation_q'])(x_q)
x_q = x_q.view(batch_size,
1,
self.opt['attention']['dim_q'])
x_q = x_q.expand(batch_size,
width * height,
self.opt['attention']['dim_q'])
# First multimodal fusion
x_att = self._fusion_att(x_v, x_q)
if 'activation_mm' in self.opt['attention']:
x_att = getattr(F, self.opt['attention']['activation_mm'])(x_att)
# Process attention vectors
x_att = F.dropout(x_att,
p=self.opt['attention']['dropout_mm'],
training=self.training)
# can be optim to avoid two views and transposes
x_att = x_att.view(batch_size,
width,
height,
self.opt['attention']['dim_mm'])
x_att = x_att.transpose(2,3).transpose(1,2)
x_att = self.conv_att(x_att)
x_att = x_att.view(batch_size,
self.opt['attention']['nb_glimpses'],
width * height)
list_att_split = torch.split(x_att, 1, dim=1)
list_att = []
for x_att in list_att_split:
x_att = x_att.contiguous()
x_att = x_att.view(batch_size, width*height)
x_att = F.softmax(x_att)
list_att.append(x_att)
self.list_att = [x_att.data for x_att in list_att]
# Apply attention vectors to input_v
x_v = input_v.view(batch_size, self.opt['dim_v'], width * height)
x_v = x_v.transpose(1,2)
list_v_att = []
for i, x_att in enumerate(list_att):
x_att = x_att.view(batch_size,
width * height,
1)
x_att = x_att.expand(batch_size,
width * height,
self.opt['dim_v'])
x_v_att = torch.mul(x_att, x_v)
x_v_att = x_v_att.sum(1)
x_v_att = x_v_att.view(batch_size, self.opt['dim_v'])
list_v_att.append(x_v_att)
return list_v_att
def _fusion_glimpses(self, list_v_att, x_q_vec):
# Process visual for each glimpses
list_v = []
for glimpse_id, x_v_att in enumerate(list_v_att):
x_v = F.dropout(x_v_att,
p=self.opt['fusion']['dropout_v'],
training=self.training)
x_v = self.list_linear_v_fusion[glimpse_id](x_v)
if 'activation_v' in self.opt['fusion']:
x_v = getattr(F, self.opt['fusion']['activation_v'])(x_v)
list_v.append(x_v)
x_v = torch.cat(list_v, 1)
# Process question
x_q = F.dropout(x_q_vec,
p=self.opt['fusion']['dropout_q'],
training=self.training)
x_q = self.linear_q_fusion(x_q)
if 'activation_q' in self.opt['fusion']:
x_q = getattr(F, self.opt['fusion']['activation_q'])(x_q)
# Second multimodal fusion
x = self._fusion_classif(x_v, x_q)
return x
def _classif(self, x):
if 'activation' in self.opt['classif']:
x = getattr(F, self.opt['classif']['activation'])(x)
x = F.dropout(x,
p=self.opt['classif']['dropout'],
training=self.training)
x = self.linear_classif(x)
return x
def forward(self, input_v, input_q):
if input_v.dim() != 4 and input_q.dim() != 2:
raise ValueError
x_q_vec = self.seq2vec(input_q)
list_v_att = self._attention(input_v, x_q_vec)
x = self._fusion_glimpses(list_v_att, x_q_vec)
x = self._classif(x)
return x
class MLBAtt(AbstractAtt):
def __init__(self, opt={}, vocab_words=[], vocab_answers=[]):
# TODO: deep copy ?
opt['attention']['dim_v'] = opt['attention']['dim_h']
opt['attention']['dim_q'] = opt['attention']['dim_h']
opt['attention']['dim_mm'] = opt['attention']['dim_h']
super(MLBAtt, self).__init__(opt, vocab_words, vocab_answers)
# Modules for classification
self.list_linear_v_fusion = nn.ModuleList([
nn.Linear(self.opt['dim_v'],
self.opt['fusion']['dim_h'])
for i in range(self.opt['attention']['nb_glimpses'])])
self.linear_q_fusion = nn.Linear(self.opt['dim_q'],
self.opt['fusion']['dim_h']
* self.opt['attention']['nb_glimpses'])
self.linear_classif = nn.Linear(self.opt['fusion']['dim_h']
* self.opt['attention']['nb_glimpses'],
self.num_classes)
def _fusion_att(self, x_v, x_q):
x_att = torch.mul(x_v, x_q)
return x_att
def _fusion_classif(self, x_v, x_q):
x_mm = torch.mul(x_v, x_q)
return x_mm
class MutanAtt(AbstractAtt):
def __init__(self, opt={}, vocab_words=[], vocab_answers=[]):
# TODO: deep copy ?
opt['attention']['dim_v'] = opt['attention']['dim_hv']
opt['attention']['dim_q'] = opt['attention']['dim_hq']
super(MutanAtt, self).__init__(opt, vocab_words, vocab_answers)
# Modules for classification
self.fusion_att = fusion.MutanFusion2d(self.opt['attention'],
visual_embedding=False,
question_embedding=False)
self.list_linear_v_fusion = nn.ModuleList([
nn.Linear(self.opt['dim_v'],
int(self.opt['fusion']['dim_hv']
/ opt['attention']['nb_glimpses']))
for i in range(self.opt['attention']['nb_glimpses'])])
self.linear_q_fusion = nn.Linear(self.opt['dim_q'],
self.opt['fusion']['dim_hq'])
self.linear_classif = nn.Linear(self.opt['fusion']['dim_mm'],
self.num_classes)
self.fusion_classif = fusion.MutanFusion(self.opt['fusion'],
visual_embedding=False,
question_embedding=False)
def _fusion_att(self, x_v, x_q):
return self.fusion_att(x_v, x_q)
def _fusion_classif(self, x_v, x_q):
return self.fusion_classif(x_v, x_q)
| 39.6875 | 80 | 0.532958 |
260fa5e94fcaac1cbd5e920be1efd810376d27f5 | 5,883 | py | Python | scripts/genseq.py | fxkuehl/keyboard | 52377899263409e3ca3aa230303a3cbd4656045e | [
"MIT"
] | null | null | null | scripts/genseq.py | fxkuehl/keyboard | 52377899263409e3ca3aa230303a3cbd4656045e | [
"MIT"
] | null | null | null | scripts/genseq.py | fxkuehl/keyboard | 52377899263409e3ca3aa230303a3cbd4656045e | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
# Keys are numbered 0-31. Even numbers are left hand, odd numbers are
# right hand, arranged such that for every left hand key x, the right
# hand mirror image is x+1.
#
# 8 | 6 | 4 | 2 0 || 1 3 | 5 | 7 | 9 | 0
# 20 18 | 16 | 14 | 12 10 || 11 13 | 15 | 17 | 19 21 | 1
# 30 | 28 | 26 | 24 22 || 23 25 | 27 | 29 | 31 | 2
# | | | || | | |
# pinky |ring|mid | index || index | mid|ring|pinky
# 3 | 2 | 1 | 0 || 0 | 1 | 2 | 3
# ||
# left hand || right hand
# 0 || 1
def key_hand(key):
return key & 1
def key_row(key):
if key < 10:
return 0
elif key < 22:
return 1
else:
return 2
def key_col(key):
row_start = (0, 10, 22)
r = key_row(key)
return (key - row_start[r]) >> 1
def col_finger(col):
fingers = (0, 0, 1, 2, 3, 3)
return fingers[col]
def key_finger(key):
return col_finger(key_col(key))
def key_number(h, r, c):
row_start = (0, 10, 22)
return row_start[r] + (c << 1) + h
all_keys = range(32)
lh_keys = [key for key in all_keys if key_hand(key) == 0]
rh_keys = [key for key in all_keys if key_hand(key) == 1]
def gen_pairs():
pairs = []
# left hand pairs
pairs += [(k, l) for k in lh_keys for l in lh_keys]
# right hand pairs
pairs += [(k, l) for k in rh_keys for l in rh_keys]
# left hand with one key from right hand in either order
pairs += [(k, 15) for k in lh_keys]
pairs += [(15, k) for k in lh_keys]
# right hand with one key from left hand in either order
pairs += [(k, 14) for k in rh_keys if k != 15]
pairs += [(14, k) for k in rh_keys if k != 15]
return pairs
categories = {0: "same key",
1: "same finger adj key up", # only mid, botton row
2: "same finger adj key down", # only top, mid row
3: "same finger adj key side", # only pinky and index finger
4: "same finger dist key", # skipping a row, top/bottom row
5: "adj finger + row 0",
6: "adj finger + row 1",
7: "adj finger + row 2",
8: "adj finger - row 0",
9: "adj finger - row 1",
10: "adj finger - row 2",
11: "dist finger row 1",
12: "dist finger row 1",
13: "dist finger row 1",
14: "other hand"} # middle finger, home row
def pick_next_key(key, cat):
if cat == 0: # same key, this one is easy
return key
h = key_hand(key)
r = key_row(key)
c = key_col(key)
f = col_finger(c)
if cat == 1: # same finger, adjacent key up
if r == 0:
return None
elif c == 5:
return key_number(h, r-1, 4)
else:
return key_number(h, r-1, c)
elif cat == 2: # same finger, adjacent key down
if r == 2:
return None
elif c == 5:
return key_number(h, r+1, 4)
else:
return key_number(h, r+1, c)
elif cat == 3: # same finger, adjacent key side
if c == 0 or c == 4:
return key + 2
elif c == 1 or c == 5:
return key - 2
else:
return None
elif cat == 4: # same finger, distant key (skipping one row)
if r == 0:
return key_number(h, 2, c)
elif r == 2:
return key_number(h, 0, c)
else:
return None
elif cat <= 7:
if f == 3:
return None
else:
if c == 0:
c = 1
return key_number(h, cat - 5, c+1)
elif cat <= 10:
if f == 0:
return None
else:
if c == 5:
c = 4
return key_number(h, cat - 8, c-1)
elif cat <= 13:
if c < 3:
c = 4
else:
c = 1
return key_number (h, cat - 11, c)
elif cat == 14:
h = (h + 1) & 1
return key_number (h, 1, 2)
else:
return None
def gen_cat_triplets(pairs):
triplets = []
for pair in pairs:
for cat in range(15):
lead = (pick_next_key(pair[0], cat), pair[0], pair[1])
trail = (pair[0], pair[1], pick_next_key(pair[1], cat))
if lead[0] != None and lead not in triplets:
triplets.append(lead)
if trail[2] != None and trail not in triplets:
triplets.append(trail)
return triplets
def gen_all_triplets(pairs):
triplets = [(pair1[0], pair1[1], pair2[1])
for pair1 in pairs for pair2 in pairs
if pair1[1] == pair2[0]]
return triplets
def triplet_filter(t):
h = key_hand(t[0]) + key_hand(t[1]) + key_hand(t[2])
if h != 0:
return False
r = [key_row(k) for k in t]
# If all 3 keys are in the same row, let the equivalent triplet on
# the home row represent it
if r[0] == r[1] and r[0] == r[2]:
return r[0] == 1
# If the keys are using only two adjacent rows, let the equivalent
# triplet on the top two rows represent it, but be careful not to
# eliminate triplets using column 5, which only exists on row 1.
# row.
c5 = [k for k in t if key_col(k) == 5]
r12 = [x for x in r if x >= 1]
if not c5 and len(r12) == 3:
return False
return True
pairs = gen_pairs()
cat_triplets = gen_cat_triplets(pairs)
all_triplets = gen_all_triplets(pairs)
filtered_triplets = [t for t in all_triplets if triplet_filter(t)]
print("Complete list of triples: %d" % len(all_triplets))
print("Category-based triplets: %d" % len(cat_triplets))
print("Filtered list of triplets: %d" % len(filtered_triplets))
| 29.862944 | 77 | 0.505864 |
f4d7668383a62724aec1239d97881797f7aa77b1 | 5,314 | py | Python | io.py | MartinPetersenDev/mqtt_io | 10225fe106b26f2276fafb9ad22f59d3ddeb34ac | [
"MIT"
] | null | null | null | io.py | MartinPetersenDev/mqtt_io | 10225fe106b26f2276fafb9ad22f59d3ddeb34ac | [
"MIT"
] | null | null | null | io.py | MartinPetersenDev/mqtt_io | 10225fe106b26f2276fafb9ad22f59d3ddeb34ac | [
"MIT"
] | null | null | null | from machine import Pin, ADC, SPI
import uasyncio as asyncio
class Output:
def __init__(self, pin, time_ms):
self._raw_output = Pin(pin, Pin.OUT)
self._output_flag = 0
self._time_ms = time_ms
loop = asyncio.get_event_loop()
loop.create_task(self.run())
def value(self):
return self._raw_output.value()
def on(self):
self._output_flag = 1
def off(self):
self._output_flag = 0
async def run (self):
while True:
if self._output_flag == 1:
self._raw_output.on()
else:
self._raw_output.off()
await asyncio.sleep_ms(self._time_ms)
class Input:
def __init__(self, pin, debounce_ms):
self._raw_input = Pin(pin, Pin.IN, Pin.PULL_UP)
self._raw_input.irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=self.irq_callback)
self._irq_flag = 1
self._value = 0
self._debounce = debounce_ms
self._has_new_value = 0
self._old_state = 0
loop = asyncio.get_event_loop()
loop.create_task(self.run())
def irq_callback(self, p):
self._irq_flag = self._raw_input.value()
#print("IRQ flag is: ", self._irq_flag)
def value(self):
self._has_new_value = 0
return self._value
def has_new_value(self):
return self._has_new_value
async def run(self):
while True:
if self._irq_flag == self._raw_input.value() and self._raw_input.value() == 0:
self._value = 1
self._has_new_value = 1
else:
self._value = 0
if self._old_state != self._value:
print("[io] Button:", self._value)
self._old_state = self._value
await asyncio.sleep_ms(self._debounce)
class Counter(Input):
async def run(self):
while True:
if self._has_new_value == 0:
self._value = 0
if self._irq_flag == self._raw_input.value() and self._raw_input.value() == 0:
self._value += 1
self._irq_flag = 1
self._has_new_value = 1
print("[io] Counter:", self._value)
await asyncio.sleep_ms(self._debounce)
def value(self):
self._has_new_value = 0
return int(self._value)
class Analog:
def __init__(self, scaling, offset, update_ms):
self._adc = ADC(0)
self._update_ms = update_ms
self._has_new_value = 0
self._value = 0
self._scaling = scaling
self._offset = offset
loop = asyncio.get_event_loop()
loop.create_task(self.run())
def value(self):
return self._value
def has_new_value(self):
return self._has_new_value
async def run(self):
while True:
self._value = self._adc.read() * self._scaling + self._offset
print("[io] Analog0 reads:", self._value)
self._has_new_value = 1
await asyncio.sleep_ms(self._update_ms)
class MCP3202(Analog):
def __init__(self, scaling, offset, update_ms, channel):
self._adc = ADC(0)
self._update_ms = update_ms
self._has_new_value = 0
self._value = 0
self._scaling = scaling
self._offset = offset
loop = asyncio.get_event_loop()
loop.create_task(self.run())
self._ch = channel
self._cs = Pin(15, Pin.OUT)
self._bytes = bytearray(3)
self._spi = SPI(-1, baudrate=100000, polarity=1, phase=0, sck=Pin(14), mosi=Pin(13), miso=Pin(12))
self._spi.init(baudrate=100000)
async def run(self):
while True:
self._cs.on()
self._cs.off()
if self._ch == 1:
self._bytes = self._spi.read(3, 0xe0)
else:
self._bytes = self._spi.read(3, 0xc0)
self._cs.on()
self._value = self._bytes[0]*256*256 + self._bytes[1]*256 + self._bytes[2]
self._value = self._value / 127
if self._ch == 1:
print("[io] MCP3202 CH1 reads:", self._value)
else:
print("[io] MCP3202 CH0 reads:", self._value)
self._has_new_value = 1
await asyncio.sleep_ms(self._update_ms)
class Digital(Input):
def __init__(self, pin, debounce_ms):
self._raw_input = Pin(pin, Pin.IN)
self._raw_input.irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=self.irq_callback)
self._irq_flag = 0
self._value = 0
self._debounce = debounce_ms
self._has_new_value = 0
loop = asyncio.get_event_loop()
loop.create_task(self.run())
async def run(self):
while True:
if self._irq_flag == self._raw_input.value() and self._raw_input.value() == 1:
self._value = self._raw_input.value()
self._has_new_value = 1
else:
self._value = 0
print("[io] Digital:", self._raw_input.value())
await asyncio.sleep_ms(self._debounce)
| 30.895349 | 107 | 0.551562 |
502172048cfdc55cdc6e25b0d396c62fa92539ed | 1,486 | py | Python | release/scripts/addons/add_mesh_BoltFactory/__init__.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-01-18T22:13:24.000Z | 2020-01-18T22:13:24.000Z | release/scripts/addons/add_mesh_BoltFactory/__init__.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/addons/add_mesh_BoltFactory/__init__.py | vic3t3chn0/Bforartists | 7c54a60dd7aa568e20ae7e3778dfef993b61b7b5 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "BoltFactory",
"author": "Aaron Keith",
"version": (0, 4, 0),
"blender": (2, 80, 0),
"location": "View3D > Add > Mesh",
"description": "Add a bolt or nut",
"wiki_url": "https://docs.blender.org/manual/en/dev/addons/"
"add_mesh/boltfactory.html",
"category": "Add Mesh",
}
if "bpy" in locals():
import importlib
importlib.reload(Boltfactory)
importlib.reload(createMesh)
else:
from . import Boltfactory
from . import createMesh
import bpy
# ### REGISTER ###
def register():
Boltfactory.register()
def unregister():
Boltfactory.unregister()
if __name__ == "__main__":
register()
| 25.62069 | 74 | 0.671602 |
8b9f6af822045d5bdff16bf1e19506327a7804c6 | 262 | py | Python | __init__.py | jckett/CriticalityMaps | 94a965ba86a86b80c08c632bc2647cf34b32eafe | [
"MIT"
] | 6 | 2019-11-21T20:53:07.000Z | 2020-10-28T07:19:46.000Z | __init__.py | jckett/CriticalityMaps | 94a965ba86a86b80c08c632bc2647cf34b32eafe | [
"MIT"
] | 3 | 2020-02-28T22:19:17.000Z | 2021-04-08T21:43:00.000Z | __init__.py | jckett/CriticalityMaps | 94a965ba86a86b80c08c632bc2647cf34b32eafe | [
"MIT"
] | 3 | 2020-01-21T17:29:02.000Z | 2021-04-08T16:02:59.000Z | from criticalityMaps.mapping import inp_to_geojson, make_criticality_map, wn_dataframe
from criticalityMaps.criticality import fire_criticality_analysis, pipe_criticality_analysis, segment_criticality_analysis, process_criticality, runner
__version__ = '0.1.0'
| 52.4 | 151 | 0.881679 |
5617daaa08c53739da72a72142598e35ca859b60 | 13,646 | py | Python | karafuru.py | Ennea/karafuru | 131dfece98a0bfb4d3795bf8357c2648768dbd9e | [
"MIT"
] | 1 | 2021-07-27T19:43:13.000Z | 2021-07-27T19:43:13.000Z | karafuru.py | Ennea/karafuru | 131dfece98a0bfb4d3795bf8357c2648768dbd9e | [
"MIT"
] | null | null | null | karafuru.py | Ennea/karafuru | 131dfece98a0bfb4d3795bf8357c2648768dbd9e | [
"MIT"
] | null | null | null | import ctypes
from functools import partial
import math
import os
import re
import sys
import tkinter as tk
from tkinter import ttk
from lch import lch_to_srgb, srgb_to_lch
from PIL import Image, ImageTk, ImageGrab
class Karafuru(tk.Frame):
_variables = {}
_int_regex = re.compile(r'^$|^\d+$')
_float_regex = re.compile(r'^$|^\d+\.?\d?$')
_hex_regex = re.compile(r'^$|^#[\da-fA-F]{,6}$')
_hex_strict_regex = re.compile(r'^#([\da-fA-F]{2})([\da-fA-F]{2})([\da-fA-F]{2})$')
_update_lock = False
_color_preview = None
_color_preview_frame = None
_picker_preview = None
_picker_preview_frame = None
_picker_button = None
def __init__(self, master=None):
super().__init__(master, name='karafuru')
master.title('karafuru')
master.iconphoto(False, tk.PhotoImage(file=os.path.join(os.path.dirname(sys.argv[0]), 'icon16.png')),
tk.PhotoImage(file=os.path.join(os.path.dirname(sys.argv[0]), 'icon.png')),
tk.PhotoImage(file=os.path.join(os.path.dirname(sys.argv[0]), 'icon256.png')))
master.resizable(False, False)
self.master = master
self.grid(padx=5, pady=5)
self.grid_columnconfigure(3, pad=20)
self._create_variables()
self._create_widgets()
self.hex = '#000000'
# variable getters and setters
def _get_variable_value(self, variable):
value = self._variables[variable].get()
return 0 if value == '' else value
def _set_variable_value(self, variable, value):
self._variables[variable].set(value)
@property
def hex(self):
return self._get_variable_value('hex')
@hex.setter
def hex(self, value):
self._set_variable_value('hex', value)
@property
def red(self):
return self._get_variable_value('red')
@red.setter
def red(self, value):
self._set_variable_value('red', value)
@property
def green(self):
return self._get_variable_value('green')
@green.setter
def green(self, value):
self._set_variable_value('green', value)
@property
def blue(self):
return self._get_variable_value('blue')
@blue.setter
def blue(self, value):
self._set_variable_value('blue', value)
@property
def lightness(self):
return self._get_variable_value('lightness')
@lightness.setter
def lightness(self, value):
self._set_variable_value('lightness', value)
@property
def chroma(self):
return self._get_variable_value('chroma')
@chroma.setter
def chroma(self, value):
self._set_variable_value('chroma', value)
@property
def hue(self):
return self._get_variable_value('hue')
@hue.setter
def hue(self, value):
self._set_variable_value('hue', value)
# change handlers
def _reset_warning(self):
self._warning.config(text='')
def _update_color_from_rgb(self, changed_component=None, changed_value=None):
if self._update_lock:
return
rgb = {
'red': self.red,
'green': self.green,
'blue': self.blue
}
if changed_component and changed_value:
rgb[changed_component] = changed_value
self._reset_warning()
hex_ = '#{:02x}{:02x}{:02x}'.format(rgb['red'], rgb['green'], rgb['blue'])
lch = srgb_to_lch((rgb['red'] / 255, rgb['green'] / 255, rgb['blue'] / 255))
self._update_lock = True
self.hex = hex_
self.lightness = lch[0]
self.chroma = lch[1]
self.hue = lch[2]
self._update_lock = False
self._color_preview.config(background=hex_)
def _update_color_from_lch(self, changed_component=None, changed_value=None):
if self._update_lock:
return
lch = {
'lightness': self.lightness,
'chroma': self.chroma,
'hue': self.hue
}
if changed_component and changed_value:
lch[changed_component] = changed_value
self._reset_warning()
(rgb, corrected) = lch_to_srgb((lch['lightness'], lch['chroma'], lch['hue']))
if corrected:
self._warning.config(text='Color has been auto-corrected to RGB boundaries.')
rgb = tuple(round(v * 255) for v in rgb)
hex_ = '#{:02x}{:02x}{:02x}'.format(rgb[0], rgb[1], rgb[2])
self._update_lock = True
self.hex = hex_
self.red = rgb[0]
self.green = rgb[1]
self.blue = rgb[2]
self._update_lock = False
self._color_preview.config(background=hex_)
    def _update_color_from_hex(self, hex_):
        """Derive RGB and LCh values (and the preview) from a '#rrggbb' string."""
        if self._update_lock:
            return
        # Only react to a complete, strictly valid hex color string.
        match = self._hex_strict_regex.match(hex_)
        if match is None:
            return
        self._reset_warning()
        rgb = tuple(int(v, 16) for v in match.groups())
        lch = srgb_to_lch(tuple(v / 255 for v in rgb))
        # Block re-entrant updates while writing the derived values back.
        self._update_lock = True
        self.red = rgb[0]
        self.green = rgb[1]
        self.blue = rgb[2]
        self.lightness = lch[0]
        self.chroma = lch[1]
        self.hue = lch[2]
        self._update_lock = False
        self._color_preview.config(background=hex_)
# using the validate function to also cross update
# all other values, and the preview color. magic!
    def _validate_entry(self, widget, value):
        """Tk 'key' validation callback shared by every entry widget.

        `widget` is the Tk widget path name (%W); the variable name is the
        suffix after its last underscore (e.g. 'entry_red' -> 'red').
        `value` is the prospective new text (%P). Besides accepting/rejecting
        the keystroke, this also cross-updates the other color model and the
        preview swatch.
        """
        variable = widget.rsplit('_', 1)[1]
        is_hex = variable == 'hex'
        is_rgb = variable in ('red', 'green', 'blue')
        is_lch = variable in ('lightness', 'chroma', 'hue')
        # Pick the input pattern for this field: hex string, float, or int.
        if is_hex:
            regex = self._hex_regex
        elif is_lch:
            regex = self._float_regex
        else:
            regex = self._int_regex
        match = regex.match(value)
        if match is None:
            # Malformed input: reject the keystroke entirely.
            return False
        # update color preview
        if is_rgb and value != '':
            # Clamp typed values into range before propagating.
            self._update_color_from_rgb(variable, min(255, int(value)))
        elif is_lch and value != '':
            # upper_limit was attached to the Tk variable in _create_variables.
            upper_limit = self._variables[variable].upper_limit
            self._update_color_from_lch(variable, min(upper_limit, float(value)))
        elif is_hex:
            self._update_color_from_hex(value)
        return True
# color picker event handlers
    def _handle_picker_move(self, event):
        """Live eyedropper: sample the screen around the cursor while the
        'Pick color' button is held down."""
        # Translate button-relative event coords to absolute screen coords.
        x = event.x + self._picker_button.winfo_rootx()
        y = event.y + self._picker_button.winfo_rooty()
        # image = ImageGrab.grab(bbox=(x - 12, y - 12, x + 13, y + 13), all_screens=True)
        # Grab a 15x15 px region centered on the cursor...
        image = ImageGrab.grab(bbox=(x - 7, y - 7, x + 8, y + 8), all_screens=True)
        # ...and blow it up to 75x75 with hard pixel edges for the zoom preview.
        image = image.resize((75, 75), resample=Image.NEAREST)
        self._picker_preview.original_image = image
        self._picker_preview.image = ImageTk.PhotoImage(image)
        self._picker_preview.config(image=self._picker_preview.image)
        # (37, 37) is the center pixel of the 75x75 preview image.
        (red, green, blue) = image.getpixel((37, 37))
        self._set_variable_value('red', red)
        self._set_variable_value('green', green)
        self._set_variable_value('blue', blue)
    def _handle_preview_click(self, event):
        """Pick the color of the pixel clicked inside the 75x75 zoom preview."""
        # Clamp to preview bounds so dragging outside doesn't IndexError.
        x = max(0, min(74, event.x))
        y = max(0, min(74, event.y))
        (red, green, blue) = self._picker_preview.original_image.getpixel((x, y))
        self._set_variable_value('red', red)
        self._set_variable_value('green', green)
        self._set_variable_value('blue', blue)
# initialization methods
    def _create_variables(self):
        """Create the Tk variables backing every input field.

        Note: upper_limit is monkey-patched onto the LCh variables; it is read
        by _validate_entry to clamp typed values into range.
        """
        self._variables['hex'] = tk.StringVar()
        self._variables['red'] = tk.IntVar()
        self._variables['green'] = tk.IntVar()
        self._variables['blue'] = tk.IntVar()
        self._variables['lightness'] = tk.DoubleVar()
        self._variables['lightness'].upper_limit = 100
        self._variables['chroma'] = tk.DoubleVar()
        self._variables['chroma'].upper_limit = 132
        self._variables['hue'] = tk.DoubleVar()
        self._variables['hue'].upper_limit = 360
    def _create_widgets(self):
        """Build the whole UI: preview swatch, hex entry, eyedropper preview,
        pick button, and one label/entry/slider row per color component.

        All entries share the same registered validator; _validate_entry
        dispatches on the widget name ('entry_<component>').
        """
        # validation helper function
        validate = self.register(lambda entry, value: self._validate_entry(entry, value))
        # base layout
        frame_left = tk.Frame(self)
        frame_left.grid(row=0, column=0)
        frame_right = tk.Frame(self)
        frame_right.grid(row=0, column=1)
        tk.Frame(frame_right, width=10).grid(row=0, column=0)
        # color preview
        self._color_preview_frame = tk.Frame(frame_left, bg='#808080', padx=5, pady=5)
        self._color_preview_frame.pack()
        self._color_preview = tk.Frame(self._color_preview_frame, bg='#000', width=75, height=75)
        self._color_preview.pack()
        params = {
            'validate': 'key',
            'validatecommand': (validate, '%W', '%P'),
            'relief': tk.FLAT,
            'borderwidth': 1,
            'highlightbackground': '#a0a0a0',
            'highlightcolor': '#606060',
            'highlightthickness': 1
        }
        tk.Entry(frame_left, width=10, name='entry_hex', textvariable=self._variables['hex'], **params).pack(pady=5)
        # color picker preview
        self._picker_preview_frame = tk.Frame(frame_left, bg='#000', padx=1, pady=1)
        self._picker_preview_frame.pack(pady=5)
        self._picker_preview = tk.Label(self._picker_preview_frame, borderwidth=0)
        self._picker_preview.original_image = Image.new('RGB', (75, 75), 0x808080)
        self._picker_preview.image = ImageTk.PhotoImage(self._picker_preview.original_image)
        self._picker_preview.config(image=self._picker_preview.image)
        self._picker_preview.pack()
        self._picker_preview.bind('<Button-1>', partial(self._handle_preview_click))
        self._picker_preview.bind('<B1-Motion>', partial(self._handle_preview_click))
        # color picker button
        self._picker_button = ttk.Button(self, text='Pick color')
        self._picker_button.grid(row=10, column=0)
        self._picker_button.bind('<Button-1>', partial(self._handle_picker_move))
        self._picker_button.bind('<B1-Motion>', partial(self._handle_picker_move))
        # labels
        tk.Label(frame_right, text='Red').grid(sticky=tk.W, row=0, column=1)
        tk.Label(frame_right, text='Green').grid(sticky=tk.W, row=1, column=1)
        tk.Label(frame_right, text='Blue').grid(sticky=tk.W, row=2, column=1)
        tk.Label(frame_right, text='Lightness').grid(sticky=tk.W, row=4, column=1)
        tk.Label(frame_right, text='Chroma').grid(sticky=tk.W, row=5, column=1)
        tk.Label(frame_right, text='Hue').grid(sticky=tk.W, row=6, column=1)
        # Row 3 (between RGB and LCh) shows gamut-correction warnings.
        self._warning = tk.Label(frame_right, text='', fg='#e04e39')
        self._warning.grid(row=3, column=1, columnspan=3, sticky=tk.E)
        # inputs
        params = {
            'validate': 'key',
            'validatecommand': (validate, '%W', '%P'),
            'justify': 'right',
            'relief': tk.FLAT,
            'borderwidth': 1,
            'highlightbackground': '#a0a0a0',
            'highlightcolor': '#606060',
            'highlightthickness': 1
        }
        tk.Entry(frame_right, width=5, name='entry_red', textvariable=self._variables['red'], **params).grid(row=0, column=2, padx=10, pady=3.5)
        tk.Entry(frame_right, width=5, name='entry_green', textvariable=self._variables['green'], **params).grid(row=1, column=2, padx=10, pady=3.5)
        tk.Entry(frame_right, width=5, name='entry_blue', textvariable=self._variables['blue'], **params).grid(row=2, column=2, padx=10, pady=3.5)
        tk.Entry(frame_right, width=5, name='entry_lightness', textvariable=self._variables['lightness'], **params).grid(row=4, column=2, padx=10, pady=3.5)
        tk.Entry(frame_right, width=5, name='entry_chroma', textvariable=self._variables['chroma'], **params).grid(row=5, column=2, padx=10, pady=3.5)
        tk.Entry(frame_right, width=5, name='entry_hue', textvariable=self._variables['hue'], **params).grid(row=6, column=2, padx=10, pady=3.5)
        # sliders
        params = {
            'from_': 0,
            'orient': tk.HORIZONTAL,
            'length': 300,
            'showvalue': False,
            'repeatdelay': 150,
            'repeatinterval': 25,
            'sliderrelief': tk.FLAT,
            'borderwidth': 0,
            'foreground': '#00ff00',
            'troughcolor': '#c0c0c0',
            'highlightbackground': '#a0a0a0',
            'highlightthickness': 1,
            'background': '#e0e0e0',
            'activebackground': '#f0f0f0'
        }
        tk.Scale(frame_right, to=255, variable=self._variables['red'], **params).grid(row=0, column=3)
        tk.Scale(frame_right, to=255, variable=self._variables['green'], **params).grid(row=1, column=3)
        tk.Scale(frame_right, to=255, variable=self._variables['blue'], **params).grid(row=2, column=3)
        tk.Scale(frame_right, to=100, resolution=0.1, variable=self._variables['lightness'], **params).grid(row=4, column=3)
        tk.Scale(frame_right, to=132, resolution=0.1, variable=self._variables['chroma'], **params).grid(row=5, column=3)
        tk.Scale(frame_right, to=360, resolution=0.1, variable=self._variables['hue'], **params).grid(row=6, column=3)
# on windows, set our own app id to ensure the task bar renders our icon instead of python's
if sys.platform == 'win32':
    ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('ennea.karafuru')
    # https://stackoverflow.com/questions/36514158/tkinter-output-blurry-for-icon-and-text-python-2-7
    ctypes.windll.shcore.SetProcessDpiAwareness(1)
# Application entry point: build the root window and run the Tk event loop.
root = tk.Tk()
app = Karafuru(master=root)
app.mainloop()
| 37.489011 | 156 | 0.617544 |
5035811fb6be8367e91ea5927fae9b1c8bce9eb5 | 3,712 | py | Python | util/logUtil.py | tianlangbsg/stockHacker | 9b8929523aa488ee7bb51ee80a3b4738f0092d47 | [
"Apache-2.0"
] | 1 | 2021-12-08T09:10:32.000Z | 2021-12-08T09:10:32.000Z | util/logUtil.py | tianlangbsg/stockHacker | 9b8929523aa488ee7bb51ee80a3b4738f0092d47 | [
"Apache-2.0"
] | null | null | null | util/logUtil.py | tianlangbsg/stockHacker | 9b8929523aa488ee7bb51ee80a3b4738f0092d47 | [
"Apache-2.0"
] | null | null | null | import logging.config
import os
from datetime import datetime
from util.commonUtil import get_root_path
# DEBUG toggles verbose logging for the whole process.
DEBUG = True
log_level = logging.DEBUG if DEBUG else logging.INFO
# log config here: resolve <project root>/logs and make sure it exists.
root_path = get_root_path()
log_path = os.path.join(root_path, 'logs')
# exist_ok avoids the check-then-create race of exists()/makedirs().
os.makedirs(log_path, exist_ok=True)
curdate = datetime.now().strftime('%Y%m%d')
# Bug fix: build the file path with os.path.join instead of a hard-coded
# Windows '\\' separator, so the module also works on POSIX systems.
InfoLogPath = os.path.join(log_path, 'log_' + curdate + '.log')
log_config_dict = {
    "version": 1,
    'disable_existing_loggers': False,
    'loggers': {
        'log.info': {
            # List: records go to both the file handler and the console.
            'handlers': ['info', 'console'],
            'level': log_level,
            # Do not forward records to the root logger.
            'propagate': False,
        },
    },
    'handlers': {
        # Console output.
        'console': {
            'level': log_level,
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        },
        # Daily-rotating file output.
        'info': {
            'level': log_level,
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'standard',
            'filename': InfoLogPath,
            'when': "midnight",   # rotate the log file at midnight
            'backupCount': 7,     # keep one week of rotated files
            'encoding': 'utf-8'
        },
    },
    'filters': {},
    'formatters': {
        # Standard output format for all handlers.
        'standard': {
            'format': '%(asctime)s - %(levelname)s: %(message)s',
        }
    }
}
logging.config.dictConfig(log_config_dict)
# Module-wide logger used by the info()/warning()/error() helpers below.
log_info = logging.getLogger("log.info")
# log_warn = logging.getLogger("log.warn")
# log_error = logging.getLogger("log.error")
# log_access = logging.getLogger("log.access")
def info(message):
    """Log *message* at INFO level via the module-wide 'log.info' logger."""
    log_info.info(message)
def warning(message):
    """Log *message* at WARNING level via the module-wide 'log.info' logger."""
    log_info.warning(message)
def error(message):
    """Log *message* at ERROR level via the module-wide 'log.info' logger."""
    log_info.error(message)
| 29 | 105 | 0.515086 |
07f16afc2810b9bbcf0ad917da5162d8e2a0c46e | 5,401 | py | Python | venv1/Lib/site-packages/tensorflow/python/training/momentum.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | null | null | null | venv1/Lib/site-packages/tensorflow/python/training/momentum.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-05-20T00:58:04.000Z | 2021-05-20T00:58:04.000Z | venv1/Lib/site-packages/tensorflow/python/training/momentum.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.MomentumOptimizer")
class MomentumOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Momentum algorithm.
  Computes (if `use_nesterov = False`):
  ```
  accumulation = momentum * accumulation + gradient
  variable -= learning_rate * accumulation
  ```
  Note that in the dense version of this algorithm, `accumulation` is updated
  and applied regardless of a gradient's value, whereas the sparse version (when
  the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
  embedding) only updates variable slices and corresponding `accumulation` terms
  when that part of the variable was used in the forward pass.
  """
  def __init__(self, learning_rate, momentum,
               use_locking=False, name="Momentum", use_nesterov=False):
    """Construct a new Momentum optimizer.
    Args:
      learning_rate: A `Tensor` or a floating point value. The learning rate.
      momentum: A `Tensor` or a floating point value. The momentum.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Momentum".
      use_nesterov: If `True` use Nesterov Momentum.
        See [Sutskever et al., 2013](
        http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
        This implementation always computes gradients at the value of the
        variable(s) passed to the optimizer. Using Nesterov Momentum makes the
        variable(s) track the values called `theta_t + mu*v_t` in the paper.
    @compatibility(eager)
    When eager execution is enabled, learning_rate and momentum can each be a
    callable that takes no arguments and returns the actual value to use. This
    can be useful for changing these values across different invocations of
    optimizer functions.
    @end_compatibility
    """
    super(MomentumOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._momentum = momentum
    self._use_nesterov = use_nesterov
  def _create_slots(self, var_list):
    # One zero-initialized "momentum" accumulator slot per variable.
    for v in var_list:
      self._zeros_slot(v, "momentum", self._name)
  def _prepare(self):
    # learning_rate/momentum may be zero-arg callables (eager mode); resolve
    # them to concrete values before converting to tensors.
    learning_rate = self._learning_rate
    if callable(learning_rate):
      learning_rate = learning_rate()
    self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
                                                       name="learning_rate")
    momentum = self._momentum
    if callable(momentum):
      momentum = momentum()
    self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
  def _apply_dense(self, grad, var):
    # Dense update for ref variables, delegated to the fused kernel.
    mom = self.get_slot(var, "momentum")
    return training_ops.apply_momentum(
        var, mom,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op
  def _resource_apply_dense(self, grad, var):
    # Dense update for resource variables (handles instead of refs).
    mom = self.get_slot(var, "momentum")
    return training_ops.resource_apply_momentum(
        var.handle, mom.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
        grad,
        math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)
  def _apply_sparse(self, grad, var):
    # Sparse update: only the slices named by grad.indices are touched.
    mom = self.get_slot(var, "momentum")
    return training_ops.sparse_apply_momentum(
        var, mom,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.values, grad.indices,
        math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov).op
  def _resource_apply_sparse(self, grad, var, indices):
    # Sparse update for resource variables.
    mom = self.get_slot(var, "momentum")
    return training_ops.resource_sparse_apply_momentum(
        var.handle, mom.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        grad, indices,
        math_ops.cast(self._momentum_tensor, grad.dtype),
        use_locking=self._use_locking,
        use_nesterov=self._use_nesterov)
| 41.868217 | 81 | 0.696538 |
ad4dbbfa82965cf582ef0646ff16a51fb5c4d6b6 | 854 | py | Python | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | 3 | 2020-07-05T22:21:00.000Z | 2021-07-06T08:32:13.000Z | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | null | null | null | stylemotery/ast_generators/DefaultPythonAstGenerator.py | ml-in-programming/ml-on-source-code-models | 28f206afcda761320550cefdd53a3f89d206f82f | [
"Apache-2.0"
] | 3 | 2019-11-20T14:16:12.000Z | 2020-07-05T22:21:02.000Z | import ast
class DefaultPythonAstGenerator:
    """Enumerates Python AST node-type names and parses source files to ASTs.

    node_types holds every AST node class name exposed by the `ast` module
    (in dir()'s alphabetical order) plus a trailing NONE sentinel;
    node_types_indices maps each name to its integer id.
    """
    NONE = "None"
    def __init__(self) -> None:
        super().__init__()
        self.node_types = []
        # Bug fix: test issubclass instead of instantiating every attribute.
        # The original called each module attribute (`ast.<name>()`) to probe
        # for AST instances, which invokes unrelated callables with side
        # effects (e.g. ast.main on Python 3.9+ parses argv and reads files).
        # The resulting name list is identical.
        for name in dir(ast):
            attr = getattr(ast, name)
            if isinstance(attr, type) and issubclass(attr, ast.AST):
                self.node_types.append(name)
        self.node_types.append(self.NONE)
        self.node_types_indices = {v: i for i, v in enumerate(self.node_types)}
    def get_node_id(self, node):
        """Return the integer id for a node-type name; KeyError if unknown."""
        return self.node_types_indices[node]
    def __call__(self, filepath):
        """Parse *filepath* and return its AST; print and return None on failure."""
        try:
            with open(filepath, 'r', encoding="utf-8") as file:
                tree = ast.parse(file.read())
            return tree
        except Exception as e:
            print("ERROR during creating AST: ", e, " filename", filepath)
| 29.448276 | 79 | 0.556206 |
6acb897cb307a5c8c8a3380f18f74972319be8c7 | 2,829 | py | Python | pybank/pybank.py | long2691/python-challenge | d73171b7bd177f3787efb491e3bd783392e6036d | [
"MIT"
] | null | null | null | pybank/pybank.py | long2691/python-challenge | d73171b7bd177f3787efb491e3bd783392e6036d | [
"MIT"
] | null | null | null | pybank/pybank.py | long2691/python-challenge | d73171b7bd177f3787efb491e3bd783392e6036d | [
"MIT"
] | null | null | null |
import os
import csv
# PyBank: summarize a budget CSV. Expected output shape:
# Total Months: 25
# Total Revenue: $1241412
# Average Revenue Change: $216825
# Greatest Increase in Revenue: Sep-16 ($815531)
# Greatest Decrease in Revenue: Aug-12 ($-652794)
csvpath = os.path.join(".", "raw_data", "budget_data_1.csv")
# Read every (Date, Revenue) row once.
date_list = []
revenue_list = []
with open(csvpath, newline='') as csvfile:
    for row in csv.DictReader(csvfile):
        date_list.append(str(row["Date"]))
        revenue_list.append(int(row["Revenue"]))
total_months = len(date_list)
total_revenue = sum(revenue_list)
# Month-over-month deltas; there are total_months - 1 of them.
revenue_change = [nxt - cur for cur, nxt in zip(revenue_list, revenue_list[1:])]
# Bug fix: average the deltas over their own count (total_months - 1), not
# over total_months; also guard against a single-month file.
average_revenue_change = (sum(revenue_change) / len(revenue_change)) if revenue_change else 0
total_revenue_print = ("Total revenue:" + str(total_revenue))
total_months_print = ("Total months" + ": " + str(total_months))
average_revenue_print = ("Average Revenue Change:" + str(average_revenue_change))
# NOTE(review): delta k spans date_list[k] -> date_list[k + 1]; labelling it
# with the *starting* month matches the original behaviour -- confirm which
# month the report is meant to show.
Greatest_increase_revenue = (("Greatest Increase in Revenue:" + date_list[revenue_change.index(max(revenue_change))] + " " + str(max(revenue_change))))
Greatest_decrease_revenue = (("Greatest Decrease in Revenue:" + date_list[revenue_change.index(min(revenue_change))] + " " + str(min(revenue_change))))
output = (
    f"Financial Analysis\n"
    f"__________________________________________________________\n"
    f"{total_months_print}\n"
    f"{total_revenue_print}\n"
    f"{average_revenue_print}\n"
    f"{Greatest_increase_revenue}\n"
    f"{Greatest_decrease_revenue}\n")
print(output)
# Bug fix: close the report file deterministically (the original leaked the
# handle returned by open()).
with open("finacial_analysis.txt", "w") as report_file:
    report_file.write(output)
507ccf1ff79129dd4fadfc8548480bdc1063b5ba | 6,388 | py | Python | models/GDN.py | chufangao/GLUE | 237265233846be1225b4a3646535e9dde414cb90 | [
"MIT"
] | 6 | 2021-12-07T08:37:15.000Z | 2022-02-28T14:33:51.000Z | models/GDN.py | chufangao/GLUE | 237265233846be1225b4a3646535e9dde414cb90 | [
"MIT"
] | null | null | null | models/GDN.py | chufangao/GLUE | 237265233846be1225b4a3646535e9dde414cb90 | [
"MIT"
] | null | null | null | import numpy as np
import torch
import matplotlib.pyplot as plt
import torch.nn as nn
import time
from util.time import *
from util.env import *
from torch_geometric.nn import GCNConv, GATConv, EdgeConv
import math
import torch.nn.functional as F
from .graph_layer import GraphLayer
def get_batch_edge_index(org_edge_index, batch_num, node_num):
    """Tile a (2, edge_num) edge index `batch_num` times.

    Copy i has its node ids shifted by i * node_num, so the batch becomes one
    large disconnected graph. Returns a (2, edge_num * batch_num) long tensor.
    """
    edge_num = org_edge_index.shape[1]
    # Per-copy node-id offsets: [0]*E, [node_num]*E, [2*node_num]*E, ...
    offsets = torch.arange(batch_num, device=org_edge_index.device)
    offsets = offsets.repeat_interleave(edge_num) * node_num
    # Broadcasting adds the same offset row to both source and target ids.
    return (org_edge_index.repeat(1, batch_num) + offsets).long()
# Output from GDN
class OutLayer(nn.Module):
    """Per-node MLP head: (Linear + BatchNorm + ReLU) blocks followed by a
    final Linear projecting each node's feature vector down to one scalar."""
    def __init__(self, in_num, node_num, layer_num, inter_num = 512):
        super(OutLayer, self).__init__()
        stack = []
        for depth in range(layer_num):
            if depth == layer_num - 1:
                # Last layer maps to a single output per node.
                # (Change to 2 for GLUE?)
                stack.append(nn.Linear(in_num if layer_num == 1 else inter_num, 1))
            else:
                fan_in = in_num if depth == 0 else inter_num
                stack.append(nn.Linear(fan_in, inter_num))
                stack.append(nn.BatchNorm1d(inter_num))
                stack.append(nn.ReLU())
        self.mlp = nn.ModuleList(stack)
    def forward(self, x):
        """x: (batch, node_num, in_num) -> (batch, node_num, 1)."""
        out = x
        for layer in self.mlp:
            if not isinstance(layer, nn.BatchNorm1d):
                out = layer(out)
            else:
                # BatchNorm1d normalizes dim 1, so move features there and back.
                out = layer(out.permute(0, 2, 1)).permute(0, 2, 1)
        return out
class GNNLayer(nn.Module):
    """Thin wrapper around the project's GraphLayer: graph attention followed
    by BatchNorm and ReLU."""
    def __init__(self, in_channel, out_channel, inter_dim=0, heads=1, node_num=100):
        super(GNNLayer, self).__init__()
        # GraphLayer is project-local (.graph_layer); concat=False so head
        # outputs are averaged rather than concatenated.
        self.gnn = GraphLayer(in_channel, out_channel, inter_dim=inter_dim, heads=heads, concat=False)
        self.bn = nn.BatchNorm1d(out_channel)
        self.relu = nn.ReLU()
        # NOTE(review): leaky_relu is created but never used in forward().
        self.leaky_relu = nn.LeakyReLU()
    def forward(self, x, edge_index, embedding=None, node_num=0):
        # node_num is accepted but unused here -- kept for caller compatibility.
        out, (new_edge_index, att_weight) = self.gnn(x, edge_index, embedding, return_attention_weights=True)
        # Stash attention weights / edges for later inspection by callers.
        self.att_weight_1 = att_weight
        self.edge_index_1 = new_edge_index
        out = self.bn(out)
        return self.relu(out)
# Main GDN class
class GDN(nn.Module):
    """Graph Deviation Network: learns a top-k cosine-similarity graph over
    node embeddings, runs graph attention over it, and maps the per-node
    result through an MLP head and a final linear layer to a 2-way output.
    """
    def __init__(self, edge_index_sets, node_num, dim=64, out_layer_inter_dim=256, input_dim=10, out_layer_num=1, topk=20):
        super(GDN, self).__init__()
        self.edge_index_sets = edge_index_sets
        # NOTE(review): `device` and `edge_index` below are computed but unused.
        device = get_device()
        edge_index = edge_index_sets[0]
        embed_dim = dim
        # One learned embedding per node; used both to condition the GNN and
        # to build the learned k-NN graph in forward().
        self.embedding = nn.Embedding(node_num, embed_dim)
        self.bn_outlayer_in = nn.BatchNorm1d(embed_dim)
        edge_set_num = len(edge_index_sets)
        self.gnn_layers = nn.ModuleList([
            GNNLayer(input_dim, dim, inter_dim=dim+embed_dim, heads=1) for i in range(edge_set_num)
        ])
        self.node_embedding = None
        self.topk = topk
        self.learned_graph = None
        self.out_layer = OutLayer(dim*edge_set_num, node_num, out_layer_num, inter_num = out_layer_inter_dim)
        # Maps the node_num per-node scalars to a 2-class output.
        self.final_linear = nn.Linear(node_num, 2)
        # Batched edge indices are cached per edge set and rebuilt only when
        # the batch size changes (see forward()).
        self.cache_edge_index_sets = [None] * edge_set_num
        self.cache_embed_index = None
        self.dp = nn.Dropout(0.2)
        self.init_params()
        print("init model")
    def init_params(self):
        # Kaiming-uniform init for the node embeddings (same scheme nn.Linear uses).
        nn.init.kaiming_uniform_(self.embedding.weight, a=math.sqrt(5))
    def forward(self, data, org_edge_index):
        """data: (batch, node_num, features). org_edge_index is unused here."""
        x = data.clone().detach()
        edge_index_sets = self.edge_index_sets
        device = data.device
        batch_num, node_num, all_feature = x.shape
        # Flatten batch and node dims: the batch becomes one big graph.
        x = x.view(-1, all_feature).contiguous()
        gcn_outs = []
        for i, edge_index in enumerate(edge_index_sets):
            edge_num = edge_index.shape[1]
            cache_edge_index = self.cache_edge_index_sets[i]
            # Rebuild the batched edge index only when the batch size changed.
            if cache_edge_index is None or cache_edge_index.shape[1] != edge_num*batch_num:
                self.cache_edge_index_sets[i] = get_batch_edge_index(edge_index, batch_num, node_num).to(device)
            batch_edge_index = self.cache_edge_index_sets[i]
            # Compute v_i
            all_embeddings = self.embedding(torch.arange(node_num).to(device))
            weights_arr = all_embeddings.detach().clone()
            all_embeddings = all_embeddings.repeat(batch_num, 1)
            weights = weights_arr.view(node_num, -1)
            # Neighbors j for i: Normalized cosine(norm) matrix
            cos_ji_mat = torch.matmul(weights, weights.T)
            normed_mat = torch.matmul(weights.norm(dim=-1).view(-1,1), weights.norm(dim=-1).view(1,-1))
            cos_ji_mat = cos_ji_mat / normed_mat
            dim = weights.shape[-1]
            topk_num = self.topk
            # Top k neighbors
            topk_indices_ji = torch.topk(cos_ji_mat, topk_num, dim=-1)[1]
            self.learned_graph = topk_indices_ji
            # Build (source j -> target i) pairs for every node's top-k list.
            gated_i = torch.arange(0, node_num).T.unsqueeze(1).repeat(1, topk_num).flatten().to(device).unsqueeze(0)
            gated_j = topk_indices_ji.flatten().unsqueeze(0)
            # Equation 7
            gated_edge_index = torch.cat((gated_j, gated_i), dim=0)
            batch_gated_edge_index = get_batch_edge_index(gated_edge_index, batch_num, node_num).to(device)
            gcn_out = self.gnn_layers[i](x, batch_gated_edge_index, node_num=node_num*batch_num, embedding=all_embeddings)
            gcn_outs.append(gcn_out)
        x = torch.cat(gcn_outs, dim=1)
        x = x.view(batch_num, node_num, -1)
        indexes = torch.arange(0,node_num).to(device)
        # Gate the GNN features elementwise with the node embeddings.
        out = torch.mul(x, self.embedding(indexes))
        out = out.permute(0,2,1)
        out = F.relu(self.bn_outlayer_in(out))
        out = out.permute(0,2,1)
        out = self.dp(out)
        out = self.out_layer(out)
        out = out.view(out.shape[0], -1)
        out = self.final_linear(out)
        #(mean, var) = self.out_layer(out)
        #mean = mean.view(-1, node_num)
        #var = var.view(-1, node_num)
        return out
| 31.94 | 123 | 0.620069 |
c84f51be8ee0a4864abed300b6bd1ab42f156042 | 7,280 | py | Python | Training/distill.py | cfoster0/GPT-Neo-visual-grounding | 06038be74a3606524a50aab932266ac681f0cdec | [
"Apache-2.0"
] | null | null | null | Training/distill.py | cfoster0/GPT-Neo-visual-grounding | 06038be74a3606524a50aab932266ac681f0cdec | [
"Apache-2.0"
] | null | null | null | Training/distill.py | cfoster0/GPT-Neo-visual-grounding | 06038be74a3606524a50aab932266ac681f0cdec | [
"Apache-2.0"
] | null | null | null | from transformers import GPTNeoModel, GPTNeoForCausalLM,\
GPT2Tokenizer, GPTNeoConfig, AdamW
from torch.utils.data import IterableDataset, DataLoader
from lm_dataformat import *
import torch
import torch.nn.functional as F
from torch.nn.functional import normalize, cross_entropy
from torch.nn import DataParallel
from auto_tqdm import tqdm
# Select GPU when available; everything below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Initialize a small GPT-Neo from scratch (randomly initialized, not pretrained).
config = GPTNeoConfig(hidden_size = 128, num_layers = 24, attention_layers = 24)
# create model
model = GPTNeoForCausalLM(config)
if torch.cuda.device_count() > 1:
    model = DataParallel(model)
model.to(device)
# Only the tokenizer comes from the released checkpoint.
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
# Random projection from the LM hidden size into CLIP's latent space.
# Bug fix: read hidden_size from the local `config` object -- when the model
# has been wrapped in DataParallel, `model.config` raises AttributeError
# because DataParallel does not proxy arbitrary attributes.
neo_hidden = config.hidden_size
clip_hidden = 512
projection = torch.nn.Linear(neo_hidden, clip_hidden, bias=False).to(device)
# hparams
temperature = 1.0
learning_rate = 5e-5
weight_decay = 0
grad_accum = 2
clip_bs = 8
lambda_coeff = 1.0  # relative scale for contrastive loss
temp_tensor = torch.tensor(temperature).to(device)
#pytorch dataset for clip juicing
class DistillDataset(IterableDataset):
    """Infinite stream alternating between two kinds of training elements:

    * a contrastive batch of clip_batch_size WIT samples, each caption
      suffixed with <|CLIP|> and paired with its CLIP image latent;
    * a single Pile text sample with a zero placeholder latent.

    NOTE(review): relies on the module globals `clip_hidden` and `device`.
    """
    def __init__(self,\
        tokenizer, clip_batch_size,
        clip_dataset_dir, pile_dataset_dir,
        special_token = "<|CLIP|>", steps = 1e6):
        self.clip_dataset_dir = clip_dataset_dir
        self.pile_dataset_dir = pile_dataset_dir
        # lm_dataformat streaming readers; get_meta=True yields (text, meta)
        # pairs -- for the CLIP dump, meta is the image latent.
        self.clip_rdr = Reader(self.clip_dataset_dir).stream_data(get_meta=True)
        self.pile_rdr = Reader(self.pile_dataset_dir).stream_data(get_meta=True)
        #Steps is the total number of elements we should use. Half from CLIP, half from AR
        self.steps = steps
        #How many elements are in a single contrastive clip batch
        self.clip_batch_size = clip_batch_size
        #Start on an example of WIT.
        self.cur_clip = True
        #Store special token, add to tokenizer. Remember to resize token embeddings on model!
        self.tokenizer = tokenizer
        self.special_token=special_token
        #Get the index for the special token so that we can adjust the decode mask accordingly.
        # (len() taken *before* add_tokens == the id the new token receives.)
        self.special_token_idx=len(self.tokenizer)
        self.tokenizer.add_tokens([special_token])
        self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})
    def __len__(self):
        # Total number of elements this dataset should serve.
        return int(self.steps)
    def __iter__(self):
        return self
    def __next__(self):
        """Return one element: tokenized batch + latents + per-sample CLIP-token
        index + a flag saying whether the contrastive loss applies."""
        tok = self.tokenizer
        txts = list()
        img_latents = list()
        #Return an element from the pile
        if not self.cur_clip:
            text, _ =next(self.pile_rdr)
            txts.append(text)
            #Place holder
            img_latents.append([[0]*clip_hidden])
        #Return an element from CLIP
        else:
            txts = list()
            for _ in range(self.clip_batch_size):
                text, img_latent=next(self.clip_rdr)
                #Append special token
                text += "<|CLIP|>"
                txts.append(text)
                img_latents.append([img_latent])
        #Tokenize text
        toks = tok.batch_encode_plus(txts, max_length=2048, truncation=True, padding=True, return_tensors="pt").to(device)
        #Get the index of the clip tokens.
        # attention_mask sum - 1 == index of the last non-padding token, which
        # is the <|CLIP|> token for contrastive samples.
        clip_idx = (torch.sum(toks.attention_mask, dim=-1).to("cpu") - torch.tensor([1] * len(txts)))
        #Get latent vectors
        latents = torch.cat([torch.tensor(x) for x in img_latents], dim=0).to(device)
        cc = self.cur_clip
        #Flip cur clip
        self.cur_clip = not self.cur_clip
        return {
            **toks,
            'latent_vecs' : latents,
            'clip_idx' : clip_idx,
            'use_distill' : cc,
        }
#Contrastive loss helper function
def clip_loss(a, b, temp):
    """Symmetric CLIP-style InfoNCE loss between two batches of latents.

    Args:
        a: (batch, dim) predicted CLIP embeddings.
        b: (batch, dim) target CLIP image latents.
        temp: scalar temperature tensor; logits are scaled by temp.exp().
    """
    batch_size, dimension = a.shape
    a_normd = normalize(a, p=2, dim=1).squeeze()
    b_normd = normalize(b, p=2, dim=1).squeeze()
    # Cosine-similarity matrix between every pair in the batch.
    logits = torch.einsum('i d, j d -> i j', a_normd, b_normd) * temp.exp()
    # The matching pair sits on the diagonal.
    labels = torch.arange(batch_size).to(device)
    loss = cross_entropy(logits, labels) + cross_entropy(logits.T, labels)
    return loss / 2.0
def ar_loss(model, inp, attn_mask, first_special_token=50257):
    """Autoregressive LM loss: negative sum of next-token log-likelihoods.

    Tokens with ids >= first_special_token (the <|CLIP|> token and [PAD],
    which sit above GPT-2's base vocabulary) are excluded from the loss.
    """
    # logits -> log-probabilities, shape [b, seq, vocab].
    logprobs = F.log_softmax(model(inp, attention_mask=attn_mask, return_dict=True)['logits'], dim=-1)
    pred = logprobs[:, :-1]  # prediction for position t+1
    tgt = inp[:, 1:]         # token actually observed at position t+1
    is_clip_or_padding_token = tgt >= first_special_token
    token_logprobs = torch.gather(pred, 2, tgt.unsqueeze(-1)).squeeze(-1)  # [batch, seq-1]
    # Zero out the contribution of CLIP/padding positions.
    token_logprobs = token_logprobs * (1 - is_clip_or_padding_token.to(torch.int))
    # Bug fix: the loss must be the NEGATIVE log-likelihood. The original
    # returned the raw (non-positive) sum, so minimizing it drove the
    # model's likelihood *down* instead of up.
    return -token_logprobs.sum()
#Load dataset
# Dataset alternates CLIP-grounding batches and plain Pile text batches.
data = DistillDataset(tokenizer = tokenizer, clip_batch_size = clip_bs,\
                      clip_dataset_dir = "../clip_latents_100k.jsonl.zst",\
                      pile_dataset_dir = "../val.jsonl.zst")
loader = DataLoader(dataset=data, batch_size=1)
# Resize token embeddings to cover <|CLIP|> and [PAD] added by the dataset.
model.resize_token_embeddings(len(data.tokenizer))
# Optimize the LM together with the CLIP projection head.
opt = AdamW(list(model.parameters()) + list(projection.parameters()), lr=learning_rate, weight_decay=weight_decay)
# Set up progress bar
pbar = tqdm(enumerate(loader), total=len(data))
loss_progress = 0.0
# Update the pbar description every 20 batches
report_loss_every = 20
# save every 10000 batches
save_every = 10000
for batch, data_elem in pbar:
    model_input = {
        'input_ids':data_elem['input_ids'],
        'attention_mask':data_elem['attention_mask'],
    }
    # Hidden states are needed to read the <|CLIP|> token representation.
    out_embeds = model(**model_input, return_dict=True, output_hidden_states=True)['hidden_states']
    if data_elem['use_distill']:
        # Contrastive batch: predict each sample's CLIP image latent from
        # the last-layer hidden state at its <|CLIP|> token position.
        idx = data_elem['clip_idx']
        last_layer = out_embeds[-1].squeeze() # -1 for last layer
        clip_embeds = torch.zeros((data.clip_batch_size, neo_hidden)).to(device)
        for i,j in enumerate(idx.tolist()[0]):
            clip_embeds[i] = last_layer[i][j]
        # Project to CLIP's latent size and compute the contrastive loss.
        clip_embeds = projection(clip_embeds)
        loss = lambda_coeff * clip_loss(clip_embeds, data_elem['latent_vecs'], temp_tensor)
    else:
        # Bug fix: plain-text batches previously reused the *previous*
        # iteration's `loss` tensor (stale graph, double-counted CLIP loss);
        # start from zero instead.
        loss = 0.0
    # AR loss, normalized by the number of text tokens in the batch.
    n_text_toks = data_elem['clip_idx'].sum()
    loss += ar_loss(model, data_elem['input_ids'], data_elem['attention_mask']) / n_text_toks
    loss.backward()
    #loss_progress += loss.detach().cpu().item()
    # NOTE(review): the accumulation line above is commented out, so the
    # progress-bar loss always reads 0.0 -- confirm and re-enable.
    # Accumulate gradients
    if (batch+1)%grad_accum==0:
        opt.step()
        opt.zero_grad()
    # Update loss progress
    if (batch+1)%report_loss_every==0:
        loss_progress /= float(report_loss_every)
        pbar.set_description("Current loss: " + str(loss_progress))
        loss_progress = 0.0
    # Save model
    # NOTE(review): when wrapped in DataParallel, saving should go through
    # model.module.save_pretrained -- confirm before multi-GPU runs.
    if (batch+1)%save_every==0:
        model.save_pretrained("GPT-Neo-Enriched"+str(batch+1))
        tokenizer.save_pretrained("GPT-Neo-Enriched"+str(batch+1))
model.save_pretrained("GPT-Neo-Enriched")
tokenizer.save_pretrained("GPT-Neo-Enriched")
| 34.178404 | 122 | 0.668681 |
3353bb37e9196176b9f7e234e04960ab9226f070 | 1,095 | py | Python | pkg/ndl.py | rmc8/bibliography_alert | 219846404f98493d7f59b1c2c3c06ce0950db449 | [
"MIT"
] | null | null | null | pkg/ndl.py | rmc8/bibliography_alert | 219846404f98493d7f59b1c2c3c06ce0950db449 | [
"MIT"
] | null | null | null | pkg/ndl.py | rmc8/bibliography_alert | 219846404f98493d7f59b1c2c3c06ce0950db449 | [
"MIT"
] | null | null | null | import re
import html
import itertools
from datetime import date, timedelta
from typing import Iterator, Optional, Tuple

import requests
class NationalDietLibrary:
    """Thin client for the National Diet Library (NDL) search APIs.

    Uses the SRU endpoint for month-scoped bibliography searches and the
    OpenSearch endpoint for ISBN lookup by title.
    """

    def __init__(self, offset: int = 30):
        # SRU search: filter by an arbitrary field for records from the
        # month of `self.dt`.
        # NOTE(review): the URL ends without a closing %22 after the date
        # placeholder — confirm the API tolerates the unterminated quote.
        self.BASE_SRU_URL: str = "https://iss.ndl.go.jp/api/sru?operation=searchRetrieve&query={param}%3d%22{keyword}%22%20AND%20from%3d%22{dt:%Y-%m}"
        self.BASE_OS_URL: str = "https://iss.ndl.go.jp/api/opensearch?title={title}"
        # Reference date: `offset` days before today (month used in queries).
        self.dt = date.today() - timedelta(days=offset)

    def get_bibliography(self, params: list, keywords: list) -> Iterator[Tuple[str, str]]:
        """Yield (unescaped XML response, keyword) for each param/keyword pair.

        Fix: this is a generator, so the return annotation is an iterator
        of tuples rather than a single ``Tuple[str, str]``.
        """
        for param, keyword in itertools.product(params, keywords):
            res = requests.get(self.BASE_SRU_URL.format(param=param, keyword=keyword, dt=self.dt))
            yield html.unescape(res.text), keyword

    def get_isbn(self, title: str) -> Optional[str]:
        """Return the first ISBN found for `title` via OpenSearch, else None."""
        res = requests.get(self.BASE_OS_URL.format(title=title)).text
        htm = html.unescape(res)
        m = re.search(r'(?<=<dc:identifier xsi:type="dcndl:ISBN">)\d+(?=</dc:identifier>)', htm)
        if m:
            return m.group()
        return None  # explicit fall-through for readability
3f00cfcfb46815b0a763962c3f50a4d7870e3508 | 3,835 | py | Python | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/system/selinux.py | phoenixsbk/kvmmgr | 1ed6230dc4246fe511eeb5fc9d0532d3e651b459 | [
"Apache-2.0"
] | 1 | 2019-01-12T06:46:55.000Z | 2019-01-12T06:46:55.000Z | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/system/selinux.py | phoenixsbk/kvmmgr | 1ed6230dc4246fe511eeb5fc9d0532d3e651b459 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/system/selinux.py | phoenixsbk/kvmmgr | 1ed6230dc4246fe511eeb5fc9d0532d3e651b459 | [
"Apache-2.0"
] | 2 | 2016-03-09T16:37:23.000Z | 2022-01-19T13:12:27.000Z | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
SELinux configuration plugin.
"""
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
@util.export
class Plugin(plugin.PluginBase):
    """
    SELinux configuration plugin.

    Registers persistent file-context mappings (``semanage fcontext``) and
    relabels paths (``restorecon``) collected in the setup environment, but
    only when SELinux tooling exists and SELinux is enabled on the host.
    """

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Assume SELinux handling is wanted until setup/validation disables it.
        self._enabled = True

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Other plugins append {type, pattern} dicts / paths to these lists;
        # _misc() consumes them at the MISC stage.
        self.environment[osetupcons.SystemEnv.SELINUX_CONTEXTS] = []
        self.environment[osetupcons.SystemEnv.SELINUX_RESTORE_PATHS] = []

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        condition=lambda self: self._enabled,
    )
    def _setup(self):
        # Locate the SELinux tooling; actual availability is checked during
        # validation via command.get().
        self.command.detect('selinuxenabled')
        self.command.detect('semanage')
        self.command.detect('restorecon')

    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self._enabled,
        priority=plugin.Stages.PRIORITY_HIGH,
    )
    def _validation_enable(self):
        # Disable SELinux handling when the engine is not being enabled or
        # when running in developer mode.
        self._enabled = (
            self.environment[oenginecons.CoreEnv.ENABLE] and
            not self.environment[
                osetupcons.CoreEnv.DEVELOPER_MODE
            ]
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self._enabled,
    )
    def _validation(self):
        if self.command.get('selinuxenabled', optional=True) is None:
            # Tool not found: host has no SELinux support.
            self._enabled = False
        else:
            # selinuxenabled exits 0 iff SELinux is enabled.
            rc, stdout, stderr = self.execute(
                (
                    self.command.get('selinuxenabled'),
                ),
                raiseOnError=False,
            )
            self._enabled = rc == 0

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: self._enabled,
        priority=plugin.Stages.PRIORITY_LOW,
    )
    def _misc(self):
        # Register each collected file-context rule; non-zero exit codes are
        # logged as errors.
        for entry in self.environment[osetupcons.SystemEnv.SELINUX_CONTEXTS]:
            rc, stdout, stderr = self.execute(
                (
                    self.command.get('semanage'),
                    'fcontext',
                    '-a',
                    '-t', entry['type'],
                    entry['pattern']
                )
            )
            if rc != 0:
                self.logger.error(
                    _('Failed to set SELINUX policy for {pattern}').format(
                        pattern=entry['pattern']
                    )
                )
        # Recursively restore contexts for each collected path.
        for path in self.environment[
            osetupcons.SystemEnv.SELINUX_RESTORE_PATHS
        ]:
            rc, stdout, stderr = self.execute(
                (
                    self.command.get('restorecon'),
                    '-r',
                    path
                )
            )
            if rc != 0:
                self.logger.error(
                    _('Failed to refresh SELINUX context for {path}').format(
                        path=path
                    )
                )
# vim: expandtab tabstop=4 shiftwidth=4
| 29.274809 | 77 | 0.573924 |
c0ed6c4c582db24b1c35ef1d5c5665569f3cbd58 | 222 | py | Python | nzpower/admin.py | guoqiao/django-app | 17e9883a900edb22a9287065bf3be105cca30b6a | [
"MIT"
] | null | null | null | nzpower/admin.py | guoqiao/django-app | 17e9883a900edb22a9287065bf3be105cca30b6a | [
"MIT"
] | null | null | null | nzpower/admin.py | guoqiao/django-app | 17e9883a900edb22a9287065bf3be105cca30b6a | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models as m
class CompanyAdmin(admin.ModelAdmin):
model = m.Company
list_display = ['name', 'slug', 'bank_account_no']
admin.site.register(m.Company, CompanyAdmin)
| 22.2 | 54 | 0.738739 |
2317a78e6e8213b397b51f900ac8ce8c8d20a1d1 | 78 | py | Python | src/compas_pgs/ui/Rhino/3GS/dev/__plugin__.py | BlockResearchGroup/compas-3GS | a1f7be3a364f93bb7560688c0e7acee8f86c535f | [
"MIT"
] | 2 | 2021-11-03T23:22:33.000Z | 2021-11-03T23:22:41.000Z | src/compas_pgs/ui/Rhino/3GS/dev/__plugin__.py | BlockResearchGroup/compas-3GS | a1f7be3a364f93bb7560688c0e7acee8f86c535f | [
"MIT"
] | null | null | null | src/compas_pgs/ui/Rhino/3GS/dev/__plugin__.py | BlockResearchGroup/compas-3GS | a1f7be3a364f93bb7560688c0e7acee8f86c535f | [
"MIT"
] | null | null | null | id = "{7ce56e93-79c1-44ac-9716-1a53ca42ac9c}"
version = "0.1.0"
title = "3GS"
| 19.5 | 45 | 0.666667 |
b2473d3f6d2a23799e3b4019210e3e678544d8a0 | 1,829 | py | Python | src/onegov/election_day/layouts/detail.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/election_day/layouts/detail.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/election_day/layouts/detail.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from cached_property import cached_property
from onegov.election_day.layouts.default import DefaultLayout
class HiddenTabsMixin:
"""
Mixing for a generic handling of hiding any kind of menu or submenu
tab on election, election_compound and vote detail layouts in
combination with the yaml file config.
"""
@cached_property
def hidden_tabs(self):
return self.request.app.principal.hidden_tabs.get(self.section, [])
def hide_tab(self, tab):
return tab in self.hidden_tabs
@cached_property
def section(self):
"""Represents section under
principal:
hidden_elements:
tabs:
<section>:
- tab1
- tab2
"""
mapping = {
'votes': 'vote',
'elections': 'election',
'election_compounds': 'elections'
}
return mapping.get(self.model.__tablename__, '')
class DetailLayout(DefaultLayout, HiddenTabsMixin):
""" A common base layout for election and votes which caches some values
used in the macros.
"""
@cached_property
def has_results(self):
return self.model.has_results
@cached_property
def completed(self):
return self.model.completed
@cached_property
def last_result_change(self):
return self.model.last_result_change
@cached_property
def last_modified(self):
return self.model.last_modified
@cached_property
def related_link(self):
return self.model.related_link
@cached_property
def show_map(self):
return self.principal.is_year_available(self.model.date.year)
@cached_property
def related_link_label(self):
return self.model.related_link_label.get(self.request.locale, None)
| 25.760563 | 76 | 0.647895 |
85ea9d7bf2e59b56a7c0ee667ef7a07d2d4507cc | 1,434 | py | Python | cinderclient/v1/availability_zones.py | Acidburn0zzz/python-cinderclient | a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2 | [
"Apache-1.1"
] | null | null | null | cinderclient/v1/availability_zones.py | Acidburn0zzz/python-cinderclient | a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2 | [
"Apache-1.1"
] | null | null | null | cinderclient/v1/availability_zones.py | Acidburn0zzz/python-cinderclient | a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2 | [
"Apache-1.1"
] | null | null | null | # Copyright 2011-2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Availability Zone interface (v1 extension)"""
from cinderclient import base
class AvailabilityZone(base.Resource):
    """A single availability zone resource."""

    NAME_ATTR = 'display_name'

    def __repr__(self):
        # Identify the zone by its name when printed or debugged.
        zone_name = self.zoneName
        return "<AvailabilityZone: %s>" % zone_name
class AvailabilityZoneManager(base.ManagerWithFind):
    """Manage :class:`AvailabilityZone` resources."""
    resource_class = AvailabilityZone

    def list(self, detailed=False):
        """Get a list of all availability zones.

        :param detailed: when truthy, request per-zone detail from the API.
        :rtype: list of :class:`AvailabilityZone`
        """
        # Idiom fix: `detailed is True` silently ignored truthy non-bool
        # values; plain truthiness matches the documented flag semantics.
        url = ("/os-availability-zone/detail" if detailed
               else "/os-availability-zone")
        return self._list(url, "availabilityZoneInfo")
| 33.348837 | 78 | 0.687587 |
1a03d5be737a0a28866d8c37a5d41dd4f094ba15 | 2,946 | py | Python | Support Vector Machine (SVM)/SVM.py | Cyrrav/Text-Classification-Comparator | 8f126ee7d83ee5a93c8cb531fc15f2dbd790933e | [
"CC0-1.0"
] | null | null | null | Support Vector Machine (SVM)/SVM.py | Cyrrav/Text-Classification-Comparator | 8f126ee7d83ee5a93c8cb531fc15f2dbd790933e | [
"CC0-1.0"
] | null | null | null | Support Vector Machine (SVM)/SVM.py | Cyrrav/Text-Classification-Comparator | 8f126ee7d83ee5a93c8cb531fc15f2dbd790933e | [
"CC0-1.0"
] | null | null | null | # Text classification and Evaluation utilizing SVM
import re
from sklearn.datasets import load_files
from nltk.corpus import stopwords
def DALE_SVM():
    """Train and evaluate a ham/spam text classifier using an SVM.

    Loads documents from the CONSOLIDATE folder (one sub-folder per class),
    cleans and lemmatizes them, vectorizes with bag-of-words + TF-IDF,
    splits 60/40 train/test, trains sklearn's SVC and prints the confusion
    matrix, classification report and accuracy.
    """
    hamspam_data = load_files(r"CONSOLIDATE") # a folder containing the 2 categories of documents in individual folders.
    x, y = hamspam_data.data, hamspam_data.target
    documents = []
    for sen in range(0, len(x)):
        # Remove all the special characters
        document = re.sub(r'\W', ' ', str(x[sen]))
        # remove all single characters
        document = re.sub(r'\s+[a-zA-Z]\s+', ' ', document)
        # Remove single characters from the start
        # NOTE(review): r'\^' matches a literal caret, not start-of-string —
        # this pattern likely never matches; confirm whether '^' was intended.
        document = re.sub(r'\^[a-zA-Z]\s+', ' ', document)
        # Substituting multiple spaces with single space
        document = re.sub(r'\s+', ' ', document, flags=re.I)
        # Removing prefixed 'b'
        document = re.sub(r'^b\s+', '', document)
        # Converting to Lowercase
        document = document.lower()
        # Lemmatization
        document = document.split()
        # NOTE(review): import inside the loop is re-executed each iteration
        # (cached, but hoisting it out would be cleaner).
        from nltk.stem import WordNetLemmatizer
        stemmer = WordNetLemmatizer()
        document = [stemmer.lemmatize(word) for word in document]
        document = ' '.join(document)
        documents.append(document)
    # Convert the word to a vector using a BOW model.
    from sklearn.feature_extraction.text import CountVectorizer
    vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, stop_words=stopwords.words('english'))
    x = vectorizer.fit_transform(documents).toarray()
    '''Using TFIDF instead of BOW, TFIDF also takes into account the frequency instead of just the occurance.
    calculated as:
    Term frequency = (Number of Occurrences of a word)/(Total words in the document)
    IDF(word) = Log((Total number of documents)/(Number of documents containing the word))
    TF-IDF is the product of the two.
    '''
    from sklearn.feature_extraction.text import TfidfTransformer
    tfidfconverter = TfidfTransformer()
    x = tfidfconverter.fit_transform(x).toarray()
    ''' Creating training and test sets of the data'''
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=0)
    '''train a classifier with data'''
    from sklearn import svm
    classifier = svm.SVC()
    classifier.fit(x_train, y_train)
    '''Predict the testing data'''
    y_pred = classifier.predict(x_test)
    '''Print evaluation metrices'''
    from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    print("SVM accuracy =", accuracy_score(y_test, y_pred))
    print('------------------------ FINISHED CONSOLIDATED ---------------------------')
    print()
# Call Function
# Entry point: runs the full train/evaluate pipeline when the script executes.
DALE_SVM()
| 35.071429 | 122 | 0.652749 |
d04ada1208d4c662b93cc550c9cc251daa11df70 | 18,266 | py | Python | pipeline/segmentation/methods/installation/displaydataonimage.py | murphygroup/CellSegmentationEvaluator | 6f5d850148d52dc2d08576da42f700c4669d71e4 | [
"MIT"
] | 4 | 2021-12-15T09:05:51.000Z | 2022-02-22T23:44:10.000Z | pipeline/segmentation/methods/installation/displaydataonimage.py | murphygroup/CellSegmentationEvaluator | 6f5d850148d52dc2d08576da42f700c4669d71e4 | [
"MIT"
] | null | null | null | pipeline/segmentation/methods/installation/displaydataonimage.py | murphygroup/CellSegmentationEvaluator | 6f5d850148d52dc2d08576da42f700c4669d71e4 | [
"MIT"
] | null | null | null | """
DisplayDataOnImage
==================
**DisplayDataOnImage** produces an image with measured data on top of
identified objects.
This module displays either a single image measurement on an image of
your choosing, or one object measurement per object on top of every
object in an image. The display itself is an image which you can save to
a file using **SaveImages**.
|
============ ============ ===============
Supports 2D? Supports 3D? Respects masks?
============ ============ ===============
YES NO YES
============ ============ ===============
"""
import matplotlib.axes
import matplotlib.cm
import matplotlib.figure
import matplotlib.text
import numpy
from cellprofiler_core.constants.measurement import C_FILE_NAME
from cellprofiler_core.constants.measurement import C_PATH_NAME
from cellprofiler_core.constants.measurement import M_LOCATION_CENTER_X
from cellprofiler_core.constants.measurement import M_LOCATION_CENTER_Y
from cellprofiler_core.image import FileImage
from cellprofiler_core.image import Image
from cellprofiler_core.module import Module
from cellprofiler_core.preferences import get_default_colormap
from cellprofiler_core.setting import Binary
from cellprofiler_core.setting import Color
from cellprofiler_core.setting import Measurement
from cellprofiler_core.setting.choice import Choice
from cellprofiler_core.setting.choice import Colormap
from cellprofiler_core.setting.range import FloatRange
from cellprofiler_core.setting.subscriber import ImageSubscriber
from cellprofiler_core.setting.subscriber import LabelSubscriber
from cellprofiler_core.setting.text import ImageName
from cellprofiler_core.setting.text import Integer
# "Display object or image measurements?" choices
OI_OBJECTS = "Object"
OI_IMAGE = "Image"

# "Image elements to save" choices
E_FIGURE = "Figure"
E_AXES = "Axes"
E_IMAGE = "Image"

# Display-mode choices: colormap tint vs. per-object text
CT_COLOR = "Color"
CT_TEXT = "Text"

# Color-map scaling choices
CMS_USE_MEASUREMENT_RANGE = "Use this image's measurement range"
CMS_MANUAL = "Manual"
class DisplayDataOnImage(Module):
module_name = "DisplayDataOnImage"
category = "Data Tools"
variable_revision_number = 6
def create_settings(self):
"""Create your settings by subclassing this function
create_settings is called at the end of initialization.
You should create the setting variables for your module here:
# Ask the user for the input image
self.image_name = .ImageSubscriber(...)
# Ask the user for the name of the output image
self.output_image = .ImageName(...)
# Ask the user for a parameter
self.smoothing_size = .Float(...)
"""
self.objects_or_image = Choice(
"Display object or image measurements?",
[OI_OBJECTS, OI_IMAGE],
doc="""\
- *%(OI_OBJECTS)s* displays measurements made on objects.
- *%(OI_IMAGE)s* displays a single measurement made on an image.
"""
% globals(),
)
self.objects_name = LabelSubscriber(
"Select the input objects",
"None",
doc="""\
*(Used only when displaying object measurements)*
Choose the name of objects identified by some previous module (such as
**IdentifyPrimaryObjects** or **IdentifySecondaryObjects**).
""",
)
def object_fn():
if self.objects_or_image == OI_OBJECTS:
return self.objects_name.value
else:
return "Image"
self.measurement = Measurement(
"Measurement to display",
object_fn,
doc="""\
Choose the measurement to display. This will be a measurement made by
some previous module on either the whole image (if displaying a single
image measurement) or on the objects you selected.
""",
)
self.wants_image = Binary(
"Display background image?",
True,
doc="""\
Choose whether or not to display the measurements on
a background image. Usually, you will want to see the image
context for the measurements, but it may be useful to save
just the overlay of the text measurements and composite the
overlay image and the original image later. Choose "Yes" to
display the measurements on top of a background image or "No"
to display the measurements on a black background.""",
)
self.image_name = ImageSubscriber(
"Select the image on which to display the measurements",
"None",
doc="""\
Choose the image to be displayed behind the measurements.
This can be any image created or loaded by a previous module.
If you have chosen not to display the background image, the image
will only be used to determine the dimensions of the displayed image.""",
)
self.color_or_text = Choice(
"Display mode",
[CT_TEXT, CT_COLOR],
doc="""\
*(Used only when displaying object measurements)*
Choose how to display the measurement information. If you choose
%(CT_TEXT)s, **DisplayDataOnImage** will display the numeric value on
top of each object. If you choose %(CT_COLOR)s, **DisplayDataOnImage**
will convert the image to grayscale, if necessary, and display the
portion of the image within each object using a hue that indicates the
measurement value relative to the other objects in the set using the
default color map.
"""
% globals(),
)
self.colormap = Colormap(
"Color map",
doc="""\
*(Used only when displaying object measurements)*
This is the color map used as the color gradient for coloring the
objects by their measurement values. See `this page`_ for pictures
of the available colormaps.
.. _this page: http://matplotlib.org/users/colormaps.html
""",
)
self.text_color = Color(
"Text color",
"red",
doc="""This is the color that will be used when displaying the text.""",
)
self.display_image = ImageName(
"Name the output image that has the measurements displayed",
"DisplayImage",
doc="""\
The name that will be given to the image with the measurements
superimposed. You can use this name to refer to the image in subsequent
modules (such as **SaveImages**).
""",
)
self.font_size = Integer(
"Font size (points)",
10,
minval=1,
doc="""Set the font size of the letters to be displayed.""",
)
self.decimals = Integer(
"Number of decimals",
2,
minval=0,
doc="""Set how many decimals to be displayed, for example 2 decimals for 0.01; 3 decimals for 0.001.""",
)
self.saved_image_contents = Choice(
"Image elements to save",
[E_IMAGE, E_FIGURE, E_AXES],
doc="""\
This setting controls the level of annotation on the image:
- *%(E_IMAGE)s:* Saves the image with the overlaid measurement
annotations.
- *%(E_AXES)s:* Adds axes with tick marks and image coordinates.
- *%(E_FIGURE)s:* Adds a title and other decorations.
"""
% globals(),
)
self.offset = Integer(
"Annotation offset (in pixels)",
0,
doc="""\
Add a pixel offset to the measurement. Normally, the text is
placed at the object (or image) center, which can obscure relevant features of
the object. This setting adds a specified offset to the text, in a random
direction.""",
)
self.color_map_scale_choice = Choice(
"Color map scale",
[CMS_USE_MEASUREMENT_RANGE, CMS_MANUAL],
doc="""\
*(Used only when displaying object measurements as a colormap)*
**DisplayDataOnImage** assigns a color to each object’s measurement
value from a colormap when in colormap-mode, mapping the value to a
color along the colormap’s continuum. This mapping has implicit upper
and lower bounds to its range which are the extremes of the colormap.
This setting determines whether the extremes are the minimum and
maximum values of the measurement from among the objects in the
current image or manually-entered extremes.
- *%(CMS_USE_MEASUREMENT_RANGE)s:* Use the full range of colors to
get the maximum contrast within the image.
- *%(CMS_MANUAL)s:* Manually set the upper and lower bounds so that
images with different maxima and minima can be compared by a uniform
color mapping.
"""
% globals(),
)
self.color_map_scale = FloatRange(
"Color map range",
value=(0.0, 1.0),
doc="""\
*(Used only when setting a manual colormap range)*
This setting determines the lower and upper bounds of the values for the
color map.
""",
)
def settings(self):
"""Return the settings to be loaded or saved to/from the pipeline
These are the settings (from cellprofiler_core.settings) that are
either read from the strings in the pipeline or written out
to the pipeline. The settings should appear in a consistent
order so they can be matched to the strings in the pipeline.
"""
return [
self.objects_or_image,
self.objects_name,
self.measurement,
self.image_name,
self.text_color,
self.display_image,
self.font_size,
self.decimals,
self.saved_image_contents,
self.offset,
self.color_or_text,
self.colormap,
self.wants_image,
self.color_map_scale_choice,
self.color_map_scale,
]
def visible_settings(self):
"""The settings that are visible in the UI
"""
result = [self.objects_or_image]
if self.objects_or_image == OI_OBJECTS:
result += [self.objects_name]
result += [self.measurement, self.wants_image, self.image_name]
if self.objects_or_image == OI_OBJECTS:
result += [self.color_or_text]
if self.use_color_map():
result += [self.colormap, self.color_map_scale_choice]
if self.color_map_scale_choice == CMS_MANUAL:
result += [self.color_map_scale]
else:
result += [self.text_color, self.font_size, self.decimals, self.offset]
result += [self.display_image, self.saved_image_contents]
return result
def use_color_map(self):
"""True if the measurement values are rendered using a color map"""
return self.objects_or_image == OI_OBJECTS and self.color_or_text == CT_COLOR
    def run(self, workspace):
        """Render the measurements onto the image and add the result.

        Reads the background image and the selected image/object
        measurements, draws them (text or colormap overlay) on a matplotlib
        figure, rasterizes the figure and stores it in the image set under
        ``display_image``.
        """
        import matplotlib
        import matplotlib.cm
        import matplotlib.backends.backend_agg
        import matplotlib.transforms
        from cellprofiler.gui.tools import figure_to_image, only_display_image
        #
        # Get the image
        #
        image = workspace.image_set.get_image(self.image_name.value)
        if self.wants_image:
            pixel_data = image.pixel_data
        else:
            # Black background with the same 2-D shape as the source image.
            pixel_data = numpy.zeros(image.pixel_data.shape[:2])
        object_set = workspace.object_set
        if self.objects_or_image == OI_OBJECTS:
            if self.objects_name.value in object_set.get_object_names():
                objects = object_set.get_objects(self.objects_name.value)
            else:
                objects = None
        workspace.display_data.pixel_data = pixel_data
        if self.use_color_map():
            workspace.display_data.labels = objects.segmented
        #
        # Get the measurements and positions
        #
        measurements = workspace.measurements
        if self.objects_or_image == OI_IMAGE:
            value = measurements.get_current_image_measurement(self.measurement.value)
            values = [value]
            # Single image measurement: place at the image center with a
            # unit-length random offset.
            x = [pixel_data.shape[1] / 2]
            x_offset = numpy.random.uniform(high=1.0, low=-1.0)
            x[0] += x_offset
            y = [pixel_data.shape[0] / 2]
            y_offset = numpy.sqrt(1 - x_offset ** 2)
            y[0] += y_offset
        else:
            values = measurements.get_current_measurement(
                self.objects_name.value, self.measurement.value
            )
            if objects is not None:
                if len(values) < objects.count:
                    # Pad missing measurements with NaN so indexing lines up.
                    temp = numpy.zeros(objects.count, values.dtype)
                    temp[: len(values)] = values
                    temp[len(values):] = numpy.nan
                    values = temp
                elif len(values) > objects.count:
                    # If the values for something (say, object number) are greater
                    # than the actual number of objects we have, some might have been
                    # filtered out/removed. We'll need to diff the arrays to figure out
                    # what objects to remove
                    indices = objects.indices
                    diff = numpy.setdiff1d(indices, numpy.unique(objects.segmented))
                    values = numpy.delete(values, diff)
            x = measurements.get_current_measurement(
                self.objects_name.value, M_LOCATION_CENTER_X
            )
            # Unit-circle random jitter scaled by the user-selected offset,
            # so annotations don't sit exactly on the object center.
            x_offset = numpy.random.uniform(high=1.0, low=-1.0, size=x.shape)
            y_offset = numpy.sqrt(1 - x_offset ** 2)
            x += self.offset.value * x_offset
            y = measurements.get_current_measurement(
                self.objects_name.value, M_LOCATION_CENTER_Y
            )
            y += self.offset.value * y_offset
        # Drop entries whose value or location is NaN.
        mask = ~(numpy.isnan(values) | numpy.isnan(x) | numpy.isnan(y))
        values = values[mask]
        x = x[mask]
        y = y[mask]
        workspace.display_data.mask = mask
        workspace.display_data.values = values
        workspace.display_data.x = x
        workspace.display_data.y = y
        fig = matplotlib.figure.Figure()
        axes = fig.add_subplot(1, 1, 1)

        def imshow_fn(pixel_data):
            # Note: requires typecast to avoid failure during
            # figure_to_image (IMG-764)
            img = pixel_data * 255
            img[img < 0] = 0
            img[img > 255] = 255
            img = img.astype(numpy.uint8)
            axes.imshow(img, cmap=matplotlib.cm.get_cmap("Greys"))

        self.display_on_figure(workspace, axes, imshow_fn)
        # Binding an Agg canvas attaches a renderer to the figure
        # (presumably required by figure_to_image below — confirm).
        canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
        if self.saved_image_contents == E_AXES:
            fig.set_frameon(False)
            if not self.use_color_map():
                fig.subplots_adjust(0.1, 0.1, 0.9, 0.9, 0, 0)
            # Size the figure so one image pixel maps to one figure pixel.
            shape = pixel_data.shape
            width = float(shape[1]) / fig.dpi
            height = float(shape[0]) / fig.dpi
            fig.set_figheight(height)
            fig.set_figwidth(width)
        elif self.saved_image_contents == E_IMAGE:
            # Strip axes/decorations so only the image itself remains.
            only_display_image(fig, pixel_data.shape)
        else:
            if not self.use_color_map():
                fig.subplots_adjust(0.1, 0.1, 0.9, 0.9, 0, 0)
        pixel_data = figure_to_image(fig, dpi=fig.dpi)
        image = Image(pixel_data)
        workspace.image_set.add(self.display_image.value, image)
    def run_as_data_tool(self, workspace):
        """Data-tool entry point: locate the image file, then delegate to run().

        Note: workspace.measurements.image_set_number contains the image
        number that should be displayed.
        """
        import wx
        import os.path

        im_id = self.image_name.value
        m = workspace.measurements
        image_name = self.image_name.value
        pathname_feature = "_".join((C_PATH_NAME, image_name))
        filename_feature = "_".join((C_FILE_NAME, image_name))
        if not all(
            [m.has_feature("Image", f) for f in (pathname_feature, filename_feature)]
        ):
            # No recorded path/filename for this image: ask the user.
            with wx.FileDialog(
                None,
                message="Image file for display",
                wildcard="Image files (*.tif, *.png, *.jpg)|*.tif;*.png;*.jpg|"
                "All files (*.*)|*.*",
            ) as dlg:
                if dlg.ShowModal() != wx.ID_OK:
                    return
                pathname, filename = os.path.split(dlg.Path)
        else:
            pathname = m.get_current_image_measurement(pathname_feature)
            filename = m.get_current_image_measurement(filename_feature)
        # Add the image to the workspace ImageSetList
        image_set_list = workspace.image_set_list
        image_set = image_set_list.get_image_set(0)
        ip = FileImage(im_id, pathname, filename)
        image_set.providers.append(ip)
        self.run(workspace)
    def display(self, workspace, figure):
        """Draw the annotated image into the module's display window."""
        figure.set_subplots((1, 1))
        ax = figure.subplot(0, 0)
        # Title the plot with "<source>_<measurement>".
        title = "%s_%s" % (
            self.objects_name.value if self.objects_or_image == OI_OBJECTS else "Image",
            self.measurement.value,
        )

        def imshow_fn(pixel_data):
            # Choose the color or grayscale renderer based on image depth.
            if pixel_data.ndim == 3:
                figure.subplot_imshow_color(0, 0, pixel_data, title=title)
            else:
                figure.subplot_imshow_grayscale(0, 0, pixel_data, title=title)

        self.display_on_figure(workspace, ax, imshow_fn)
    def display_on_figure(self, workspace, axes, imshow_fn):
        """Render the measurement annotations onto `axes`.

        In colormap mode each object is tinted by its measurement value and
        a colorbar is added; in text mode the numeric value is drawn at each
        object's (jittered) centroid.
        """
        if self.use_color_map():
            labels = workspace.display_data.labels
            if self.wants_image:
                pixel_data = workspace.display_data.pixel_data
            else:
                pixel_data = (labels != 0).astype(numpy.float32)
            if pixel_data.ndim == 3:
                # Collapse color to grayscale so the colormap tint is visible.
                pixel_data = numpy.sum(pixel_data, 2) / pixel_data.shape[2]
            colormap_name = self.colormap.value
            if colormap_name == "Default":
                colormap_name = get_default_colormap()
            colormap = matplotlib.cm.get_cmap(colormap_name)
            values = workspace.display_data.values
            vmask = workspace.display_data.mask
            # RGBA lookup table: row 0 is the background label; row i+1
            # colors label i. Masked-out objects stay white.
            colors = numpy.ones((len(vmask) + 1, 4))
            colors[1:][~vmask, :3] = 1
            sm = matplotlib.cm.ScalarMappable(cmap=colormap)
            if self.color_map_scale_choice == CMS_MANUAL:
                sm.set_clim(self.color_map_scale.min, self.color_map_scale.max)
            sm.set_array(values)
            colors[1:][vmask, :] = sm.to_rgba(values)
            # Tint each pixel by its object's color, modulated by intensity.
            img = colors[labels, :3] * pixel_data[:, :, numpy.newaxis]
            imshow_fn(img)
            assert isinstance(axes, matplotlib.axes.Axes)
            figure = axes.get_figure()
            assert isinstance(figure, matplotlib.figure.Figure)
            # edited by Haoran Chen
            # Only draw the colorbar when the output keeps figure decorations.
            if not self.saved_image_contents == E_IMAGE:
                figure.colorbar(sm, ax=axes)
        else:
            imshow_fn(workspace.display_data.pixel_data)
            for x, y, value in zip(
                workspace.display_data.x,
                workspace.display_data.y,
                workspace.display_data.values,
            ):
                try:
                    # Format numerically when possible, else fall back to str().
                    fvalue = float(value)
                    svalue = "%.*f" % (self.decimals.value, value)
                except:
                    svalue = str(value)
                text = matplotlib.text.Text(
                    x=x,
                    y=y,
                    text=svalue,
                    size=self.font_size.value,
                    color=self.text_color.value,
                    verticalalignment="center",
                    horizontalalignment="center",
                )
                axes.add_artist(text)
    def upgrade_settings(self, setting_values, variable_revision_number, module_name):
        """Migrate settings saved by older module revisions up to revision 6.

        Each `if` upgrades exactly one revision, appending defaults for the
        settings introduced at that revision, so a pipeline of any age
        cascades forward to the current layout.
        """
        if variable_revision_number == 1:
            # v1 stored a DPI value; v2 replaced it with font size (10)
            # and number of decimals (2).
            (
                objects_or_image,
                objects_name,
                measurement,
                image_name,
                text_color,
                display_image,
                dpi,
                saved_image_contents,
            ) = setting_values
            setting_values = [
                objects_or_image,
                objects_name,
                measurement,
                image_name,
                text_color,
                display_image,
                10,
                2,
                saved_image_contents,
            ]
            variable_revision_number = 2
        if variable_revision_number == 2:
            """Added annotation offset"""
            setting_values = setting_values + ["0"]
            variable_revision_number = 3
        if variable_revision_number == 3:
            # Added color map mode
            setting_values = setting_values + [
                CT_TEXT,
                get_default_colormap(),
            ]
            variable_revision_number = 4
        if variable_revision_number == 4:
            # added wants_image
            setting_values = setting_values + ["Yes"]
            variable_revision_number = 5
        if variable_revision_number == 5:
            # added color_map_scale_choice and color_map_scale
            setting_values = setting_values + [CMS_USE_MEASUREMENT_RANGE, "0.0,1.0"]
            variable_revision_number = 6
        return setting_values, variable_revision_number
| 31.877836 | 107 | 0.711103 |
0fa744a7ed80ed97e6af5019a6c24f5aafea44d8 | 14,699 | py | Python | ironic_python_agent/utils.py | owaisaamir/ironic-python-agent | 0a537806ce2eff8bf5ab603047892f38e2253aae | [
"Apache-2.0"
] | null | null | null | ironic_python_agent/utils.py | owaisaamir/ironic-python-agent | 0a537806ce2eff8bf5ab603047892f38e2253aae | [
"Apache-2.0"
] | null | null | null | ironic_python_agent/utils.py | owaisaamir/ironic-python-agent | 0a537806ce2eff8bf5ab603047892f38e2253aae | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import errno
import glob
import io
import os
import shutil
import subprocess
import tarfile
import tempfile
import time
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_utils import units
from six.moves.urllib import parse
from ironic_python_agent import errors
# Module-level logger for this utility module.
LOG = logging.getLogger(__name__)
# Agent parameters can be passed by kernel command-line arguments and/or
# by virtual media. Virtual media parameters passed would be available
# when the agent is started, but might not be available for re-reading
# later on because:
# * Virtual media might be exposed from Swift and swift temp url might
#   expire.
# * Ironic might have removed the floppy image from Swift after starting
#   the deploy.
#
# Even if it's available, there is no need to re-read from the device and
# /proc/cmdline again, because it is never going to change. So we cache the
# agent parameters that was passed (by proc/cmdline and/or virtual media)
# when we read it for the first time, and then use this cache.
AGENT_PARAMS_CACHED = dict()
# Extra diagnostic commands whose output is bundled into the archive built
# by collect_system_logs(); keys become the archive member names.
COLLECT_LOGS_COMMANDS = {
    'ps': ['ps', 'au'],
    'df': ['df', '-a'],
    'iptables': ['iptables', '-L'],
    'ip_addr': ['ip', 'addr'],
}
def execute(*cmd, **kwargs):
    """Run a system command through ironic_lib, logging the result.

    All positional and keyword arguments are forwarded unchanged to
    ironic_lib.utils.execute().

    :returns: whatever ironic_lib.utils.execute() returns.
    """
    result = ironic_utils.execute(*cmd, **kwargs)
    return result
def try_execute(*cmd, **kwargs):
    """Best-effort variant of execute() that never raises on failure.

    Runs and logs a system command; see
    oslo_concurrency.processutils.execute for accepted arguments.

    :param cmd: positional arguments to pass to processutils.execute()
    :param kwargs: keyword arguments to pass to processutils.execute()
    :raises: UnknownArgumentError on receiving unknown arguments
    :returns: tuple of (stdout, stderr), or None when the command failed
    """
    try:
        result = execute(*cmd, **kwargs)
    except (processutils.ProcessExecutionError, OSError) as exc:
        # Failure is expected here; record it at debug level and move on.
        LOG.debug('Command failed: %s', exc)
        return None
    return result
def _read_params_from_file(filepath):
"""Extract key=value pairs from a file.
:param filepath: path to a file containing key=value pairs separated by
whitespace or newlines.
:returns: a dictionary representing the content of the file
"""
with open(filepath) as f:
cmdline = f.read()
options = cmdline.split()
params = {}
for option in options:
if '=' not in option:
continue
k, v = option.split('=', 1)
params[k] = v
return params
def _get_vmedia_device():
"""Finds the device filename of the virtual media device using sysfs.
:returns: a string containing the filename of the virtual media device
"""
sysfs_device_models = glob.glob("/sys/class/block/*/device/model")
vmedia_device_model = "virtual media"
for model_file in sysfs_device_models:
try:
with open(model_file) as model_file_fobj:
if vmedia_device_model in model_file_fobj.read().lower():
vmedia_device = model_file.split('/')[4]
return vmedia_device
except Exception:
pass
def _get_vmedia_params():
    """Return the parameters passed through the virtual media floppy.

    Locates the virtual media device (preferring the labelled
    /dev/disk/by-label entries), mounts it on a temporary directory, reads
    key=value pairs from parameters.txt, then unmounts and cleans up.

    :returns: a partial dict of potential agent configuration parameters
    :raises: VirtualMediaBootError when it cannot find the virtual media device
    """
    parameters_file = "parameters.txt"
    # Both label spellings exist in the wild depending on how the image was
    # built, so probe the lower-case variant first, then the upper-case one.
    vmedia_device_file_lower_case = "/dev/disk/by-label/ir-vfd-dev"
    vmedia_device_file_upper_case = "/dev/disk/by-label/IR-VFD-DEV"
    if os.path.exists(vmedia_device_file_lower_case):
        vmedia_device_file = vmedia_device_file_lower_case
    elif os.path.exists(vmedia_device_file_upper_case):
        vmedia_device_file = vmedia_device_file_upper_case
    else:
        # TODO(rameshg87): This block of code is there only for compatibility
        # reasons (so that newer agent can work with older Ironic). Remove
        # this after Liberty release.
        vmedia_device = _get_vmedia_device()
        if not vmedia_device:
            msg = "Unable to find virtual media device"
            raise errors.VirtualMediaBootError(msg)
        vmedia_device_file = os.path.join("/dev", vmedia_device)
    vmedia_mount_point = tempfile.mkdtemp()
    try:
        try:
            stdout, stderr = execute("mount", vmedia_device_file,
                                     vmedia_mount_point)
        except processutils.ProcessExecutionError as e:
            msg = ("Unable to mount virtual media device %(device)s: "
                   "%(error)s" % {'device': vmedia_device_file, 'error': e})
            raise errors.VirtualMediaBootError(msg)
        parameters_file_path = os.path.join(vmedia_mount_point,
                                            parameters_file)
        params = _read_params_from_file(parameters_file_path)
        try:
            stdout, stderr = execute("umount", vmedia_mount_point)
        except processutils.ProcessExecutionError as e:
            # Umount failure is deliberately non-fatal: the parameters were
            # already read, and the mount point is removed below anyway.
            pass
    finally:
        try:
            # Best-effort cleanup of the temporary mount point; failure here
            # (e.g. because the device is still mounted) must not mask the
            # result or any earlier exception.
            shutil.rmtree(vmedia_mount_point)
        except Exception as e:
            pass
    return params
def _get_cached_params() -> dict:
    """Helper method to get cached params to ease unit testing.

    :returns: the module-level cache of agent parameters (may be empty).
    """
    return AGENT_PARAMS_CACHED
def _set_cached_params(params: dict) -> None:
    """Helper method to set cached params to ease unit testing.

    :param params: the agent parameter dict to store in the module cache.
    """
    global AGENT_PARAMS_CACHED
    AGENT_PARAMS_CACHED = params
def get_agent_params():
    """Gets parameters passed to the agent via kernel cmdline or vmedia.

    Parameters can be passed using either the kernel commandline or through
    virtual media. If boot_method is vmedia, values read from vmedia are
    merged over those read from the kernel command line, so a key set in
    both places takes its value from vmedia.

    The merged result is cached after the first read; later calls reuse the
    cache (see the module-level comment on AGENT_PARAMS_CACHED).

    :returns: a dict of potential configuration parameters for the agent
    """
    params = _get_cached_params()
    if not params:
        # First call: read the kernel command line, then overlay any
        # virtual-media supplied values, and remember the result.
        params = _read_params_from_file('/proc/cmdline')
        if params.get('boot_method') == 'vmedia':
            params.update(_get_vmedia_params())
        _set_cached_params(params)
    # Warn (on every call) about deprecated parameter names still in use.
    for legacy, replacement in {'lldp-timeout': 'ipa-lldp-timeout'}.items():
        if params.get(legacy) is not None:
            LOG.warning("The parameter '%s' has been deprecated. Please "
                        "use %s instead.", legacy, replacement)
    # Hand out a copy so callers cannot mutate the shared cache.
    return copy.deepcopy(params)
def normalize(string):
    """Return a normalized version of a urlencoded string.

    The value is urldecoded, lowercased, and stripped of surrounding
    whitespace.

    :param string: a urlencoded string
    :returns: a normalized version of passed in string
    """
    decoded = parse.unquote(string)
    return decoded.lower().strip()
class AccumulatedFailures(object):
    """Collects failures so a caller can raise them once, at the end."""

    def __init__(self, exc_class=RuntimeError):
        self._failures = []
        self._exc_class = exc_class

    def add(self, fail, *fmt):
        """Record one failure, optionally %-formatting a message string.

        :param fail: exception or error string
        :param fmt: formatting arguments (only if fail is a string)
        """
        message = fail % fmt if fmt else fail
        LOG.error('%s', message)
        self._failures.append(message)

    def get_error(self):
        """Return the combined error string, or None when nothing failed."""
        if not self._failures:
            return None
        details = '\n'.join('* %s' % item for item in self._failures)
        return 'The following errors were encountered:\n%s' % details

    def raise_if_needed(self):
        """Raise the configured exception class if any failure was recorded.

        :raises: RuntimeError (or the class passed to the constructor)
        """
        if self._failures:
            raise self._exc_class(self.get_error())

    def __nonzero__(self):
        # Truthy iff at least one failure has been recorded (Python 2 name).
        return bool(self._failures)

    __bool__ = __nonzero__  # Python 3 spelling of the same protocol

    def __repr__(self):  # pragma: no cover
        # Debug helper used by the tests.
        if not self:
            return '<%s: success>' % self.__class__.__name__
        return '<%s: %s>' % (self.__class__.__name__,
                             ', '.join(self._failures))
def guess_root_disk(block_devices, min_size_required=4 * units.Gi):
    """Pick a root disk when no root device hints were provided.

    Sorts the candidate devices in place by (size, name) and returns the
    smallest device whose size is at least ``min_size_required``.

    :param block_devices: list of device objects with ``size`` and ``name``
    :param min_size_required: minimum acceptable device size, in bytes
    :raises: DeviceNotFound when no device is large enough
    """
    # NOTE(arne_wiebalck): secondary ordering by name raises the odds of a
    # successful boot on BIOSes that only ever try the "first" disk.
    block_devices.sort(key=lambda dev: (dev.size, dev.name))
    if not block_devices or block_devices[-1].size < min_size_required:
        raise errors.DeviceNotFound(
            "No suitable device was found "
            "for deployment - root device hints were not provided "
            "and all found block devices are smaller than %iB."
            % min_size_required)
    # The guard above proved at least the largest device qualifies, so this
    # generator always yields something.
    return next(dev for dev in block_devices
                if dev.size >= min_size_required)
def is_journalctl_present():
    """Check if the journalctl command is present.

    :returns: True if journalctl is present, False if not.
    """
    try:
        # The original code leaked this file handle; close it in a finally
        # block. (Kept as open(os.devnull) rather than subprocess.DEVNULL
        # for Python 2 compatibility, which this module still supports.)
        devnull = open(os.devnull, 'w')
        try:
            subprocess.check_call(['journalctl', '--version'],
                                  stdout=devnull, stderr=devnull)
        finally:
            devnull.close()
    except OSError as e:
        if e.errno == errno.ENOENT:
            return False
        # Other OSErrors (e.g. EACCES) fall through and report True,
        # matching the original behavior.
    return True
def get_command_output(command):
    """Return the output of a given command.

    :param command: The command to be executed.
    :raises: CommandExecutionError if the execution of the command fails.
    :returns: A BytesIO buffer holding the command's stdout.
    """
    try:
        stdout, _stderr = execute(*command, binary=True, log_stdout=False)
        return io.BytesIO(stdout)
    except (processutils.ProcessExecutionError, OSError) as exc:
        msg = ('Failed to get the output of the command "%(command)s". '
               'Error: %(error)s' % {'command': command, 'error': exc})
        LOG.error(msg)
        raise errors.CommandExecutionError(msg)
def get_journalctl_output(lines=None, units=None):
    """Query the contents of the systemd journal.

    :param lines: Maximum number of lines to retrieve from the
                  logs. If None, return everything.
    :param units: A list with the names of the units we should
                  retrieve the logs from. If None retrieve the logs
                  for everything.
    :returns: A log string (BytesIO, via get_command_output).
    """
    # NOTE(review): the parameter name `units` shadows the module-level
    # `oslo_utils.units` import; it cannot be renamed without breaking
    # callers that pass it as a keyword argument.
    cmd = ['journalctl', '--full', '--no-pager', '-b']
    if lines is not None:
        cmd.extend(['-n', str(lines)])
    if units is not None:
        # Plain loop instead of a list comprehension run only for its
        # side effects.
        for unit in units:
            cmd.extend(['-u', unit])
    return get_command_output(cmd)
def gzip_and_b64encode(io_dict=None, file_list=None):
    """Gzip and base64 encode files and BytesIO buffers.

    :param io_dict: A dictionary whose keys are the archive member names
        and whose values are BytesIO objects.
    :param file_list: A list of file paths to include.
    :returns: A gzipped and base64 encoded string.
    """
    io_dict = io_dict or {}
    file_list = file_list or []
    with io.BytesIO() as fp:
        with tarfile.open(fileobj=fp, mode='w:gz') as tar:
            # Iterate items() instead of re-indexing the dict per key.
            for fname, ioobj in io_dict.items():
                tarinfo = tarfile.TarInfo(name=fname)
                # seek(0, 2) returns the end offset, i.e. the buffer size.
                tarinfo.size = ioobj.seek(0, 2)
                tarinfo.mtime = time.time()
                ioobj.seek(0)
                tar.addfile(tarinfo, ioobj)
            for f in file_list:
                tar.add(f)
        fp.seek(0)
        return base64.encode_as_text(fp.getvalue())
def collect_system_logs(journald_max_lines=None):
    """Collect system logs.

    On systemd-based distributions the logs come from journald; elsewhere
    they come from the /var/log directory and the dmesg output. The extra
    commands in COLLECT_LOGS_COMMANDS are always included.

    :param journald_max_lines: Maximum number of lines to retrieve from
                               the journald. if None, return everything.
    :returns: A tar, gzip base64 encoded string with the logs.
    """
    def _collect(io_dict, name, command):
        # Best-effort: a failing helper command must not abort collection.
        try:
            io_dict[name] = get_command_output(command)
        except errors.CommandExecutionError:
            pass

    io_dict = {}
    file_list = []
    if is_journalctl_present():
        io_dict['journal'] = get_journalctl_output(lines=journald_max_lines)
    else:
        _collect(io_dict, 'dmesg', ['dmesg'])
        file_list.append('/var/log')
    for name, cmd in COLLECT_LOGS_COMMANDS.items():
        _collect(io_dict, name, cmd)
    return gzip_and_b64encode(io_dict=io_dict, file_list=file_list)
def get_ssl_client_options(conf):
    """Format SSL-related requests options.

    :param conf: oslo_config CONF object (reads ``insecure``, ``cafile``,
                 ``certfile`` and ``keyfile``)
    :returns: tuple of 'verify' and 'cert' values to pass to requests
    """
    # verify: False disables verification entirely; otherwise use the CA
    # bundle if given, or True for the default system bundle.
    verify = False if conf.insecure else (conf.cafile or True)
    # cert: a client certificate is only usable when both halves are set.
    cert = ((conf.certfile, conf.keyfile)
            if conf.certfile and conf.keyfile else None)
    return verify, cert
| 33.559361 | 79 | 0.659365 |
9d9e65b4e8d6d2e40bf9c263339f899439c842c3 | 13,021 | py | Python | tensorflow/python/ops/distributions/student_t.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 13 | 2018-07-23T18:53:35.000Z | 2021-11-18T19:56:45.000Z | tensorflow/python/ops/distributions/student_t.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:20:14.000Z | 2022-03-11T23:20:14.000Z | tensorflow/python/ops/distributions/student_t.py | mohammadzainabbas/tensorflow | 352142267a1a151b04c6198de83b40b7e979d1d8 | [
"Apache-2.0"
] | 13 | 2018-09-07T13:28:38.000Z | 2020-07-17T15:06:24.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Student's t distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
# Public API of this module (controls `from ... import *`).
__all__ = [
    "StudentT",
    "StudentTWithAbsDfSoftplusScale",
]
@tf_export("distributions.StudentT")
class StudentT(distribution.Distribution):
  """Student's t-distribution.

  This distribution has parameters: degree of freedom `df`, location `loc`,
  and `scale`.

  #### Mathematical details

  The probability density function (pdf) is,

  ```none
  pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z
  where,
  y = (x - mu) / sigma
  Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1))
  ```

  where:

  * `loc = mu`,
  * `scale = sigma`, and,
  * `Z` is the normalization constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).

  The StudentT distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ StudentT(df, loc=0, scale=1)
  Y = loc + scale * X
  ```

  Notice that `scale` has semantics more similar to standard deviation than
  variance. However it is not actually the std. deviation; the Student's
  t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`.

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar Student t distribution.
  single_dist = tf.distributions.StudentT(df=3)

  # Evaluate the pdf at 1, returning a scalar Tensor.
  single_dist.prob(1.)

  # Define a batch of two scalar valued Student t's.
  # The first has degrees of freedom 2, mean 1, and scale 11.
  # The second 3, 2 and 22.
  multi_dist = tf.distributions.StudentT(df=[2, 3],
                                         loc=[1, 2.],
                                         scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  multi_dist.prob([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  multi_dist.sample(3)
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two Student's t distributions.
  # Both have df 2 and mean 1, but different scales.
  dist = tf.distributions.StudentT(df=2, loc=1, scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """
  # pylint: enable=line-too-long

  def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentT"):
    """Construct Student's t distributions.

    The distributions have degree of freedom `df`, mean `loc`, and scale
    `scale`.

    The parameters `df`, `loc`, and `scale` must be shaped in a way that
    supports broadcasting (e.g. `df + loc + scale` is a valid operation).

    Args:
      df: Floating-point `Tensor`. The degrees of freedom of the
        distribution(s). `df` must contain only positive values.
      loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
      scale: Floating-point `Tensor`. The scaling factor(s) for the
        distribution(s). Note that `scale` is not technically the standard
        deviation of this distribution but has semantics more similar to
        standard deviation than variance.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
    # Must be the first statement so locals() captures only the caller's
    # arguments; stored by the base class as the `parameters` attribute.
    parameters = locals()
    with ops.name_scope(name, values=[df, loc, scale]):
      with ops.control_dependencies([check_ops.assert_positive(df)]
                                    if validate_args else []):
        self._df = array_ops.identity(df, name="df")
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype(
            (self._df, self._loc, self._scale))
    super(StudentT, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._df, self._loc, self._scale],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # All three parameters (df, loc, scale) share the same shape.
    return dict(
        zip(("df", "loc", "scale"), (
            [ops.convert_to_tensor(
                sample_shape, dtype=dtypes.int32)] * 3)))

  @property
  def df(self):
    """Degrees of freedom in these Student's t distribution(s)."""
    return self._df

  @property
  def loc(self):
    """Locations of these Student's t distribution(s)."""
    return self._loc

  @property
  def scale(self):
    """Scaling factors of these Student's t distribution(s)."""
    return self._scale

  def _batch_shape_tensor(self):
    # Dynamic batch shape: broadcast of the three parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.df),
        array_ops.broadcast_dynamic_shape(
            array_ops.shape(self.loc), array_ops.shape(self.scale)))

  def _batch_shape(self):
    # Static counterpart of _batch_shape_tensor().
    return array_ops.broadcast_static_shape(
        array_ops.broadcast_static_shape(self.df.get_shape(),
                                         self.loc.get_shape()),
        self.scale.get_shape())

  def _event_shape_tensor(self):
    # NOTE(review): int32 is reached through the math_ops module here;
    # dtypes.int32 is the conventional spelling -- confirm the alias.
    return constant_op.constant([], dtype=math_ops.int32)

  def _event_shape(self):
    # Scalar event: each draw is a single number.
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # The sampling method comes from the fact that if:
    #   X ~ Normal(0, 1)
    #   Z ~ Chi2(df)
    #   Y = X / sqrt(Z / df)
    # then:
    #   Y ~ StudentT(df).
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
    df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
    # Gamma(0.5 df, rate=0.5) is exactly Chi2(df).
    gamma_sample = random_ops.random_gamma(
        [n],
        0.5 * df,
        beta=0.5,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, salt="student_t"))
    samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
    return samples * self.scale + self.loc  # Abs(scale) not wanted.

  def _log_prob(self, x):
    # log pdf = log(unnormalized density) - log(normalization constant).
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _log_unnormalized_prob(self, x):
    y = (x - self.loc) / self.scale  # Abs(scale) superfluous.
    return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df)

  def _log_normalization(self):
    # log Z = log|sigma| + 0.5 log df + 0.5 log pi + log B(0.5 df, 0.5).
    return (math_ops.log(math_ops.abs(self.scale)) +
            0.5 * math_ops.log(self.df) +
            0.5 * np.log(np.pi) +
            math_ops.lgamma(0.5 * self.df) -
            math_ops.lgamma(0.5 * (self.df + 1.)))

  def _cdf(self, x):
    # Take Abs(scale) to make subsequent where work correctly.
    y = (x - self.loc) / math_ops.abs(self.scale)
    x_t = self.df / (y**2. + self.df)
    # Lower-tail CDF via the regularized incomplete beta function; the
    # upper tail follows by the symmetry of the distribution.
    neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
    return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)

  def _entropy(self):
    v = array_ops.ones(self.batch_shape_tensor(),
                       dtype=self.dtype)[..., array_ops.newaxis]
    u = v * self.df[..., array_ops.newaxis]
    # lbeta expects the Beta-function arguments stacked on the last axis.
    beta_arg = array_ops.concat([u, v], -1) / 2.
    return (math_ops.log(math_ops.abs(self.scale)) +
            0.5 * math_ops.log(self.df) +
            special_math_ops.lbeta(beta_arg) +
            0.5 * (self.df + 1.) *
            (math_ops.digamma(0.5 * (self.df + 1.)) -
             math_ops.digamma(0.5 * self.df)))

  @distribution_util.AppendDocstring(
      """The mean of Student's T equals `loc` if `df > 1`, otherwise it is
      `NaN`. If `self.allow_nan_stats=True`, then an exception will be raised
      rather than returning `NaN`.""")
  # NOTE(review): the appended docstring above reads inverted -- an
  # exception is raised when `allow_nan_stats=False` (see the else branch
  # below). The string literal is runtime data and is left unchanged here.
  def _mean(self):
    mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
                                     dtype=self.dtype)
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          mean,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="mean not defined for components of df <= 1"),
          ],
          mean)

  @distribution_util.AppendDocstring("""
      The variance for Student's T equals

      ```
      df / (df - 2), when df > 2
      infinity, when 1 < df <= 2
      NaN, when df <= 1
      ```
      """)
  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = array_ops.where(math_ops.greater(self.df, 2.),
                            self.df - 2.,
                            array_ops.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           math_ops.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = array_ops.where(
        self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
        var,
        array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)

  def _mode(self):
    # The t-distribution is unimodal and symmetric about loc.
    return array_ops.identity(self.loc)
class StudentTWithAbsDfSoftplusScale(StudentT):
  """StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`.

  Convenience subclass that maps unconstrained real-valued inputs to valid
  StudentT parameters: `df` becomes a positive integer-valued tensor and
  `scale` becomes strictly positive via softplus.
  """

  def __init__(self,
               df,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="StudentTWithAbsDfSoftplusScale"):
    # Must be the first statement so locals() captures only the caller's
    # (untransformed) arguments; stored on the instance below.
    parameters = locals()
    with ops.name_scope(name, values=[df, scale]):
      super(StudentTWithAbsDfSoftplusScale, self).__init__(
          df=math_ops.floor(math_ops.abs(df)),
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    # Overwrite the parameters recorded by the base class with the original
    # (pre-transformation) arguments.
    self._parameters = parameters
| 35.969613 | 80 | 0.635281 |
d2ea0d19ec2f051ce8fad34153968ce503cbc72e | 17,147 | py | Python | dateparser/data/languages_info.py | cedar101/dateparser | edde3f0f10634d5b11713c830178341ca6fea9e5 | [
"BSD-3-Clause"
] | null | null | null | dateparser/data/languages_info.py | cedar101/dateparser | edde3f0f10634d5b11713c830178341ca6fea9e5 | [
"BSD-3-Clause"
] | null | null | null | dateparser/data/languages_info.py | cedar101/dateparser | edde3f0f10634d5b11713c830178341ca6fea9e5 | [
"BSD-3-Clause"
] | null | null | null | language_order = [
"en",
"ru",
"es",
"tr",
"fa",
"fr",
"de",
"ja",
"pt",
"vi",
"zh",
"zh-Hans",
"ar",
"it",
"pl",
"id",
"el",
"nl",
"ko",
"th",
"he",
"uk",
"cs",
"sv",
"ro",
"hu",
"da",
"sr",
"sr-Cyrl",
"sk",
"fi",
"bg",
"hr",
"lt",
"hi",
"nb",
"sl",
"nn",
"et",
"lv",
"bn",
"ur",
"sw",
"pa-Arab",
"te",
"mr",
"ta",
"yue",
"fil",
"gu",
"kn",
"ps",
"zh-Hant",
"ml",
"or",
"my",
"pa",
"pa-Guru",
"am",
"om",
"ha",
"uz",
"uz-Latn",
"yo",
"ms",
"ig",
"mg",
"ne",
"as",
"so",
"si",
"km",
"zu",
"sn",
"kk",
"rw",
"ckb",
"qu",
"ak",
"be",
"ti",
"az",
"az-Latn",
"af",
"ca",
"sr-Latn",
"ii",
"bm",
"ki",
"gsw",
"ug",
"zgh",
"ff",
"rn",
"sq",
"ks",
"hy",
"luy",
"lg",
"lo",
"bem",
"kok",
"luo",
"uz-Cyrl",
"ka",
"ee",
"mzn",
"bs-Cyrl",
"bs",
"bs-Latn",
"kln",
"kam",
"gl",
"tzm",
"dje",
"kab",
"bo",
"shi-Latn",
"shi",
"shi-Tfng",
"mn",
"ln",
"ky",
"sg",
"nyn",
"guz",
"cgg",
"xog",
"lrc",
"mer",
"lu",
"teo",
"brx",
"nd",
"mk",
"uz-Arab",
"mas",
"kde",
"mfe",
"seh",
"mgh",
"az-Cyrl",
"ga",
"eu",
"yi",
"ce",
"ksb",
"bez",
"ewo",
"fy",
"ebu",
"nus",
"ast",
"asa",
"ses",
"os",
"br",
"cy",
"kea",
"lag",
"sah",
"mt",
"vun",
"rof",
"jmc",
"lb",
"dav",
"dyo",
"dz",
"nnh",
"is",
"khq",
"bas",
"naq",
"mua",
"ksh",
"saq",
"se",
"dua",
"rwk",
"mgo",
"sbp",
"to",
"jgo",
"ksf",
"fo",
"gd",
"kl",
"rm",
"fur",
"agq",
"haw",
"chr",
"hsb",
"wae",
"nmg",
"lkt",
"twq",
"dsb",
"yav",
"kw",
"gv",
"smn",
"eo",
"tl"
]
language_map = {
"af": [
"af"
],
"agq": [
"agq"
],
"ak": [
"ak"
],
"am": [
"am"
],
"ar": [
"ar"
],
"as": [
"as"
],
"asa": [
"asa"
],
"ast": [
"ast"
],
"az": [
"az",
"az-Cyrl",
"az-Latn"
],
"bas": [
"bas"
],
"be": [
"be"
],
"bem": [
"bem"
],
"bez": [
"bez"
],
"bg": [
"bg"
],
"bm": [
"bm"
],
"bn": [
"bn"
],
"bo": [
"bo"
],
"br": [
"br"
],
"brx": [
"brx"
],
"bs": [
"bs",
"bs-Cyrl",
"bs-Latn"
],
"ca": [
"ca"
],
"ce": [
"ce"
],
"cgg": [
"cgg"
],
"chr": [
"chr"
],
"ckb": [
"ckb"
],
"cs": [
"cs"
],
"cy": [
"cy"
],
"da": [
"da"
],
"dav": [
"dav"
],
"de": [
"de"
],
"dje": [
"dje"
],
"dsb": [
"dsb"
],
"dua": [
"dua"
],
"dyo": [
"dyo"
],
"dz": [
"dz"
],
"ebu": [
"ebu"
],
"ee": [
"ee"
],
"el": [
"el"
],
"en": [
"en"
],
"eo": [
"eo"
],
"es": [
"es"
],
"et": [
"et"
],
"eu": [
"eu"
],
"ewo": [
"ewo"
],
"fa": [
"fa"
],
"ff": [
"ff"
],
"fi": [
"fi"
],
"fil": [
"fil"
],
"fo": [
"fo"
],
"fr": [
"fr"
],
"fur": [
"fur"
],
"fy": [
"fy"
],
"ga": [
"ga"
],
"gd": [
"gd"
],
"gl": [
"gl"
],
"gsw": [
"gsw"
],
"gu": [
"gu"
],
"guz": [
"guz"
],
"gv": [
"gv"
],
"ha": [
"ha"
],
"haw": [
"haw"
],
"he": [
"he"
],
"hi": [
"hi"
],
"hr": [
"hr"
],
"hsb": [
"hsb"
],
"hu": [
"hu"
],
"hy": [
"hy"
],
"id": [
"id"
],
"ig": [
"ig"
],
"ii": [
"ii"
],
"is": [
"is"
],
"it": [
"it"
],
"ja": [
"ja"
],
"jgo": [
"jgo"
],
"jmc": [
"jmc"
],
"ka": [
"ka"
],
"kab": [
"kab"
],
"kam": [
"kam"
],
"kde": [
"kde"
],
"kea": [
"kea"
],
"khq": [
"khq"
],
"ki": [
"ki"
],
"kk": [
"kk"
],
"kl": [
"kl"
],
"kln": [
"kln"
],
"km": [
"km"
],
"kn": [
"kn"
],
"ko": [
"ko"
],
"kok": [
"kok"
],
"ks": [
"ks"
],
"ksb": [
"ksb"
],
"ksf": [
"ksf"
],
"ksh": [
"ksh"
],
"kw": [
"kw"
],
"ky": [
"ky"
],
"lag": [
"lag"
],
"lb": [
"lb"
],
"lg": [
"lg"
],
"lkt": [
"lkt"
],
"ln": [
"ln"
],
"lo": [
"lo"
],
"lrc": [
"lrc"
],
"lt": [
"lt"
],
"lu": [
"lu"
],
"luo": [
"luo"
],
"luy": [
"luy"
],
"lv": [
"lv"
],
"mas": [
"mas"
],
"mer": [
"mer"
],
"mfe": [
"mfe"
],
"mg": [
"mg"
],
"mgh": [
"mgh"
],
"mgo": [
"mgo"
],
"mk": [
"mk"
],
"ml": [
"ml"
],
"mn": [
"mn"
],
"mr": [
"mr"
],
"ms": [
"ms"
],
"mt": [
"mt"
],
"mua": [
"mua"
],
"my": [
"my"
],
"mzn": [
"mzn"
],
"naq": [
"naq"
],
"nb": [
"nb"
],
"nd": [
"nd"
],
"ne": [
"ne"
],
"nl": [
"nl"
],
"nmg": [
"nmg"
],
"nn": [
"nn"
],
"nnh": [
"nnh"
],
"nus": [
"nus"
],
"nyn": [
"nyn"
],
"om": [
"om"
],
"or": [
"or"
],
"os": [
"os"
],
"pa": [
"pa",
"pa-Arab",
"pa-Guru"
],
"pl": [
"pl"
],
"ps": [
"ps"
],
"pt": [
"pt"
],
"qu": [
"qu"
],
"rm": [
"rm"
],
"rn": [
"rn"
],
"ro": [
"ro"
],
"rof": [
"rof"
],
"ru": [
"ru"
],
"rw": [
"rw"
],
"rwk": [
"rwk"
],
"sah": [
"sah"
],
"saq": [
"saq"
],
"sbp": [
"sbp"
],
"se": [
"se"
],
"seh": [
"seh"
],
"ses": [
"ses"
],
"sg": [
"sg"
],
"shi": [
"shi",
"shi-Latn",
"shi-Tfng"
],
"si": [
"si"
],
"sk": [
"sk"
],
"sl": [
"sl"
],
"smn": [
"smn"
],
"sn": [
"sn"
],
"so": [
"so"
],
"sq": [
"sq"
],
"sr": [
"sr",
"sr-Cyrl",
"sr-Latn"
],
"sv": [
"sv"
],
"sw": [
"sw"
],
"ta": [
"ta"
],
"te": [
"te"
],
"teo": [
"teo"
],
"th": [
"th"
],
"ti": [
"ti"
],
"tl": [
"tl"
],
"to": [
"to"
],
"tr": [
"tr"
],
"twq": [
"twq"
],
"tzm": [
"tzm"
],
"ug": [
"ug"
],
"uk": [
"uk"
],
"ur": [
"ur"
],
"uz": [
"uz",
"uz-Arab",
"uz-Cyrl",
"uz-Latn"
],
"vi": [
"vi"
],
"vun": [
"vun"
],
"wae": [
"wae"
],
"xog": [
"xog"
],
"yav": [
"yav"
],
"yi": [
"yi"
],
"yo": [
"yo"
],
"yue": [
"yue"
],
"zgh": [
"zgh"
],
"zh": [
"zh",
"zh-Hans",
"zh-Hant"
],
"zu": [
"zu"
]
}
language_locale_dict = {
"en": [
"en-001",
"en-150",
"en-AG",
"en-AI",
"en-AS",
"en-AT",
"en-AU",
"en-BB",
"en-BE",
"en-BI",
"en-BM",
"en-BS",
"en-BW",
"en-BZ",
"en-CA",
"en-CC",
"en-CH",
"en-CK",
"en-CM",
"en-CX",
"en-CY",
"en-DE",
"en-DG",
"en-DK",
"en-DM",
"en-ER",
"en-FI",
"en-FJ",
"en-FK",
"en-FM",
"en-GB",
"en-GD",
"en-GG",
"en-GH",
"en-GI",
"en-GM",
"en-GU",
"en-GY",
"en-HK",
"en-IE",
"en-IL",
"en-IM",
"en-IN",
"en-IO",
"en-JE",
"en-JM",
"en-KE",
"en-KI",
"en-KN",
"en-KY",
"en-LC",
"en-LR",
"en-LS",
"en-MG",
"en-MH",
"en-MO",
"en-MP",
"en-MS",
"en-MT",
"en-MU",
"en-MW",
"en-MY",
"en-NA",
"en-NF",
"en-NG",
"en-NL",
"en-NR",
"en-NU",
"en-NZ",
"en-PG",
"en-PH",
"en-PK",
"en-PN",
"en-PR",
"en-PW",
"en-RW",
"en-SB",
"en-SC",
"en-SD",
"en-SE",
"en-SG",
"en-SH",
"en-SI",
"en-SL",
"en-SS",
"en-SX",
"en-SZ",
"en-TC",
"en-TK",
"en-TO",
"en-TT",
"en-TV",
"en-TZ",
"en-UG",
"en-UM",
"en-VC",
"en-VG",
"en-VI",
"en-VU",
"en-WS",
"en-ZA",
"en-ZM",
"en-ZW"
],
"ru": [
"ru-BY",
"ru-KG",
"ru-KZ",
"ru-MD",
"ru-UA"
],
"es": [
"es-419",
"es-AR",
"es-BO",
"es-BR",
"es-BZ",
"es-CL",
"es-CO",
"es-CR",
"es-CU",
"es-DO",
"es-EA",
"es-EC",
"es-GQ",
"es-GT",
"es-HN",
"es-IC",
"es-MX",
"es-NI",
"es-PA",
"es-PE",
"es-PH",
"es-PR",
"es-PY",
"es-SV",
"es-US",
"es-UY",
"es-VE"
],
"tr": [
"tr-CY"
],
"fa": [
"fa-AF"
],
"fr": [
"fr-BE",
"fr-BF",
"fr-BI",
"fr-BJ",
"fr-BL",
"fr-CA",
"fr-CD",
"fr-CF",
"fr-CG",
"fr-CH",
"fr-CI",
"fr-CM",
"fr-DJ",
"fr-DZ",
"fr-GA",
"fr-GF",
"fr-GN",
"fr-GP",
"fr-GQ",
"fr-HT",
"fr-KM",
"fr-LU",
"fr-MA",
"fr-MC",
"fr-MF",
"fr-MG",
"fr-ML",
"fr-MQ",
"fr-MR",
"fr-MU",
"fr-NC",
"fr-NE",
"fr-PF",
"fr-PM",
"fr-RE",
"fr-RW",
"fr-SC",
"fr-SN",
"fr-SY",
"fr-TD",
"fr-TG",
"fr-TN",
"fr-VU",
"fr-WF",
"fr-YT"
],
"de": [
"de-AT",
"de-BE",
"de-CH",
"de-IT",
"de-LI",
"de-LU"
],
"ja": [],
"pt": [
"pt-AO",
"pt-CH",
"pt-CV",
"pt-GQ",
"pt-GW",
"pt-LU",
"pt-MO",
"pt-MZ",
"pt-PT",
"pt-ST",
"pt-TL"
],
"vi": [],
"zh": [],
"zh-Hans": [
"zh-Hans-HK",
"zh-Hans-MO",
"zh-Hans-SG"
],
"ar": [
"ar-AE",
"ar-BH",
"ar-DJ",
"ar-DZ",
"ar-EG",
"ar-EH",
"ar-ER",
"ar-IL",
"ar-IQ",
"ar-JO",
"ar-KM",
"ar-KW",
"ar-LB",
"ar-LY",
"ar-MA",
"ar-MR",
"ar-OM",
"ar-PS",
"ar-QA",
"ar-SA",
"ar-SD",
"ar-SO",
"ar-SS",
"ar-SY",
"ar-TD",
"ar-TN",
"ar-YE"
],
"it": [
"it-CH",
"it-SM",
"it-VA"
],
"pl": [],
"id": [],
"el": [
"el-CY"
],
"nl": [
"nl-AW",
"nl-BE",
"nl-BQ",
"nl-CW",
"nl-SR",
"nl-SX"
],
"ko": [
"ko-KP",
"ko-KR"
],
"th": [],
"he": [],
"uk": [],
"cs": [],
"sv": [
"sv-AX",
"sv-FI"
],
"ro": [
"ro-MD"
],
"hu": [],
"da": [
"da-GL"
],
"sr": [],
"sr-Cyrl": [
"sr-Cyrl-BA",
"sr-Cyrl-ME",
"sr-Cyrl-XK"
],
"sk": [],
"fi": [],
"bg": [],
"hr": [
"hr-BA"
],
"lt": [],
"hi": [],
"nb": [
"nb-SJ"
],
"sl": [],
"nn": [],
"et": [],
"lv": [],
"bn": [
"bn-IN"
],
"ur": [
"ur-IN"
],
"sw": [
"sw-CD",
"sw-KE",
"sw-UG"
],
"pa-Arab": [],
"te": [],
"mr": [],
"ta": [
"ta-LK",
"ta-MY",
"ta-SG"
],
"yue": [],
"fil": [],
"gu": [],
"kn": [],
"ps": [],
"zh-Hant": [
"zh-Hant-HK",
"zh-Hant-MO"
],
"ml": [],
"or": [],
"my": [],
"pa": [],
"pa-Guru": [],
"am": [],
"om": [
"om-KE"
],
"ha": [
"ha-GH",
"ha-NE"
],
"uz": [],
"uz-Latn": [],
"yo": [
"yo-BJ"
],
"ms": [
"ms-BN",
"ms-SG"
],
"ig": [],
"mg": [],
"ne": [
"ne-IN"
],
"as": [],
"so": [
"so-DJ",
"so-ET",
"so-KE"
],
"si": [],
"km": [],
"zu": [],
"sn": [],
"kk": [],
"rw": [],
"ckb": [
"ckb-IR"
],
"qu": [
"qu-BO",
"qu-EC"
],
"ak": [],
"be": [],
"ti": [
"ti-ER"
],
"az": [],
"az-Latn": [],
"af": [
"af-NA"
],
"ca": [
"ca-AD",
"ca-FR",
"ca-IT"
],
"sr-Latn": [
"sr-Latn-BA",
"sr-Latn-ME",
"sr-Latn-XK"
],
"ii": [],
"bm": [],
"ki": [],
"gsw": [
"gsw-FR",
"gsw-LI"
],
"ug": [],
"zgh": [],
"ff": [
"ff-CM",
"ff-GN",
"ff-MR"
],
"rn": [],
"sq": [
"sq-MK",
"sq-XK"
],
"ks": [],
"hy": [],
"luy": [],
"lg": [],
"lo": [],
"bem": [],
"kok": [],
"luo": [],
"uz-Cyrl": [],
"ka": [],
"ee": [
"ee-TG"
],
"mzn": [],
"bs-Cyrl": [],
"bs": [],
"bs-Latn": [],
"kln": [],
"kam": [],
"gl": [],
"tzm": [],
"dje": [],
"kab": [],
"bo": [
"bo-IN"
],
"shi-Latn": [],
"shi": [],
"shi-Tfng": [],
"mn": [],
"ln": [
"ln-AO",
"ln-CF",
"ln-CG"
],
"ky": [],
"sg": [],
"nyn": [],
"guz": [],
"cgg": [],
"xog": [],
"lrc": [
"lrc-IQ"
],
"mer": [],
"lu": [],
"teo": [
"teo-KE"
],
"brx": [],
"nd": [],
"mk": [],
"uz-Arab": [],
"mas": [
"mas-TZ"
],
"kde": [],
"mfe": [],
"seh": [],
"mgh": [],
"az-Cyrl": [],
"ga": [],
"eu": [],
"yi": [],
"ce": [],
"ksb": [],
"bez": [],
"ewo": [],
"fy": [],
"ebu": [],
"nus": [],
"ast": [],
"asa": [],
"ses": [],
"os": [
"os-RU"
],
"br": [],
"cy": [],
"kea": [],
"lag": [],
"sah": [],
"mt": [],
"vun": [],
"rof": [],
"jmc": [],
"lb": [],
"dav": [],
"dyo": [],
"dz": [],
"nnh": [],
"is": [],
"khq": [],
"bas": [],
"naq": [],
"mua": [],
"ksh": [],
"saq": [],
"se": [
"se-FI",
"se-SE"
],
"dua": [],
"rwk": [],
"mgo": [],
"sbp": [],
"to": [],
"jgo": [],
"ksf": [],
"fo": [
"fo-DK"
],
"gd": [],
"kl": [],
"rm": [],
"fur": [],
"agq": [],
"haw": [],
"chr": [],
"hsb": [],
"wae": [],
"nmg": [],
"lkt": [],
"twq": [],
"dsb": [],
"yav": [],
"kw": [],
"gv": [],
"smn": [],
"eo": [],
"tl": []
}
| 12.673319 | 24 | 0.196827 |
0308d5401bffae981a41519a2687ba3eeeae223c | 267 | py | Python | django/contrib/sessions/exceptions.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/contrib/sessions/exceptions.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/contrib/sessions/exceptions.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.core.exceptions import SuspiciousOperation
class InvalidSessionKey(SuspiciousOperation):
    """Raised when a session key contains invalid characters."""
class SuspiciousSession(SuspiciousOperation):
    """Raised when a session appears to have been tampered with."""
| 22.25 | 55 | 0.730337 |
4d6b8a75207f20b3a4a35e88cce4fdf4dcf5c4cf | 9,627 | py | Python | trackATM.py | lorenzoreyes/QUANVAS | 8ffc5635b132e78e7c4f1561a110adacb763cbd7 | [
"Apache-2.0"
] | null | null | null | trackATM.py | lorenzoreyes/QUANVAS | 8ffc5635b132e78e7c4f1561a110adacb763cbd7 | [
"Apache-2.0"
] | null | null | null | trackATM.py | lorenzoreyes/QUANVAS | 8ffc5635b132e78e7c4f1561a110adacb763cbd7 | [
"Apache-2.0"
] | null | null | null | # This file covers the cycle of portolio management tasks
# with the purpose of being executed in the Console
import yfinance as yahoo
import pandas as pd
import numpy as np
"""4 main functions to handle the cycle of portfolio management from the clients.csv inputs
0 Do nothing.
1 portfolioMonitor to update and suggest a rebalance allocation
2 DepositOrWithdraw suggests a new composition based on a capital change, given Status = 2 and the amount to change.
3 portfolioRiskUpdated rebalances the allocation according to CVaR analysis
4 BacktoBasics reverses the previous functions' output back to the original format
"""
def PortfolioMonitor(data):
    """Revalue an existing portfolio at live prices and suggest a rebalance.

    :param data: client-sheet DataFrame; its first column holds the tickers
        and it carries 'nominal', 'price' and 'liquid' columns.
        (Presumably loaded from the clients Excel/CSV file — see the
        commented-out pd.read_excel call below.)
    :return: DataFrame indexed by ticker with today's prices, P&L and the
        suggested new holdings ('nominalNew', 'adjust', 'percentReb', ...).

    NOTE(review): performs a live Yahoo Finance download, so results depend
    on market data at call time.
    """
    cartera = data  # pd.read_excel(str(input("Type excel to work with: ")))
    portfolio = pd.DataFrame(index=cartera.iloc[:,0])
    portfolio['nominal'] = cartera['nominal'].values
    portfolio['pricePaid'] = cartera['price'].values
    # Target weights implied by the original purchase (cost-based weights).
    portfolio['weights'] = (portfolio['nominal'] * portfolio['pricePaid']) / sum(portfolio['nominal'] * portfolio['pricePaid'])
    portfolio['notionalStart'] = sum(portfolio['nominal'] * portfolio['pricePaid'])
    portfolio['oldLiquidity'] = cartera['liquid'].values
    stocks = list(portfolio.index)
    # Latest traded price per ticker: last row of a 7-day/1-minute download,
    # forward-filled, then transposed so tickers line up with the index.
    portfolio['priceToday'] = (yahoo.download(stocks,period="7d",interval="1m",prepost=True)['Adj Close'].fillna(method='ffill')).tail(1).T
    portfolio['notionalToday'] = sum(portfolio['priceToday'] * portfolio['nominal'])
    portfolio['PnLpercent'] = portfolio['notionalToday'] / portfolio['notionalStart']
    portfolio['PnLpercentEach'] = portfolio['priceToday'] / portfolio['pricePaid']
    # New nominal: reinvest current market value plus leftover cash at the
    # original target weights (floor division keeps whole shares).
    portfolio['nominalNew'] = (portfolio['weights'] * (portfolio['notionalToday'] + portfolio['oldLiquidity']) // portfolio['priceToday'])  # new nominal
    portfolio['adjust'] = portfolio['nominalNew'] - portfolio['nominal']  # nominal adjustment (shares to buy/sell)
    portfolio['percentReb'] = (portfolio['nominalNew'] * portfolio['priceToday']) / sum(portfolio['nominalNew'] * portfolio['priceToday'])
    # Linking columns that connect this statement with the next period's sheet.
    portfolio['notionalRebalance'] = sum(portfolio['nominalNew'] * portfolio['priceToday'])
    portfolio['liquidityToReinvest'] = (portfolio['notionalToday'] + portfolio['oldLiquidity']) - portfolio['notionalRebalance']
    return portfolio
def DepositOrWithdraw(data, ammount):
    """Rebalance the portfolio after a cash deposit (positive amount) or
    withdrawal (negative amount).

    :param data: client-sheet DataFrame; assumes a 'Unnamed: 0' column holds
        the tickers (Excel export layout — TODO confirm against the sheet),
        plus 'nominal', 'price' and 'liquid' columns.
    :param ammount: cash change to apply to the notional (coerced to float).
        (Parameter name keeps the original spelling for caller compatibility.)
    :return: DataFrame indexed by ticker with the suggested new holdings.
    """
    ammount = float(ammount)
    cartera = data  # pd.read_excel(str(input("Type excel to work with: ")))
    portfolio = pd.DataFrame(index=cartera['Unnamed: 0'].values)
    portfolio['nominal'] = cartera['nominal'].values
    portfolio['pricePaid'] = cartera['price'].values
    # Target weights implied by the original purchase (cost-based weights).
    portfolio['weights'] = (portfolio['nominal'] * portfolio['pricePaid']) / sum(portfolio['nominal'] * portfolio['pricePaid'])
    portfolio['notionalStart'] = sum(portfolio['nominal'] * portfolio['pricePaid'])
    portfolio['oldLiquidity'] = cartera['liquid'].values
    stocks = list(portfolio.index)
    # Latest traded price per ticker (last row of a 2-day/1-minute download).
    portfolio['priceToday'] = (yahoo.download(stocks,period="2d",interval="1m")['Adj Close'].fillna(method='ffill')).tail(1).T
    portfolio['notionalToday'] = sum(portfolio['priceToday'] * portfolio['nominal'])
    portfolio['PnLpercent'] = portfolio['notionalToday'] / portfolio['notionalStart']
    portfolio['PnLpercentEach'] = portfolio['priceToday'] / portfolio['pricePaid']
    portfolio['DepositOrWithdraw'] = ammount
    # New nominal given by updated value of our shares plus the cash change,
    # redistributed at the target weights (floor division keeps whole shares).
    portfolio['nominalNew'] = (portfolio['weights'] * ((portfolio['notionalToday'] + portfolio['oldLiquidity']) + portfolio['DepositOrWithdraw']) // portfolio['priceToday'])  # new nominal
    portfolio['adjust'] = portfolio['nominalNew'] - portfolio['nominal']  # nominal adjustment (shares to buy/sell)
    portfolio['percentReb'] = (portfolio['nominalNew'] * portfolio['priceToday']) / sum(portfolio['nominalNew'] * portfolio['priceToday'])
    # Link previous statement with the new situation.
    portfolio['notionalRebalance'] = sum(portfolio['nominalNew'] * portfolio['priceToday'])
    portfolio['liquidityToReinvest'] = ((portfolio['notionalToday'] + portfolio['oldLiquidity']) + portfolio['DepositOrWithdraw']) - portfolio['notionalRebalance']
    return portfolio
def AdjustRisk(data):
    """Annotate the portfolio with Component-VaR-minimizing weights.

    Downloads one year of hourly prices for the portfolio's tickers, computes
    a Component-VaR attribution, and derives 'MinCVaR' weights (capped at 12%
    per instrument, then renormalized).

    :param data: DataFrame with a 'Unnamed: 0' ticker column and a 'weights'
        column (assumes the Excel-export layout — TODO confirm).
    :return: the same DataFrame with 'MinCVaR' and 'lastPrice' columns added.

    NOTE(review): the baseline weights are drawn at random (np.random), so
    repeated calls give different suggestions unless the RNG is seeded.
    """
    portfolio = data  # pd.read_excel(str(input("Type excel to work with: ")))
    listado = list(portfolio['Unnamed: 0'].values)
    data = yahoo.download(listado,period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
    returns = data.pct_change()
    correlation = returns.corr()  # correlation matrix of hourly returns
    covariance = returns.cov()  # covariance matrix of hourly returns
    instruments = pd.DataFrame(index= data.columns)
    # Random baseline weights, shifted and normalized so they sum to 1.
    sample = np.random.random_sample(size=(len(data.columns),1)) + (1.0 / len(data.columns))
    sample /= np.sum(sample)
    instruments['weigths'] = sample  # ensures allocation sums to 1
    #instruments['weigths'] = 1/len(instruments.index) # secure equal allocation
    instruments['deltas'] = (instruments.weigths * correlation).sum()  # deltas as elasticity of the assets
    instruments['Stdev'] = returns.std()
    instruments['stress'] = (instruments.deltas * instruments.Stdev) * 3  # stress scaled at 3 standard deviations
    instruments['portfolio_stress'] = instruments.stress.sum()  # the stress of the portfolio
    risk = pd.DataFrame(index=data.columns)
    risk['numerator'] = (instruments.deltas.multiply(covariance)).sum()
    # -2.365 is a fixed VaR quantile scaling constant — source unclear,
    # TODO confirm the confidence level it encodes.
    risk['denominator'] = data.pct_change().std() * (-2.365)
    risk['GradVaR'] = -risk.numerator / risk.denominator
    risk['CVaRj'] = risk.GradVaR * instruments.deltas  # Component VaR of the Risk Factors j
    risk['thetai'] = (risk.CVaRj * correlation).sum()  # Theta i of the instruments
    risk['CVaRi'] = risk.thetai * (1/len(data.columns))  # Component VaR of the Instruments i
    risk['totalCVaRi'] = risk.CVaRi.sum()  # total CVaR of the portfolio
    risk['CVaRattribution'] = risk.CVaRi / risk.totalCVaRi  # risk allocation by instrument in the portfolio
    riskadj = pd.DataFrame(index=data.columns)
    riskadj['base'] = instruments['weigths'].values
    riskadj['CVaRattribution'] = risk.CVaRattribution.sort_values(axis=0,ascending=False)
    riskadj['new'] = portfolio['weights'].values  # current portfolio weights to be risk-adjusted
    riskadj['condition'] = (riskadj.base / riskadj.CVaRattribution)
    riskadj['newrisk'] = (riskadj.new / riskadj.CVaRattribution)
    riskadj['differences'] = (riskadj.newrisk - riskadj.condition)  # apply this result as a percentage to multiply new weights
    # If the adjustment is negative the weight shrinks; if positive it grows —
    # the adjustments are relative to the baseline 'condition'.
    riskadj['adjustments'] = (riskadj.newrisk - riskadj.condition) / riskadj.condition
    riskadj['suggested'] = riskadj.new * (1 + riskadj.adjustments)
    riskadj['tototal'] = riskadj.suggested.sum()
    riskadj['MinCVaR'] = riskadj.suggested / riskadj.tototal
    result = pd.DataFrame(riskadj['MinCVaR'].values,columns=['MinCVaR'],index=data.columns)
    result[result.MinCVaR>=0.12] = 0.12  # cap any single instrument at 12%
    result['MinCVaR'] = result['MinCVaR'] / sum(result['MinCVaR'])  # renormalize after capping
    result['lastPrice'] = (data.tail(1).T.values)
    portfolio['MinCVaR'] = result['MinCVaR'].values
    portfolio['lastPrice'] = result['lastPrice'].values
    return portfolio
def portfolioRiskUpdated(data):
    """Rebalance the portfolio to the Component-VaR-minimizing weights.

    Runs :func:`AdjustRisk` to obtain 'MinCVaR' weights and 'lastPrice',
    then rebuilds the holdings at those weights.

    :param data: client-sheet DataFrame (same layout as DepositOrWithdraw).
    :return: DataFrame indexed by ticker with the risk-adjusted holdings.
    """
    update = AdjustRisk(data)
    df = pd.DataFrame(index=update['Unnamed: 0'].values)
    df['nominal'] = update['nominal'].values
    df['pricePaid'] = update['price'].values
    # New weights from the CVaR analysis, renormalized to ensure they sum to 1.
    df['weights'] = (update['MinCVaR'].values) / sum(update['MinCVaR'].values)
    df['notionalStart'] = sum(df['nominal'] * df['pricePaid'])
    df['oldLiquidity'] = update['liquid'].values
    stocks = list(df.index.values)  # NOTE(review): unused — kept for parity with sibling functions
    df['priceToday'] = update['lastPrice'].values
    df['notionalToday'] = sum(df['priceToday'] * df['nominal'])
    df['PnLpercent'] = df['notionalToday'] / df['notionalStart']
    df['PnLpercentEach'] = df['priceToday'] / df['pricePaid']
    # New nominal: reinvest market value plus leftover cash at the CVaR
    # weights (floor division keeps whole shares).
    df['nominalNew'] = ((df['weights'] * (df['notionalToday'] + df['oldLiquidity'])) // df['priceToday'])  # new nominal
    df['adjust'] = df['nominalNew'] - df['nominal']  # nominal adjustment (shares to buy/sell)
    df['percentReb'] = (df['nominalNew'] * df['priceToday']) / sum(df['nominalNew'] * df['priceToday'])
    # Linking columns that connect this statement with the next period's sheet.
    df['notionalRebalance'] = sum(df['nominalNew'] * df['priceToday'])
    df['liquidityToReinvest'] = (df['notionalToday'] + df['oldLiquidity']) - df['notionalRebalance']
    return df
def BacktoBasics(portfolio):
    """Translate a rebalance suggestion back into the original client-sheet layout.

    :param portfolio: DataFrame produced by the rebalance functions, carrying
        'notionalRebalance', 'liquidityToReinvest', 'priceToday' and
        'percentReb' columns.
    :return: DataFrame indexed like *portfolio* with the columns 'capital',
        'price', 'weights', 'cash', 'nominal', 'invested', 'percentage',
        'total' and 'liquid'.
    """
    # Total capital available per row: rebalanced notional plus leftover cash.
    total_capital = portfolio.notionalRebalance.values + portfolio.liquidityToReinvest.values
    df = pd.DataFrame({'capital': total_capital}, index=portfolio.index)
    df['price'] = portfolio['priceToday'].values
    df['weights'] = portfolio['percentReb'].values
    df['cash'] = df['capital'] * df['weights']
    # Whole shares affordable with the allocated cash.
    df['nominal'] = df['cash'] // df['price']
    df['invested'] = df['price'] * df['nominal']
    df['percentage'] = df['invested'] / df['invested'].sum()
    df['total'] = df['invested'].sum()
    # Cash left over after buying whole shares.
    df['liquid'] = df['capital'] - df['total']
    return df
| 64.18 | 187 | 0.716942 |
b7dbed5e1b13ea2e5ed28f8bc2240f4a17572a74 | 16,505 | py | Python | partial_recovery/partial.py | olibraga/MySQL-AutoXtraBackup | 51afdd437fd67bede9719d46c5d5b0c94a6304dc | [
"MIT"
] | null | null | null | partial_recovery/partial.py | olibraga/MySQL-AutoXtraBackup | 51afdd437fd67bede9719d46c5d5b0c94a6304dc | [
"MIT"
] | null | null | null | partial_recovery/partial.py | olibraga/MySQL-AutoXtraBackup | 51afdd437fd67bede9719d46c5d5b0c94a6304dc | [
"MIT"
] | null | null | null | import os
import shutil
import subprocess
from general_conf.generalops import GeneralClass
import re
from general_conf import check_env
from general_conf import path_config
import logging
logger = logging.getLogger(__name__)
class PartialRecovery(GeneralClass):
    """Restore a single InnoDB table from an XtraBackup full backup using the
    MySQL 5.6+ "Transportable Tablespace" workflow.

    The flow (see :meth:`final_actions`): verify server prerequisites, make
    sure the database/table exist (recreating the table from its backed-up
    ``.frm`` if needed), lock the table, discard its tablespace, copy the
    backed-up ``.ibd`` file into the datadir, fix ownership, import the
    tablespace and unlock.

    Requires the ``mysqlfrm`` utility (from mysql-utilities) on PATH.
    """

    def __init__(self, config=path_config.config_path_file):
        """Initialize from the tool's config file and verify mysqlfrm exists.

        :param config: path to the autoxtrabackup configuration file.
        :raises RuntimeError: if ``mysqlfrm`` is not found on PATH.
        """
        self.conf = config
        GeneralClass.__init__(self, self.conf)
        if shutil.which('mysqlfrm') is None:
            logger.critical("Could not find mysqlfrm! Please install it or check if it is in PATH")
            raise RuntimeError("Could not find mysqlfrm! Please install it or check if it is in PATH")

    def create_mysql_client_command(self, statement):
        """Build a ``mysql`` CLI command line that executes *statement*.

        Connects through the Unix socket when ``mysql_socket`` is configured,
        otherwise over TCP using ``mysql_host``/``mysql_port``.

        :param statement: SQL statement to pass to ``mysql -e``.
        :return: the fully formatted shell command string.
        """
        command_connection = '{} --defaults-file={} -u{} --password={}'
        command_execute = ' -e "{}"'
        if hasattr(self, 'mysql_socket'):
            command_connection += ' --socket={}'
            command_connection += command_execute
            new_command = command_connection.format(
                self.mysql,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                self.mysql_socket,
                statement)
            return new_command
        else:
            # BUG FIX: the TCP template previously lacked a '--host={}'
            # placeholder, so format() shifted the arguments: mysql_host
            # filled '--port=', mysql_port filled '-e "{}"', and the SQL
            # statement was silently dropped.
            command_connection += ' --host={} --port={}'
            command_connection += command_execute
            new_command = command_connection.format(
                self.mysql,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                self.mysql_host,
                self.mysql_port,
                statement)
            return new_command

    def check_innodb_file_per_table(self):
        """
        Function for checking MySQL innodb_file_per_table option.
        It is needed for "Transportable Tablespace" concept.
        :return: True if enabled, False if disabled
        :raises RuntimeError: if the check itself fails.
        """
        statement = "select @@global.innodb_file_per_table"
        run_command = self.create_mysql_client_command(statement=statement)
        logger.info("Checking if innodb_file_per_table is enabled")
        status, output = subprocess.getstatusoutput(run_command)
        # output[-1] is the last character of the CLI output, expected to be
        # the '0'/'1' value of the variable.
        if status == 0 and int(output[-1]) == 1:
            logger.info("OK: innodb_file_per_table is enabled!")
            return True
        elif status == 0 and int(output[-1]) == 0:
            logger.info("OK: innodb_file_per_table is disabled!")
            return False
        else:
            logger.error("FAILED: InnoDB file per-table Check")
            logger.error(output)
            raise RuntimeError("FAILED: InnoDB file per-table Check")

    def check_mysql_version(self):
        """
        Function for checking MySQL version.
        Version must be >= 5.6 for using "Transportable Tablespace" concept.
        :return: True if the server reports 5.6 or 5.7
        :raises RuntimeError: for unsupported versions or a failed check.
        """
        statement = "select @@version"
        run_command = self.create_mysql_client_command(statement=statement)
        logger.info("Checking MySQL version")
        status, output = subprocess.getstatusoutput(run_command)
        if status == 0 and ('5.6' in output):
            logger.info("You have correct version of MySQL")
            return True
        elif status == 0 and ('5.7' in output):
            logger.info("You have correct version of MySQL")
            return True
        elif status == 0 and ('5.7' not in output) and ('5.6' not in output):
            logger.error("Your MySQL server is not supported. MySQL version must be >= 5.6")
            raise RuntimeError("Your MySQL server is not supported. MySQL version must be >= 5.6")
        else:
            logger.error("FAILED: MySQL version check")
            logger.error(output)
            raise RuntimeError("FAILED: MySQL version check")

    def check_database_exists_on_mysql(self, database_name):
        """
        Check if this database already exists in the MySQL server.
        If there is no such database, interactively offer to create it.
        :param database_name: Specified database name
        :return: True if it exists or was created; False if the user declined.
        :raises RuntimeError: if the check or the CREATE DATABASE fails.
        """
        statement = "SELECT count(*) FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '%s'" % database_name
        run_command = self.create_mysql_client_command(statement=statement)
        logger.info("Checking if database exists in MySQL")
        status, output = subprocess.getstatusoutput(run_command)
        if status == 0 and int(output[-1]) == 1:
            logger.info("Database exists!")
            return True
        if status == 0 and int(output[-1]) == 0:
            logger.info("There is no such database!")
            logger.info("Create Specified Database in MySQL Server, before restoring single table")
            answer = input("We can create it for you do you want? (yes/no): ")
            if answer == 'yes':
                create_db = "create database %s" % database_name
                run_command = self.create_mysql_client_command(statement=create_db)
                logger.info("Creating specified database")
                status, output = subprocess.getstatusoutput(run_command)
                if status == 0:
                    logger.info("OK: {} database created".format(database_name))
                    return True
                else:
                    logger.error("FAILED: to create database!")
                    logger.error(output)
                    raise RuntimeError("FAILED: to create database!")
            else:  # any non-"yes" answer aborts
                logger.error("Exited!")
                return False
        else:
            logger.error("FAILED: Check for database")
            logger.error(output)
            raise RuntimeError("FAILED: Check for database")

    def check_table_exists_on_mysql(
            self,
            path_to_frm_file,
            database_name,
            table_name):
        """
        Check if the table exists on MySQL.
        If it was dropped, try to extract the CREATE TABLE statement from the
        backed-up .frm file (via mysqlfrm) and recreate the table.
        :param path_to_frm_file: Path to the .frm file in the backup
        :param database_name: Specified database name
        :param table_name: Specified table name
        :return: True if the table exists or was recreated
        :raises RuntimeError: if the check or the recreation fails.
        """
        statement = "select count(*) from INFORMATION_SCHEMA.tables " \
                    "where table_schema = '%s'" \
                    "and table_name = '%s'" % (database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        logger.info("Checking if table exists in MySQL Server")
        status, output = subprocess.getstatusoutput(run_command)
        if status == 0 and int(output[-1]) == 1:
            logger.info("Table exists in MySQL Server.")
            return True
        elif status == 0 and int(output[-1]) == 0:
            logger.info("Table does not exist in MySQL Server.")
            logger.info("You can not restore table, with not existing tablespace file(.ibd)!")
            logger.info("We will try to extract table create statement from .frm file, from backup folder")
            create = self.run_mysqlfrm_utility(path_to_frm_file=path_to_frm_file)
            # Extract the CREATE TABLE ... ENGINE=... block from mysqlfrm's
            # diagnostic output (skipping '#' comment lines).
            regex = re.compile(r'((\n)CREATE((?!#).)*ENGINE=\w+)', re.DOTALL)
            matches = [m.groups() for m in regex.finditer(create)]
            for m in matches:
                create_table = m[0]
                # Strip backquotes so the statement survives shell quoting.
                new_create_table = create_table.replace("`", "")
                run_command = self.create_mysql_client_command(statement=new_create_table)
                status, output = subprocess.getstatusoutput(run_command)
                if status == 0:
                    logger.info("Table Created from .frm file!")
                    return True
                else:
                    logger.error("Failed to create table from .frm file!")
                    logger.error(output)
                    raise RuntimeError("Failed to create table from .frm file!")
        else:
            logger.error("FAILED: Check if table exists")
            logger.error(output)
            raise RuntimeError("FAILED: Check if table exists")

    @staticmethod
    def run_mysqlfrm_utility(path_to_frm_file):
        """Run ``mysqlfrm --diagnostic`` on a .frm file and return its output.

        :param path_to_frm_file: path to the .frm file to inspect.
        :raises RuntimeError: if mysqlfrm exits non-zero.
        """
        command = '/usr/bin/mysqlfrm --diagnostic %s' % path_to_frm_file
        logger.info("Running mysqlfrm tool")
        status, output = subprocess.getstatusoutput(command)
        if status == 0:
            logger.info("OK: Success to run mysqlfrm")
            return output
        else:
            logger.error("FAILED: run mysqlfrm")
            logger.error(output)
            raise RuntimeError("FAILED: run mysqlfrm")

    def get_table_ibd_file(self, database_name, table_name):
        """
        Locate the backed-up table's .ibd file (the tablespace holding the
        table data) under the full-backup directory.
        :param database_name: Specified database name
        :param table_name: Specified table name
        :return: full path of the first matching .ibd file
        :raises RuntimeError: if the database/table is not in the backup (or
            the table's storage engine is not InnoDB).
        """
        database_dir_list = []
        database_objects_full_path = []
        find_objects_full_path = []
        table_dir_list = []
        # Collect every file inside <full_dir>/<backup>/<database_name>.
        for i in os.listdir(self.full_dir):
            for x in os.listdir(self.full_dir + "/" + i):
                if os.path.isdir(
                        self.full_dir +
                        "/" +
                        i +
                        "/" +
                        x) and x == database_name:
                    for z in os.listdir(self.full_dir + "/" + i + "/" + x):
                        database_dir_list.append(z)
                        database_objects_full_path.append(
                            self.full_dir + "/" + i + "/" + x + "/" + z)
        # If the database directory exists, find the requested table's files.
        if len(database_dir_list) > 0:
            for i in database_dir_list:
                base_file = os.path.splitext(i)[0]
                if table_name == base_file:
                    table_dir_list.append(i)
        # Why 2? Every InnoDB table must have both a .frm and an .ibd file.
        if len(database_dir_list) > 0 and len(table_dir_list) == 2:
            for i in table_dir_list:
                ext = os.path.splitext(i)[1]
                if ext == '.ibd':
                    for a in database_objects_full_path:
                        if i in a:
                            find_objects_full_path.append(a)
        if len(find_objects_full_path) > 0:
            # Return the first match found.
            for x in find_objects_full_path:
                return x
        else:
            logger.error("Sorry, There is no such Database or Table in backup directory")
            logger.error("Or maybe table storage engine is not InnoDB")
            raise RuntimeError("Sorry, There is no such Database or Table in backup directory "
                               "Or maybe table storage engine is not InnoDB ")

    def lock_table(self, database_name, table_name):
        """Take a WRITE lock on the table before the tablespace swap."""
        statement = "LOCK TABLES %s.%s WRITE" % (database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.info("Applying write lock!")
        if status == 0:
            logger.info("OK: Table is locked")
            return True
        else:
            logger.error("FAILED: to LOCK!")
            logger.error(output)
            raise RuntimeError("FAILED: to LOCK!")

    def alter_tablespace(self, database_name, table_name):
        """Run ALTER TABLE ... DISCARD TABLESPACE on the target table."""
        statement = "ALTER TABLE %s.%s DISCARD TABLESPACE" % (
            database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.info("Discarding tablespace")
        if status == 0:
            logger.info("OK: Tablespace discarded successfully")
            return True
        else:
            logger.error("FAILED: discard tablespace!")
            logger.error(output)
            raise RuntimeError("FAILED: discard tablespace!")

    @staticmethod
    def copy_ibd_file_back(path_of_ibd_file, path_to_mysql_database_dir):
        """Copy the backed-up .ibd file into the live MySQL database dir."""
        try:
            logger.info("OK: Copying .ibd file back")
            shutil.copy(path_of_ibd_file, path_to_mysql_database_dir)
            return True
        except Exception as err:
            logger.error("FAILED: copy .ibd file back")
            logger.error(err)
            raise RuntimeError("FAILED: copy .ibd file back")

    def give_chown(self, path_to_mysql_database_dir):
        """Run the configured chown command so mysqld owns the copied file."""
        comm = '%s %s' % (self.chown_command, path_to_mysql_database_dir)
        status, output = subprocess.getstatusoutput(comm)
        logger.info("Running chown command!")
        if status == 0:
            logger.info("OK: Chown command completed")
            return True
        else:
            logger.error("FAILED: Chown Command")
            raise RuntimeError("FAILED: Chown Command")

    def import_tablespace(self, database_name, table_name):
        """Run ALTER TABLE ... IMPORT TABLESPACE to attach the copied .ibd."""
        statement = "ALTER TABLE %s.%s IMPORT TABLESPACE" % (
            database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.info("Importing Tablespace!")
        if status == 0:
            logger.info("OK: Tablespace imported")
            return True
        else:
            logger.error("FAILED: Tablespace import")
            logger.error(output)
            raise RuntimeError("FAILED: Tablespace import")

    def unlock_tables(self):
        """Release the table locks taken by :meth:`lock_table`."""
        statement = "unlock tables"
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.info("Unlocking tables!")
        if status == 0:
            logger.info("OK: Unlocked!")
            return True
        else:
            logger.error("FAILED: Unlocking")
            logger.error(output)
            raise RuntimeError("FAILED: Unlocking")

    def final_actions(self):
        """Interactively run the full single-table restore workflow.

        Prompts for the database and table name, then performs:
        prerequisite checks -> lock -> discard tablespace -> copy .ibd ->
        chown -> import tablespace -> unlock.
        :return: True on success.
        :raises RuntimeError: if any step fails.
        """
        database_name = input("Type Database name: ")
        table_name = input("Type Table name: ")
        # Raises if the table is not found in the backup.
        path = self.get_table_ibd_file(
            database_name=database_name,
            table_name=table_name)
        path_to_mysql_datadir = self.datadir + "/" + database_name
        if path:
            # The matching .frm sits next to the .ibd — swap the extension.
            path_to_frm_file = path[:-3] + 'frm'
        obj_check_env = check_env.CheckEnv(self.conf)
        if path:
            try:
                obj_check_env.check_mysql_uptime()
                self.check_innodb_file_per_table()
                self.check_mysql_version()
                self.check_database_exists_on_mysql(
                    database_name=database_name)
                self.check_table_exists_on_mysql(
                    path_to_frm_file=path_to_frm_file,
                    database_name=database_name,
                    table_name=table_name)
                self.lock_table(database_name=database_name, table_name=table_name)
                self.alter_tablespace(database_name=database_name, table_name=table_name)
                self.copy_ibd_file_back(path_of_ibd_file=path, path_to_mysql_database_dir=path_to_mysql_datadir)
                self.give_chown(path_to_mysql_database_dir=path_to_mysql_datadir)
                self.import_tablespace(database_name=database_name, table_name=table_name)
                self.unlock_tables()
            except Exception as err:
                logger.error("FAILED: Table is not recovered")
                logger.error(err)
                raise RuntimeError("FAILED: Table is not recovered")
            else:
                logger.info("OK: Table Recovered! ...")
                return True
| 43.434211 | 112 | 0.593941 |
e18ec946340160d8f85beebaadd4a90b145346c0 | 4,783 | py | Python | docs/conf.py | qTipTip/SEAL | 1fe9f26abe49127263a16c67e28dcc07a7ecee06 | [
"MIT"
] | 13 | 2017-04-02T21:16:06.000Z | 2021-09-13T13:50:24.000Z | docs/conf.py | qTipTip/SEAL | 1fe9f26abe49127263a16c67e28dcc07a7ecee06 | [
"MIT"
] | 1 | 2017-05-10T22:55:34.000Z | 2017-05-10T22:55:34.000Z | docs/conf.py | qTipTip/SEAL | 1fe9f26abe49127263a16c67e28dcc07a7ecee06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SEAL documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 12 21:18:04 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary']
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SEAL'
copyright = '2017, Ivar Stangeby'
author = 'Ivar Stangeby'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SEALdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SEAL.tex', 'SEAL Documentation',
'Ivar Stangeby', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'seal', 'SEAL Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SEAL', 'SEAL Documentation',
author, 'SEAL', 'One line description of project.',
'Miscellaneous'),
]
| 30.272152 | 84 | 0.67949 |
87e0582ab205eb3d62c81e53087a4e82cd786ce3 | 3,664 | py | Python | influxdb_client/domain/resource_members.py | MASIFAYUB/influxdb-client-python | a067fa5670a6fbc600db2ac4e54e29e1b7124998 | [
"MIT"
] | null | null | null | influxdb_client/domain/resource_members.py | MASIFAYUB/influxdb-client-python | a067fa5670a6fbc600db2ac4e54e29e1b7124998 | [
"MIT"
] | null | null | null | influxdb_client/domain/resource_members.py | MASIFAYUB/influxdb-client-python | a067fa5670a6fbc600db2ac4e54e29e1b7124998 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceMembers(object):
    """NOTE: This class is auto generated by OpenAPI Generator.

    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'links': 'ResourceMembersLinks',
        'users': 'list[ResourceMember]'
    }

    # Attribute name -> key used in the JSON wire format.
    attribute_map = {
        'links': 'links',
        'users': 'users'
    }

    def __init__(self, links=None, users=None):  # noqa: E501,D401,D403
        """ResourceMembers - a model defined in OpenAPI."""  # noqa: E501
        self._links = None
        self._users = None
        # No polymorphic discriminator is defined for this model.
        self.discriminator = None

        if links is not None:
            self.links = links
        if users is not None:
            self.users = users

    @property
    def links(self):
        """Get the links of this ResourceMembers.

        :return: The links of this ResourceMembers.
        :rtype: ResourceMembersLinks
        """  # noqa: E501
        return self._links

    @links.setter
    def links(self, links):
        """Set the links of this ResourceMembers.

        :param links: The links of this ResourceMembers.
        :type: ResourceMembersLinks
        """  # noqa: E501
        self._links = links

    @property
    def users(self):
        """Get the users of this ResourceMembers.

        :return: The users of this ResourceMembers.
        :rtype: list[ResourceMember]
        """  # noqa: E501
        return self._users

    @users.setter
    def users(self, users):
        """Set the users of this ResourceMembers.

        :param users: The users of this ResourceMembers.
        :type: list[ResourceMember]
        """  # noqa: E501
        self._users = users

    def to_dict(self):
        """Return the model properties as a dict."""
        result = {}

        # six.iteritems keeps Python 2 compatibility in this generated code.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model instances inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model instances inside dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, ResourceMembers):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
| 27.548872 | 159 | 0.566048 |
31cf98c8ddd3ecca097dd96b9b7f91040a324935 | 7,247 | py | Python | scripts/cros_set_lsb_release.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | scripts/cros_set_lsb_release.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | scripts/cros_set_lsb_release.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for setting the /etc/lsb-release file of an image."""
from __future__ import print_function
import getpass
import os
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import image_lib
# LSB keys:
# Set google-specific version numbers:
# CHROMEOS_RELEASE_BOARD is the target board identifier.
# CHROMEOS_RELEASE_BRANCH_NUMBER is the Chrome OS branch number
# CHROMEOS_RELEASE_BUILD_NUMBER is the Chrome OS build number
# CHROMEOS_RELEASE_BUILD_TYPE is the type of build (official, from developers,
# etc..)
# CHROMEOS_RELEASE_CHROME_MILESTONE is the Chrome milestone (also named Chrome
# branch).
# CHROMEOS_RELEASE_DESCRIPTION is the version displayed by Chrome; see
# chrome/browser/chromeos/chromeos_version_loader.cc.
# CHROMEOS_RELEASE_NAME is a human readable name for the build.
# CHROMEOS_RELEASE_PATCH_NUMBER is the patch number for the current branch.
# CHROMEOS_RELEASE_TRACK and CHROMEOS_RELEASE_VERSION are used by the software
# update service.
# TODO(skrul): Remove GOOGLE_RELEASE once Chromium is updated to look at
# CHROMEOS_RELEASE_VERSION for UserAgent data.
# /etc/lsb-release field names; the value written for each key is
# assembled in main().
LSB_KEY_NAME = 'CHROMEOS_RELEASE_NAME'
LSB_KEY_AUSERVER = 'CHROMEOS_AUSERVER'
LSB_KEY_DEVSERVER = 'CHROMEOS_DEVSERVER'
LSB_KEY_TRACK = 'CHROMEOS_RELEASE_TRACK'
LSB_KEY_BUILD_TYPE = 'CHROMEOS_RELEASE_BUILD_TYPE'
LSB_KEY_DESCRIPTION = 'CHROMEOS_RELEASE_DESCRIPTION'
LSB_KEY_BOARD = 'CHROMEOS_RELEASE_BOARD'
LSB_KEY_BRANCH_NUMBER = 'CHROMEOS_RELEASE_BRANCH_NUMBER'
LSB_KEY_BUILD_NUMBER = 'CHROMEOS_RELEASE_BUILD_NUMBER'
LSB_KEY_CHROME_MILESTONE = 'CHROMEOS_RELEASE_CHROME_MILESTONE'
LSB_KEY_PATCH_NUMBER = 'CHROMEOS_RELEASE_PATCH_NUMBER'
LSB_KEY_VERSION = 'CHROMEOS_RELEASE_VERSION'
LSB_KEY_BUILDER_PATH = 'CHROMEOS_RELEASE_BUILDER_PATH'
LSB_KEY_GOOGLE_RELEASE = 'GOOGLE_RELEASE'
LSB_KEY_APPID_RELEASE = 'CHROMEOS_RELEASE_APPID'
LSB_KEY_APPID_BOARD = 'CHROMEOS_BOARD_APPID'
LSB_KEY_APPID_CANARY = 'CHROMEOS_CANARY_APPID'
LSB_KEY_ARC_VERSION = 'CHROMEOS_ARC_VERSION'

# App id written to CHROMEOS_CANARY_APPID whenever --app_id is given.
CANARY_APP_ID = "{90F229CE-83E2-4FAF-8479-E368A34938B1}"
def _ParseArguments(argv):
  """Parse and validate the command line arguments.

  Args:
    argv: Raw command line arguments to parse.

  Returns:
    A frozen options namespace with server URL defaults filled in.
  """
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('--app_id', default=None,
                      help='The APP_ID to install.')
  parser.add_argument('--board', help='The board name.', required=True)
  parser.add_argument('--sysroot', required=True, type='path',
                      help='The sysroot to install the lsb-release file into.')
  parser.add_argument('--version_string', required=True,
                      help='The image\'s version string.')
  parser.add_argument('--builder_path', default=None,
                      help='The image\'s builder path.')
  parser.add_argument('--auserver', default=None,
                      help='The auserver url to use.')
  parser.add_argument('--devserver', default=None,
                      help='The devserver url to use.')
  parser.add_argument('--official', action='store_true',
                      help='Whether or not to populate with fields for an '
                           'official image.')
  parser.add_argument('--buildbot_build', default='N/A',
                      help='The build number, for use with the continuous '
                           'builder.')
  parser.add_argument('--track', default='developer-build',
                      help='The type of release track.')
  parser.add_argument('--branch_number', default='0',
                      help='The branch number.')
  parser.add_argument('--build_number', default='0',
                      help='The build number.')
  parser.add_argument('--chrome_milestone', default='0',
                      help='The Chrome milestone.')
  parser.add_argument('--patch_number', default='0',
                      help='The patch number for the given branch.')
  parser.add_argument('--arc_version', default=None,
                      help='The ARC version.')

  options = parser.parse_args(argv)

  # Any update/dev server left unset (or passed as an empty string) falls
  # back to a URL on this machine's fully qualified hostname.
  hostname = cros_build_lib.GetHostName(fully_qualified=True)
  options.auserver = options.auserver or 'http://%s:8080/update' % hostname
  options.devserver = options.devserver or 'http://%s:8080' % hostname

  options.Freeze()

  if not os.path.isdir(options.sysroot):
    cros_build_lib.Die('The target sysroot does not exist: %s' % options.sysroot)

  if not options.version_string:
    cros_build_lib.Die('version_string must not be empty. Was '
                       'chromeos_version.sh sourced correctly in the calling '
                       'script?')

  return options
def main(argv):
  """Assemble the lsb-release fields for this image and write them out."""
  opts = _ParseArguments(argv)

  # Baseline fields shared by every build flavor; the flavor-specific
  # branches below may override NAME/AUSERVER/DEVSERVER.
  fields = {
      LSB_KEY_NAME: 'Chromium OS',
      LSB_KEY_AUSERVER: opts.auserver,
      LSB_KEY_DEVSERVER: opts.devserver,
  }

  # Optional fields, written only when the corresponding flag was passed.
  if opts.app_id is not None:
    fields[LSB_KEY_APPID_RELEASE] = opts.app_id
    fields[LSB_KEY_APPID_BOARD] = opts.app_id
    fields[LSB_KEY_APPID_CANARY] = CANARY_APP_ID
  if opts.arc_version is not None:
    fields[LSB_KEY_ARC_VERSION] = opts.arc_version
  if opts.builder_path is not None:
    fields[LSB_KEY_BUILDER_PATH] = opts.builder_path

  if opts.official:
    # Official builds (i.e. buildbot).
    track = 'dev-channel'
    build_type = 'Official Build'
    fields.update({
        LSB_KEY_TRACK: track,
        LSB_KEY_NAME: 'Chrome OS',
        LSB_KEY_BUILD_TYPE: build_type,
        LSB_KEY_DESCRIPTION: '%s (%s) %s %s test' % (
            opts.version_string, build_type, track, opts.board),
        LSB_KEY_AUSERVER: 'https://tools.google.com/service/update2',
        LSB_KEY_DEVSERVER: '',
    })
  elif getpass.getuser() == 'chrome-bot':
    # Continuous builder.
    build_type = 'Continuous Builder - Builder: %s' % opts.buildbot_build
    fields.update({
        LSB_KEY_TRACK: 'buildbot-build',
        LSB_KEY_BUILD_TYPE: build_type,
        LSB_KEY_DESCRIPTION: '%s (%s) %s' % (
            opts.version_string, build_type, opts.board),
    })
  else:
    # Developer manual builds.
    build_type = 'Developer Build - %s' % getpass.getuser()
    fields.update({
        LSB_KEY_TRACK: opts.track,
        LSB_KEY_BUILD_TYPE: build_type,
        LSB_KEY_DESCRIPTION: '%s (%s) %s %s' % (
            opts.version_string, build_type, opts.track, opts.board),
    })

  # Version/build identifiers common to every flavor.
  fields.update({
      LSB_KEY_BOARD: opts.board,
      LSB_KEY_BRANCH_NUMBER: opts.branch_number,
      LSB_KEY_BUILD_NUMBER: opts.build_number,
      LSB_KEY_CHROME_MILESTONE: opts.chrome_milestone,
      LSB_KEY_PATCH_NUMBER: opts.patch_number,
      LSB_KEY_VERSION: opts.version_string,
      LSB_KEY_GOOGLE_RELEASE: opts.version_string,
  })

  image_lib.WriteLsbRelease(opts.sysroot, fields)
| 38.142105 | 79 | 0.666207 |
c0cea5e575834734b50b38475a0c39f4bf5a9895 | 41,855 | py | Python | appengine_utilities/sessions.py | joerussbowman/gaeutilities | b74958c25e131fbcaa8392f6d4f8afc45a9fb054 | [
"BSD-3-Clause"
] | 2 | 2015-03-31T05:46:07.000Z | 2015-11-05T01:48:27.000Z | appengine_utilities/sessions.py | joerussbowman/gaeutilities | b74958c25e131fbcaa8392f6d4f8afc45a9fb054 | [
"BSD-3-Clause"
] | null | null | null | appengine_utilities/sessions.py | joerussbowman/gaeutilities | b74958c25e131fbcaa8392f6d4f8afc45a9fb054 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# main python imports
import os
import time
import datetime
import random
import hashlib
import Cookie
import pickle
import sys
import logging
from time import strftime
# google appengine imports
from google.appengine.ext import db
from google.appengine.api import memcache
from django.utils import simplejson
# settings - prefer the application's own settings module, falling back to
# the bundled defaults when it is missing or comes from a different package.
try:
    import settings_default
    import settings

    if settings.__name__.rsplit('.', 1)[0] != settings_default.__name__.rsplit('.', 1)[0]:
        settings = settings_default
# Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are not
# swallowed; any import or attribute problem still drops us to defaults.
# NOTE(review): if "import settings_default" itself fails this fallback
# raises NameError (as in the original) -- confirm settings_default always
# ships with the package.
except Exception:
    settings = settings_default
class _AppEngineUtilities_Session(db.Model):
    """
    Model for the sessions in the datastore. This contains the identifier and
    validation information for the session.
    """

    # All tokens currently valid for this session: the newest plus up to two
    # predecessors. Each token embeds this entity's datastore key before the
    # final "_" (see Session.new_sid).
    sid = db.StringListProperty()
    # Browser IP and user agent captured at session creation, for optional
    # validation by the Session front end.
    ip = db.StringProperty()
    ua = db.StringProperty()
    last_activity = db.DateTimeProperty()
    # dirty: memcache holds newer state than the datastore (a db.put failed
    # and should be retried on a later request).
    dirty = db.BooleanProperty(default=False)
    # working: a write-back of dirty state is already in flight, so other
    # concurrent (e.g. ajax) requests should not also attempt it.
    working = db.BooleanProperty(default=False)
    # deleted: logically deleted but the datastore delete failed; purge when
    # next seen.
    deleted = db.BooleanProperty(default=False)

    def put(self):
        """
        Extend put so that it writes values to memcache as well as the
        datastore and keeps them in sync, even when the datastore write
        fails.

        Returns the session object.
        """
        try:
            # self.key() raises on an unsaved entity, so this only succeeds
            # for sessions that already have a datastore key.
            memcache.set(u"_AppEngineUtilities_Session_%s" % \
                (str(self.key())), self)
        except:
            # new session, generate a new key, which will handle the
            # put and set the memcache
            db.put(self)

        self.last_activity = datetime.datetime.now()

        try:
            self.dirty = False
            db.put(self)
            memcache.set(u"_AppEngineUtilities_Session_%s" % \
                (str(self.key())), self)
        except:
            # Datastore write failed: keep the freshest state in memcache
            # only and flag it for a later write-back.
            self.dirty = True
            memcache.set(u"_AppEngineUtilities_Session_%s" % \
                (str(self.key())), self)

        return self

    @classmethod
    def get_session(cls, session_obj=None):
        """
        Use the passed object's sid to get a session object from memcache,
        or datastore if a valid one exists.

        Args:
            session_obj: a session object

        Returns a validated session object, or None when the token is
        unknown or the session has expired.
        """
        if session_obj.sid == None:
            return None
        # The datastore key is everything before the final "_" of the token.
        session_key = session_obj.sid.rsplit(u'_', 1)[0]
        session = memcache.get(u"_AppEngineUtilities_Session_%s" % \
            (str(session_key)))
        if session:
            if session.deleted == True:
                # Complete a delete that previously failed in the datastore.
                session.delete()
                return None
            if session.dirty == True and session.working != False:
                # the working bit is used to make sure multiple requests,
                # which can happen with ajax oriented sites, don't try to put
                # at the same time
                # NOTE(review): this condition only retries the write-back
                # when working is already truthy; a dirty session with
                # working == False never retries here -- confirm intended.
                session.working = True
                memcache.set(u"_AppEngineUtilities_Session_%s" % \
                    (str(session_key)), session)
                session.put()
            if session_obj.sid in session.sid:
                sessionAge = datetime.datetime.now() - session.last_activity
                # NOTE(review): timedelta.seconds ignores the days component,
                # so very long idle periods may evade this expiry check.
                if sessionAge.seconds > session_obj.session_expire_time:
                    session.delete()
                    return None
                return session
            else:
                # Token is not among the (up to three) currently valid sids.
                return None

        # Not in memcache, check datastore
        try:
            ds_session = db.get(str(session_key))
        except:
            ds_session = None
        if ds_session:
            sessionAge = datetime.datetime.now() - ds_session.last_activity
            if sessionAge.seconds > session_obj.session_expire_time:
                ds_session.delete()
                return None
            # Re-prime memcache with the session and its data items.
            memcache.set(u"_AppEngineUtilities_Session_%s" % \
                (str(session_key)), ds_session)
            memcache.set(u"_AppEngineUtilities_SessionData_%s" % \
                (str(session_key)), ds_session.get_items_ds())
        return ds_session

    def get_items(self):
        """
        Return all the items stored in a session. Queries memcache first
        and will try the datastore next.
        """
        items = memcache.get(u"_AppEngineUtilities_SessionData_%s" % \
            (str(self.key())))
        if items:
            for item in items:
                # Purge items whose datastore delete previously failed.
                if item.deleted == True:
                    item.delete()
                    items.remove(item)
            # NOTE(review): removing from `items` while iterating it can
            # skip the element following each removal -- confirm acceptable.
            return items

        query = _AppEngineUtilities_SessionData.all()
        query.filter(u"session", self)
        results = query.fetch(1000)
        return results

    def get_item(self, keyname = None):
        """
        Return a single session data item from the memcache or datastore.

        Args:
            keyname: keyname of the session data object

        Returns the session data object if it exists, otherwise returns None
        """
        mc = memcache.get(u"_AppEngineUtilities_SessionData_%s" % \
            (str(self.key())))
        if mc:
            for item in mc:
                if item.keyname == keyname:
                    if item.deleted == True:
                        # Finish the pending delete; the item is gone.
                        item.delete()
                        return None
                    return item
        query = _AppEngineUtilities_SessionData.all()
        query.filter(u"session = ", self)
        query.filter(u"keyname = ", keyname)
        results = query.fetch(1)
        if len(results) > 0:
            # Found in the datastore but missing from memcache: refresh the
            # cached list of every item for this session.
            memcache.set(u"_AppEngineUtilities_SessionData_%s" % \
                (str(self.key())), self.get_items_ds())
            return results[0]
        return None

    def get_items_ds(self):
        """
        Get all session data objects from the datastore, bypassing
        memcache.

        Returns a list of session data entities.
        """
        query = _AppEngineUtilities_SessionData.all()
        query.filter(u"session", self)
        results = query.fetch(1000)
        return results

    def delete(self):
        """
        Delete a session and all its associated data from the datastore and
        memcache.

        Returns True
        """
        try:
            query = _AppEngineUtilities_SessionData.all()
            query.filter(u"session = ", self)
            results = query.fetch(1000)
            db.delete(results)
            db.delete(self)
            memcache.delete_multi([u"_AppEngineUtilities_Session_%s" % \
                (str(self.key())), \
                u"_AppEngineUtilities_SessionData_%s" % \
                (str(self.key()))])
        except:
            # Datastore delete failed: mark the cached copy deleted so a
            # later request can complete the purge.
            mc = memcache.get(u"_AppEngineUtilities_Session_%s" % \
                (str(self.key())))
            if mc:
                # NOTE(review): the flagged copy is not written back with
                # memcache.set here -- confirm the flag actually persists.
                mc.deleted = True
            else:
                # not in the memcache, check to see if it should be
                query = _AppEngineUtilities_Session.all()
                query.filter(u"sid = ", self.sid)
                results = query.fetch(1)
                if len(results) > 0:
                    results[0].deleted = True
                    memcache.set(u"_AppEngineUtilities_Session_%s" % \
                        (unicode(self.key())), results[0])
        return True
class _AppEngineUtilities_SessionData(db.Model):
    """
    Model for the session data in the datastore.
    """

    # session_key = db.FloatProperty()
    # Name of the stored value within its session.
    keyname = db.StringProperty()
    # Pickled payload, used when the stored value is not a datastore model.
    content = db.BlobProperty()
    # Reference to the stored value when it is itself a datastore model.
    model = db.ReferenceProperty()
    # Owning session entity.
    session = db.ReferenceProperty(_AppEngineUtilities_Session)
    # dirty/deleted mirror the flags on _AppEngineUtilities_Session: state
    # that memcache holds but whose datastore write/delete failed.
    dirty = db.BooleanProperty(default=False)
    deleted = db.BooleanProperty(default=False)

    def put(self):
        """
        Add a keyname/value for session to the datastore and memcache.

        Returns the key from the datastore put or u"dirty"
        """
        # update or insert in datastore
        try:
            return_val = db.put(self)
            # NOTE(review): dirty is cleared after the successful put, so the
            # persisted entity may still carry a stale dirty flag -- confirm.
            self.dirty = False
        except:
            return_val = u"dirty"
            self.dirty = True

        # update or insert in memcache
        mc_items = memcache.get(u"_AppEngineUtilities_SessionData_%s" % \
            (str(self.session.key())))
        if mc_items:
            value_updated = False
            for item in mc_items:
                if value_updated == True:
                    break
                if item.keyname == self.keyname:
                    # Replace the payload of the already-cached item.
                    item.content = self.content
                    item.model = self.model
                    memcache.set(u"_AppEngineUtilities_SessionData_%s" % \
                        (str(self.session.key())), mc_items)
                    value_updated = True
                    break
            if value_updated == False:
                # First write for this keyname: append to the cached list.
                mc_items.append(self)
                memcache.set(u"_AppEngineUtilities_SessionData_%s" % \
                    (str(self.session.key())), mc_items)
        return return_val

    def delete(self):
        """
        Delete an entity from the session in memcache and the datastore.

        Returns True
        """
        try:
            db.delete(self)
        except:
            # Datastore delete failed; flag so a later request can finish
            # the purge.
            self.deleted = True
        # NOTE(review): if the memcache entry is missing this returns None
        # and the loop below raises TypeError -- confirm unreachable.
        mc_items = memcache.get(u"_AppEngineUtilities_SessionData_%s" % \
            (str(self.session.key())))
        value_handled = False
        # NOTE(review): value_handled is never set to True, so this guard is
        # currently inert and the loop scans the whole list.
        for item in mc_items:
            if value_handled == True:
                break
            if item.keyname == self.keyname:
                if self.deleted == True:
                    item.deleted = True
                else:
                    mc_items.remove(item)
                memcache.set(u"_AppEngineUtilities_SessionData_%s" % \
                    (str(self.session.key())), mc_items)
        return True
class _DatastoreWriter(object):
    """Session writer backend that persists values server side, storing
    them in the datastore (pickled, or by model reference) and memcache."""

    def put(self, keyname, value, session):
        """
        Insert a keyname/value pair into the datastore for the session.

        Args:
            keyname: The keyname of the mapping.
            value: The value of the mapping.
            session: The Session object performing the write.

        Returns the model entity key
        """
        keyname = session._validate_key(keyname)
        if value is None:
            raise ValueError(u"You must pass a value to put.")

        # datastore write trumps cookie. If there is a cookie value
        # with this keyname, delete it so we don't have conflicting
        # entries.
        if session.cookie_vals.has_key(keyname):
            del(session.cookie_vals[keyname])
            session.output_cookie["%s_data" % (session.cookie_name)] = \
                simplejson.dumps(session.cookie_vals)
            session.output_cookie["%s_data" % (session.cookie_name)]["path"] = \
                session.cookie_path
            if session.cookie_domain:
                session.output_cookie["%s_data" % \
                    (session.cookie_name)]["domain"] = session.cookie_domain
            # Re-emit the Set-Cookie header on the (CGI-style) response.
            print session.output_cookie.output()

        sessdata = session._get(keyname=keyname)
        if sessdata is None:
            sessdata = _AppEngineUtilities_SessionData()
            # sessdata.session_key = session.session.key()
            sessdata.keyname = keyname
        try:
            # Duck test: if the value serializes as a datastore model, store
            # it by reference instead of pickling it.
            db.model_to_protobuf(value)
            if not value.is_saved():
                value.put()
            sessdata.model = value
        except:
            sessdata.content = pickle.dumps(value)
            sessdata.model = None
        sessdata.session = session.session
        # Keep the per-request cache in sync with the stored value.
        session.cache[keyname] = value
        return sessdata.put()
class _CookieWriter(object):
    """Session writer backend that keeps all values in the browser cookie,
    json-encoded, with no server side storage."""

    def put(self, keyname, value, session):
        """
        Insert a keyname/value pair into the session cookie.

        Args:
            keyname: The keyname of the mapping.
            value: The value of the mapping.
            session: The Session object performing the write.

        Returns True
        """
        keyname = session._validate_key(keyname)
        if value is None:
            raise ValueError(u"You must pass a value to put.")

        # Use simplejson for cookies instead of pickle.
        session.cookie_vals[keyname] = value
        # update the requests session cache as well.
        session.cache[keyname] = value
        # simplejson will raise any error I'd raise about an invalid value
        # so let it raise exceptions
        session.output_cookie["%s_data" % (session.cookie_name)] = \
            simplejson.dumps(session.cookie_vals)
        session.output_cookie["%s_data" % (session.cookie_name)]["path"] = \
            session.cookie_path
        if session.cookie_domain:
            session.output_cookie["%s_data" % \
                (session.cookie_name)]["domain"] = session.cookie_domain
        # Emit the refreshed Set-Cookie header on the (CGI-style) response.
        print session.output_cookie.output()
        return True
class Session(object):
"""
Sessions are used to maintain user presence between requests.
Sessions can either be stored server side in the datastore/memcache, or
be kept entirely as cookies. This is set either with the settings file
or on initialization, using the writer argument/setting field. Valid
values are "datastore" or "cookie".
Session can be used as a standard dictionary object.
session = appengine_utilities.sessions.Session()
session["keyname"] = "value" # sets keyname to value
print session["keyname"] # will print value
Datastore Writer:
The datastore writer was written with the focus being on security,
reliability, and performance. In that order.
It is based off of a session token system. All data is stored
server side in the datastore and memcache. A token is given to
the browser, and stored server side. Optionally (and on by default),
user agent and ip checking is enabled. Tokens have a configurable
time to live (TTL), which defaults to 5 seconds. The current token,
plus the previous 2, are valid for any request. This is done in order
to manage ajax enabled sites which may have more than on request
happening at a time. This means any token is valid for 15 seconds.
A request with a token who's TTL has passed will have a new token
generated.
In order to take advantage of the token system for an authentication
system, you will want to tie sessions to accounts, and make sure
only one session is valid for an account. You can do this by setting
a db.ReferenceProperty(_AppEngineUtilities_Session) attribute on
your user Model, and use the get_ds_entity() method on a valid
session to populate it on login.
Note that even with this complex system, sessions can still be hijacked
and it will take the user logging in to retrieve the account. In the
future an ssl only cookie option may be implemented for the datastore
writer, which would further protect the session token from being
sniffed, however it would be restricted to using cookies on the
.appspot.com domain, and ssl requests are a finite resource. This is
why such a thing is not currently implemented.
Session data objects are stored in the datastore pickled, so any
python object is valid for storage.
Cookie Writer:
Sessions using the cookie writer are stored entirely in the browser
and no interaction with the datastore is required. This creates
a drastic improvement in performance, but provides no security for
session hijack. This is useful for requests where identity is not
important, but you wish to keep state between requests.
Information is stored in a json format, as pickled data from the
server is unreliable.
Note: There is no checksum validation of session data on this method,
it's streamlined for pure performance. If you need to make sure data
is not tampered with, use the datastore writer which stores the data
server side.
django-middleware:
Included with the GAEUtilties project is a
django-middleware.middleware.SessionMiddleware which can be included in
your settings file. This uses the cookie writer for anonymous requests,
and you can switch to the datastore writer on user login. This will
    require an extra step in your login process of calling
    request.session.save() once you have validated the user information. This
will convert the cookie writer based session to a datastore writer.
"""
# cookie name declaration for class methods
COOKIE_NAME = settings.session["COOKIE_NAME"]
    def __init__(self, cookie_path=settings.session["DEFAULT_COOKIE_PATH"],
            cookie_domain=settings.session["DEFAULT_COOKIE_DOMAIN"],
            cookie_name=settings.session["COOKIE_NAME"],
            session_expire_time=settings.session["SESSION_EXPIRE_TIME"],
            clean_check_percent=settings.session["CLEAN_CHECK_PERCENT"],
            integrate_flash=settings.session["INTEGRATE_FLASH"],
            check_ip=settings.session["CHECK_IP"],
            check_user_agent=settings.session["CHECK_USER_AGENT"],
            set_cookie_expires=settings.session["SET_COOKIE_EXPIRES"],
            session_token_ttl=settings.session["SESSION_TOKEN_TTL"],
            last_activity_update=settings.session["UPDATE_LAST_ACTIVITY"],
            writer=settings.session["WRITER"]):
        """
        Initializer

        Args:
            cookie_path: The path setting for the cookie.
            cookie_domain: The domain setting for the cookie. (Set to False
                to not use)
            cookie_name: The name for the session cookie stored in the
                browser.
            session_expire_time: The amount of time between requests before
                the session expires.
            clean_check_percent: The percentage of requests that will fire
                off a cleaning routine deleting stale session data.
            integrate_flash: If appengine-utilities flash utility should be
                integrated into the session object.
            check_ip: If browser IP should be used for session validation.
            check_user_agent: If the browser user agent should be used for
                session validation.
            set_cookie_expires: True adds an expires field to the cookie so
                it saves even if the browser is closed.
            session_token_ttl: Number of seconds a session token is valid
                for before it should be regenerated.
            last_activity_update: Minimum number of seconds between updates
                of the session's last_activity timestamp.
            writer: "datastore" for server side sessions, anything else
                selects the cookie-only writer.
        """
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_name = cookie_name
        self.session_expire_time = session_expire_time
        self.integrate_flash = integrate_flash
        self.check_user_agent = check_user_agent
        self.check_ip = check_ip
        self.set_cookie_expires = set_cookie_expires
        self.session_token_ttl = session_token_ttl
        self.last_activity_update = last_activity_update
        self.writer = writer

        # make sure the page is not cached in the browser
        # (no_cache_headers is defined later in this class)
        print self.no_cache_headers()
        # Check the cookie and, if necessary, create a new one.
        self.cache = {}
        string_cookie = os.environ.get(u"HTTP_COOKIE", u"")
        self.cookie = Cookie.SimpleCookie()
        self.output_cookie = Cookie.SimpleCookie()
        if string_cookie == "":
            self.cookie_vals = {}
        else:
            self.cookie.load(string_cookie)
            try:
                self.cookie_vals = \
                    simplejson.loads(self.cookie["%s_data" % (self.cookie_name)].value)
                # sync self.cache and self.cookie_vals which will make those
                # values available for all gets immediately.
                for k in self.cookie_vals:
                    self.cache[k] = self.cookie_vals[k]
                # sync the input cookie with the output cookie
                self.output_cookie["%s_data" % (self.cookie_name)] = \
                    simplejson.dumps(self.cookie_vals)
            except Exception, e:
                # Malformed/absent data cookie: start with no cookie values.
                self.cookie_vals = {}

        if writer == "cookie":
            # Cookie-only sessions need no token or datastore entity.
            pass
        else:
            self.sid = None
            new_session = True
            # do_put is used to determine if a datastore write should
            # happen on this request.
            do_put = False

            # check for existing cookie
            if self.cookie.get(cookie_name):
                self.sid = self.cookie[cookie_name].value
                # The following will return None if the sid has expired.
                self.session = _AppEngineUtilities_Session.get_session(self)
                if self.session:
                    new_session = False

            if new_session:
                # start a new session
                self.session = _AppEngineUtilities_Session()
                self.session.put()
                self.sid = self.new_sid()
                if u"HTTP_USER_AGENT" in os.environ:
                    self.session.ua = os.environ[u"HTTP_USER_AGENT"]
                else:
                    self.session.ua = None
                if u"REMOTE_ADDR" in os.environ:
                    self.session.ip = os.environ["REMOTE_ADDR"]
                else:
                    self.session.ip = None
                self.session.sid = [self.sid]
                # do put() here to get the session key
                self.session.put()
            else:
                # check the age of the token to determine if a new one
                # is required
                duration = datetime.timedelta(seconds=self.session_token_ttl)
                session_age_limit = datetime.datetime.now() - duration
                if self.session.last_activity < session_age_limit:
                    # Token too old: rotate it, keeping at most the previous
                    # two tokens valid for concurrent (ajax) requests.
                    self.sid = self.new_sid()
                    if len(self.session.sid) > 2:
                        self.session.sid.remove(self.session.sid[0])
                    self.session.sid.append(self.sid)
                    do_put = True
                else:
                    self.sid = self.session.sid[-1]
                    # check if last_activity needs updated
                    ula = datetime.timedelta(seconds=self.last_activity_update)
                    if datetime.datetime.now() > self.session.last_activity + \
                        ula:
                        do_put = True

            # Hand the (possibly new) token back to the browser.
            self.output_cookie[cookie_name] = self.sid
            self.output_cookie[cookie_name]["path"] = self.cookie_path
            if self.cookie_domain:
                self.output_cookie[cookie_name]["domain"] = self.cookie_domain
            if self.set_cookie_expires:
                self.output_cookie[cookie_name]["expires"] = \
                    self.session_expire_time

            self.cache[u"sid"] = self.sid

            if do_put:
                # NOTE(review): this condition is always True ("!= None or
                # != u''") -- presumably "and" was intended; confirm.
                if self.sid != None or self.sid != u"":
                    self.session.put()

        # Only set the "_data" cookie if there is actual data
        if self.output_cookie.has_key("%s_data" % (cookie_name)):
            # Set the path of the "_data" cookie
            self.output_cookie["%s_data" % (cookie_name)]["path"] = cookie_path
            if self.set_cookie_expires:
                self.output_cookie["%s_data" % (cookie_name)]["expires"] = \
                    self.session_expire_time
        # Emit the Set-Cookie headers on the (CGI-style) response.
        print self.output_cookie.output()

        # fire up a Flash object if integration is enabled
        if self.integrate_flash:
            import flash
            self.flash = flash.Flash(cookie=self.cookie)

        # randomly delete old stale sessions in the datastore (see
        # CLEAN_CHECK_PERCENT variable)
        if random.randint(1, 100) < clean_check_percent:
            self._clean_old_sessions()
def new_sid(self):
"""
Create a new session id.
Returns session id as a unicode string.
"""
sid = u"%s_%s" % (str(self.session.key()),
hashlib.md5(repr(time.time()) + \
unicode(random.random())).hexdigest()
)
#sid = unicode(self.session.session_key) + "_" + \
# hashlib.md5(repr(time.time()) + \
# unicode(random.random())).hexdigest()
return sid
def _get(self, keyname=None):
"""
private method
Return all of the SessionData object data from the datastore only,
unless keyname is specified, in which case only that instance of
SessionData is returned.
Important: This does not interact with memcache and pulls directly
from the datastore. This also does not get items from the cookie
store.
Args:
keyname: The keyname of the value you are trying to retrieve.
Returns a list of datastore entities.
"""
if hasattr(self, 'session'):
if keyname != None:
return self.session.get_item(keyname)
return self.session.get_items()
return None
def _validate_key(self, keyname):
"""
private method
Validate the keyname, making sure it is set and not a reserved name.
Returns the validated keyname.
"""
if keyname is None:
raise ValueError(
u"You must pass a keyname for the session data content."
)
elif keyname in (u"sid", u"flash"):
raise ValueError(u"%s is a reserved keyname." % keyname)
if type(keyname) != type([str, unicode]):
return unicode(keyname)
return keyname
def _put(self, keyname, value):
"""
Insert a keyname/value pair into the datastore for the session.
Args:
keyname: The keyname of the mapping.
value: The value of the mapping.
Returns the value from the writer put operation, varies based on writer.
"""
if self.writer == "datastore":
writer = _DatastoreWriter()
else:
writer = _CookieWriter()
return writer.put(keyname, value, self)
    def _delete_session(self):
        """
        private method

        Delete the session and all session data, firing the
        preSessionDelete/sessionDelete events around the removal and
        expiring the browser cookies.

        Returns True
        """
        # if the event class has been loaded, fire off the preSessionDelete event
        if u"AEU_Events" in sys.modules['__main__'].__dict__:
            sys.modules['__main__'].AEU_Events.fire_event(u"preSessionDelete")
        # Only datastore-writer sessions have a session entity to remove.
        if hasattr(self, u"session"):
            self.session.delete()
        self.cookie_vals = {}
        self.cache = {}
        self.output_cookie["%s_data" % (self.cookie_name)] = \
            simplejson.dumps(self.cookie_vals)
        self.output_cookie["%s_data" % (self.cookie_name)]["path"] = \
            self.cookie_path
        if self.cookie_domain:
            self.output_cookie["%s_data" % \
                (self.cookie_name)]["domain"] = self.cookie_domain
        # Delete the cookies (session & data) in the browser
        # (expires=0 makes the browser drop them immediately).
        self.output_cookie[self.cookie_name]["expires"] = 0
        self.output_cookie["%s_data" % (self.cookie_name)]["expires"] = 0

        # Emit the expiring Set-Cookie headers on the (CGI-style) response.
        print self.output_cookie.output()
        # if the event class has been loaded, fire off the sessionDelete event
        if u"AEU_Events" in sys.modules['__main__'].__dict__:
            sys.modules['__main__'].AEU_Events.fire_event(u"sessionDelete")
        return True
def delete(self):
"""
Delete the current session and start a new one.
This is useful for when you need to get rid of all data tied to a
current session, such as when you are logging out a user.
Returns True
"""
self._delete_session()
@classmethod
def delete_all_sessions(cls):
"""
Deletes all sessions and session data from the data store. This
does not delete the entities from memcache (yet). Depending on the
amount of sessions active in your datastore, this request could
timeout before completion and may have to be called multiple times.
NOTE: This can not delete cookie only sessions as it has no way to
access them. It will only delete datastore writer sessions.
Returns True on completion.
"""
all_sessions_deleted = False
while not all_sessions_deleted:
query = _AppEngineUtilities_Session.all()
results = query.fetch(75)
if len(results) is 0:
all_sessions_deleted = True
else:
for result in results:
result.delete()
return True
def _clean_old_sessions(self):
"""
Delete 50 expired sessions from the datastore.
This is only called for CLEAN_CHECK_PERCENT percent of requests because
it could be rather intensive.
Returns True on completion
"""
self.clean_old_sessions(self.session_expire_time, 50)
@classmethod
def clean_old_sessions(cls, session_expire_time, count=50):
"""
Delete expired sessions from the datastore.
This is a class method which can be used by applications for
maintenance if they don't want to use the built in session
cleaning.
Args:
count: The amount of session to clean.
session_expire_time: The age in seconds to determine outdated
sessions.
Returns True on completion
"""
duration = datetime.timedelta(seconds=session_expire_time)
session_age = datetime.datetime.now() - duration
query = _AppEngineUtilities_Session.all()
query.filter(u"last_activity <", session_age)
results = query.fetch(50)
for result in results:
result.delete()
return True
    def cycle_key(self):
        """
        Changes the session id/token.
        Generates a fresh sid, appends it to the session's sid history,
        and drops the oldest entry once more than two are stored.
        Returns new token.
        """
        self.sid = self.new_sid()
        # Keep the sid history bounded: remove the oldest sid when the
        # list already holds more than two entries.
        if len(self.session.sid) > 2:
            self.session.sid.remove(self.session.sid[0])
        self.session.sid.append(self.sid)
        return self.sid
    def flush(self):
        """
        Delete's the current session, creating a new one.
        Returns True
        """
        # Tear down the old session, then re-run __init__ on this same
        # instance to start a fresh one.
        self._delete_session()
        self.__init__()
        return True
    def no_cache_headers(self):
        """
        Generates headers to avoid any page caching in the browser.
        Useful for highly dynamic sites.
        Returns a unicode string of headers.
        """
        # NOTE: .decode("utf-8") on the strftime() result is Python 2 only
        # (byte str -> unicode); under Python 3 this would raise AttributeError.
        return u"".join([u"Expires: Tue, 03 Jul 2001 06:00:00 GMT",
            strftime("Last-Modified: %a, %d %b %y %H:%M:%S %Z").decode("utf-8"),
            u"Cache-Control: no-store, no-cache, must-revalidate, max-age=0",
            u"Cache-Control: post-check=0, pre-check=0",
            u"Pragma: no-cache",
        ])
    def clear(self):
        """
        Removes session data items, doesn't delete the session. It does work
        with cookie sessions, and must be called before any output is sent
        to the browser, as it set cookies.
        Returns True
        """
        sessiondata = self._get()
        # delete from datastore
        if sessiondata is not None:
            for sd in sessiondata:
                sd.delete()
        # delete from memcache
        self.cache = {}
        self.cookie_vals = {}
        # Re-serialize the (now empty) cookie payload back to the browser.
        self.output_cookie["%s_data" % (self.cookie_name)] = \
            simplejson.dumps(self.cookie_vals)
        self.output_cookie["%s_data" % (self.cookie_name)]["path"] = \
            self.cookie_path
        if self.cookie_domain:
            self.output_cookie["%s_data" % \
                (self.cookie_name)]["domain"] = self.cookie_domain
        # Delete the "_data" cookie in the browser
        self.output_cookie["%s_data" % (self.cookie_name)]["expires"] = 0
        # CGI-style header emission (Python 2 print statement).
        print self.output_cookie.output()
        return True
def has_key(self, keyname):
"""
Equivalent to k in a, use that form in new code
Args:
keyname: keyname to check
Returns True/False
"""
return self.__contains__(keyname)
def items(self):
"""
Creates a copy of just the data items.
Returns dictionary of session data objects.
"""
op = {}
for k in self:
op[k] = self[k]
return op
def keys(self):
"""
Returns a list of keys.
"""
l = []
for k in self:
l.append(k)
return l
def update(self, *dicts):
"""
Updates with key/value pairs from b, overwriting existing keys
Returns None
"""
for dict in dicts:
for k in dict:
self._put(k, dict[k])
return None
def values(self):
"""
Returns a list object of just values in the session.
"""
v = []
for k in self:
v.append(self[k])
return v
def get(self, keyname, default = None):
"""
Returns either the value for the keyname or a default value
passed.
Args:
keyname: keyname to look up
default: (optional) value to return on keyname miss
Returns value of keyname, or default, or None
"""
try:
return self.__getitem__(keyname)
except KeyError:
if default is not None:
return default
return None
def setdefault(self, keyname, default = None):
"""
Returns either the value for the keyname or a default value
passed. If keyname lookup is a miss, the keyname is set with
a value of default.
Args:
keyname: keyname to look up
default: (optional) value to return on keyname miss
Returns value of keyname, or default, or None
"""
try:
return self.__getitem__(keyname)
except KeyError:
if default is not None:
self.__setitem__(keyname, default)
return default
return None
    @classmethod
    def check_token(cls, cookie_name=COOKIE_NAME, delete_invalid=True):
        """
        Retrieves the token from a cookie and validates that it is
        a valid token for an existing cookie. Cookie validation is based
        on the token existing on a session that has not expired.
        This is useful for determining if datastore or cookie writer
        should be used in hybrid implementations.
        Args:
            cookie_name: Name of the cookie to check for a token.
            delete_invalid: If the token is not valid, delete the session
                            cookie, to avoid datastore queries on future
                            requests.
        Returns True/False
        """
        # Raw cookie header as provided by the CGI environment.
        string_cookie = os.environ.get(u"HTTP_COOKIE", u"")
        cookie = Cookie.SimpleCookie()
        cookie.load(string_cookie)
        if cookie.has_key(cookie_name):
            # Look for a stored session carrying this sid token.
            query = _AppEngineUtilities_Session.all()
            query.filter(u"sid", cookie[cookie_name].value)
            results = query.fetch(1)
            if len(results) > 0:
                return True
            else:
                if delete_invalid:
                    # Expire the stale cookie in the browser so future
                    # requests skip the datastore lookup (Python 2 print
                    # emits the Set-Cookie header CGI-style).
                    output_cookie = Cookie.SimpleCookie()
                    output_cookie[cookie_name] = cookie[cookie_name]
                    output_cookie[cookie_name][u"expires"] = 0
                    print output_cookie.output()
        return False
def get_ds_entity(self):
"""
Will return the session entity from the datastore if one
exists, otherwise will return None (as in the case of cookie writer
session.
"""
if hasattr(self, u"session"):
return self.session
return None
# Implement Python container methods
    def __getitem__(self, keyname):
        """
        Get item from session data.
        keyname: The keyname of the mapping.
        Lookup order: flash message, local cache, cookie values, then the
        datastore-backed session. Raises KeyError when not found anywhere.
        """
        # flash messages don't go in the datastore
        if self.integrate_flash and (keyname == u"flash"):
            return self.flash.msg
        if keyname in self.cache:
            return self.cache[keyname]
        if keyname in self.cookie_vals:
            return self.cookie_vals[keyname]
        if hasattr(self, u"session"):
            data = self._get(keyname)
            if data:
                # TODO: It's broke here, but I'm not sure why, it's
                # returning a model object, but I can't seem to modify
                # it.
                try:
                    if data.model != None:
                        self.cache[keyname] = data.model
                        return self.cache[keyname]
                    else:
                        self.cache[keyname] = pickle.loads(data.content)
                        return self.cache[keyname]
                # NOTE(review): the bare except below hides real errors, and
                # pickle.loads on stored content is unsafe if the datastore
                # can hold untrusted data -- consider narrowing the except.
                except:
                    self.delete_item(keyname)
            else:
                raise KeyError(unicode(keyname))
        raise KeyError(unicode(keyname))
def __setitem__(self, keyname, value):
"""
Set item in session data.
Args:
keyname: They keyname of the mapping.
value: The value of mapping.
"""
if self.integrate_flash and (keyname == u"flash"):
self.flash.msg = value
else:
keyname = self._validate_key(keyname)
self.cache[keyname] = value
return self._put(keyname, value)
def delete_item(self, keyname, throw_exception=False):
"""
Delete item from session data, ignoring exceptions if
necessary.
Args:
keyname: The keyname of the object to delete.
throw_exception: false if exceptions are to be ignored.
Returns:
Nothing.
"""
if throw_exception:
self.__delitem__(keyname)
return None
else:
try:
self.__delitem__(keyname)
except KeyError:
return None
    def __delitem__(self, keyname):
        """
        Delete item from session data.
        Args:
            keyname: The keyname of the object to delete.
        Raises KeyError if the key is in neither the datastore session
        nor the cookie values.
        """
        bad_key = False
        sessdata = self._get(keyname = keyname)
        if sessdata is None:
            bad_key = True
        else:
            sessdata.delete()
        if keyname in self.cookie_vals:
            del self.cookie_vals[keyname]
            bad_key = False
            # Re-serialize the remaining cookie values and push the updated
            # cookie to the browser (Python 2 print emits the header).
            self.output_cookie["%s_data" % (self.cookie_name)] = \
                simplejson.dumps(self.cookie_vals)
            self.output_cookie["%s_data" % (self.cookie_name)]["path"] = \
                self.cookie_path
            if self.cookie_domain:
                self.output_cookie["%s_data" % \
                    (self.cookie_name)]["domain"] = self.cookie_domain
            print self.output_cookie.output()
        if bad_key:
            raise KeyError(unicode(keyname))
        # Drop any cached copy as well.
        if keyname in self.cache:
            del self.cache[keyname]
def __len__(self):
"""
Return size of session.
"""
# check memcache first
if hasattr(self, u"session"):
results = self._get()
if results is not None:
return len(results) + len(self.cookie_vals)
else:
return 0
return len(self.cookie_vals)
def __contains__(self, keyname):
"""
Check if an item is in the session data.
Args:
keyname: The keyname being searched.
"""
try:
self.__getitem__(keyname)
except KeyError:
return False
return True
def __iter__(self):
"""
Iterate over the keys in the session data.
"""
# try memcache first
if hasattr(self, u"session"):
vals = self._get()
if vals is not None:
for k in vals:
yield k.keyname
for k in self.cookie_vals:
yield k
def __str__(self):
"""
Return string representation.
"""
return u"{%s}" % ', '.join(['"%s" = "%s"' % (k, self[k]) for k in self])
| 35.957904 | 103 | 0.587194 |
b299ccbe5fa1a2125d3634defcead04bda5d40c6 | 152 | py | Python | sourcerers/freight_forwarding/doctype/foreign_shippers/test_foreign_shippers.py | p1c4ss0/sourcerers | e64db0186e91d6a1fb17b0f59ab1f2d5d9803e15 | [
"MIT"
] | null | null | null | sourcerers/freight_forwarding/doctype/foreign_shippers/test_foreign_shippers.py | p1c4ss0/sourcerers | e64db0186e91d6a1fb17b0f59ab1f2d5d9803e15 | [
"MIT"
] | null | null | null | sourcerers/freight_forwarding/doctype/foreign_shippers/test_foreign_shippers.py | p1c4ss0/sourcerers | e64db0186e91d6a1fb17b0f59ab1f2d5d9803e15 | [
"MIT"
] | null | null | null | # Copyright (c) 2021, Himanshu and Contributors
# See license.txt
# import frappe
import unittest
class TestForeignShippers(unittest.TestCase):
	"""Placeholder suite for the Foreign Shippers doctype (no tests yet)."""
| 16.888889 | 47 | 0.789474 |
b371f244d280ae69de48db16af851f3eafc01d52 | 13,953 | py | Python | SentiLARE/pretrain/data_label.py | authorAnonymousGit/WOCEL | 5edcf1c0cce07c8280ef3c10c9e01ad0d2643885 | [
"Apache-2.0"
] | null | null | null | SentiLARE/pretrain/data_label.py | authorAnonymousGit/WOCEL | 5edcf1c0cce07c8280ef3c10c9e01ad0d2643885 | [
"Apache-2.0"
] | null | null | null | SentiLARE/pretrain/data_label.py | authorAnonymousGit/WOCEL | 5edcf1c0cce07c8280ef3c10c9e01ad0d2643885 | [
"Apache-2.0"
] | null | null | null | #-*- coding: utf-8 -*-
import os
import json
import logging
import copy
from tqdm import tqdm, trange
import re
import random
import numpy as np
from multiprocessing import Pool
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
import time
import codecs
from utils import set_log
from tokenization_roberta import RobertaTokenizer
logger = logging.getLogger()
# Input files for pre-training data preparation; the exact layouts are
# consumed by Yelp.load_pos() / Yelp.load_sentiscore() below.
YELP_POS_DIR = './yelp_bert_format_word_and_pos.txt'
YELP_STAR_DIR = './yelp_stars.txt'
SENTIWORD_DIR = './yelp_sentiment_label.txt'
class Yelp(Dataset):
    """Yelp pre-training dataset for SentiLARE.

    Each item pairs token ids with POS-tag ids, word-level sentiment ids and a
    review-level polarity, applying whole-word masking on the fly.
    """
    def __init__(self, args, tokenizer, max_seq_length=512):
        """
        Args:
            args: run configuration; only `args.task_ratio` is read here.
            tokenizer: a RoBERTa-style tokenizer (subword tokenize + id maps).
            max_seq_length: fixed output length (sequences padded/truncated).
        """
        self.tokenizer = tokenizer
        self.pattern = r'([.?!])'
        self.max_seq_length = max_seq_length
        # Suffix tag -> POS id: verb, adjective, adverb, noun, unknown.
        self.pos_tag_ids_map = {'#v':0, '#a':1, '#r':2, '#n':3, '#u':4}
        self.load_sentiscore()
        self.load_pos()
        self.args = args
    # Load the word-level sentiment polarity
    def load_sentiscore(self, filename=SENTIWORD_DIR):
        """Read one sentiment-label line per sentence into memory."""
        self.sentiscores_total = []
        self.sentiwords_list = []
        with open(filename, 'r') as f:
            for line in f.readlines():
                self.sentiscores_total.append(line.strip())
        print('load sentiment scores complete')
    # Load the POS tags
    def load_pos(self, filename=YELP_POS_DIR, starfilename = YELP_STAR_DIR):
        """Read word#pos paragraphs (blank-line separated) and star ratings.

        After loading, each element of self.exs is a list of sentence strings
        with the (rating - 1.0) appended as the final element.
        """
        self.exs = []
        self.exs_senti = []
        senti_cnt = 0
        with open(filename, 'r') as f:
            para = []
            para_senti = []
            for line in f.readlines():
                if line.strip() != '':
                    # Keep the sentence together with its sentiment labels.
                    para.append(line.strip())
                    para_senti.append(self.sentiscores_total[senti_cnt])
                    senti_cnt += 1
                else:
                    # Blank line terminates a review paragraph.
                    self.exs.append(para)
                    self.exs_senti.append(para_senti)
                    para = []
                    para_senti = []
        count = 0
        print(len(self.exs))
        print(senti_cnt)
        with open(starfilename, 'r') as f:
            for i, line in enumerate(f.readlines()):
                # NOTE(review): eval() on file content is unsafe if the stars
                # file can be untrusted; float(line) would be safer -- confirm
                # the file format before changing.
                self.exs[i].append(eval(line) - 1.0)
                count += 1
        print('load yelp star complete')
        assert count == len(self.exs)
        print('load pos tags complete')
    def __len__(self):
        # Number of review paragraphs.
        return len(self.exs)
    def get_pos_tag_ids(self, text):
        # acquire the pos tag
        ids = []
        for c in text.split():
            # Last two characters of each token are its "#x" POS suffix.
            ids.append(self.pos_tag_ids_map[c[-2:]])
        return ids
    def get_senti_ids(self, text, senti):
        # acquire the word-level polarity
        return [int(label) for label in senti.split()]
    def get_clean_text(self, text):
        # remove the pos tag to get texts
        clean_text = []
        for c in text.split():
            clean_text.append(c[:-2])
        return clean_text
    def get_ids(self, text, senti):
        # get token id, pos id and polarity id
        clean_text = self.get_clean_text(text)
        p_ids = self.get_pos_tag_ids(text)
        s_ids = self.get_senti_ids(text, senti)
        assert len(clean_text) == len(p_ids)
        assert len(clean_text) == len(s_ids)
        return clean_text, p_ids, s_ids
    def vectorize(self, text, ids):
        # convert tokens to ids
        """Expand word-level entries into aligned subword-level id lists.

        `text` entries may be raw words, lists of subword ids (masked words)
        or lists of subword strings (replacement words); `ids` entries are
        broadcast across a word's subwords. Returns (token ids, label ids,
        token strings), all of equal length.
        """
        toks = []
        toks_ids = []
        toks_text = []
        for i, c in enumerate(text):
            if type(c) != list:
                # Plain word: subword-tokenize it.
                t = self.tokenizer.tokenize(c)
                toks_text.extend(t)
                t = self.tokenizer.convert_tokens_to_ids(t)
            elif type(c[0]) == int:
                # Already ids (masked word): render as mask tokens in text.
                t = c
                toks_text.extend([self.tokenizer.mask_token] * len(t))
            else:
                # List of subword strings (random replacement word).
                toks_text.extend(c)
                t = self.tokenizer.convert_tokens_to_ids(c)
            toks.extend(t)
            if type(ids[i]) == list:
                toks_ids.extend(self.tokenizer.convert_tokens_to_ids(ids[i]))
            elif type(ids[i]) == str:
                _t = self.tokenizer.tokenize(ids[i])
                toks_ids.extend(self.tokenizer.convert_tokens_to_ids(_t))
            else:
                # Scalar label: repeat it for every subword of this word.
                toks_ids.extend([ids[i]] * len(t))
        assert len(toks) == len(toks_ids)
        assert len(toks) == len(toks_text)
        return toks, toks_ids,toks_text
    def __getitem__(self, idx):
        """Build one masked, padded training example as a tuple of tensors."""
        sents = self.exs[idx]
        senti_ids = self.exs_senti[idx]
        # Last element of `sents` is the review rating (see load_pos).
        input_seg = " ".join(sents[:-1])
        senti_seg = " ".join(senti_ids)
        rating = sents[-1]
        # Roberta has no nsp objective
        nsp_label = -1
        input_triple = self.get_ids(input_seg, senti_seg)
        input_triple, input_labels = self.random_whole_word(input_triple)
        # Order: words, then (p_ids, s_ids, word_labels, p_labels, s_labels).
        _input_text, _input_ids_list = input_triple[0], input_triple[1:] + input_labels
        input_ids_list = []
        for input_ids in _input_ids_list:
            input_text, input_ids, input_text_backup = self.vectorize(_input_text, input_ids)
            input_ids_list.append(input_ids)
        # Reserve two positions for the <s> / </s> special tokens.
        _truncate_seq(input_text, self.max_seq_length - 2)
        for s in input_ids_list:
            _truncate_seq(s, self.max_seq_length - 2)
        _truncate_seq(input_text_backup, self.max_seq_length - 2)
        # Special-token positions get the "unknown"/"ordinary" ids (4, 2)
        # and the ignore label -1.
        p_ids = [4] + input_ids_list[0] + [4]
        s_ids = [2] + input_ids_list[1] + [2]
        lm_label = [-1] + input_ids_list[2] + [-1]
        p_label = [-1] + input_ids_list[3] + [-1]
        s_label = [-1] + input_ids_list[4] + [-1]
        prob = random.random()
        if prob < self.args.task_ratio:
            # Late Supervision
            polarity_ids = [5] * len(p_ids)
            polarity_label = [int(rating)] + [-1] * (len(p_ids) - 1)
        else:
            # Early Fusion
            polarity_ids = [5] + [int(rating)] * len(input_text) + [5]
            polarity_label = [-1] * len(p_ids)
        tokens = []
        segment_ids = []
        tokens.append(self.tokenizer.cls_token_id)
        segment_ids.append(0)
        for token in input_text:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append(self.tokenizer.sep_token_id)
        segment_ids.append(0)
        input_text_backup = [self.tokenizer.cls_token] + input_text_backup + [self.tokenizer.sep_token]
        input_ids = tokens
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < self.max_seq_length:
            input_ids.append(self.tokenizer.pad_token_id)
            input_text_backup.append(self.tokenizer.pad_token)
            input_mask.append(0)
            segment_ids.append(0)
            s_ids.append(2)
            p_ids.append(4)
            lm_label.append(-1)
            p_label.append(-1)
            s_label.append(-1)
            polarity_ids.append(5)
            polarity_label.append(-1)
        assert len(input_ids) == self.max_seq_length, len(input_ids)
        assert len(input_text_backup) == self.max_seq_length
        assert len(input_mask) == self.max_seq_length
        assert len(segment_ids) == self.max_seq_length
        assert len(s_ids) == self.max_seq_length
        assert len(p_ids) == self.max_seq_length
        assert len(lm_label) == self.max_seq_length, len(lm_label)
        assert len(p_label) == self.max_seq_length
        assert len(s_label) == self.max_seq_length
        assert len(polarity_ids) == self.max_seq_length
        assert len(polarity_label) == self.max_seq_length
        return_tensors = (torch.tensor(input_ids),
                          torch.tensor(input_mask),
                          torch.tensor(segment_ids),
                          torch.tensor(lm_label),
                          torch.tensor(nsp_label),
                          torch.tensor(p_ids),
                          torch.tensor(s_ids),
                          torch.tensor(polarity_ids),
                          torch.tensor(p_label),
                          torch.tensor(s_label),
                          torch.tensor(polarity_label))
        return return_tensors
    def random_whole_word(self, vecs):
        """Whole-word masking: ordinary words masked with p=0.15, sentiment
        words with p=0.3; within a masked word, 80% mask / 10% random word /
        10% keep (labels set for the prediction heads, -1 = ignore)."""
        words, p_ids, s_ids = vecs
        # words: the tokenized clean text
        # p_ids: pos_tag indices
        # s_ids: senti_word indices
        words_label = []
        p_label = []
        s_label = []
        new_words = []
        for i, word in enumerate(words):
            prob = random.random()
            if s_ids[i] == 2:
                # mask ordinary token with 15% probability
                if prob < 0.15:
                    ori_p = p_ids[i]
                    ori_s = s_ids[i]
                    prob /= 0.15
                    # 80% randomly change token to mask token
                    if prob < 0.8:
                        toks = self.tokenizer.tokenize(word)
                        if toks != []:
                            assert(len(toks) > 0)
                            # mask each subword in this word
                            new_words.append([self.tokenizer.mask_token_id] * len(toks))
                            words_label.append(toks)
                            p_ids[i] = 4
                            s_ids[i] = 2
                    # 10% (0.8~0.9) randomly change token to random token
                    elif prob < 0.9:
                        # random get one word from the example
                        rand_ex = self.exs[random.randint(0, len(self.exs)-1)][0].split()
                        to_replace = self.tokenizer.tokenize(rand_ex[random.randint(0, len(rand_ex)-1)][:-2])
                        toks = self.tokenizer.tokenize(word)
                        if to_replace != [] and toks != []:
                            # Pad/trim the replacement to the word's length.
                            if len(to_replace) >= len(toks):
                                to_replace = to_replace[:len(toks)]
                            else:
                                to_replace += (len(toks) - len(to_replace)) * [self.tokenizer.mask_token]
                            assert(to_replace != [])
                            new_words.append(to_replace)
                            words_label.append(toks)
                            p_ids[i] = 4
                            s_ids[i] = 2
                    # -> rest 10% randomly keep current token
                    else:
                        new_words.append(word)
                        words_label.append(word)
                        if ori_p != 4: # the pos_tag of ordinary word is not unknown
                            p_label.append(ori_p)
                            s_label.append(ori_s)
                        else:
                            p_label.append(-1)
                            s_label.append(-1)
                else:
                    # no masking token (will be ignored by loss function later)
                    new_words.append(word)
                    words_label.append(-1)
                    p_label.append(-1)
                    s_label.append(-1)
            else:
                # mask senti words with 30% probability
                if prob < 0.3:
                    ori_p = p_ids[i]
                    ori_s = s_ids[i]
                    prob /= 0.3
                    if prob < 0.8:
                        toks = self.tokenizer.tokenize(word)
                        if toks != []:
                            assert(len(toks) > 0), '{}, {}'.format(word, words)
                            new_words.append([self.tokenizer.mask_token_id] * len(toks))
                            words_label.append(toks)
                            p_ids[i] = 4
                            s_ids[i] = 2
                    elif prob < 0.9:
                        # replace with another word
                        rand_ex = self.exs[random.randint(0, len(self.exs)-1)][0].split()
                        to_replace = self.tokenizer.tokenize(rand_ex[random.randint(0, len(rand_ex)-1)][:-2])
                        toks = self.tokenizer.tokenize(word)
                        if to_replace != [] and toks != []:
                            if len(to_replace) >= len(toks):
                                to_replace = to_replace[:len(toks)]
                            else:
                                to_replace += (len(toks) - len(to_replace)) * [self.tokenizer.mask_token]
                            assert(to_replace != [])
                            new_words.append(to_replace)
                            words_label.append(toks)
                            p_ids[i] = 4
                            s_ids[i] = 2
                    else:
                        new_words.append(word)
                        words_label.append(word)
                        p_label.append(ori_p)
                        s_label.append(ori_s)
                else:
                    new_words.append(word)
                    words_label.append(-1)
                    p_label.append(-1)
                    s_label.append(-1)
        label_vecs = (words_label, p_label, s_label)
        vecs = (new_words, p_ids, s_ids)
        return vecs, label_vecs
def _truncate_seq(tokens, max_length):
while len(tokens) > max_length:
tokens.pop()
def main():
    """Smoke-test the Yelp dataset: build it and pull the first ten items."""
    set_log()
    roberta_tok = RobertaTokenizer.from_pretrained('./pretrain_model/roberta-base')
    dataset = Yelp(None, roberta_tok)
    for idx in range(10):
        dataset[idx]
if __name__ == '__main__':
    main()
| 38.544199 | 110 | 0.490002 |
3b012a8ae12b951de60c82b134c2d90a8fdd3302 | 5,624 | py | Python | player/games.py | paconte/tournaments | 525162bc9f0de245597f8aa33bf1f4088a087692 | [
"MIT"
] | null | null | null | player/games.py | paconte/tournaments | 525162bc9f0de245597f8aa33bf1f4088a087692 | [
"MIT"
] | null | null | null | player/games.py | paconte/tournaments | 525162bc9f0de245597f8aa33bf1f4088a087692 | [
"MIT"
] | null | null | null | import crypt
from time import strftime
from datetime import datetime
from player import csvdata
def hashing():
    # NOTE(review): crypt.crypt() requires at least the word to hash, so this
    # call always raises TypeError as written. No caller is visible in this
    # module; confirm what it was meant to hash before fixing.
    return crypt.crypt();
class DrawError(Exception):
    """Raised when a padel result is a tie but a winner was required."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class PadelResult:
    """Parsed set scores of a padel match.

    `scores` is a flat list of strings laid out as
    [local_set1, visitor_set1, local_set2, visitor_set2, ...].
    Invalid or empty entries now skip the whole set, so the local and
    visitor lists always stay the same length.
    """

    def __init__(self, scores):
        self._asset_init(scores)
        self.scores = scores
        self.local_score = []
        self.visitor_score = []
        # BUG FIX: scores were parsed element-by-element, so a single bad
        # entry (e.g. '') could leave local_score/visitor_score with
        # different lengths and crash get_*_score with IndexError. Parse
        # pairwise instead: a malformed entry drops the whole set.
        for i in range(0, len(scores), 2):
            try:
                local = int(scores[i])
                visitor = int(scores[i + 1])
                if not (0 <= local <= 10 and 0 <= visitor <= 10):
                    raise ValueError
                self.local_score.append(local)
                self.visitor_score.append(visitor)
            except ValueError:
                pass

    def get_local_score(self):
        """Number of sets won by the local pair."""
        return sum(1 for l, v in zip(self.local_score, self.visitor_score) if l > v)

    def get_visitor_score(self):
        """Number of sets won by the visitor pair."""
        return sum(1 for l, v in zip(self.local_score, self.visitor_score) if v > l)

    def is_draw(self):
        """Returns True if the result is a draw otherwise False."""
        # get_winner() with the default allow_draw=True never raises, so the
        # original try/except DrawError and trailing `return False` were
        # dead code.
        return self.get_winner() == 0

    def get_winner(self, allow_draw=True):
        """Returns 1 if the local team won, 2 if the visitor team won,
        0 for a draw, or raises DrawError when draws are not allowed."""
        local_sets = self.get_local_score()
        visitor_sets = self.get_visitor_score()
        if local_sets > visitor_sets:
            return 1
        if local_sets < visitor_sets:
            return 2
        if allow_draw:
            return 0
        raise DrawError("The game is a draw.")

    def _asset_init(self, scores):
        """Validate the raw scores list (even length, integer-like entries)."""
        assert len(scores) % 2 == 0, "Scores list argument length must be modulo 2."
        # NOTE(review): scores[:-1] skips validating the last entry -- looks
        # like an off-by-one, kept for backward compatibility; confirm intent.
        for score in scores[:-1]:
            if score and not isinstance(int(score), int):
                raise ValueError("Scores argument must be list of integers.")

    def __str__(self):
        return str(self.scores)
class PadelTeamNames:
    """Player names of a padel match: two pairs, given as
    [last, first] * 4 in the order local pair then visitor pair."""

    def __init__(self, csv):
        if len(csv) != 8:
            raise ValueError("Touch games has a local and a visitor names")
        for name in csv:
            if not isinstance(name, str):
                raise ValueError("Names must be a string.")
        # Unpack in file order: (last, first) for each of the four players.
        (self.local_first_last_name, self.local_first_first_name,
         self.local_second_last_name, self.local_second_first_name,
         self.visitor_first_last_name, self.visitor_first_first_name,
         self.visitor_second_last_name, self.visitor_second_first_name) = csv
        # Display names are "Last1 - Last2" for each pair.
        self.local = "%s - %s" % (self.local_first_last_name,
                                  self.local_second_last_name)
        self.visitor = "%s - %s" % (self.visitor_first_last_name,
                                    self.visitor_second_last_name)
class Game:
    """A single fixture (touch or padel) with teams, schedule and result."""
    def __init__(self):
        self.local = self.visitor = self.padel_team_names = None
        self.round = self.category = self.nteams = None
        self.tournament_name = self.division = self.result = None
        self.date_time = self.field = None
    def get_local_score(self):
        """Sets won by the local team (requires a result to be attached)."""
        return self.result.get_local_score()
    def get_visitor_score(self):
        """Sets won by the visitor team (requires a result to be attached)."""
        return self.result.get_visitor_score()
    def get_result(self):
        """Return the result object, or None if no result is attached yet."""
        return self.result
    def set_local(self, local):
        """Set the local team name."""
        self.local = local
    def get_winner(self):
        """1 if local won, 2 if visitor won, 0 for a draw."""
        return self.result.get_winner()
    def is_draw(self):
        """True when the attached result is a draw."""
        # BUG FIX: previously called self.is_draw() -- infinite recursion.
        # Delegate to the result object instead.
        return self.result.is_draw()
    def get_date(self):
        """Game date formatted as mm/dd/yy."""
        # BUG FIX: time.strftime() requires a struct_time; self.date_time is
        # a datetime, so the old call raised TypeError. Use the datetime's
        # own strftime.
        return self.date_time.strftime("%m/%d/%y")
    def get_time(self):
        """Game time formatted as HH:MM."""
        return self.date_time.strftime("%H:%M")
    def get_touch_csv_list(self):
        """Serialize the game as a csvdata-ordered row (touch format)."""
        result = list(range(13))
        result[csvdata.TG_TOURNAMENT_INDEX] = self.tournament_name
        result[csvdata.TG_DIVISION_INDEX] = self.division
        result[csvdata.TG_DATE_INDEX] = self.get_date()
        result[csvdata.TG_TIME_INDEX] = self.get_time()
        result[csvdata.TG_FIELD_INDEX] = self.field
        result[csvdata.TG_PHASE_INDEX] = self.round
        result[csvdata.TG_CATEGORY_INDEX] = self.category
        # BUG FIX: the attribute is `nteams` (see __init__ and
        # padel_from_csv_list); `n_teams` raised AttributeError.
        result[csvdata.TG_PHASE_TEAMS_INDEX] = self.nteams
        result[8] = 'xx'
        result[csvdata.TG_LOCAL_TEAM_INDEX] = self.local
        result[csvdata.TG_LOCAL_TEAM_SCORE_INDEX] = self.local_score
        result[csvdata.TG_VISITOR_TEAM_SCORE_INDEX] = self.visitor_score
        result[csvdata.TG_VISITOR_TEAM_INDEX] = self.visitor
        return result
    @classmethod
    def padel_from_csv_list(cls, csv):
        """Build a padel Game from one CSV row.

        Layout: 0 tournament, 1 ranking, 2 division, 3 date, 4 time, 5 field,
        6 phase, 7 category, 8 number of teams, 9-16 player names, 18+ scores.
        """
        game = cls()
        game.tournament_name = csv[0]
        game.ranking = csv[1]
        game.division = csv[2]
        game.date_time = datetime.strptime(csv[3], '%d/%m/%y')
        # csv[4] => time, csv[5] => field: not parsed yet.
        game.round = csv[6]
        game.category = csv[7]
        game.nteams = csv[8]
        game.padel_team_names = PadelTeamNames(csv[9:17])
        game.local = game.padel_team_names.local
        game.visitor = game.padel_team_names.visitor
        game.padel_result = PadelResult(csv[18:])
        # CONSISTENCY FIX: also expose the parsed result through self.result
        # so the generic accessors (get_winner, is_draw, ...) work for padel
        # games instead of failing on result=None.
        game.result = game.padel_result
        game.local_score = game.padel_result.get_local_score()
        game.visitor_score = game.padel_result.get_visitor_score()
        return game
| 32.321839 | 120 | 0.608997 |
29b563d100edba43dd84bd30dc61d5cbc648006a | 151 | py | Python | microsetta_admin/tests/base.py | dhakim87/microsetta-admin | 306efb273e8fc7efa99f6bfd28372da3f3cf5f2e | [
"BSD-3-Clause"
] | null | null | null | microsetta_admin/tests/base.py | dhakim87/microsetta-admin | 306efb273e8fc7efa99f6bfd28372da3f3cf5f2e | [
"BSD-3-Clause"
] | null | null | null | microsetta_admin/tests/base.py | dhakim87/microsetta-admin | 306efb273e8fc7efa99f6bfd28372da3f3cf5f2e | [
"BSD-3-Clause"
] | null | null | null | import unittest
from microsetta_admin.server import app
# Shared base class for microsetta-admin tests: provides each test with a
# Flask test client for the admin `app`.
class TestBase(unittest.TestCase):
    def setUp(self):
        # Fresh test client per test keeps request state isolated.
        self.app = app.test_client()
| 18.875 | 39 | 0.735099 |
c1dabd4eadc3715dcf5941bea81bbc5e1381c579 | 6,748 | py | Python | yolox/evaluators/coco_evaluator.py | FateScript/YOLOX-1 | 435d5836f8b0fb1e2c738a1a9897522eafcd3ef5 | [
"Apache-2.0"
] | 63 | 2021-07-26T02:40:08.000Z | 2022-03-18T10:43:50.000Z | yolox/evaluators/coco_evaluator.py | FateScript/YOLOX-1 | 435d5836f8b0fb1e2c738a1a9897522eafcd3ef5 | [
"Apache-2.0"
] | 7 | 2021-08-10T06:00:45.000Z | 2022-01-10T03:49:20.000Z | yolox/evaluators/coco_evaluator.py | FateScript/YOLOX-1 | 435d5836f8b0fb1e2c738a1a9897522eafcd3ef5 | [
"Apache-2.0"
] | 10 | 2021-08-15T13:44:17.000Z | 2022-01-28T23:44:10.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import contextlib
import io
import json
import tempfile
import time
from loguru import logger
from tqdm import tqdm
import numpy as np
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from yolox.utils import gather_pyobj, postprocess, time_synchronized, xyxy2xywh
class COCOEvaluator:
    """
    COCO AP Evaluation class. All the data in the val2017 dataset are processed
    and evaluated by COCO API.
    """
    def __init__(
        self, dataloader, img_size, confthre, nmsthre, num_classes, testdev=False
    ):
        """
        Args:
            dataloader (Dataloader): evaluate dataloader.
            img_size (int): image size after preprocess. images are resized
                to squares whose shape is (img_size, img_size).
            confthre (float): confidence threshold ranging from 0 to 1, which
                is defined in the config file.
            nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1.
            num_classes (int): number of detection classes.
            testdev (bool): dump results for COCO test-dev submission instead
                of evaluating against local annotations.
        """
        self.dataloader = dataloader
        self.img_size = img_size
        self.confthre = confthre
        self.nmsthre = nmsthre
        self.num_classes = num_classes
        self.testdev = testdev
        # Rank 0 drives progress display and the final COCO evaluation.
        self.is_main_process = dist.get_rank() == 0
    def evaluate(self, model, distributed=False, half=False, test_size=None):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset
        and the results are evaluated by COCO API.
        NOTE: This function will change training mode to False, please save states if needed.
        Args:
            model : model to evaluate.
        Returns:
            ap50_95 (float) : COCO AP of IoU=50:95
            ap50 (float) : COCO AP of IoU=50
            summary (sr): summary info of evaluation.
        """
        model.eval()
        ids = []
        data_list = []
        # Only the main process shows a progress bar.
        progress_bar = tqdm if self.is_main_process else iter
        inference_time = 0
        nms_time = 0
        n_samples = len(self.dataloader) - 1
        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(progress_bar(self.dataloader)):
            # skip timing the last iter since its batch might not be full
            is_time_record = cur_iter < len(self.dataloader) - 1
            if is_time_record:
                start = time.time()
            imgs = mge.tensor(imgs.cpu().numpy())
            outputs = model(imgs)
            if is_time_record:
                infer_end = time_synchronized()
                inference_time += infer_end - start
            outputs = postprocess(
                outputs, self.num_classes, self.confthre, self.nmsthre
            )
            if is_time_record:
                nms_end = time_synchronized()
                nms_time += nms_end - infer_end
            data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids))
        statistics = mge.tensor([inference_time, nms_time, n_samples])
        if distributed:
            # Average the timing statistics across all workers.
            statistics = F.distributed.all_reduce_sum(statistics)
            statistics /= dist.get_world_size()
        # Gather per-rank detections onto rank 0 before evaluation.
        results = gather_pyobj(data_list, obj_name="data_list", target_rank_id=0)
        for x in results[1:]:
            data_list.extend(x)
        eval_results = self.evaluate_prediction(data_list, statistics)
        dist.group_barrier()
        return eval_results
    def convert_to_coco_format(self, outputs, info_imgs, ids):
        """Convert raw per-image detections into COCO-json result dicts
        (boxes rescaled to original image size and converted to xywh)."""
        data_list = []
        for (output, img_h, img_w, img_id) in zip(outputs, info_imgs[0], info_imgs[1], ids):
            if output is None:
                continue
            output = np.array(output)
            bboxes = output[:, 0:4]
            # preprocessing: resize
            scale = min(self.img_size[0] / float(img_h), self.img_size[1] / float(img_w))
            bboxes /= scale
            bboxes = xyxy2xywh(bboxes)
            cls = output[:, 6]
            # Final score = objectness * class confidence.
            scores = output[:, 4] * output[:, 5]
            for ind in range(bboxes.shape[0]):
                label = self.dataloader.dataset.class_ids[int(cls[ind])]
                pred_data = {
                    "image_id": int(img_id),
                    "category_id": label,
                    "bbox": bboxes[ind].tolist(),
                    "score": scores[ind].item(),
                    "segmentation": [],
                } # COCO json format
                data_list.append(pred_data)
        return data_list
    def evaluate_prediction(self, data_dict, statistics):
        """Run COCOeval on the gathered detections (rank 0 only).

        Returns (AP@[.5:.95], AP@.5, summary string)."""
        if not self.is_main_process:
            return 0, 0, None
        logger.info("Evaluate in main process...")
        annType = ["segm", "bbox", "keypoints"]
        inference_time = statistics[0].item()
        nms_time = statistics[1].item()
        n_samples = statistics[2].item()
        # Per-image averages in milliseconds.
        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)
        a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)
        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                    ["forward", "NMS", "inference"],
                    [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
                )
            ]
        )
        info = time_info + "\n"
        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataloader.dataset.coco
            # TODO: since pycocotools can't process dict in py36, write data to json file.
            if self.testdev:
                json.dump(data_dict, open("./yolox_testdev_2017.json", "w"))
                cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json")
            else:
                _, tmp = tempfile.mkstemp()
                json.dump(data_dict, open(tmp, "w"))
                cocoDt = cocoGt.loadRes(tmp)
            # Prefer the optimized COCOeval; fall back to the standard one.
            try:
                from yolox.layers import COCOeval_opt as COCOeval
            except ImportError:
                from .cocoeval_mr import COCOeval
                logger.warning("Use standard COCOeval.")
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            # Capture summarize()'s stdout so it can be returned/logged.
            redirect_string = io.StringIO()
            with contextlib.redirect_stdout(redirect_string):
                cocoEval.summarize()
            info += redirect_string.getvalue()
            logger.info("\n" + info)
            return cocoEval.stats[0], cocoEval.stats[1], info
        else:
            logger.info("No results!!!!")
            return 0, 0, info
| 35.515789 | 93 | 0.574837 |
c36ac5ac1b1baf1ab3a45ab0a4b299a2b52e4475 | 438 | py | Python | panda_gym/envs/__init__.py | lubiluk/panda-gym | 3f70c0306938ca0684268590c9c036070dbf7cb9 | [
"MIT"
] | null | null | null | panda_gym/envs/__init__.py | lubiluk/panda-gym | 3f70c0306938ca0684268590c9c036070dbf7cb9 | [
"MIT"
] | null | null | null | panda_gym/envs/__init__.py | lubiluk/panda-gym | 3f70c0306938ca0684268590c9c036070dbf7cb9 | [
"MIT"
] | null | null | null | from panda_gym.envs.panda_tasks import PandaReachEnv
from panda_gym.envs.panda_tasks import PandaPushEnv
from panda_gym.envs.panda_tasks import PandaSlideEnv
from panda_gym.envs.panda_tasks import PandaPickAndPlaceEnv
from panda_gym.envs.panda_tasks import PandaStackEnv
from panda_gym.envs.panda_tasks import PandaReachCamEnv
from panda_gym.envs.panda_tasks import PandaPushCamEnv
from panda_gym.envs.panda_tasks import FreePandaPushEnv
| 48.666667 | 59 | 0.890411 |
98d4e61f05dee921ccc9df3c27f69b7cb7090aff | 336 | py | Python | old files/problem0024.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | old files/problem0024.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | old files/problem0024.py | kmarcini/Project-Euler-Python | d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3 | [
"BSD-3-Clause"
] | null | null | null | ###########################
# Project Euler Problem 24
# Lexicographic permutations
#
# Code by Kevin Marciniak
###########################
# easy mode
import itertools
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
permutations = list(itertools.permutations(digits))
print(''.join(permutations[999999]))
# hard mode??
| 19.764706 | 59 | 0.544643 |
a88c13d3525bed9a664b0b515b4fe6635136912f | 953 | py | Python | sharepoint/utils.py | clerik/python-sharepoint | f1a1e19189d78115fcfc25850d27319e34d7e699 | [
"BSD-3-Clause"
] | 1 | 2019-06-10T23:58:09.000Z | 2019-06-10T23:58:09.000Z | sharepoint/utils.py | SG-Gank/python-sharepoint | f1a1e19189d78115fcfc25850d27319e34d7e699 | [
"BSD-3-Clause"
] | 1 | 2020-02-11T11:46:32.000Z | 2020-02-11T11:46:32.000Z | sharepoint/utils.py | SG-Gank/python-sharepoint | f1a1e19189d78115fcfc25850d27319e34d7e699 | [
"BSD-3-Clause"
] | 1 | 2021-04-22T21:58:17.000Z | 2021-04-22T21:58:17.000Z | import re
from six import unichr
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def decode_entities(text):
    """Replace HTML/XML character references and entities in *text*.

    Handles decimal (``&#65;``) and hexadecimal (``&#x41;``) character
    references as well as named entities (``&amp;``).  Anything that cannot
    be resolved is left untouched.

    @param text: the HTML (or XML) source text.
    @return: the text with every resolvable reference decoded.
    """
    def fixup(m):
        ref = m.group(0)
        if ref[:2] == "&#":
            # Numeric character reference, decimal or hexadecimal.
            try:
                if ref[:3] == "&#x":
                    return unichr(int(ref[3:-1], 16))
                else:
                    return unichr(int(ref[2:-1]))
            except ValueError:
                pass
        else:
            # Named entity, e.g. "&amp;" -> "&".
            try:
                ref = unichr(name2codepoint[ref[1:-1]])
            except KeyError:
                pass
        return ref  # leave unresolved references as-is
    # Raw string: "\w" inside a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, a SyntaxError in the future).
    return re.sub(r"&#?\w+;", fixup, text)
| 25.756757 | 75 | 0.535152 |
f8d29b771d90199864cbf8a52e1cffed17256666 | 536 | py | Python | Hyperparameters.py | TimoleonLatinopoulos/MortalKombatOpenAI | 59dc89d1f50dd74690859e5e1fa18701a5246382 | [
"MIT"
] | 1 | 2020-08-12T08:16:06.000Z | 2020-08-12T08:16:06.000Z | Hyperparameters.py | TimoleonLatinopoulos/MortalKombatOpenAI | 59dc89d1f50dd74690859e5e1fa18701a5246382 | [
"MIT"
] | null | null | null | Hyperparameters.py | TimoleonLatinopoulos/MortalKombatOpenAI | 59dc89d1f50dd74690859e5e1fa18701a5246382 | [
"MIT"
] | null | null | null | TRAIN = True
LOAD_MODEL = False
GAME = 'MortalKombat-SNES'
STATE = 'Level1.SubZeroVsJohnnyCage'
SCENARIO = 'scenario'
FRAME_HEIGHT = 63
FRAME_WIDTH = 113
STACKED_FRAMES = 4
FRAME_SKIP = 2
LEARNING_RATE = 0.00005
BATCH_SIZE = 32
GAMMA = 0.999
E_START = 1
E_END = 0.1
E_ANNEALING_FRAMES = 1000000
MEMORY_SIZE = 800000
MAX_EPISODE_LENGTH = 18000
UPDATE_FRAMES = 10000
EPOCH_EPISODES = 100
REPLAY_MEMORY_START = 20000
SAVE_STEP = 10
MAX_EPISODES = 20000
SAVER = 'output/'
GIF = 'gif/'
SUMMARIES = 'summaries/'
SAVED_FILE_NAME = '.ckpt'
| 17.290323 | 36 | 0.755597 |
48a567fde76dbda7492b3aa4ef83f80cb2bfea7c | 3,151 | py | Python | util/db/initialization.py | Ajuaman/bot | c5fcd0126617e52e36e475061847ac7d8430f2a5 | [
"BSD-3-Clause"
] | null | null | null | util/db/initialization.py | Ajuaman/bot | c5fcd0126617e52e36e475061847ac7d8430f2a5 | [
"BSD-3-Clause"
] | null | null | null | util/db/initialization.py | Ajuaman/bot | c5fcd0126617e52e36e475061847ac7d8430f2a5 | [
"BSD-3-Clause"
] | null | null | null | """
A simple database migration manager. A module can request to initialize something in the database with the @init_for
and @init decorators.
"""
import static_config
import hashlib
import plugins
import util.db as db
from typing import Callable
# Bootstrap: make sure the "meta" schema and the hash-tracking table exist
# before any module registers its initialization SQL via init_for/init.
with db.connection() as conn:
    with conn.cursor() as cur:
        cur.execute("""
            CREATE SCHEMA IF NOT EXISTS meta
            """)
        cur.execute("""
            CREATE TABLE IF NOT EXISTS meta.schema_hashes
            ( name TEXT NOT NULL PRIMARY KEY
            , sha1 BYTEA NOT NULL )
            """)
def init_for(name: str) -> Callable[[Callable[[], str]], Callable[[], str]]:
    """
    Decorate a function that returns a piece of SQL to initialize something in the database.
        @init_for("module name")
        def init():
            return "CREATE TABLE foo (bar TEXT)"
    The returned SQL will be hashed. If a hash for this module doesn't yet exist the SQL code will be executed and the
    hash saved. If the known hash for the module matches the computed one, nothing happens. Otherwise we look for a
    migration file in a configurable directory and run it, updating the known hash.
    """
    def init(fun: Callable[[], str]) -> Callable[[], str]:
        conn = db.connection()
        with conn.cursor() as cur:
            # Fetch the hash recorded the last time this module's SQL ran.
            cur.execute("""
                SELECT sha1 FROM meta.schema_hashes WHERE name = %(name)s
                """, {"name": name})
            old_row = cur.fetchone()
            sql = fun()
            # NOTE: "utf" is a registered codec alias for UTF-8.
            sha = hashlib.sha1(sql.encode("utf")).digest()
            if old_row is not None:
                old_sha = bytes(old_row[0])
                if old_sha != sha:
                    # Schema changed: look for a migration script named
                    # <module>-<old sha1 hex>-<new sha1 hex>.sql in each of
                    # the colon-separated migration directories, in order.
                    for dirname in static_config.DB["migrations"].split(":"):
                        filename = "{}/{}-{}-{}.sql".format(dirname, name, old_sha.hex(), sha.hex())
                        try:
                            fp = open(filename, "r", encoding="utf")
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # No directory contained the migration: fail loudly
                        # rather than run mismatched schema SQL.
                        raise FileNotFoundError(
                            "Could not find {}-{}-{}.sql in {}".format(name, old_sha.hex(), sha.hex(),
                                static_config.DB["migrations"]))
                    # Run the migration and record the new hash atomically.
                    with fp:
                        cur.execute(fp.read())
                    cur.execute("""
                        UPDATE meta.schema_hashes
                        SET sha1 = %(sha)s
                        WHERE name = %(name)s
                        """, {"name": name, "sha": sha})
                    conn.commit()
            else:
                # First time we see this module: execute its SQL as-is and
                # remember the hash so future runs can detect changes.
                cur.execute(sql)
                cur.execute("""
                    INSERT INTO meta.schema_hashes (name, sha1)
                    VALUES (%(name)s, %(sha)s)
                    """, {"name": name, "sha": sha})
                conn.commit()
        return fun
    return init
def init(fun: Callable[[], str]) -> Callable[[], str]:
    """Request database initialization on behalf of the current plugin.

    Equivalent to ``@init_for(<name of the calling plugin>)``.
    """
    plugin_name = plugins.current_plugin()
    decorator = init_for(plugin_name)
    return decorator(fun)
| 38.901235 | 118 | 0.509997 |
b9e9649f66d45eeda0365e94cc0827d7d28a9a9c | 3,125 | py | Python | alex/applications/wsrouter/wsrouter.py | cifkao/alex | 9573ef5d24919b2b368b35f4dd02aa98f35f0f59 | [
"Apache-2.0"
] | 184 | 2015-02-11T04:14:41.000Z | 2022-03-24T21:43:58.000Z | alex/applications/wsrouter/wsrouter.py | cifkao/alex | 9573ef5d24919b2b368b35f4dd02aa98f35f0f59 | [
"Apache-2.0"
] | 69 | 2015-01-11T04:57:22.000Z | 2019-04-24T10:25:56.000Z | alex/applications/wsrouter/wsrouter.py | cifkao/alex | 9573ef5d24919b2b368b35f4dd02aa98f35f0f59 | [
"Apache-2.0"
] | 61 | 2015-03-04T10:52:13.000Z | 2022-03-04T12:14:06.000Z | import sys
import time
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from twisted.python import log
from twisted.internet import reactor
from alex.components.hub.wsio_messages_pb2 import WSRouterRequestProto, WSRouterRoutingResponseProto, PingProto
class WSRouterServerFactory(WebSocketServerFactory):
def __init__(self, addr, port, entry_timeout):
super(WSRouterServerFactory, self).__init__(url="ws://%s:%d" % (addr, port), debug=False)
self.protocol = WSRouterServerProtocol
self.instances = {}
self.timestamps = {}
self.entry_timeout = entry_timeout
def route_request(self):
print 'ROUTING', 'current instances:', self.instances
for addr in self.instances.keys():
key, status = self.instances[addr]
entry_is_time_outed = time.time() - self.timestamps[addr] > self.entry_timeout
if status == PingProto.AVAILABLE and not entry_is_time_outed:
del self.instances[addr]
return addr, key
elif entry_is_time_outed:
del self.instances[addr]
del self.timestamps[addr]
return "", "" # In case no Alex is available.
def ping(self, addr, key, status):
self.instances[addr] = (key, status)
self.timestamps[addr] = time.time()
print ' > Got ping from', addr, key, 'with status', status
class WSRouterServerProtocol(WebSocketServerProtocol):
    """Handles messages sent by Alex instances and the clients."""
    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))
    def onOpen(self):
        print("WebSocket connection open.")
    def onMessage(self, payload, isBinary):
        # All protobuf traffic is binary; text frames are silently ignored.
        if isBinary:
            self._process_message(payload)
    def _process_message(self, payload):
        # Decode a WSRouterRequestProto and dispatch on its message type.
        msg = WSRouterRequestProto()
        msg.ParseFromString(payload)
        if msg.type == WSRouterRequestProto.PING:
            # Ping was received, update the list of available Alex instances.
            self.factory.ping(msg.ping.addr, msg.ping.key, msg.ping.status)
        elif msg.type == WSRouterRequestProto.ROUTE_REQUEST:
            # The message was a routing request. Find an available Alex and send back its address.
            # Note: route_request returns ("", "") when nothing is available;
            # the empty response is still sent to the client.
            addr, key = self.factory.route_request()
            resp = WSRouterRoutingResponseProto()
            resp.addr = addr
            resp.key = key
            self.sendMessage(resp.SerializeToString(), True)
    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
class WSRouter(object):
    """Takes care of providing clients with the address of an available Alex instance."""
    def __init__(self, addr, port, entry_timeout):
        # addr/port: where the router listens; entry_timeout: seconds after
        # which an instance that stopped pinging is considered gone.
        self.addr = addr
        self.port = port
        self.entry_timeout = entry_timeout
    def run(self):
        # Start the WebSocket server; reactor.run() blocks until shutdown.
        factory = WSRouterServerFactory(self.addr, self.port, self.entry_timeout)
        log.startLogging(sys.stdout)
        reactor.listenTCP(self.port, factory)
        reactor.run()
| 35.511364 | 111 | 0.6608 |
2576d51241c4e0cf181b6fb361f12e5c1468ab59 | 2,179 | py | Python | setup.py | williamirick/hatch | 704cdcd1a0cd3a621235ac9f5b2b90e7524e3cd3 | [
"Apache-2.0",
"MIT"
] | 2,549 | 2017-09-05T06:44:17.000Z | 2022-03-31T23:21:02.000Z | setup.py | williamirick/hatch | 704cdcd1a0cd3a621235ac9f5b2b90e7524e3cd3 | [
"Apache-2.0",
"MIT"
] | 97 | 2017-06-07T23:14:12.000Z | 2022-03-30T14:22:34.000Z | setup.py | williamirick/hatch | 704cdcd1a0cd3a621235ac9f5b2b90e7524e3cd3 | [
"Apache-2.0",
"MIT"
] | 140 | 2017-06-10T14:16:47.000Z | 2022-03-23T09:25:01.000Z | from setuptools import find_packages, setup
with open('hatch/__init__.py', 'r') as f:
for line in f:
if line.startswith('__version__'):
version = line.strip().split('=')[1].strip(' \'"')
break
else:
version = '0.0.1'
with open('README.rst', 'r', encoding='utf-8') as f:
readme = f.read()
setup(
name='hatch',
version=version,
description='A modern project, package, and virtual env manager',
long_description=readme,
author='Ofek Lev',
author_email='ofekmeister@gmail.com',
maintainer='Ofek Lev',
maintainer_email='ofekmeister@gmail.com',
url='https://github.com/ofek/hatch',
license='MIT or Apache-2.0',
keywords=(
'productivity',
'virtual env',
'packaging',
'package manager',
'cookiecutter',
'project template',
'bump version',
'versioning',
'cleanup',
'testing',
'cli',
),
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
install_requires=(
'appdirs',
'atomicwrites',
'click',
'colorama',
'coverage',
'pexpect',
'pip>=9.0.1',
'pytest',
'semver>=2.7.8',
'setuptools>=36.0.0',
'sortedcontainers>=1.5.7',
'toml>=0.9.3',
'twine>=1.9.1',
'userpath>=1.3.0',
'virtualenv',
'wheel>=0.27.0',
),
packages=find_packages(include=['hatch', 'hatch.*']),
entry_points={
'console_scripts': (
'hatch = hatch.cli:hatch',
),
},
)
| 26.573171 | 70 | 0.537861 |
c5076423f72fef9d2ab57c93e8a27e53b2da2601 | 564 | py | Python | test/simple_source/stmts/11_return_val.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | [
"MIT"
] | 1 | 2021-03-24T11:54:03.000Z | 2021-03-24T11:54:03.000Z | test/simple_source/stmts/11_return_val.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | [
"MIT"
] | null | null | null | test/simple_source/stmts/11_return_val.py | gauravssnl/python-uncompyle6 | 136f42a610c0701e0770c1c278efd1107b1c6ed1 | [
"MIT"
] | null | null | null | # 2.5.6 decimal.py
# Bug on 2.5 and 2.6 by incorrectly changing opcode to
# RETURN_VALUE to psuedo op: RETURN_END_IF
def _formatparam(param, value=None, quote=True):
    # Decompilation fixture (reduced from 2.5.6 email-style code): exercises
    # the bug where RETURN_VALUE was wrongly rewritten to the pseudo-op
    # RETURN_END_IF on Python 2.5/2.6.  The odd control flow is deliberate
    # and must not be "cleaned up"; note the function falls off the end
    # (returns None) when value is non-empty and `quote or param` is truthy.
    if value is not None and len(value) > 0:
        if isinstance(value, tuple):
            value = 'a'
        if quote or param:
            pass
        else:
            return '%s=%s' % (param, value)
    else:
        return param
# python 2.7 SimpleXMLRPCServer.py
# Bug was turning return into "pass"
def system_methodSignature(seflf, method_name):
    # Decompilation fixture from Python 2.7 SimpleXMLRPCServer.py: the bug
    # turned this `return` into `pass`.  The misspelt "seflf" parameter is
    # in the fixture source and is kept on purpose (only comments added).
    return 'signatures not supported'
| 29.684211 | 54 | 0.640071 |
58188cc6087df554f86907b2a7c7a801dd873be4 | 31,271 | py | Python | django_rq/tests/tests.py | 1024inc/django-rq | 74229b8ea7b874e8748786765b8a86b9dd1f732f | [
"MIT"
] | null | null | null | django_rq/tests/tests.py | 1024inc/django-rq | 74229b8ea7b874e8748786765b8a86b9dd1f732f | [
"MIT"
] | null | null | null | django_rq/tests/tests.py | 1024inc/django-rq | 74229b8ea7b874e8748786765b8a86b9dd1f732f | [
"MIT"
] | null | null | null | import time
import uuid
from unittest import skipIf
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase, override_settings
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.conf import settings
from mock import patch, PropertyMock
from rq import get_current_job, Queue
from rq.job import Job
from rq.registry import (DeferredJobRegistry, FinishedJobRegistry,
StartedJobRegistry)
from rq.worker import Worker
from django_rq.decorators import job
from django_rq.jobs import get_job_class
from django_rq.queues import (
get_connection, get_queue, get_queue_by_index, get_queues,
get_unique_connection_configs, DjangoRQ
)
from django_rq import thread_queue
from django_rq.templatetags.django_rq import to_localtime
from django_rq.workers import (get_worker, get_worker_class,
collect_workers_by_connection,
get_all_workers_by_configuration)
try:
from rq_scheduler import Scheduler
from ..queues import get_scheduler
RQ_SCHEDULER_INSTALLED = True
except ImportError:
RQ_SCHEDULER_INSTALLED = False
# Convenience alias for the queue configuration under test.
QUEUES = settings.RQ_QUEUES
def access_self():
    """Return the id of the RQ job currently being executed."""
    current = get_current_job()
    return current.id
def divide(a, b):
    """Return ``a`` divided by ``b`` (raises ZeroDivisionError when b == 0)."""
    quotient = a / b
    return quotient
def long_running_job(timeout=10):
    """Simulate a slow task: block for ``timeout`` seconds, then report done."""
    duration = timeout
    time.sleep(duration)
    return 'Done sleeping...'
def get_failed_queue_index(name='default'):
    """
    Returns the position of FailedQueue for the named queue in QUEUES_LIST
    """
    # The failed queue shares its connection with the named queue; match on
    # the connection kwargs to find it among the first 100 indexed queues.
    connection = get_connection(name)
    expected_kwargs = connection.connection_pool.connection_kwargs
    for index in range(100):
        candidate = get_queue_by_index(index)
        same_connection = (
            candidate.connection.connection_pool.connection_kwargs == expected_kwargs
        )
        if candidate.name == 'failed' and same_connection:
            return index
    return None
def get_queue_index(name='default'):
    """
    Returns the position of Queue for the named queue in QUEUES_LIST
    """
    # Scan the first 100 indexed queues for one whose name and connection
    # kwargs both match the named queue.
    connection = get_connection(name)
    expected_kwargs = connection.connection_pool.connection_kwargs
    for index in range(100):
        candidate = get_queue_by_index(index)
        same_connection = (
            candidate.connection.connection_pool.connection_kwargs == expected_kwargs
        )
        if candidate.name == name and same_connection:
            return index
    return None
class RqstatsTest(TestCase):
    """Smoke tests for the ``rqstats`` management command."""
    def test_get_connection_default(self):
        """
        Test that rqstats returns the right statistics
        """
        # Override testing RQ_QUEUES
        queues = [{
            'connection_config': {
                'DB': 0,
                'HOST': 'localhost',
                'PORT': 6379,
            },
            'name': 'default'
        }]
        with patch('django_rq.utils.QUEUES_LIST', new_callable=PropertyMock(return_value=queues)):
            # Only to make sure it doesn't crash
            call_command('rqstats')
            call_command('rqstats', '-j')
            call_command('rqstats', '-y')
@override_settings(RQ={'AUTOCOMMIT': True})
class QueuesTest(TestCase):
    """Tests for connection/queue resolution helpers (get_connection,
    get_queue, get_queues) against the RQ_QUEUES test settings."""
    def test_get_connection_default(self):
        """
        Test that get_connection returns the right connection for the
        `default` queue.
        """
        config = QUEUES['default']
        connection = get_connection()
        connection_kwargs = connection.connection_pool.connection_kwargs
        self.assertEqual(connection_kwargs['host'], config['HOST'])
        self.assertEqual(connection_kwargs['port'], config['PORT'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
    def test_get_connection_test(self):
        """
        Test that get_connection returns the right connection for the
        `test` queue.
        """
        config = QUEUES['test']
        connection = get_connection('test')
        connection_kwargs = connection.connection_pool.connection_kwargs
        self.assertEqual(connection_kwargs['host'], config['HOST'])
        self.assertEqual(connection_kwargs['port'], config['PORT'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
    def test_get_queue_default(self):
        """
        Test that get_queue uses the right parameters for the `default`
        connection.
        """
        config = QUEUES['default']
        queue = get_queue('default')
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, 'default')
        self.assertEqual(connection_kwargs['host'], config['HOST'])
        self.assertEqual(connection_kwargs['port'], config['PORT'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
    def test_get_queue_url(self):
        """
        Test that get_queue uses the right parameters for queues using URL for
        connection.
        """
        config = QUEUES['url']
        queue = get_queue('url')
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, 'url')
        self.assertEqual(connection_kwargs['host'], 'host')
        self.assertEqual(connection_kwargs['port'], 1234)
        self.assertEqual(connection_kwargs['db'], 4)
        self.assertEqual(connection_kwargs['password'], 'password')
    def test_get_queue_url_with_db(self):
        """
        Test that get_queue uses the right parameters for queues using URL for
        connection, where URL contains the db number (either as querystring
        or path segment).
        """
        config = QUEUES['url_with_db']
        queue = get_queue('url_with_db')
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, 'url_with_db')
        self.assertEqual(connection_kwargs['host'], 'host')
        self.assertEqual(connection_kwargs['port'], 1234)
        self.assertEqual(connection_kwargs['db'], 5)
        self.assertEqual(connection_kwargs['password'], 'password')
    def test_get_queue_url_with_db_default(self):
        """
        Test that get_queue uses the right parameters for queues using URL for
        connection, where no DB given and URL does not contain the db number
        (redis-py defaults to 0, should not break).
        """
        config = QUEUES['url_default_db']
        queue = get_queue('url_default_db')
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, 'url_default_db')
        self.assertEqual(connection_kwargs['host'], 'host')
        self.assertEqual(connection_kwargs['port'], 1234)
        self.assertEqual(connection_kwargs['db'], 0)
        self.assertEqual(connection_kwargs['password'], 'password')
    def test_get_queue_test(self):
        """
        Test that get_queue uses the right parameters for the `test`
        connection.
        """
        config = QUEUES['test']
        queue = get_queue('test')
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, 'test')
        self.assertEqual(connection_kwargs['host'], config['HOST'])
        self.assertEqual(connection_kwargs['port'], config['PORT'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
    def test_get_queues_same_connection(self):
        """
        Checks that getting queues with the same redis connection is ok.
        """
        self.assertEqual(get_queues('test', 'test2'), [get_queue('test'), get_queue('test2')])
    def test_get_queues_different_connections(self):
        """
        Checks that getting queues with different redis connections raise
        an exception.
        """
        self.assertRaises(ValueError, get_queues, 'default', 'test')
    def test_get_queues_different_classes(self):
        """
        Checks that getting queues with different classes (defined in configuration)
        raises an exception.
        """
        self.assertRaises(ValueError, get_queues, 'test', 'test1')
    def test_pass_queue_via_commandline_args(self):
        """
        Checks that passing queues via commandline arguments works
        """
        queue_names = ['django_rq_test', 'django_rq_test2']
        jobs = []
        for queue_name in queue_names:
            queue = get_queue(queue_name)
            jobs.append({
                'job': queue.enqueue(divide, 42, 1),
                'finished_job_registry': FinishedJobRegistry(queue.name, queue.connection),
            })
        call_command('rqworker', *queue_names, burst=True)
        for job in jobs:
            self.assertTrue(job['job'].is_finished)
            self.assertIn(job['job'].id, job['finished_job_registry'].get_job_ids())
    def test_get_unique_connection_configs(self):
        connection_params_1 = {
            'HOST': 'localhost',
            'PORT': 6379,
            'DB': 0,
        }
        connection_params_2 = {
            'HOST': 'localhost',
            'PORT': 6379,
            'DB': 1,
        }
        config = {
            'default': connection_params_1,
            'test': connection_params_2
        }
        unique_configs = get_unique_connection_configs(config)
        self.assertEqual(len(unique_configs), 2)
        self.assertIn(connection_params_1, unique_configs)
        self.assertIn(connection_params_2, unique_configs)
        # self.assertEqual(get_unique_connection_configs(config),
        #                  [connection_params_1, connection_params_2])
        config = {
            'default': connection_params_1,
            'test': connection_params_1
        }
        # Should return one connection config since it filters out duplicates
        self.assertEqual(get_unique_connection_configs(config),
                         [connection_params_1])
    def test_get_unique_connection_configs_with_different_timeout(self):
        connection_params_1 = {
            'HOST': 'localhost',
            'PORT': 6379,
            'DB': 0,
        }
        connection_params_2 = {
            'HOST': 'localhost',
            'PORT': 6379,
            'DB': 1,
        }
        queue_params_a = dict(connection_params_1)
        queue_params_b = dict(connection_params_2)
        queue_params_c = dict(connection_params_2)
        # DEFAULT_TIMEOUT is a queue setting, not a connection setting, so
        # b and c must still collapse to one unique connection config.
        queue_params_c["DEFAULT_TIMEOUT"] = 1
        config = {
            'default': queue_params_a,
            'test_b': queue_params_b,
            'test_c': queue_params_c,
        }
        unique_configs = get_unique_connection_configs(config)
        self.assertEqual(len(unique_configs), 2)
        self.assertIn(connection_params_1, unique_configs)
        self.assertIn(connection_params_2, unique_configs)
    def test_async(self):
        """
        Checks whether asynchronous settings work
        """
        # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
        # this test only parses on older interpreters; newer rq/django-rq
        # renamed the argument to 'is_async' — verify before upgrading.
        # Make sure async is not set by default
        default_queue = get_queue('default')
        self.assertTrue(default_queue._async)
        # Make sure async override works
        default_queue_async = get_queue('default', async=False)
        self.assertFalse(default_queue_async._async)
        # Make sure async setting works
        async_queue = get_queue('async')
        self.assertFalse(async_queue._async)
    @override_settings(RQ={'AUTOCOMMIT': False})
    def test_autocommit(self):
        """
        Checks whether autocommit is set properly.
        """
        queue = get_queue(autocommit=True)
        self.assertTrue(queue._autocommit)
        queue = get_queue(autocommit=False)
        self.assertFalse(queue._autocommit)
        # Falls back to default AUTOCOMMIT mode
        queue = get_queue()
        self.assertFalse(queue._autocommit)
        queues = get_queues(autocommit=True)
        self.assertTrue(queues[0]._autocommit)
        queues = get_queues(autocommit=False)
        self.assertFalse(queues[0]._autocommit)
        queues = get_queues()
        self.assertFalse(queues[0]._autocommit)
    def test_default_timeout(self):
        """Ensure DEFAULT_TIMEOUT are properly parsed."""
        queue = get_queue()
        self.assertEqual(queue._default_timeout, 500)
        queue = get_queue('test1')
        self.assertEqual(queue._default_timeout, 400)
@override_settings(RQ={'AUTOCOMMIT': True})
class DecoratorTest(TestCase):
    """Tests for the ``@job`` decorator (queue selection and result TTL)."""
    def test_job_decorator(self):
        # Ensure that decorator passes in the right queue from settings.py
        queue_name = 'test3'
        config = QUEUES[queue_name]
        @job(queue_name)
        def test():
            pass
        result = test.delay()
        queue = get_queue(queue_name)
        self.assertEqual(result.origin, queue_name)
        result.delete()
    def test_job_decorator_default(self):
        # Ensure that decorator passes in the right queue from settings.py
        @job
        def test():
            pass
        result = test.delay()
        self.assertEqual(result.origin, 'default')
        result.delete()
    def test_job_decorator_result_ttl_default(self):
        # With no setting, the decorator should fall back to rq's default.
        from rq.defaults import DEFAULT_RESULT_TTL
        @job
        def test():
            pass
        result = test.delay()
        self.assertEqual(result.result_ttl, DEFAULT_RESULT_TTL)
        result.delete()
    @override_settings(RQ={'AUTOCOMMIT': True, 'DEFAULT_RESULT_TTL': 5432})
    def test_job_decorator_result_ttl(self):
        # DEFAULT_RESULT_TTL from settings should win over rq's default.
        @job
        def test():
            pass
        result = test.delay()
        self.assertEqual(result.result_ttl, 5432)
        result.delete()
@override_settings(RQ={'AUTOCOMMIT': True})
class WorkersTest(TestCase):
    """Tests for get_worker and worker/connection collection helpers."""
    def test_get_worker_default(self):
        """
        By default, ``get_worker`` should return worker for ``default`` queue.
        """
        worker = get_worker()
        queue = worker.queues[0]
        self.assertEqual(queue.name, 'default')
    def test_get_worker_specified(self):
        """
        Checks if a worker with specified queues is created when queue
        names are given.
        """
        w = get_worker('test')
        self.assertEqual(len(w.queues), 1)
        queue = w.queues[0]
        self.assertEqual(queue.name, 'test')
    def test_get_worker_custom_classes(self):
        # NOTE(review): DummyJob/DummyQueue/DummyWorker are not imported in
        # the visible header — presumably exposed by django_rq.tests; verify.
        w = get_worker('test',
                       job_class='django_rq.tests.DummyJob',
                       queue_class='django_rq.tests.DummyQueue',
                       worker_class='django_rq.tests.DummyWorker')
        self.assertIs(w.job_class, DummyJob)
        self.assertIsInstance(w.queues[0], DummyQueue)
        self.assertIsInstance(w, DummyWorker)
    def test_get_current_job(self):
        """
        Ensure that functions using RQ's ``get_current_job`` doesn't fail
        when run from rqworker (the job id is not in the failed queue).
        """
        queue = get_queue()
        job = queue.enqueue(access_self)
        call_command('rqworker', '--burst')
        failed_queue = Queue(name='failed', connection=queue.connection)
        self.assertFalse(job.id in failed_queue.job_ids)
        job.delete()
    def test_collects_worker_various_connections_get_multiple_collection(self):
        # Three queues over two distinct redis connections -> two collections.
        queues = [
            {'name': 'default', 'connection_config': settings.RQ_QUEUES['default']},
            {'name': 'django_rq_test', 'connection_config': settings.RQ_QUEUES['django_rq_test']},
            {'name': 'test3', 'connection_config': settings.RQ_QUEUES['test3']},
        ]
        collections = collect_workers_by_connection(queues)
        self.assertEqual(len(collections), 2)
@override_settings(RQ={'AUTOCOMMIT': True})
class ViewTest(TestCase):
    """Tests for the django-rq admin views (requeue/delete/registries/stats),
    exercised through a logged-in staff user and the Django test client."""
    def setUp(self):
        # A staff user is required to access the django-rq views.
        self.user = User.objects.create_user('foo', password='pass')
        self.user.is_staff = True
        self.user.is_active = True
        self.user.save()
        self.client = Client()
        self.client.login(username=self.user.username, password='pass')
        get_queue('django_rq_test').connection.flushall()
    def test_requeue_job(self):
        """
        Ensure that a failed job gets requeued when rq_requeue_job is called
        """
        def failing_job():
            raise ValueError
        queue = get_queue('default')
        queue_index = get_failed_queue_index('default')
        job = queue.enqueue(failing_job)
        worker = get_worker('default')
        worker.work(burst=True)
        job.refresh()
        self.assertTrue(job.is_failed)
        self.client.post(reverse('rq_requeue_job', args=[queue_index, job.id]),
                         {'requeue': 'Requeue'})
        self.assertIn(job, queue.jobs)
        job.delete()
    def test_delete_job(self):
        """
        In addition to deleting job from Redis, the job id also needs to be
        deleted from Queue.
        """
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        job = queue.enqueue(access_self)
        self.client.post(reverse('rq_delete_job', args=[queue_index, job.id]),
                         {'post': 'yes'})
        self.assertFalse(Job.exists(job.id, connection=queue.connection))
        self.assertNotIn(job.id, queue.job_ids)
    def test_action_delete_jobs(self):
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        # enqueue some jobs
        job_ids = []
        for _ in range(0, 3):
            job = queue.enqueue(access_self)
            job_ids.append(job.id)
        # remove those jobs using view
        self.client.post(reverse('rq_actions', args=[queue_index]),
                         {'action': 'delete', 'job_ids': job_ids})
        # check if jobs are removed
        for job_id in job_ids:
            self.assertFalse(Job.exists(job_id, connection=queue.connection))
            self.assertNotIn(job_id, queue.job_ids)
    def test_action_requeue_jobs(self):
        def failing_job():
            raise ValueError
        queue = get_queue('django_rq_test')
        failed_queue_index = get_failed_queue_index('django_rq_test')
        # enqueue some jobs that will fail
        jobs = []
        job_ids = []
        for _ in range(0, 3):
            job = queue.enqueue(failing_job)
            jobs.append(job)
            job_ids.append(job.id)
        # do those jobs = fail them
        worker = get_worker('django_rq_test')
        worker.work(burst=True)
        # check if all jobs are really failed
        for job in jobs:
            self.assertTrue(job.is_failed)
        # renqueue failed jobs from failed queue
        self.client.post(reverse('rq_actions', args=[failed_queue_index]),
                         {'action': 'requeue', 'job_ids': job_ids})
        # check if we requeue all failed jobs
        for job in jobs:
            self.assertFalse(job.is_failed)
    def test_clear_queue(self):
        """Test that the queue clear actually clears the queue."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        job = queue.enqueue(access_self)
        self.client.post(reverse('rq_clear', args=[queue_index]),
                         {'post': 'yes'})
        self.assertFalse(Job.exists(job.id, connection=queue.connection))
        self.assertNotIn(job.id, queue.job_ids)
    def test_finished_jobs(self):
        """Ensure that finished jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        job = queue.enqueue(access_self)
        registry = FinishedJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_finished_jobs', args=[queue_index])
        )
        self.assertEqual(response.context['jobs'], [job])
    def test_started_jobs(self):
        """Ensure that active jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        job = queue.enqueue(access_self)
        registry = StartedJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_started_jobs', args=[queue_index])
        )
        self.assertEqual(response.context['jobs'], [job])
    def test_deferred_jobs(self):
        """Ensure that active jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        job = queue.enqueue(access_self)
        registry = DeferredJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_deferred_jobs', args=[queue_index])
        )
        self.assertEqual(response.context['jobs'], [job])
    def test_get_all_workers(self):
        # Workers are grouped per connection config; only the matching group
        # should be returned.
        worker1 = get_worker()
        worker2 = get_worker('test')
        workers_collections = [
            {'config': {'URL': 'redis://'}, 'all_workers': [worker1]},
            {'config': {'URL': 'redis://localhost/1'}, 'all_workers': [worker2]},
        ]
        result = get_all_workers_by_configuration({'URL': 'redis://'}, workers_collections)
        self.assertEqual(result, [worker1])
    def test_workers(self):
        """Worker index page should show workers for a specific queue"""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        worker1 = get_worker('django_rq_test', name=uuid.uuid4().hex)
        worker1.register_birth()
        worker2 = get_worker('test3')
        worker2.register_birth()
        response = self.client.get(
            reverse('rq_workers', args=[queue_index])
        )
        self.assertEqual(response.context['workers'], [worker1])
    def test_worker_details(self):
        """Worker index page should show workers for a specific queue"""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')
        worker = get_worker('django_rq_test', name=uuid.uuid4().hex)
        worker.register_birth()
        response = self.client.get(
            reverse('rq_worker_details', args=[queue_index, worker.key])
        )
        self.assertEqual(response.context['worker'], worker)
    def test_statistics_json_view(self):
        """
        Django-RQ's statistic as JSON only viewable by staff or with API_TOKEN
        """
        # Override testing RQ_QUEUES
        queues = [{
            'connection_config': {
                'DB': 0,
                'HOST': 'localhost',
                'PORT': 6379,
            },
            'name': 'default'
        }]
        with patch('django_rq.utils.QUEUES_LIST', new_callable=PropertyMock(return_value=queues)):
            response = self.client.get(reverse('rq_home'))
            self.assertEqual(response.status_code, 200)
            response = self.client.get(reverse('rq_home_json'))
            self.assertEqual(response.status_code, 200)
            # Not staff, only token
            self.user.is_staff = False
            self.user.save()
            response = self.client.get(reverse('rq_home'))
            self.assertEqual(response.status_code, 302)
            # Error, but with 200 code
            response = self.client.get(reverse('rq_home_json'))
            self.assertEqual(response.status_code, 200)
            self.assertIn("error", response.content.decode('utf-8'))
            # With token,
            token = '12345abcde'
            with patch('django_rq.views.API_TOKEN', new_callable=PropertyMock(return_value=token)):
                response = self.client.get(reverse('rq_home_json', args=[token]))
                self.assertEqual(response.status_code, 200)
                self.assertIn("name", response.content.decode('utf-8'))
                self.assertNotIn('"error": true', response.content.decode('utf-8'))
                # Wrong token
                response = self.client.get(reverse('rq_home_json', args=["wrong_token"]))
                self.assertEqual(response.status_code, 200)
                self.assertNotIn("name", response.content.decode('utf-8'))
                self.assertIn('"error": true', response.content.decode('utf-8'))
class ThreadQueueTest(TestCase):
    """Tests for the AUTOCOMMIT setting and the per-thread delayed queue."""
    @override_settings(RQ={'AUTOCOMMIT': True})
    def test_enqueue_autocommit_on(self):
        """
        Running ``enqueue`` when AUTOCOMMIT is on should
        immediately persist job into Redis.
        """
        queue = get_queue()
        job = queue.enqueue(divide, 1, 1)
        self.assertTrue(job.id in queue.job_ids)
        job.delete()
    @override_settings(RQ={'AUTOCOMMIT': False})
    def test_enqueue_autocommit_off(self):
        """
        Running ``enqueue`` when AUTOCOMMIT is off should
        put the job in the delayed queue instead of enqueueing it right away.
        """
        queue = get_queue()
        job = queue.enqueue(divide, 1, b=1)
        self.assertTrue(job is None)
        # Delayed entries are (queue, args, kwargs) triples; the positional
        # argument 1 and keyword b=1 end up inside the kwargs dict.
        delayed_queue = thread_queue.get_queue()
        self.assertEqual(delayed_queue[0][0], queue)
        self.assertEqual(delayed_queue[0][1], ())
        kwargs = delayed_queue[0][2]
        self.assertEqual(kwargs['args'], (1,))
        self.assertEqual(kwargs['result_ttl'], None)
        self.assertEqual(kwargs['kwargs'], {'b': 1})
        self.assertEqual(kwargs['func'], divide)
        self.assertEqual(kwargs['timeout'], None)
    def test_commit(self):
        """
        Ensure that commit_delayed_jobs properly enqueue jobs and clears
        delayed_queue.
        """
        queue = get_queue()
        delayed_queue = thread_queue.get_queue()
        queue.empty()
        self.assertEqual(queue.count, 0)
        queue.enqueue_call(divide, args=(1,), kwargs={'b': 1})
        thread_queue.commit()
        self.assertEqual(queue.count, 1)
        self.assertEqual(len(delayed_queue), 0)
    def test_clear(self):
        # clear() must discard pending delayed entries entirely.
        queue = get_queue()
        delayed_queue = thread_queue.get_queue()
        delayed_queue.append((queue, divide, (1,), {'b': 1}))
        thread_queue.clear()
        delayed_queue = thread_queue.get_queue()
        self.assertEqual(delayed_queue, [])
    @override_settings(RQ={'AUTOCOMMIT': False})
    def test_success(self):
        # A successful request commits the delayed jobs into Redis.
        queue = get_queue()
        queue.empty()
        thread_queue.clear()
        self.assertEqual(queue.count, 0)
        self.client.get(reverse('success'))
        self.assertEqual(queue.count, 1)
    @override_settings(RQ={'AUTOCOMMIT': False})
    def test_error(self):
        # A request that raises must roll back: nothing gets enqueued.
        queue = get_queue()
        queue.empty()
        self.assertEqual(queue.count, 0)
        url = reverse('error')
        self.assertRaises(ValueError, self.client.get, url)
        self.assertEqual(queue.count, 0)
class SchedulerTest(TestCase):
    """Tests for the optional rq-scheduler integration."""
    @skipIf(RQ_SCHEDULER_INSTALLED is False, 'RQ Scheduler not installed')
    def test_get_scheduler(self):
        """
        Ensure get_scheduler creates a scheduler instance with the right
        connection params for `test` queue.
        """
        config = QUEUES['test']
        scheduler = get_scheduler('test')
        # Compare the redis connection the scheduler actually holds against
        # the configured HOST/PORT/DB for the 'test' queue.
        connection_kwargs = scheduler.connection.connection_pool.connection_kwargs
        self.assertEqual(scheduler.queue_name, 'test')
        self.assertEqual(connection_kwargs['host'], config['HOST'])
        self.assertEqual(connection_kwargs['port'], config['PORT'])
        self.assertEqual(connection_kwargs['db'], config['DB'])
class RedisCacheTest(TestCase):
    """Queues configured through Django's CACHES setting (USE_REDIS_CACHE)
    must connect to the same Redis instance as the named cache."""
    @skipIf(settings.REDIS_CACHE_TYPE != 'django-redis',
            'django-redis not installed')
    def test_get_queue_django_redis(self):
        """
        Test that the USE_REDIS_CACHE option for configuration works.
        """
        queue_name = 'django-redis'
        queue = get_queue(queue_name)
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, queue_name)
        # django-redis encodes "host:port:db" in LOCATION; split once
        # instead of re-parsing the setting for every assertion.
        location_parts = settings.CACHES[queue_name]['LOCATION'].split(':')
        self.assertEqual(connection_kwargs['host'], location_parts[0])
        self.assertEqual(connection_kwargs['port'], int(location_parts[1]))
        self.assertEqual(connection_kwargs['db'], int(location_parts[2]))
        self.assertEqual(connection_kwargs['password'], None)
    @skipIf(settings.REDIS_CACHE_TYPE != 'django-redis-cache',
            'django-redis-cache not installed')
    def test_get_queue_django_redis_cache(self):
        """
        Test that the USE_REDIS_CACHE option for configuration works.
        """
        queue_name = 'django-redis-cache'
        queue = get_queue(queue_name)
        connection_kwargs = queue.connection.connection_pool.connection_kwargs
        self.assertEqual(queue.name, queue_name)
        # django-redis-cache keeps only "host:port" in LOCATION; the DB
        # number lives in OPTIONS['DB'].
        location_parts = settings.CACHES[queue_name]['LOCATION'].split(':')
        cache_db_num = settings.CACHES[queue_name]['OPTIONS']['DB']
        self.assertEqual(connection_kwargs['host'], location_parts[0])
        self.assertEqual(connection_kwargs['port'], int(location_parts[1]))
        self.assertEqual(connection_kwargs['db'], int(cache_db_num))
        self.assertEqual(connection_kwargs['password'], None)
class DummyJob(Job):
    # Minimal Job subclass used to exercise the JOB_CLASS override machinery.
    pass
class JobClassTest(TestCase):
    """get_job_class() resolution: default, settings override, local override."""
    def test_default_job_class(self):
        job_class = get_job_class()
        self.assertIs(job_class, Job)
    @override_settings(RQ={'JOB_CLASS': 'django_rq.tests.DummyJob'})
    def test_custom_class(self):
        job_class = get_job_class()
        self.assertIs(job_class, DummyJob)
    def test_local_override(self):
        # An explicit dotted path argument wins over any settings default.
        self.assertIs(get_job_class('django_rq.tests.DummyJob'), DummyJob)
class DummyQueue(DjangoRQ):
    """Stub queue class used by QueueClassTest to verify class resolution."""
class QueueClassTest(TestCase):
    """get_queue() queue-class resolution: default, per-queue config, kwarg."""
    def test_default_queue_class(self):
        queue = get_queue('test')
        self.assertIsInstance(queue, DjangoRQ)
    def test_for_queue(self):
        # presumably the 'test1' queue is configured with this DummyQueue as
        # its queue class in the test settings — config not visible here.
        queue = get_queue('test1')
        self.assertIsInstance(queue, DummyQueue)
    def test_in_kwargs(self):
        # An explicit queue_class kwarg overrides configuration.
        queue = get_queue('test', queue_class=DummyQueue)
        self.assertIsInstance(queue, DummyQueue)
class DummyWorker(Worker):
    # Minimal Worker subclass used to exercise the WORKER_CLASS override.
    pass
class WorkerClassTest(TestCase):
    """get_worker()/get_worker_class() resolution: default, settings, local."""
    def test_default_worker_class(self):
        worker = get_worker('test')
        self.assertIsInstance(worker, Worker)
    @override_settings(RQ={'WORKER_CLASS': 'django_rq.tests.DummyWorker'})
    def test_custom_class(self):
        worker = get_worker('test')
        self.assertIsInstance(worker, DummyWorker)
    def test_local_override(self):
        # An explicit dotted path argument wins over the settings default.
        self.assertIs(get_worker_class('django_rq.tests.DummyWorker'), DummyWorker)
@override_settings(RQ={'AUTOCOMMIT': True})
class TemplateTagTest(TestCase):
    """Tests for the to_localtime template helper."""
    def test_to_localtime(self):
        # Asia/Jakarta is UTC+7 year-round, so the offset is deterministic.
        with self.settings(TIME_ZONE='Asia/Jakarta'):
            queue = get_queue()
            job = queue.enqueue(access_self)
            time = to_localtime(job.created_at)
            self.assertIsNotNone(time.tzinfo)
            self.assertEqual(time.strftime("%z"), '+0700')
| 35.738286 | 102 | 0.635413 |
e0faf77ebff27f46982f16b4ce0eff36f818019d | 14,196 | py | Python | tests/test_runner/tests.py | dnozay/django | 5dcdbe95c749d36072f527e120a8cb463199ae0d | [
"BSD-3-Clause"
] | 1 | 2015-04-07T01:54:11.000Z | 2015-04-07T01:54:11.000Z | tests/test_runner/tests.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | tests/test_runner/tests.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | """
Tests for django test runner
"""
from __future__ import unicode_literals
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django import db
from django.test import runner, TestCase, TransactionTestCase, skipUnlessDBFeature
from django.test.testcases import connections_support_transactions
from django.test.utils import override_system_checks
from django.utils import six
from admin_scripts.tests import AdminScriptTestCase
from .models import Person
class DependencyOrderingTests(unittest.TestCase):
    """Tests for runner.dependency_ordered, which topologically sorts
    (signature, (db_name, aliases)) pairs so dependencies come first."""
    def test_simple_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
        ]
        dependencies = {
            'alpha': ['charlie'],
            'bravo': ['charlie'],
        }
        ordered = runner.dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, value in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        # charlie (s3) is a dependency of both others, so it must come first.
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
    def test_chained_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
        ]
        dependencies = {
            'alpha': ['bravo'],
            'bravo': ['charlie'],
        }
        ordered = runner.dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, value in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        # Explicit dependencies
        self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
        # Implied dependencies
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
    def test_multiple_dependencies(self):
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
            ('s3', ('s3_db', ['charlie'])),
            ('s4', ('s4_db', ['delta'])),
        ]
        dependencies = {
            'alpha': ['bravo', 'delta'],
            'bravo': ['charlie'],
            'delta': ['charlie'],
        }
        ordered = runner.dependency_ordered(raw, dependencies=dependencies)
        ordered_sigs = [sig for sig, aliases in ordered]
        self.assertIn('s1', ordered_sigs)
        self.assertIn('s2', ordered_sigs)
        self.assertIn('s3', ordered_sigs)
        self.assertIn('s4', ordered_sigs)
        # Explicit dependencies
        self.assertLess(ordered_sigs.index('s2'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s4'), ordered_sigs.index('s1'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s2'))
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s4'))
        # Implicit dependencies
        self.assertLess(ordered_sigs.index('s3'), ordered_sigs.index('s1'))
    def test_circular_dependencies(self):
        # A cycle between aliases cannot be ordered and must raise.
        raw = [
            ('s1', ('s1_db', ['alpha'])),
            ('s2', ('s2_db', ['bravo'])),
        ]
        dependencies = {
            'bravo': ['alpha'],
            'alpha': ['bravo'],
        }
        self.assertRaises(ImproperlyConfigured, runner.dependency_ordered, raw, dependencies=dependencies)
    def test_own_alias_dependency(self):
        # A signature may not depend on another alias of itself.
        raw = [
            ('s1', ('s1_db', ['alpha', 'bravo']))
        ]
        dependencies = {
            'alpha': ['bravo']
        }
        with self.assertRaises(ImproperlyConfigured):
            runner.dependency_ordered(raw, dependencies=dependencies)
        # reordering aliases shouldn't matter
        raw = [
            ('s1', ('s1_db', ['bravo', 'alpha']))
        ]
        with self.assertRaises(ImproperlyConfigured):
            runner.dependency_ordered(raw, dependencies=dependencies)
class MockTestRunner(object):
    """Test-runner stand-in that records whether run_tests() was called."""
    # Stored on the class because call_command() instantiates its own runner
    # internally; the test can only inspect the class afterwards.
    invoked = False
    def __init__(self, *args, **kwargs):
        # Accept (and ignore) whatever options the test command passes.
        pass
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        # Record the call instead of running any tests.
        MockTestRunner.invoked = True
class ManageCommandTests(unittest.TestCase):
    """The 'test' management command must honour the --testrunner option."""
    def test_custom_test_runner(self):
        call_command('test', 'sites',
                     testrunner='test_runner.tests.MockTestRunner')
        self.assertTrue(MockTestRunner.invoked,
                        "The custom test runner has not been invoked")
    def test_bad_test_runner(self):
        # A dotted path to a missing attribute fails at import resolution.
        with self.assertRaises(AttributeError):
            call_command('test', 'sites',
                         testrunner='test_runner.NonExistentRunner')
class CustomOptionsTestRunner(runner.DiscoverRunner):
    """DiscoverRunner subclass exposing three extra CLI options so the
    option-forwarding tests can observe what actually reaches the runner."""
    def __init__(self, verbosity=1, interactive=True, failfast=True, option_a=None, option_b=None, option_c=None, **kwargs):
        super(CustomOptionsTestRunner, self).__init__(verbosity=verbosity, interactive=interactive,
                                                      failfast=failfast)
        self.option_a = option_a
        self.option_b = option_b
        self.option_c = option_c
    @classmethod
    def add_arguments(cls, parser):
        # No trailing commas here: the originals turned each call into a
        # throwaway one-element tuple expression.
        parser.add_argument('--option_a', '-a', action='store', dest='option_a', default='1')
        parser.add_argument('--option_b', '-b', action='store', dest='option_b', default='2')
        parser.add_argument('--option_c', '-c', action='store', dest='option_c', default='3')
    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        # The tests only inspect stdout, so just echo the received options.
        print("%s:%s:%s" % (self.option_a, self.option_b, self.option_c))
class CustomTestRunnerOptionsTests(AdminScriptTestCase):
    """End-to-end: custom runner options are parsed from the command line
    and forwarded, with defaults '1', '2', '3' (see CustomOptionsTestRunner)."""
    def setUp(self):
        settings = {
            'TEST_RUNNER': '\'test_runner.tests.CustomOptionsTestRunner\'',
        }
        self.write_settings('settings.py', sdict=settings)
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_default_options(self):
        args = ['test', '--settings=test_project.settings']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, '1:2:3')
    def test_default_and_given_options(self):
        args = ['test', '--settings=test_project.settings', '--option_b=foo']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, '1:foo:3')
    def test_option_name_and_value_separated(self):
        # "--option_b foo" (space-separated) must parse like "--option_b=foo".
        args = ['test', '--settings=test_project.settings', '--option_b', 'foo']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, '1:foo:3')
    def test_all_options_given(self):
        args = ['test', '--settings=test_project.settings', '--option_a=bar',
                '--option_b=foo', '--option_c=31337']
        out, err = self.run_django_admin(args)
        self.assertNoOutput(err)
        self.assertOutput(out, 'bar:foo:31337')
class Ticket17477RegressionTests(AdminScriptTestCase):
    """Regression test for #17477: 'manage.py help test' must not error."""
    def setUp(self):
        self.write_settings('settings.py')
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_ticket_17477(self):
        """'manage.py help test' works after r16352."""
        args = ['help', 'test']
        out, err = self.run_manage(args)
        self.assertNoOutput(err)
class Sqlite3InMemoryTestDbs(TestCase):
    """Regression test for #16329: sqlite3 ':memory:' test databases must not
    break transaction-support detection."""
    available_apps = []
    # `setup_databases` triggers system check framework, but we do not want to
    # perform checks.
    @override_system_checks([])
    @unittest.skipUnless(all(db.connections[conn].vendor == 'sqlite' for conn in db.connections),
                         "This is an sqlite-specific issue")
    def test_transaction_support(self):
        """Ticket #16329: sqlite3 in-memory test databases"""
        old_db_connections = db.connections
        # Exercise both spellings: NAME=':memory:' and TEST={'NAME': ':memory:'}.
        for option_key, option_value in (
                ('NAME', ':memory:'), ('TEST', {'NAME': ':memory:'})):
            try:
                db.connections = db.ConnectionHandler({
                    'default': {
                        'ENGINE': 'django.db.backends.sqlite3',
                        option_key: option_value,
                    },
                    'other': {
                        'ENGINE': 'django.db.backends.sqlite3',
                        option_key: option_value,
                    },
                })
                other = db.connections['other']
                runner.DiscoverRunner(verbosity=0).setup_databases()
                msg = "DATABASES setting '%s' option set to sqlite3's ':memory:' value shouldn't interfere with transaction support detection." % option_key
                # Transaction support should be properly initialized for the 'other' DB
                self.assertTrue(other.features.supports_transactions, msg)
                # And all the DBs should report that they support transactions
                self.assertTrue(connections_support_transactions(), msg)
            finally:
                # Restore the module-global connection handler for other tests.
                db.connections = old_db_connections
class DummyBackendTest(unittest.TestCase):
    def test_setup_databases(self):
        """
        Test that setup_databases() doesn't fail with dummy database backend.
        """
        runner_instance = runner.DiscoverRunner(verbosity=0)
        old_db_connections = db.connections
        try:
            # Empty DATABASES dict falls back to the dummy backend.
            db.connections = db.ConnectionHandler({})
            old_config = runner_instance.setup_databases()
            runner_instance.teardown_databases(old_config)
        except Exception as e:
            # NOTE(review): catching Exception and calling fail() discards the
            # original traceback; letting the exception propagate would too.
            self.fail("setup_databases/teardown_databases unexpectedly raised "
                      "an error: %s" % e)
        finally:
            db.connections = old_db_connections
class AliasedDefaultTestSetupTest(unittest.TestCase):
    def test_setup_aliased_default_database(self):
        """
        Test that setup_databases() doesn't fail when 'default' is aliased
        """
        runner_instance = runner.DiscoverRunner(verbosity=0)
        old_db_connections = db.connections
        try:
            # Two entries share the same NAME, i.e. 'aliased' aliases 'default'.
            db.connections = db.ConnectionHandler({
                'default': {
                    'NAME': 'dummy'
                },
                'aliased': {
                    'NAME': 'dummy'
                }
            })
            old_config = runner_instance.setup_databases()
            runner_instance.teardown_databases(old_config)
        except Exception as e:
            self.fail("setup_databases/teardown_databases unexpectedly raised "
                      "an error: %s" % e)
        finally:
            # Restore the module-global connection handler for other tests.
            db.connections = old_db_connections
class AliasedDatabaseTeardownTest(unittest.TestCase):
    def test_setup_aliased_databases(self):
        """Aliased databases must be destroyed exactly once on teardown."""
        from django.db.backends.dummy.base import DatabaseCreation
        runner_instance = runner.DiscoverRunner(verbosity=0)
        old_db_connections = db.connections
        old_destroy_test_db = DatabaseCreation.destroy_test_db
        old_create_test_db = DatabaseCreation.create_test_db
        try:
            # Monkeypatch creation/destruction so we only record the names
            # instead of touching a real database.
            destroyed_names = []
            DatabaseCreation.destroy_test_db = lambda self, old_database_name, verbosity=1, keepdb=False, serialize=True: destroyed_names.append(old_database_name)
            DatabaseCreation.create_test_db = lambda self, verbosity=1, autoclobber=False, keepdb=False, serialize=True: self._get_test_db_name()
            # 'default' and 'other' point at the same NAME, so they alias.
            db.connections = db.ConnectionHandler({
                'default': {
                    'ENGINE': 'django.db.backends.dummy',
                    'NAME': 'dbname',
                },
                'other': {
                    'ENGINE': 'django.db.backends.dummy',
                    'NAME': 'dbname',
                }
            })
            old_config = runner_instance.setup_databases()
            runner_instance.teardown_databases(old_config)
            # Destroyed once, not once per alias.
            self.assertEqual(destroyed_names.count('dbname'), 1)
        finally:
            DatabaseCreation.create_test_db = old_create_test_db
            DatabaseCreation.destroy_test_db = old_destroy_test_db
            db.connections = old_db_connections
class DeprecationDisplayTest(AdminScriptTestCase):
    """Regression tests for #19546: deprecation warnings raised while running
    tests are shown at default verbosity and suppressed at --verbosity=0."""
    def setUp(self):
        # Throwaway in-memory sqlite project so the only warnings come from
        # the deprecation test app.
        settings = {
            'DATABASES': '{"default": {"ENGINE":"django.db.backends.sqlite3", "NAME":":memory:"}}'
        }
        self.write_settings('settings.py', sdict=settings)
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_runner_deprecation_verbosity_default(self):
        """Both test-level and module-level warnings appear by default."""
        args = ['test', '--settings=test_project.settings', 'test_runner_deprecation_app']
        out, err = self.run_django_admin(args)
        self.assertIn("Ran 1 test", err)
        six.assertRegex(self, err, r"RemovedInDjango\d\dWarning: warning from test")
        six.assertRegex(self, err, r"RemovedInDjango\d\dWarning: module-level warning from deprecation_app")
    def test_runner_deprecation_verbosity_zero(self):
        """--verbosity=0 silences the deprecation warnings."""
        args = ['test', '--settings=test_project.settings', '--verbosity=0', 'test_runner_deprecation_app']
        out, err = self.run_django_admin(args)
        self.assertIn("Ran 1 test", err)
        # assertNotIn reports the offending text on failure, unlike
        # assertFalse("..." in err) which just prints "False is not true".
        self.assertNotIn("warning from test", err)
class AutoIncrementResetTest(TransactionTestCase):
    """
    Here we test creating the same model two times in different test methods,
    and check that both times they get "1" as their PK value. That is, we test
    that AutoField values start from 1 for each transactional test case.
    """
    available_apps = ['test_runner']
    # Ask TransactionTestCase to reset DB sequences between test methods.
    reset_sequences = True
    @skipUnlessDBFeature('supports_sequence_reset')
    def test_autoincrement_reset1(self):
        p = Person.objects.create(first_name='Jack', last_name='Smith')
        self.assertEqual(p.pk, 1)
    @skipUnlessDBFeature('supports_sequence_reset')
    def test_autoincrement_reset2(self):
        # Deliberately identical to reset1: both must observe pk == 1.
        p = Person.objects.create(first_name='Jack', last_name='Smith')
        self.assertEqual(p.pk, 1)
| 37.259843 | 163 | 0.616723 |
3516a0363ec5ed4432e2c2a41e1b8b95acf00066 | 934 | py | Python | vbs.py | paulfears/vbs | 037aa51d594a40380409058131c9700aa73885f3 | [
"MIT"
] | 4 | 2018-12-16T21:39:54.000Z | 2019-12-22T21:48:04.000Z | vbs.py | paulfears/vbs | 037aa51d594a40380409058131c9700aa73885f3 | [
"MIT"
] | null | null | null | vbs.py | paulfears/vbs | 037aa51d594a40380409058131c9700aa73885f3 | [
"MIT"
] | 1 | 2018-12-17T23:50:21.000Z | 2018-12-17T23:50:21.000Z | import os, time
def tts(words):
    # Speak *words* via scripts/tts.vbs (Windows cscript) and return the last
    # whitespace-separated token of the script's output.
    # NOTE(review): *words* is interpolated into a shell command line; a
    # double quote in the input breaks (or injects into) the command.
    return os.popen("cscript scripts/tts.vbs "+'"%s"'%words).read().split()[-1]
def sendkeys(key_stirng):
    # Forward *key_stirng* to scripts/sendkeys.vbs (WScript SendKeys) and
    # return the last whitespace-separated token of the script's output.
    # NOTE(review): same shell-quoting hazard as tts(); a '"' in the input
    # breaks the command line.
    return os.popen("cscript scripts/sendkeys.vbs "+'"%s"'%key_stirng).read().split()[-1]
def alert(messege, boxtype="ok", title="alert", timeout=6000):
    """Show a Windows message box via scripts/alert.vbs and return the
    pressed button.

    messege -- dialog text (parameter name kept for backward compatibility)
    boxtype -- one of the keys below; unknown values fall back to "ok"
    title   -- dialog title
    timeout -- cscript //T host timeout; returns "timed out" when the script
               is terminated before the user answers

    Returns one of "ok", "cancel", "abort", "retry", "ignore", "yes", "no"
    or "timed out".
    """
    import subprocess
    boxtypes = {"ok": 0, "okay": 0, "cancel": 1, "cancelable": 1, "retry": 2,
                "yesnocancel": 3, "yesno": 4, "warning": 48, "critical": 16,
                "info": 64}
    # Unknown box types degrade to a plain OK dialog instead of crashing.
    boxtype = boxtypes.get(boxtype, 0)
    # Pass argv as a list (no shell): quotes or metacharacters in the
    # message/title can no longer break or inject into the command line.
    proc = subprocess.Popen(
        ['cscript', 'scripts/alert.vbs', '//T:%s' % timeout,
         str(messege), str(boxtype), str(title)],
        stdout=subprocess.PIPE, universal_newlines=True)
    output, _ = proc.communicate()
    returned_index = output.split()[-1]
    if returned_index == 'terminated.':
        return "timed out"
    return_values = ["", "ok", "cancel", "abort", "retry", "ignore", "yes", "no"]
    return return_values[int(returned_index)]
if __name__ == '__main__':
    # Manual smoke test: show a yes/no dialog and print the chosen button.
    print(alert("yello", boxtype="yesno"))
| 37.36 | 138 | 0.64454 |
4304c9d645db4afecbcb696cc2e5a075305fe810 | 12,964 | py | Python | code/preprocessing/squad_preprocess.py | greatgang/squad | 15c2cbce64505e0e06cb195024541be2b0671542 | [
"Apache-2.0"
] | null | null | null | code/preprocessing/squad_preprocess.py | greatgang/squad | 15c2cbce64505e0e06cb195024541be2b0671542 | [
"Apache-2.0"
] | null | null | null | code/preprocessing/squad_preprocess.py | greatgang/squad | 15c2cbce64505e0e06cb195024541be2b0671542 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads SQuAD train and dev sets, preprocesses and writes tokenized versions to file"""
import os
import sys
import random
import argparse
import json
import nltk
import numpy as np
from tqdm import tqdm
from six.moves.urllib.request import urlretrieve
reload(sys)
sys.setdefaultencoding('utf8')
random.seed(42)
np.random.seed(42)
SQUAD_BASE_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
def setup_args():
    """Parse command-line flags; the only (mandatory) flag is --data_dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", required=True)
    args = parser.parse_args()
    return args
def write_to_file(out_file, line):
    # Python 2 only: writes *line* as UTF-8 bytes plus a trailing newline.
    # (The bytes + str concatenation here would raise under Python 3.)
    out_file.write(line.encode('utf8') + '\n')
def data_from_json(filename):
    """Parse the JSON file at *filename* and return the resulting object."""
    with open(filename) as fh:
        parsed = json.load(fh)
    return parsed
def tokenize(sequence):
    """Split *sequence* into lowercased word tokens.

    NLTK's tokenizer rewrites double quotes as `` and ''; map both back to a
    plain '"' so tokens line up with the raw text.
    """
    normalized = (tok.replace("``", '"').replace("''", '"')
                  for tok in nltk.word_tokenize(sequence))
    return [tok.lower() for tok in normalized]
def tokenizeOri(sequence):
    """Split *sequence* into word tokens, preserving original case.

    Same quote normalisation as ``tokenize`` but without lowercasing.
    """
    result = []
    for tok in nltk.word_tokenize(sequence):
        result.append(tok.replace("``", '"').replace("''", '"'))
    return result
def total_exs(dataset):
    """
    Returns the total number of (context, question, answer) triples,
    given the data read from the SQuAD json file.
    """
    return sum(len(paragraph['qas'])
               for article in dataset['data']
               for paragraph in article['paragraphs'])
def reporthook(t):
    """Wrap tqdm instance *t* in a callback usable as urlretrieve's
    reporthook (recipe from https://github.com/tqdm/tqdm)."""
    seen = [0]  # blocks reported so far; list cell since Python 2 lacks nonlocal
    def update_to(blocks=1, block_size=1, total_size=None):
        """
        blocks: number of blocks transferred so far.
        block_size: size of each block (in tqdm units).
        total_size: total size (in tqdm units), or None to leave unchanged.
        """
        if total_size is not None:
            t.total = total_size
        # Advance the bar by only the delta since the previous callback.
        t.update((blocks - seen[0]) * block_size)
        seen[0] = blocks
    return update_to
def maybe_download(url, filename, prefix, num_bytes=None):
    """Takes an URL, a filename, and the expected bytes, download
    the contents and returns the filename.
    num_bytes=None disables the file size check.
    Note: returns None when the file already existed (no download happened),
    since local_filename is only set by urlretrieve."""
    local_filename = None
    if not os.path.exists(os.path.join(prefix, filename)):
        try:
            print "Downloading file {}...".format(url + filename)
            # tqdm bar is fed by the urlretrieve reporthook defined above.
            with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
                local_filename, _ = urlretrieve(url + filename, os.path.join(prefix, filename), reporthook=reporthook(t))
        except AttributeError as e:
            print "An error occurred when downloading the file! Please get the dataset using a browser."
            raise e
    # We have a downloaded file
    # Check the stats and make sure they are ok
    file_stats = os.stat(os.path.join(prefix, filename))
    if num_bytes is None or file_stats.st_size == num_bytes:
        print "File {} successfully loaded".format(filename)
    else:
        raise Exception("Unexpected dataset size. Please get the dataset using a browser.")
    return local_filename
def get_char_word_loc_mapping(context, context_tokens):
    """
    Return a mapping that maps from character locations to the corresponding token locations.
    If we're unable to complete the mapping e.g. because of special characters, we return None.
    Inputs:
      context: string (unicode)
      context_tokens: list of strings (unicode)
    Returns:
      mapping: dictionary from ints (character locations) to (token, token_idx) pairs
        Only ints corresponding to non-space character locations are in the keys
        e.g. if context = "hello world" and context_tokens = ["hello", "world"] then
        0,1,2,3,4 are mapped to ("hello", 0) and 6,7,8,9,10 are mapped to ("world", 1)
    """
    acc = '' # accumulator
    current_token_idx = 0 # current word loc
    mapping = dict()
    for char_idx, char in enumerate(context): # step through original characters
        if char != u' ' and char != u'\n': # if it's not a space:
            acc += char # add to accumulator
            context_token = unicode(context_tokens[current_token_idx]) # current word token
            if acc == context_token: # if the accumulator now matches the current word token
                syn_start = char_idx - len(acc) + 1 # char loc of the start of this word
                for char_loc in range(syn_start, char_idx+1):
                    mapping[char_loc] = (acc, current_token_idx) # add to mapping
                acc = '' # reset accumulator
                current_token_idx += 1
    # If not every token was consumed, the tokenizer dropped/merged characters
    # relative to the raw context, so the mapping is unreliable -> None.
    if current_token_idx != len(context_tokens):
        return None
    else:
        return mapping
def preprocess_and_write(dataset, tier, out_dir):
    """Reads the dataset, extracts context, question, answer, tokenizes them,
    and calculates answer span in terms of token indices.
    Note: due to tokenization issues, and the fact that the original answer
    spans are given in terms of characters, some examples are discarded because
    we cannot get a clean span in terms of tokens.
    This function produces the {train/dev}.{context/question/answer/span/feature} files.
    Inputs:
      dataset: read from JSON
      tier: string ("train" or "dev")
      out_dir: directory to write the preprocessed files
    Returns:
      None. (Counts of written/discarded examples are only printed.)
    """
    num_exs = 0 # number of examples written to file
    num_mappingprob, num_tokenprob, num_spanalignprob = 0, 0, 0
    examples = []
    for articles_id in tqdm(range(len(dataset['data'])), desc="Preprocessing {}".format(tier)):
        article_paragraphs = dataset['data'][articles_id]['paragraphs']
        for pid in range(len(article_paragraphs)):
            context = unicode(article_paragraphs[pid]['context']) # string
            # The following replacements are suggested in the paper
            # BidAF (Seo et al., 2016)
            context = context.replace("''", '" ')
            context = context.replace("``", '" ')
            context_tokens = tokenize(context) # list of strings (lowercase)
            context_tokens_ori = tokenizeOri(context) # list of strings (original case)
            context = context.lower()
            qas = article_paragraphs[pid]['qas'] # list of questions
            charloc2wordloc = get_char_word_loc_mapping(context, context_tokens) # charloc2wordloc maps the character location (int) of a context token to a pair giving (word (string), word loc (int)) of that token
            if charloc2wordloc is None: # there was a problem
                num_mappingprob += len(qas)
                continue # skip this context example
            # for each question, process the question and answer and write to file
            for qn in qas:
                # read the question text and tokenize
                question = unicode(qn['question']) # string
                question_tokens = tokenize(question) # list of strings
                question_tokens_ori = tokenizeOri(question) # list of strings
                # of the three answers, just take the first
                ans_text = unicode(qn['answers'][0]['text']).lower() # get the answer text
                ans_start_charloc = qn['answers'][0]['answer_start'] # answer start loc (character count)
                ans_end_charloc = ans_start_charloc + len(ans_text) # answer end loc (character count) (exclusive)
                # Check that the provided character spans match the provided answer text
                if context[ans_start_charloc:ans_end_charloc] != ans_text:
                    # Sometimes this is misaligned, mostly because "narrow builds" of Python 2 interpret certain Unicode characters to have length 2 https://stackoverflow.com/questions/29109944/python-returns-length-of-2-for-single-unicode-character-string
                    # We should upgrade to Python 3 next year!
                    num_spanalignprob += 1
                    continue
                # get word locs for answer start and end (inclusive)
                ans_start_wordloc = charloc2wordloc[ans_start_charloc][1] # answer start word loc
                ans_end_wordloc = charloc2wordloc[ans_end_charloc-1][1] # answer end word loc
                assert ans_start_wordloc <= ans_end_wordloc
                # Check retrieved answer tokens match the provided answer text.
                # Sometimes they won't match, e.g. if the context contains the phrase "fifth-generation"
                # and the answer character span is around "generation",
                # but the tokenizer regards "fifth-generation" as a single token.
                # Then ans_tokens has "fifth-generation" but the ans_text is "generation", which doesn't match.
                ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc+1]
                if "".join(ans_tokens) != "".join(ans_text.split()):
                    num_tokenprob += 1
                    continue # skip this question/answer pair
                question_tokens_set = set(question_tokens)
                question_tokens_ori_set = set(question_tokens_ori)
                feature_tokens = []
                # Per-token exact-match feature, values 0-3:
                # +1 if the lowercased token occurs in the question,
                # +2 if the original-case token occurs in the question.
                for i in range(len(context_tokens)):
                    feature_id = 0
                    if context_tokens[i] in question_tokens_set:
                        feature_id += 1
                    if context_tokens_ori[i] in question_tokens_ori_set:
                        feature_id += 2
                    feature_tokens.append(str(feature_id))
                examples.append((' '.join(context_tokens), ' '.join(question_tokens), ' '.join(ans_tokens), ' '.join([str(ans_start_wordloc), str(ans_end_wordloc)]), ' '.join(feature_tokens)))
                num_exs += 1
    print "Number of (context, question, answer) triples discarded due to char -> token mapping problems: ", num_mappingprob
    print "Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: ", num_tokenprob
    print "Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): ", num_spanalignprob
    print "Processed %i examples of total %i\n" % (num_exs, num_exs + num_mappingprob + num_tokenprob + num_spanalignprob)
    # shuffle examples
    # Python 2 only: range() returns a list here, which np.random.shuffle
    # permutes in place; under Python 3 a range object would be rejected.
    indices = range(len(examples))
    np.random.shuffle(indices)
    with open(os.path.join(out_dir, tier +'.context'), 'w') as context_file, \
         open(os.path.join(out_dir, tier +'.question'), 'w') as question_file,\
         open(os.path.join(out_dir, tier +'.answer'), 'w') as ans_text_file, \
         open(os.path.join(out_dir, tier +'.span'), 'w') as span_file,\
         open(os.path.join(out_dir, tier +'.feature'), 'w') as feature_file:
        for i in indices:
            (context, question, answer, answer_span, context_feature) = examples[i]
            # write tokenized data to file
            write_to_file(context_file, context)
            write_to_file(question_file, question)
            write_to_file(ans_text_file, answer)
            write_to_file(span_file, answer_span)
            write_to_file(feature_file, context_feature)
def main():
    """Download SQuAD v1.1 train/dev sets and write preprocessed files."""
    args = setup_args()
    print "Will download SQuAD datasets to {}".format(args.data_dir)
    print "Will put preprocessed SQuAD datasets in {}".format(args.data_dir)
    if not os.path.exists(args.data_dir):
        os.makedirs(args.data_dir)
    train_filename = "train-v1.1.json"
    dev_filename = "dev-v1.1.json"
    # download train set (expected size in bytes guards a partial download)
    maybe_download(SQUAD_BASE_URL, train_filename, args.data_dir, 30288272L)
    # read train set
    train_data = data_from_json(os.path.join(args.data_dir, train_filename))
    print "Train data has %i examples total" % total_exs(train_data)
    # preprocess train set and write to file
    preprocess_and_write(train_data, 'train', args.data_dir)
    # download dev set
    maybe_download(SQUAD_BASE_URL, dev_filename, args.data_dir, 4854279L)
    # read dev set
    dev_data = data_from_json(os.path.join(args.data_dir, dev_filename))
    print "Dev data has %i examples total" % total_exs(dev_data)
    # preprocess dev set and write to file
    preprocess_and_write(dev_data, 'dev', args.data_dir)
main()
| 41.286624 | 254 | 0.655122 |
1edfb6bd6738c049de0170f8488706a088cd7c12 | 47,150 | py | Python | telethon/client/messages.py | SlavikMIPT/Telethon | fece5660f46b1f5b8f464c162914cc51bff6550f | [
"MIT"
] | 1 | 2019-06-21T19:19:50.000Z | 2019-06-21T19:19:50.000Z | telethon/client/messages.py | SlavikMIPT/Telethon | fece5660f46b1f5b8f464c162914cc51bff6550f | [
"MIT"
] | 1 | 2020-06-30T20:56:35.000Z | 2020-06-30T20:56:35.000Z | telethon/client/messages.py | SlavikMIPT/Telethon | fece5660f46b1f5b8f464c162914cc51bff6550f | [
"MIT"
] | null | null | null | import itertools
import typing
from .. import utils, errors, hints
from ..requestiter import RequestIter
from ..tl import types, functions
_MAX_CHUNK_SIZE = 100
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
class _MessagesIter(RequestIter):
    """
    Common factor for all requests that need to iterate over messages.
    """
    async def _init(
            self, entity, offset_id, min_id, max_id,
            from_user, offset_date, add_offset, filter, search
    ):
        """
        Build ``self.request`` (one of :tl:`GetHistoryRequest`,
        :tl:`SearchRequest` or :tl:`SearchGlobalRequest`) and normalize
        the offset/limit bookkeeping used by `_load_next_chunk`.
        """
        # Note that entity being ``None`` will perform a global search.
        if entity:
            self.entity = await self.client.get_input_entity(entity)
        else:
            self.entity = None
            if self.reverse:
                raise ValueError('Cannot reverse global search')

        # Telegram doesn't like min_id/max_id. If these IDs are low enough
        # (starting from last_id - 100), the request will return nothing.
        #
        # We can emulate their behaviour locally by setting offset = max_id
        # and simply stopping once we hit a message with ID <= min_id.
        if self.reverse:
            offset_id = max(offset_id, min_id)
            if offset_id and max_id:
                if max_id - offset_id <= 1:
                    # Empty range: nothing could possibly be yielded.
                    raise StopAsyncIteration

            if not max_id:
                max_id = float('inf')
        else:
            offset_id = max(offset_id, max_id)
            if offset_id and min_id:
                if offset_id - min_id <= 1:
                    raise StopAsyncIteration

        if self.reverse:
            if offset_id:
                # Offsets are exclusive; nudge past the given ID.
                offset_id += 1
            elif not offset_date:
                # offset_id has priority over offset_date, so don't
                # set offset_id to 1 if we want to offset by date.
                offset_id = 1

        if from_user:
            from_user = await self.client.get_input_entity(from_user)
            if not isinstance(from_user, (
                    types.InputPeerUser, types.InputPeerSelf)):
                from_user = None  # Ignore from_user unless it's a user

        # self.from_id, when set, is used in _load_next_chunk to filter
        # messages locally (the server cannot always do it for us).
        if from_user:
            self.from_id = await self.client.get_peer_id(from_user)
        else:
            self.from_id = None

        if not self.entity:
            # Global search across all chats.
            self.request = functions.messages.SearchGlobalRequest(
                q=search or '',
                offset_rate=offset_date,
                offset_peer=types.InputPeerEmpty(),
                offset_id=offset_id,
                limit=1
            )
        elif search is not None or filter or from_user:
            # In-chat search (also used to filter by sender or media kind).
            if filter is None:
                filter = types.InputMessagesFilterEmpty()

            # Telegram completely ignores `from_id` in private chats
            if isinstance(
                    self.entity, (types.InputPeerUser, types.InputPeerSelf)):
                # Don't bother sending `from_user` (it's ignored anyway),
                # but keep `from_id` defined above to check it locally.
                from_user = None
            else:
                # Do send `from_user` to do the filtering server-side,
                # and set `from_id` to None to avoid checking it locally.
                self.from_id = None

            self.request = functions.messages.SearchRequest(
                peer=self.entity,
                q=search or '',
                filter=filter() if isinstance(filter, type) else filter,
                min_date=None,
                max_date=offset_date,
                offset_id=offset_id,
                add_offset=add_offset,
                limit=0,  # Search actually returns 0 items if we ask it to
                max_id=0,
                min_id=0,
                hash=0,
                from_id=from_user
            )

            # Workaround issue #1124 until a better solution is found.
            # Telegram seemingly ignores `max_date` if `filter` (and
            # nothing else) is specified, so we have to rely on doing
            # a first request to offset from the ID instead.
            #
            # Even better, using `filter` and `from_id` seems to always
            # trigger `RPC_CALL_FAIL` which is "internal issues"...
            if filter and offset_date and not search and not offset_id:
                async for m in self.client.iter_messages(
                        self.entity, 1, offset_date=offset_date):
                    self.request.offset_id = m.id + 1
        else:
            # Plain history iteration.
            self.request = functions.messages.GetHistoryRequest(
                peer=self.entity,
                limit=1,
                offset_date=offset_date,
                offset_id=offset_id,
                min_id=0,
                max_id=0,
                add_offset=add_offset,
                hash=0
            )

        if self.limit <= 0:
            # No messages, but we still need to know the total message count
            result = await self.client(self.request)
            if isinstance(result, types.messages.MessagesNotModified):
                self.total = result.count
            else:
                self.total = getattr(result, 'count', len(result.messages))
            raise StopAsyncIteration

        if self.wait_time is None:
            # Sleep between requests only for long iterations, to stay
            # clear of Telegram's flood limits.
            self.wait_time = 1 if self.limit > 3000 else 0

        # When going in reverse we need an offset of `-limit`, but we
        # also want to respect what the user passed, so add them together.
        if self.reverse:
            self.request.add_offset -= _MAX_CHUNK_SIZE

        self.add_offset = add_offset
        self.max_id = max_id
        self.min_id = min_id
        self.last_id = 0 if self.reverse else float('inf')

    async def _load_next_chunk(self):
        """
        Perform one request, append the valid messages to ``self.buffer``
        and return ``True`` once iteration must stop.
        """
        self.request.limit = min(self.left, _MAX_CHUNK_SIZE)
        if self.reverse and self.request.limit != _MAX_CHUNK_SIZE:
            # Remember that we need -limit when going in reverse
            self.request.add_offset = self.add_offset - self.request.limit

        r = await self.client(self.request)
        self.total = getattr(r, 'count', len(r.messages))

        entities = {utils.get_peer_id(x): x
                    for x in itertools.chain(r.users, r.chats)}

        messages = reversed(r.messages) if self.reverse else r.messages
        for message in messages:
            if (isinstance(message, types.MessageEmpty)
                    or self.from_id and message.from_id != self.from_id):
                continue

            if not self._message_in_range(message):
                return True

            # There has been reports that on bad connections this method
            # was returning duplicated IDs sometimes. Using ``last_id``
            # is an attempt to avoid these duplicates, since the message
            # IDs are returned in descending order (or asc if reverse).
            self.last_id = message.id
            message._finish_init(self.client, entities, self.entity)
            self.buffer.append(message)

        if len(r.messages) < self.request.limit:
            return True

        # Get the last message that's not empty (in some rare cases
        # it can happen that the last message is :tl:`MessageEmpty`)
        if self.buffer:
            self._update_offset(self.buffer[-1])
        else:
            # There are some cases where all the messages we get start
            # being empty. This can happen on migrated mega-groups if
            # the history was cleared, and we're using search. Telegram
            # acts incredibly weird sometimes. Messages are returned but
            # only "empty", not their contents. If this is the case we
            # should just give up since there won't be any new Message.
            return True

    def _message_in_range(self, message):
        """
        Determine whether the given message is in the range or
        it should be ignored (and avoid loading more chunks).
        """
        # No entity means message IDs between chats may vary
        if self.entity:
            if self.reverse:
                if message.id <= self.last_id or message.id >= self.max_id:
                    return False
            else:
                if message.id >= self.last_id or message.id <= self.min_id:
                    return False

        return True

    def _update_offset(self, last_message):
        """
        After making the request, update its offset with the last message.
        """
        self.request.offset_id = last_message.id
        if self.reverse:
            # We want to skip the one we already have
            self.request.offset_id += 1

        if isinstance(self.request, functions.messages.SearchRequest):
            # Unlike getHistory and searchGlobal that use *offset* date,
            # this is *max* date. This means that doing a search in reverse
            # will break it. Since it's not really needed once we're going
            # (only for the first request), it's safe to just clear it off.
            self.request.max_date = None
        else:
            # getHistory and searchGlobal call it offset_date
            self.request.offset_date = last_message.date
            if isinstance(self.request, functions.messages.SearchGlobalRequest):
                self.request.offset_peer = last_message.input_chat
class _IDsIter(RequestIter):
    """
    Iterates over messages fetched by their explicit IDs.

    Yields ``None`` in place of messages that are missing (deleted, or
    belonging to a chat other than the requested one) so results can be
    zipped one-to-one with the requested IDs.
    """
    async def _init(self, entity, ids):
        """Fetch every requested message eagerly into ``self.buffer``."""
        # TODO We never actually split IDs in chunks, but maybe we should
        if not utils.is_list_like(ids):
            ids = [ids]
        elif not ids:
            raise StopAsyncIteration
        elif self.reverse:
            ids = list(reversed(ids))
        # (The original code had a redundant `else: ids = ids` no-op here.)

        if entity:
            entity = await self.client.get_input_entity(entity)

        self.total = len(ids)

        from_id = None  # By default, no need to validate from_id
        if isinstance(entity, (types.InputChannel, types.InputPeerChannel)):
            # Channel messages must be fetched through the channel itself.
            try:
                r = await self.client(
                    functions.channels.GetMessagesRequest(entity, ids))
            except errors.MessageIdsEmptyError:
                # All IDs were invalid, use a dummy result
                r = types.messages.MessagesNotModified(len(ids))
        else:
            r = await self.client(functions.messages.GetMessagesRequest(ids))
            if entity:
                from_id = await self.client.get_peer_id(entity)

        if isinstance(r, types.messages.MessagesNotModified):
            self.buffer.extend(None for _ in ids)
            return

        entities = {utils.get_peer_id(x): x
                    for x in itertools.chain(r.users, r.chats)}

        # Telegram seems to return the messages in the order in which
        # we asked them for, so we don't need to check it ourselves,
        # unless some messages were invalid in which case Telegram
        # may decide to not send them at all.
        #
        # The passed message IDs may not belong to the desired entity
        # since the user can enter arbitrary numbers which can belong to
        # arbitrary chats. Validate these unless ``from_id is None``.
        for message in r.messages:
            if isinstance(message, types.MessageEmpty) or (
                    from_id and message.chat_id != from_id):
                self.buffer.append(None)
            else:
                message._finish_init(self.client, entities, entity)
                self.buffer.append(message)

    async def _load_next_chunk(self):
        # Everything was fetched in `_init`; there is never a next chunk.
        return True
class MessageMethods:
# region Public methods
# region Message retrieval
def iter_messages(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: float = None,
*,
offset_date: 'hints.DateLike' = None,
offset_id: int = 0,
max_id: int = 0,
min_id: int = 0,
add_offset: int = 0,
search: str = None,
filter: 'typing.Union[types.TypeMessagesFilter, typing.Type[types.TypeMessagesFilter]]' = None,
from_user: 'hints.EntityLike' = None,
wait_time: float = None,
ids: 'typing.Union[int, typing.Sequence[int]]' = None,
reverse: bool = False
) -> 'typing.Union[_MessagesIter, _IDsIter]':
"""
Iterator over the messages for the given chat.
The default order is from newest to oldest, but this
behaviour can be changed with the `reverse` parameter.
If either `search`, `filter` or `from_user` are provided,
:tl:`messages.Search` will be used instead of :tl:`messages.getHistory`.
.. note::
Telegram's flood wait limit for :tl:`GetHistoryRequest` seems to
be around 30 seconds per 10 requests, therefore a sleep of 1
second is the default for this limit (or above).
Arguments
entity (`entity`):
The entity from whom to retrieve the message history.
It may be ``None`` to perform a global search, or
to get messages by their ID from no particular chat.
Note that some of the offsets will not work if this
is the case.
Note that if you want to perform a global search,
you **must** set a non-empty `search` string.
limit (`int` | `None`, optional):
Number of messages to be retrieved. Due to limitations with
the API retrieving more than 3000 messages will take longer
than half a minute (or even more based on previous calls).
The limit may also be ``None``, which would eventually return
the whole history.
offset_date (`datetime`):
Offset date (messages *previous* to this date will be
retrieved). Exclusive.
offset_id (`int`):
Offset message ID (only messages *previous* to the given
ID will be retrieved). Exclusive.
max_id (`int`):
All the messages with a higher (newer) ID or equal to this will
be excluded.
min_id (`int`):
All the messages with a lower (older) ID or equal to this will
be excluded.
add_offset (`int`):
Additional message offset (all of the specified offsets +
this offset = older messages).
search (`str`):
The string to be used as a search query.
filter (:tl:`MessagesFilter` | `type`):
The filter to use when returning messages. For instance,
:tl:`InputMessagesFilterPhotos` would yield only messages
containing photos.
from_user (`entity`):
Only messages from this user will be returned.
This parameter will be ignored if it is not an user.
wait_time (`int`):
Wait time (in seconds) between different
:tl:`GetHistoryRequest`. Use this parameter to avoid hitting
the ``FloodWaitError`` as needed. If left to ``None``, it will
default to 1 second only if the limit is higher than 3000.
ids (`int`, `list`):
A single integer ID (or several IDs) for the message that
should be returned. This parameter takes precedence over
the rest (which will be ignored if this is set). This can
for instance be used to get the message with ID 123 from
a channel. Note that if the message doesn't exist, ``None``
will appear in its place, so that zipping the list of IDs
with the messages can match one-to-one.
.. note::
At the time of writing, Telegram will **not** return
:tl:`MessageEmpty` for :tl:`InputMessageReplyTo` IDs that
failed (i.e. the message is not replying to any, or is
replying to a deleted message). This means that it is
**not** possible to match messages one-by-one, so be
careful if you use non-integers in this parameter.
reverse (`bool`, optional):
If set to ``True``, the messages will be returned in reverse
order (from oldest to newest, instead of the default newest
to oldest). This also means that the meaning of `offset_id`
and `offset_date` parameters is reversed, although they will
still be exclusive. `min_id` becomes equivalent to `offset_id`
instead of being `max_id` as well since messages are returned
in ascending order.
You cannot use this if both `entity` and `ids` are ``None``.
Yields
Instances of `Message <telethon.tl.custom.message.Message>`.
Example
.. code-block:: python
# From most-recent to oldest
for message in client.iter_messages(chat):
print(message.id, message.text)
# From oldest to most-recent
for message in client.iter_messages(chat, reverse=True):
print(message.id, message.text)
# Filter by sender
for message in client.iter_messages(chat, from_user='me'):
print(message.text)
# Server-side search with fuzzy text
for message in client.iter_messages(chat, search='hello'):
print(message.id)
# Filter by message type:
from telethon.tl.types import InputMessagesFilterPhotos
for message in client.iter_messages(chat, filter=InputMessagesFilterPhotos):
print(message.photo)
"""
if ids is not None:
return _IDsIter(self, reverse=reverse, limit=limit, entity=entity, ids=ids)
return _MessagesIter(
client=self,
reverse=reverse,
wait_time=wait_time,
limit=limit,
entity=entity,
offset_id=offset_id,
min_id=min_id,
max_id=max_id,
from_user=from_user,
offset_date=offset_date,
add_offset=add_offset,
filter=filter,
search=search
)
async def get_messages(self: 'TelegramClient', *args, **kwargs) -> 'hints.TotalList':
"""
Same as `iter_messages()`, but returns a
`TotalList <telethon.helpers.TotalList>` instead.
If the `limit` is not set, it will be 1 by default unless both
`min_id` **and** `max_id` are set (as *named* arguments), in
which case the entire range will be returned.
This is so because any integer limit would be rather arbitrary and
it's common to only want to fetch one message, but if a range is
specified it makes sense that it should return the entirety of it.
If `ids` is present in the *named* arguments and is not a list,
a single `Message <telethon.tl.custom.message.Message>` will be
returned for convenience instead of a list.
Example
.. code-block:: python
# Get 0 photos and print the total to show how many photos there are
from telethon.tl.types import InputMessagesFilterPhotos
photos = client.get_messages(chat, 0, filter=InputMessagesFilterPhotos)
print(photos.total)
# Get all the photos
photos = client.get_messages(chat, None, filter=InputMessagesFilterPhotos)
# Get messages by ID:
message_1337 = client.get_messages(chats, ids=1337)
"""
if len(args) == 1 and 'limit' not in kwargs:
if 'min_id' in kwargs and 'max_id' in kwargs:
kwargs['limit'] = None
else:
kwargs['limit'] = 1
it = self.iter_messages(*args, **kwargs)
ids = kwargs.get('ids')
if ids and not utils.is_list_like(ids):
async for message in it:
return message
else:
# Iterator exhausted = empty, to handle InputMessageReplyTo
return None
return await it.collect()
# endregion
# region Message sending/editing/deleting
    async def send_message(
            self: 'TelegramClient',
            entity: 'hints.EntityLike',
            message: 'hints.MessageLike' = '',
            *,
            reply_to: 'typing.Union[int, types.Message]' = None,
            parse_mode: typing.Optional[str] = (),
            link_preview: bool = True,
            file: 'hints.FileLike' = None,
            force_document: bool = False,
            clear_draft: bool = False,
            buttons: 'hints.MarkupLike' = None,
            silent: bool = None) -> 'types.Message':
        """
        Sends a message to the specified user, chat or channel.

        The default parse mode is the same as the official applications
        (a custom flavour of markdown). ``**bold**, `code` or __italic__``
        are available. In addition you can send ``[links](https://example.com)``
        and ``[mentions](@username)`` (or using IDs like in the Bot API:
        ``[mention](tg://user?id=123456789)``) and ``pre`` blocks with three
        backticks.

        Sending a ``/start`` command with a parameter (like ``?start=data``)
        is also done through this method. Simply send ``'/start data'`` to
        the bot.

        See also `Message.respond() <telethon.tl.custom.message.Message.respond>`
        and `Message.reply() <telethon.tl.custom.message.Message.reply>`.

        Arguments
            entity (`entity`):
                To who will it be sent.

            message (`str` | `Message <telethon.tl.custom.message.Message>`):
                The message to be sent, or another message object to resend.

                The maximum length for a message is 35,000 bytes or 4,096
                characters. Longer messages will not be sliced automatically,
                and you should slice them manually if the text to send is
                longer than said length.

            reply_to (`int` | `Message <telethon.tl.custom.message.Message>`, optional):
                Whether to reply to a message or not. If an integer is provided,
                it should be the ID of the message that it should reply to.

            parse_mode (`object`, optional):
                See the `TelegramClient.parse_mode
                <telethon.client.messageparse.MessageParseMethods.parse_mode>`
                property for allowed values. Markdown parsing will be used by
                default.

            link_preview (`bool`, optional):
                Should the link preview be shown?

            file (`file`, optional):
                Sends a message with a file attached (e.g. a photo,
                video, audio or document). The ``message`` may be empty.

            force_document (`bool`, optional):
                Whether to send the given file as a document or not.

            clear_draft (`bool`, optional):
                Whether the existing draft should be cleared or not.
                Has no effect when sending a file.

            buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`):
                The matrix (list of lists), row list or button to be shown
                after sending the message. This parameter will only work if
                you have signed in as a bot. You can also pass your own
                :tl:`ReplyMarkup` here.

                All the following limits apply together:

                * There can be 100 buttons at most (any more are ignored).
                * There can be 8 buttons per row at most (more are ignored).
                * The maximum callback data per button is 64 bytes.
                * The maximum data that can be embedded in total is just
                  over 4KB, shared between inline callback data and text.

            silent (`bool`, optional):
                Whether the message should notify people in a broadcast
                channel or not. Defaults to ``False``, which means it will
                notify them. Set it to ``True`` to alter this behaviour.

        Returns
            The sent `custom.Message <telethon.tl.custom.message.Message>`.

        Example
            .. code-block:: python

                # Markdown is the default
                client.send_message('lonami', 'Thanks for the **Telethon** library!')

                # Default to another parse mode
                client.parse_mode = 'html'

                client.send_message('me', 'Some <b>bold</b> and <i>italic</i> text')
                client.send_message('me', 'An <a href="https://example.com">URL</a>')
                # code and pre tags also work, but those break the documentation :)
                client.send_message('me', '<a href="tg://user?id=me">Mentions</a>')

                # Explicit parse mode
                # No parse mode by default
                client.parse_mode = None

                # ...but here I want markdown
                client.send_message('me', 'Hello, **world**!', parse_mode='md')

                # ...and here I need HTML
                client.send_message('me', 'Hello, <i>world</i>!', parse_mode='html')

                # If you logged in as a bot account, you can send buttons
                from telethon import events, Button

                @client.on(events.CallbackQuery)
                async def callback(event):
                    await event.edit('Thank you for clicking {}!'.format(event.data))

                # Single inline button
                client.send_message(chat, 'A single button, with "clk1" as data',
                                    buttons=Button.inline('Click me', b'clk1'))

                # Matrix of inline buttons
                client.send_message(chat, 'Pick one from this grid', buttons=[
                    [Button.inline('Left'), Button.inline('Right')],
                    [Button.url('Check this site!', 'https://lonamiwebs.github.io')]
                ])

                # Reply keyboard
                client.send_message(chat, 'Welcome', buttons=[
                    Button.text('Thanks!', resize=True, single_use=True),
                    Button.request_phone('Send phone'),
                    Button.request_location('Send location')
                ])

                # Forcing replies or clearing buttons.
                client.send_message(chat, 'Reply to me', buttons=Button.force_reply())
                client.send_message(chat, 'Bye Keyboard!', buttons=Button.clear())
        """
        # A file (or media) delegates the whole call to send_file.
        if file is not None:
            return await self.send_file(
                entity, file, caption=message, reply_to=reply_to,
                parse_mode=parse_mode, force_document=force_document,
                buttons=buttons
            )
        elif not message:
            raise ValueError(
                'The message cannot be empty unless a file is provided'
            )

        entity = await self.get_input_entity(entity)
        if isinstance(message, types.Message):
            # Resending an existing Message object: inherit its markup,
            # silent flag and entities unless the caller overrides them.
            if buttons is None:
                markup = message.reply_markup
            else:
                markup = self.build_reply_markup(buttons)

            if silent is None:
                silent = message.silent

            # Non-webpage media must be re-sent through send_file.
            if (message.media and not isinstance(
                    message.media, types.MessageMediaWebPage)):
                return await self.send_file(
                    entity,
                    message.media,
                    caption=message.message,
                    silent=silent,
                    reply_to=reply_to,
                    buttons=markup,
                    entities=message.entities
                )

            request = functions.messages.SendMessageRequest(
                peer=entity,
                message=message.message or '',
                silent=silent,
                reply_to_msg_id=utils.get_message_id(reply_to),
                reply_markup=markup,
                entities=message.entities,
                clear_draft=clear_draft,
                no_webpage=not isinstance(
                    message.media, types.MessageMediaWebPage)
            )
            message = message.message
        else:
            # Plain text: parse formatting entities with the chosen mode.
            message, msg_ent = await self._parse_message_text(message,
                                                              parse_mode)
            request = functions.messages.SendMessageRequest(
                peer=entity,
                message=message,
                entities=msg_ent,
                no_webpage=not link_preview,
                reply_to_msg_id=utils.get_message_id(reply_to),
                clear_draft=clear_draft,
                silent=silent,
                reply_markup=self.build_reply_markup(buttons)
            )

        result = await self(request)
        if isinstance(result, types.UpdateShortSentMessage):
            # Short updates don't carry a full Message; synthesize one
            # from the request plus the fields the update does carry.
            message = types.Message(
                id=result.id,
                to_id=utils.get_peer(entity),
                message=message,
                date=result.date,
                out=result.out,
                media=result.media,
                entities=result.entities,
                reply_markup=request.reply_markup
            )
            message._finish_init(self, {}, entity)
            return message

        return self._get_response_message(request, result, entity)
    async def forward_messages(
            self: 'TelegramClient',
            entity: 'hints.EntityLike',
            messages: 'typing.Union[hints.MessageIDLike, typing.Sequence[hints.MessageIDLike]]',
            from_peer: 'hints.EntityLike' = None,
            *,
            silent: bool = None,
            as_album: bool = None) -> 'typing.Sequence[types.Message]':
        """
        Forwards the given messages to the specified entity.

        If you want to "forward" a message without the forward header
        (the "forwarded from" text), you should use `send_message` with
        the original message instead. This will send a copy of it.

        See also `Message.forward_to() <telethon.tl.custom.message.Message.forward_to>`.

        Arguments
            entity (`entity`):
                To which entity the message(s) will be forwarded.

            messages (`list` | `int` | `Message <telethon.tl.custom.message.Message>`):
                The message(s) to forward, or their integer IDs.

            from_peer (`entity`):
                If the given messages are integer IDs and not instances
                of the ``Message`` class, this *must* be specified in
                order for the forward to work. This parameter indicates
                the entity from which the messages should be forwarded.

            silent (`bool`, optional):
                Whether the message should notify people in a broadcast
                channel or not. Defaults to ``False``, which means it will
                notify them. Set it to ``True`` to alter this behaviour.

            as_album (`bool`, optional):
                Whether several image messages should be forwarded as an
                album (grouped) or not. The default behaviour is to treat
                albums specially and send outgoing requests with
                ``as_album=True`` only for the albums if message objects
                are used. If IDs are used it will group by default.

                In short, the default should do what you expect,
                ``True`` will group always (even converting separate
                images into albums), and ``False`` will never group.

        Returns
            The list of forwarded `Message <telethon.tl.custom.message.Message>`,
            or a single one if a list wasn't provided as input.

            Note that if all messages are invalid (i.e. deleted) the call
            will fail with ``MessageIdInvalidError``. If only some are
            invalid, the list will have ``None`` instead of those messages.

        Example
            .. code-block:: python

                # a single one
                client.forward_messages(chat, message)
                # or
                client.forward_messages(chat, message_id, from_chat)
                # or
                message.forward_to(chat)

                # multiple
                client.forward_messages(chat, messages)
                # or
                client.forward_messages(chat, message_ids, from_chat)

                # Forwarding as a copy
                client.send_message(chat, message)
        """
        single = not utils.is_list_like(messages)
        if single:
            messages = (messages,)

        entity = await self.get_input_entity(entity)

        if from_peer:
            from_peer = await self.get_input_entity(from_peer)
            from_peer_id = await self.get_peer_id(from_peer)
        else:
            from_peer_id = None

        def _get_key(m):
            # Returns (chat id, album/grouped id) for every message, so the
            # input can be partitioned into per-chat (and per-album) chunks.
            if isinstance(m, int):
                if from_peer_id is not None:
                    return from_peer_id, None

                raise ValueError('from_peer must be given if integer IDs are used')
            elif isinstance(m, types.Message):
                return m.chat_id, m.grouped_id
            else:
                raise TypeError('Cannot forward messages of type {}'.format(type(m)))

        # We want to group outgoing chunks differently if we are "smart"
        # about sending as album.
        #
        # Why? We need separate requests for ``as_album=True/False``, so
        # if we want that behaviour, when we group messages to create the
        # chunks, we need to consider the grouped ID too. But if we don't
        # care about that, we don't need to consider it for creating the
        # chunks, so we can make less requests.
        if as_album is None:
            get_key = _get_key
        else:
            def get_key(m):
                return _get_key(m)[0]  # Ignore grouped_id

        sent = []
        for chat_id, chunk in itertools.groupby(messages, key=get_key):
            chunk = list(chunk)
            if isinstance(chunk[0], int):
                # Bare IDs: the source chat must be the given from_peer.
                chat = from_peer
                grouped = True if as_album is None else as_album
            else:
                # Message objects: the source chat comes from the message.
                chat = await chunk[0].get_input_chat()
                if as_album is None:
                    grouped = any(m.grouped_id is not None for m in chunk)
                else:
                    grouped = as_album

                chunk = [m.id for m in chunk]

            req = functions.messages.ForwardMessagesRequest(
                from_peer=chat,
                id=chunk,
                to_peer=entity,
                silent=silent,
                # Trying to send a single message as grouped will cause
                # GROUPED_MEDIA_INVALID. If more than one message is forwarded
                # (even without media...), this error goes away.
                grouped=len(chunk) > 1 and grouped
            )
            result = await self(req)
            sent.extend(self._get_response_message(req, result, entity))

        return sent[0] if single else sent
    async def edit_message(
            self: 'TelegramClient',
            entity: 'typing.Union[hints.EntityLike, types.Message]',
            message: 'hints.MessageLike' = None,
            text: str = None,
            *,
            parse_mode: str = (),
            link_preview: bool = True,
            file: 'hints.FileLike' = None,
            buttons: 'hints.MarkupLike' = None) -> 'types.Message':
        """
        Edits the given message to change its text or media.

        See also `Message.edit() <telethon.tl.custom.message.Message.edit>`.

        Arguments
            entity (`entity` | `Message <telethon.tl.custom.message.Message>`):
                From which chat to edit the message. This can also be
                the message to be edited, and the entity will be inferred
                from it, so the next parameter will be assumed to be the
                message text.

                You may also pass a :tl:`InputBotInlineMessageID`,
                which is the only way to edit messages that were sent
                after the user selects an inline query result.

            message (`int` | `Message <telethon.tl.custom.message.Message>` | `str`):
                The ID of the message (or `Message
                <telethon.tl.custom.message.Message>` itself) to be edited.
                If the `entity` was a `Message
                <telethon.tl.custom.message.Message>`, then this message
                will be treated as the new text.

            text (`str`, optional):
                The new text of the message. Does nothing if the `entity`
                was a `Message <telethon.tl.custom.message.Message>`.

            parse_mode (`object`, optional):
                See the `TelegramClient.parse_mode
                <telethon.client.messageparse.MessageParseMethods.parse_mode>`
                property for allowed values. Markdown parsing will be used by
                default.

            link_preview (`bool`, optional):
                Should the link preview be shown?

            file (`str` | `bytes` | `file` | `media`, optional):
                The file object that should replace the existing media
                in the message.

            buttons (`list`, `custom.Button <telethon.tl.custom.button.Button>`, :tl:`KeyboardButton`):
                The matrix (list of lists), row list or button to be shown
                after sending the message. This parameter will only work if
                you have signed in as a bot. You can also pass your own
                :tl:`ReplyMarkup` here.

        Returns
            The edited `Message <telethon.tl.custom.message.Message>`,
            unless `entity` was a :tl:`InputBotInlineMessageID` in which
            case this method returns a boolean.

        Raises
            ``MessageAuthorRequiredError`` if you're not the author of the
            message but tried editing it anyway.

            ``MessageNotModifiedError`` if the contents of the message were
            not modified at all.

        Example
            .. code-block:: python

                message = client.send_message(chat, 'hello')

                client.edit_message(chat, message, 'hello!')
                # or
                client.edit_message(chat, message.id, 'hello!!')
                # or
                client.edit_message(message, 'hello!!!')
        """
        # The first positional arguments shift right depending on what was
        # actually given as `entity` (see the docstring above).
        if isinstance(entity, types.InputBotInlineMessageID):
            text = message
            message = entity
        elif isinstance(entity, types.Message):
            text = message  # Shift the parameters to the right
            message = entity
            entity = entity.to_id

        text, msg_entities = await self._parse_message_text(text, parse_mode)
        file_handle, media, image = await self._file_to_media(file)

        if isinstance(entity, types.InputBotInlineMessageID):
            # Inline messages have their own edit request and no peer.
            return await self(functions.messages.EditInlineBotMessageRequest(
                id=entity,
                message=text,
                no_webpage=not link_preview,
                entities=msg_entities,
                media=media,
                reply_markup=self.build_reply_markup(buttons)
            ))

        entity = await self.get_input_entity(entity)
        request = functions.messages.EditMessageRequest(
            peer=entity,
            id=utils.get_message_id(message),
            message=text,
            no_webpage=not link_preview,
            entities=msg_entities,
            media=media,
            reply_markup=self.build_reply_markup(buttons)
        )
        msg = self._get_response_message(request, await self(request), entity)
        # Remember uploaded media so re-sending the same file is cheap.
        await self._cache_media(msg, file, file_handle, image=image)
        return msg
async def delete_messages(
self: 'TelegramClient',
entity: 'hints.EntityLike',
message_ids: 'typing.Union[hints.MessageIDLike, typing.Sequence[hints.MessageIDLike]]',
*,
revoke: bool = True) -> 'typing.Sequence[types.messages.AffectedMessages]':
"""
Deletes the given messages, optionally "for everyone".
See also `Message.delete() <telethon.tl.custom.message.Message.delete>`.
.. warning::
This method does **not** validate that the message IDs belong
to the chat that you passed! It's possible for the method to
delete messages from different private chats and small group
chats at once, so make sure to pass the right IDs.
Arguments
entity (`entity`):
From who the message will be deleted. This can actually
be ``None`` for normal chats, but **must** be present
for channels and megagroups.
message_ids (`list` | `int` | `Message <telethon.tl.custom.message.Message>`):
The IDs (or ID) or messages to be deleted.
revoke (`bool`, optional):
Whether the message should be deleted for everyone or not.
By default it has the opposite behaviour of official clients,
and it will delete the message for everyone.
`Since 24 March 2019
<https://telegram.org/blog/unsend-privacy-emoji>`_, you can
also revoke messages of any age (i.e. messages sent long in
the past) the *other* person sent in private conversations
(and of course your messages too).
Disabling this has no effect on channels or megagroups,
since it will unconditionally delete the message for everyone.
Returns
A list of :tl:`AffectedMessages`, each item being the result
for the delete calls of the messages in chunks of 100 each.
Example
.. code-block:: python
client.delete_messages(chat, messages)
"""
if not utils.is_list_like(message_ids):
message_ids = (message_ids,)
message_ids = (
m.id if isinstance(m, (
types.Message, types.MessageService, types.MessageEmpty))
else int(m) for m in message_ids
)
entity = await self.get_input_entity(entity) if entity else None
if isinstance(entity, types.InputPeerChannel):
return await self([functions.channels.DeleteMessagesRequest(
entity, list(c)) for c in utils.chunks(message_ids)])
else:
return await self([functions.messages.DeleteMessagesRequest(
list(c), revoke) for c in utils.chunks(message_ids)])
# endregion
# region Miscellaneous
async def send_read_acknowledge(
self: 'TelegramClient',
entity: 'hints.EntityLike',
message: 'typing.Union[hints.MessageIDLike, typing.Sequence[hints.MessageIDLike]]' = None,
*,
max_id: int = None,
clear_mentions: bool = False) -> bool:
"""
Marks messages as read and optionally clears mentions.
This effectively marks a message as read (or more than one) in the
given conversation.
If neither message nor maximum ID are provided, all messages will be
marked as read by assuming that ``max_id = 0``.
Arguments
entity (`entity`):
The chat where these messages are located.
message (`list` | `Message <telethon.tl.custom.message.Message>`):
Either a list of messages or a single message.
max_id (`int`):
Overrides messages, until which message should the
acknowledge should be sent.
clear_mentions (`bool`):
Whether the mention badge should be cleared (so that
there are no more mentions) or not for the given entity.
If no message is provided, this will be the only action
taken.
Example
.. code-block:: python
client.send_read_acknowledge(last_message)
# or
client.send_read_acknowledge(last_message_id)
# or
client.send_read_acknowledge(messages)
"""
if max_id is None:
if not message:
max_id = 0
else:
if utils.is_list_like(message):
max_id = max(msg.id for msg in message)
else:
max_id = message.id
entity = await self.get_input_entity(entity)
if clear_mentions:
await self(functions.messages.ReadMentionsRequest(entity))
if max_id is None:
return True
if max_id is not None:
if isinstance(entity, types.InputPeerChannel):
return await self(functions.channels.ReadHistoryRequest(
utils.get_input_channel(entity), max_id=max_id))
else:
return await self(functions.messages.ReadHistoryRequest(
entity, max_id=max_id))
return False
async def pin_message(
self: 'TelegramClient',
entity: 'hints.EntityLike',
message: 'typing.Optional[hints.MessageIDLike]',
*,
notify: bool = False
):
"""
Pins or unpins a message in a chat.
The default behaviour is to *not* notify members, unlike the
official applications.
See also `Message.pin() <telethon.tl.custom.message.Message.pin>`.
Arguments
entity (`entity`):
The chat where the message should be pinned.
message (`int` | `Message <telethon.tl.custom.message.Message>`):
The message or the message ID to pin. If it's
``None``, the message will be unpinned instead.
notify (`bool`, optional):
Whether the pin should notify people or not.
Example
.. code-block:: python
# Send and pin a message to annoy everyone
message = client.send_message(chat, 'Pinotifying is fun!')
client.pin_message(chat, message, notify=True)
"""
if not message:
message = 0
entity = await self.get_input_entity(entity)
await self(functions.messages.UpdatePinnedMessageRequest(
peer=entity,
id=message,
silent=not notify
))
# endregion
# endregion
| 40.299145 | 107 | 0.567678 |
032440c9df61cf031418873079787f69f2e4cc93 | 2,545 | py | Python | tests/bcdfo_projgrad_test.py | DLR-SC/sqpdfo | ae3213764fdd8d0d0a05bcc3d13be63d811a0a37 | [
"BSD-3-Clause"
] | 10 | 2020-03-03T21:56:01.000Z | 2022-03-29T08:36:01.000Z | tests/bcdfo_projgrad_test.py | DLR-SC/sqpdfo | ae3213764fdd8d0d0a05bcc3d13be63d811a0a37 | [
"BSD-3-Clause"
] | 6 | 2020-03-03T22:02:41.000Z | 2021-11-18T12:31:00.000Z | tests/bcdfo_projgrad_test.py | DLR-SC/sqpdfo | ae3213764fdd8d0d0a05bcc3d13be63d811a0a37 | [
"BSD-3-Clause"
] | 3 | 2021-02-17T14:43:30.000Z | 2022-03-05T08:46:48.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 02 15:13:04 2014
@author: jaco_da
"""
import unittest
from sqpdfo.bcdfo_projgrad import bcdfo_projgrad_
from sqpdfo.runtime import compare_array
from numpy import array
class Test_bcdfo_projgrad(unittest.TestCase):
    """
    Tests for bcdfo_projgrad_, which computes the gradient projected onto
    the box [bl, bu] and its infinity norm.
    """

    def setUp(self):
        # Absolute and relative tolerances used by the array comparisons.
        self.abs_tol = 1e-14
        self.rel_tol = 1e-14

    def test_bcdfo_projgrad(self):
        """
        Check the projected gradient against hand-computed answers.

        Inputs: n is the dimension, x the current iterate, g the gradient
        at x, and bl/bu the lower/upper bounds.
        """
        # Test 1: bounds far away, projection leaves the gradient unchanged.
        n = 2
        x = array([[0.5, 0.5]]).T
        g = array([[-1, -1]]).T
        bl = array([[0, 0]]).T
        bu = array([[2, 2]]).T
        gnorm, gn = bcdfo_projgrad_(n, x, g, bl, bu)
        correctgn = array([[-1, -1]]).T
        correctgnorm = 1
        self.assertTrue(compare_array(correctgn, gn, self.abs_tol, self.rel_tol))
        self.assertEqual(gnorm, correctgnorm)

        # Test 2: an active upper bound clips the first component to -0.5.
        n = 2
        x = array([[0.5, 0.5]]).T
        g = array([[-1, -1]]).T
        bl = array([[0, 0]]).T
        bu = array([[1, 2]]).T
        gnorm, gn = bcdfo_projgrad_(n, x, g, bl, bu)
        correctgn = array([[-0.5, -1]]).T
        correctgnorm = 1
        self.assertTrue(compare_array(correctgn, gn, self.abs_tol, self.rel_tol))
        self.assertEqual(gnorm, correctgnorm)

        # Test 3: an active lower bound clips the second component to 0.5.
        n = 2
        x = array([[0.5, 0.5]]).T
        g = array([[-1, 1]]).T
        bl = array([[0, 0]]).T
        bu = array([[2, 2]]).T
        gnorm, gn = bcdfo_projgrad_(n, x, g, bl, bu)
        correctgn = array([[-1, 0.5]]).T
        correctgnorm = 1
        self.assertTrue(compare_array(correctgn, gn, self.abs_tol, self.rel_tol))
        self.assertEqual(gnorm, correctgnorm)
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
2f57ec96d65f6b4ff90231b106d5a9023b3d1f56 | 12,799 | py | Python | utils/delete_resources.py | bbonik/personalize-infrequent-interactions | cb3610b4cd5aa2ee94f86994c6e5ee74fb5436c8 | [
"MIT"
] | 2 | 2021-12-23T09:33:06.000Z | 2022-01-03T00:13:35.000Z | utils/delete_resources.py | bbonik/personalize-infrequent-interactions | cb3610b4cd5aa2ee94f86994c6e5ee74fb5436c8 | [
"MIT"
] | null | null | null | utils/delete_resources.py | bbonik/personalize-infrequent-interactions | cb3610b4cd5aa2ee94f86994c6e5ee74fb5436c8 | [
"MIT"
] | null | null | null |
import sys
import getopt
import logging
import botocore
import boto3
import time
from packaging import version
from time import sleep
from botocore.exceptions import ClientError
logger = logging.getLogger()
personalize = None
def _get_dataset_group_arn(dataset_group_name):
    """
    Return the ARN of the dataset group with the given name.

    Raises NameError when no dataset group with that name is visible in
    the configured region.
    """
    paginator = personalize.get_paginator('list_dataset_groups')
    for page in paginator.paginate():
        for dataset_group in page["datasetGroups"]:
            if dataset_group['name'] == dataset_group_name:
                return dataset_group['datasetGroupArn']

    raise NameError(f'Dataset Group "{dataset_group_name}" does not exist; verify region is correct')
def _get_solutions(dataset_group_arn):
    """Return the ARNs of every solution in the given dataset group."""
    arns = []
    paginator = personalize.get_paginator('list_solutions')
    for page in paginator.paginate(datasetGroupArn=dataset_group_arn):
        arns.extend(solution['solutionArn'] for solution in page['solutions'])
    return arns
def _delete_campaigns(solution_arns):
    """
    Delete the campaigns of the given solutions and wait for completion.

    Campaigns that are ACTIVE or CREATE FAILED are deleted; campaigns
    already being deleted are waited on. Raises an Exception for campaigns
    in any other state, or if the deletes have not completed in 30 minutes.
    """
    campaign_arns = []
    for solution_arn in solution_arns:
        paginator = personalize.get_paginator('list_campaigns')
        for paginate_result in paginator.paginate(solutionArn=solution_arn):
            for campaign in paginate_result['campaigns']:
                if campaign['status'] in ['ACTIVE', 'CREATE FAILED']:
                    logger.info('Deleting campaign: ' + campaign['campaignArn'])
                    personalize.delete_campaign(campaignArn=campaign['campaignArn'])
                elif campaign['status'].startswith('DELETE'):
                    logger.warning('Campaign {} is already being deleted so will wait for delete to complete'.format(campaign['campaignArn']))
                else:
                    raise Exception('Campaign {} has a status of {} so cannot be deleted'.format(campaign['campaignArn'], campaign['status']))

                campaign_arns.append(campaign['campaignArn'])

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: calling .remove() on the list being
        # iterated would silently skip the next element.
        for campaign_arn in list(campaign_arns):
            try:
                describe_response = personalize.describe_campaign(campaignArn=campaign_arn)
                logger.debug('Campaign {} status is {}'.format(campaign_arn, describe_response['campaign']['status']))
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    campaign_arns.remove(campaign_arn)

        if len(campaign_arns) == 0:
            logger.info('All campaigns have been deleted or none exist for dataset group')
            break
        else:
            logger.info('Waiting for {} campaign(s) to be deleted'.format(len(campaign_arns)))
            time.sleep(20)

    if len(campaign_arns) > 0:
        raise Exception('Timed out waiting for all campaigns to be deleted')
def _delete_solutions(solution_arns):
    """
    Delete the given solutions and wait for the deletes to complete.

    Solutions already gone (ResourceNotFoundException) are skipped; other
    ClientErrors during the initial delete are re-raised. Raises an
    Exception if the deletes have not completed in 30 minutes.
    """
    for solution_arn in solution_arns:
        try:
            describe_response = personalize.describe_solution(solutionArn=solution_arn)
            solution = describe_response['solution']
            if solution['status'] in ['ACTIVE', 'CREATE FAILED']:
                logger.info('Deleting solution: ' + solution_arn)
                personalize.delete_solution(solutionArn=solution_arn)
            elif solution['status'].startswith('DELETE'):
                logger.warning('Solution {} is already being deleted so will wait for delete to complete'.format(solution_arn))
            else:
                raise Exception('Solution {} has a status of {} so cannot be deleted'.format(solution_arn, solution['status']))
        except ClientError as e:
            # Already deleted is fine; anything else is unexpected.
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: calling .remove() on the list being
        # iterated would silently skip the next element.
        for solution_arn in list(solution_arns):
            try:
                describe_response = personalize.describe_solution(solutionArn=solution_arn)
                logger.debug('Solution {} status is {}'.format(solution_arn, describe_response['solution']['status']))
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    solution_arns.remove(solution_arn)

        if len(solution_arns) == 0:
            logger.info('All solutions have been deleted or none exist for dataset group')
            break
        else:
            logger.info('Waiting for {} solution(s) to be deleted'.format(len(solution_arns)))
            time.sleep(20)

    if len(solution_arns) > 0:
        raise Exception('Timed out waiting for all solutions to be deleted')
def _delete_event_trackers(dataset_group_arn):
    """
    Delete all event trackers in the dataset group and wait for completion.

    Raises an Exception for trackers in an undeletable state, or if the
    deletes have not completed in 30 minutes.
    """
    event_tracker_arns = []
    event_trackers_paginator = personalize.get_paginator('list_event_trackers')
    for event_tracker_page in event_trackers_paginator.paginate(datasetGroupArn=dataset_group_arn):
        for event_tracker in event_tracker_page['eventTrackers']:
            if event_tracker['status'] in ['ACTIVE', 'CREATE FAILED']:
                logger.info('Deleting event tracker {}'.format(event_tracker['eventTrackerArn']))
                personalize.delete_event_tracker(eventTrackerArn=event_tracker['eventTrackerArn'])
            elif event_tracker['status'].startswith('DELETE'):
                logger.warning('Event tracker {} is already being deleted so will wait for delete to complete'.format(event_tracker['eventTrackerArn']))
            else:
                # Fixed copy/paste: the original message said "Solution".
                raise Exception('Event tracker {} has a status of {} so cannot be deleted'.format(event_tracker['eventTrackerArn'], event_tracker['status']))

            event_tracker_arns.append(event_tracker['eventTrackerArn'])

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: calling .remove() on the list being
        # iterated would silently skip the next element.
        for event_tracker_arn in list(event_tracker_arns):
            try:
                describe_response = personalize.describe_event_tracker(eventTrackerArn=event_tracker_arn)
                logger.debug('Event tracker {} status is {}'.format(event_tracker_arn, describe_response['eventTracker']['status']))
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    event_tracker_arns.remove(event_tracker_arn)

        if len(event_tracker_arns) == 0:
            logger.info('All event trackers have been deleted or none exist for dataset group')
            break
        else:
            logger.info('Waiting for {} event tracker(s) to be deleted'.format(len(event_tracker_arns)))
            time.sleep(20)

    if len(event_tracker_arns) > 0:
        raise Exception('Timed out waiting for all event trackers to be deleted')
def _delete_filters(dataset_group_arn):
    """
    Delete all filters in the dataset group and wait for completion.

    Raises an Exception if the deletes have not completed in 30 minutes.
    """
    filter_arns = []
    filters_response = personalize.list_filters(datasetGroupArn=dataset_group_arn, maxResults=100)
    # Renamed loop variable so it no longer shadows the builtin `filter`.
    for filter_obj in filters_response['Filters']:
        logger.info('Deleting filter ' + filter_obj['filterArn'])
        personalize.delete_filter(filterArn=filter_obj['filterArn'])
        filter_arns.append(filter_obj['filterArn'])

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: calling .remove() on the list being
        # iterated would silently skip the next element.
        for filter_arn in list(filter_arns):
            try:
                describe_response = personalize.describe_filter(filterArn=filter_arn)
                logger.debug('Filter {} status is {}'.format(filter_arn, describe_response['filter']['status']))
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    filter_arns.remove(filter_arn)

        if len(filter_arns) == 0:
            logger.info('All filters have been deleted or none exist for dataset group')
            break
        else:
            logger.info('Waiting for {} filter(s) to be deleted'.format(len(filter_arns)))
            time.sleep(20)

    if len(filter_arns) > 0:
        raise Exception('Timed out waiting for all filters to be deleted')
def _delete_datasets_and_schemas(dataset_group_arn):
    """
    Delete every dataset in the dataset group, wait for the deletes to
    complete, then delete each dataset's schema.

    Schemas still in use by datasets in other dataset groups are skipped
    (logged, not raised). Raises an Exception if the dataset deletes have
    not completed in 30 minutes.
    """
    dataset_arns = []
    schema_arns = []
    dataset_paginator = personalize.get_paginator('list_datasets')
    for dataset_page in dataset_paginator.paginate(datasetGroupArn=dataset_group_arn):
        for dataset in dataset_page['datasets']:
            # Capture the schema ARN before the dataset goes away so the
            # schema can be cleaned up afterwards.
            describe_response = personalize.describe_dataset(datasetArn=dataset['datasetArn'])
            schema_arns.append(describe_response['dataset']['schemaArn'])

            if dataset['status'] in ['ACTIVE', 'CREATE FAILED']:
                logger.info('Deleting dataset ' + dataset['datasetArn'])
                personalize.delete_dataset(datasetArn=dataset['datasetArn'])
            elif dataset['status'].startswith('DELETE'):
                logger.warning('Dataset {} is already being deleted so will wait for delete to complete'.format(dataset['datasetArn']))
            else:
                raise Exception('Dataset {} has a status of {} so cannot be deleted'.format(dataset['datasetArn'], dataset['status']))

            dataset_arns.append(dataset['datasetArn'])

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        # Iterate over a snapshot: calling .remove() on the list being
        # iterated would silently skip the next element.
        for dataset_arn in list(dataset_arns):
            try:
                describe_response = personalize.describe_dataset(datasetArn=dataset_arn)
                logger.debug('Dataset {} status is {}'.format(dataset_arn, describe_response['dataset']['status']))
            except ClientError as e:
                if e.response['Error']['Code'] == 'ResourceNotFoundException':
                    dataset_arns.remove(dataset_arn)

        if len(dataset_arns) == 0:
            logger.info('All datasets have been deleted or none exist for dataset group')
            break
        else:
            logger.info('Waiting for {} dataset(s) to be deleted'.format(len(dataset_arns)))
            time.sleep(20)

    if len(dataset_arns) > 0:
        raise Exception('Timed out waiting for all datasets to be deleted')

    for schema_arn in schema_arns:
        try:
            logger.info('Deleting schema ' + schema_arn)
            personalize.delete_schema(schemaArn=schema_arn)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceInUseException':
                logger.info('Schema {} is still in-use by another dataset (likely in another dataset group)'.format(schema_arn))
            else:
                raise

    logger.info('All schemas used exclusively by datasets have been deleted or none exist for dataset group')
def _delete_dataset_group(dataset_group_arn):
    """
    Delete the dataset group itself and wait until it no longer exists.

    Bug fix: the original `break` was on the "describe succeeded" path,
    i.e. it stopped waiting while the group still existed and kept looping
    (re-logging "fully deleted") once it was gone. The loop now exits when
    the group can no longer be described (ResourceNotFoundException).
    """
    logger.info('Deleting dataset group ' + dataset_group_arn)
    personalize.delete_dataset_group(datasetGroupArn=dataset_group_arn)

    max_time = time.time() + 30 * 60  # 30 mins
    while time.time() < max_time:
        try:
            describe_response = personalize.describe_dataset_group(datasetGroupArn=dataset_group_arn)
            logger.debug('Dataset group {} status is {}'.format(dataset_group_arn, describe_response['datasetGroup']['status']))
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                logger.info('Dataset group {} has been fully deleted'.format(dataset_group_arn))
                break
            raise

        logger.info('Waiting for dataset group to be deleted')
        time.sleep(20)
def delete_dataset_groups(dataset_group_arns, region = None):
    """
    Fully tear down each given Amazon Personalize dataset group.

    For every dataset group ARN, deletes (in dependency order) its
    campaigns, solutions, event trackers, filters, datasets/schemas, and
    finally the dataset group itself. Each helper blocks until its deletes
    complete.

    :param dataset_group_arns: iterable of dataset group ARNs to delete
    :param region: AWS region name for the Personalize client; None uses
        the default resolution chain
    """
    # The helpers all talk to this module-level client.
    global personalize
    personalize = boto3.client(service_name = 'personalize', region_name = region)
    for dataset_group_arn in dataset_group_arns:
        logger.info('Dataset Group ARN: ' + dataset_group_arn)
        solution_arns = _get_solutions(dataset_group_arn)
        # 1. Delete campaigns
        _delete_campaigns(solution_arns)
        # 2. Delete solutions
        _delete_solutions(solution_arns)
        # 3. Delete event trackers
        _delete_event_trackers(dataset_group_arn)
        # 4. Delete filters
        _delete_filters(dataset_group_arn)
        # 5. Delete datasets and their schemas
        _delete_datasets_and_schemas(dataset_group_arn)
        # 6. Delete dataset group
        _delete_dataset_group(dataset_group_arn)
        logger.info(f'Dataset group {dataset_group_arn} fully deleted')
| 44.134483 | 152 | 0.649973 |
731b6926be8fefd6dca25146103cb51cc78afb29 | 10,236 | py | Python | electrum/mnemonic_bip39.py | asuka431/electrum | 144b19660451bbbdc8314ac8fccd6e79fdf3a4b1 | [
"MIT"
] | null | null | null | electrum/mnemonic_bip39.py | asuka431/electrum | 144b19660451bbbdc8314ac8fccd6e79fdf3a4b1 | [
"MIT"
] | null | null | null | electrum/mnemonic_bip39.py | asuka431/electrum | 144b19660451bbbdc8314ac8fccd6e79fdf3a4b1 | [
"MIT"
] | 1 | 2022-02-09T09:56:12.000Z | 2022-02-09T09:56:12.000Z | #
# Copyright (c) 2013 Pavol Rusnak
# Copyright (c) 2017 mruddy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import binascii
import bisect
import hashlib
import hmac
import itertools
import os
import sys
import unicodedata
PBKDF2_ROUNDS = 2048
class ConfigurationError(Exception):
    # Raised when a wordlist file does not match the expected radix.
    pass
# From <https://stackoverflow.com/questions/212358/binary-search-bisection-in-python/2233940#2233940>
def binary_search(a, x, lo=0, hi=None):  # can't use a to specify default for hi
    """Return the index of x in the sorted sequence a[lo:hi], or -1 if absent."""
    if hi is None:
        hi = len(a)
    pos = bisect.bisect_left(a, x, lo, hi)
    if pos < hi and a[pos] == x:
        return pos
    return -1
# Refactored code segments from <https://github.com/keis/base58>
def b58encode(v):
    """Base58-encode the byte sequence v (leading zero bytes are dropped)."""
    alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    # Interpret v as a big-endian integer.
    acc = 0
    place = 1
    for c in reversed(v):
        if sys.version < "3":
            c = ord(c)  # Python 2 iterates bytes as 1-char strings
        acc += place * c
        place <<= 8
    # Emit base-58 digits, least significant first, then reverse.
    digits = []
    while acc:
        acc, rem = divmod(acc, 58)
        digits.append(alphabet[rem])
    return "".join(reversed(digits))
class Mnemonic(object):
    """
    BIP-39 mnemonic handling for one wordlist language.

    Converts between raw entropy and mnemonic sentences, validates
    checksums, expands word prefixes, and derives seeds / BIP-32 master
    keys from mnemonics.
    """

    def __init__(self, language):
        """Load the 2048-word list for *language* from the wordlist directory."""
        self.language = language
        self.radix = 2048
        if sys.version < "3":
            with open("%s/%s.txt" % (self._get_directory(), language), "r") as f:
                self.wordlist = [w.strip().decode("utf8") for w in f.readlines()]
        else:
            with open(
                "%s/%s.txt" % (self._get_directory(), language), "r", encoding="utf-8"
            ) as f:
                self.wordlist = [w.strip() for w in f.readlines()]
        # Each word encodes 11 bits, so exactly radix (2048) words are required.
        if len(self.wordlist) != self.radix:
            raise ConfigurationError(
                "Wordlist should contain %d words, but it contains %d words."
                % (self.radix, len(self.wordlist))
            )

    @classmethod
    def _get_directory(cls):
        """Return the directory that holds the per-language wordlist files."""
        return os.path.join(os.path.dirname(__file__), "wordlist")

    @classmethod
    def normalize_string(cls, txt):
        """Decode *txt* to unicode if needed and NFKD-normalize it (per BIP-39)."""
        if isinstance(txt, str if sys.version < "3" else bytes):
            utxt = txt.decode("utf8")
        elif isinstance(txt, unicode if sys.version < "3" else str):  # noqa: F821
            utxt = txt
        else:
            raise TypeError("String value expected")

        return unicodedata.normalize("NFKD", utxt)

    def generate(self, strength=128):
        """Generate a random mnemonic of the given entropy *strength* in bits."""
        if strength not in [128, 160, 192, 224, 256]:
            raise ValueError(
                "Strength should be one of the following [128, 160, 192, 224, 256], but it is not (%d)."
                % strength
            )
        return self.to_mnemonic(os.urandom(strength // 8))

    # Adapted from <http://tinyurl.com/oxmn476>
    def to_entropy(self, words):
        """
        Convert a mnemonic (string or list of words) back to its entropy bytes.

        Raises LookupError for unknown words and ValueError for an invalid
        word count or a failed checksum.
        """
        if not isinstance(words, list):
            words = words.split(" ")
        if len(words) not in [12, 15, 18, 21, 24]:
            raise ValueError(
                "Number of words must be one of the following: [12, 15, 18, 21, 24], but it is not (%d)."
                % len(words)
            )
        # Look up all the words in the list and construct the
        # concatenation of the original entropy and the checksum.
        concatLenBits = len(words) * 11
        concatBits = [False] * concatLenBits
        wordindex = 0
        # The English wordlist is sorted, so a binary search is safe there;
        # other wordlists fall back to a linear index().
        if self.language == "english":
            use_binary_search = True
        else:
            use_binary_search = False
        for word in words:
            # Find the words index in the wordlist
            ndx = (
                binary_search(self.wordlist, word)
                if use_binary_search
                else self.wordlist.index(word)
            )
            if ndx < 0:
                raise LookupError('Unable to find "%s" in word list.' % word)
            # Set the next 11 bits to the value of the index.
            for ii in range(11):
                concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0
            wordindex += 1
        # BIP-39: checksum is 1 bit per 32 bits of entropy (1/33 of the total).
        checksumLengthBits = concatLenBits // 33
        entropyLengthBits = concatLenBits - checksumLengthBits
        # Extract original entropy as bytes.
        entropy = bytearray(entropyLengthBits // 8)
        for ii in range(len(entropy)):
            for jj in range(8):
                if concatBits[(ii * 8) + jj]:
                    entropy[ii] |= 1 << (7 - jj)
        # Take the digest of the entropy.
        hashBytes = hashlib.sha256(entropy).digest()
        if sys.version < "3":
            hashBits = list(
                itertools.chain.from_iterable(
                    (
                        [ord(c) & (1 << (7 - i)) != 0 for i in range(8)]
                        for c in hashBytes
                    )
                )
            )
        else:
            hashBits = list(
                itertools.chain.from_iterable(
                    ([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)
                )
            )
        # Check all the checksum bits.
        for i in range(checksumLengthBits):
            if concatBits[entropyLengthBits + i] != hashBits[i]:
                raise ValueError("Failed checksum.")
        return entropy

    def to_mnemonic(self, data):
        """Convert entropy bytes to a mnemonic sentence (BIP-39 encoding)."""
        if len(data) not in [16, 20, 24, 28, 32]:
            raise ValueError(
                "Data length should be one of the following: [16, 20, 24, 28, 32], but it is not (%d)."
                % len(data)
            )
        # Bit string = entropy bits + first (len*8/32) bits of SHA-256(entropy).
        h = hashlib.sha256(data).hexdigest()
        b = (
            bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8)
            + bin(int(h, 16))[2:].zfill(256)[: len(data) * 8 // 32]
        )
        # Every 11 bits select one word.
        result = []
        for i in range(len(b) // 11):
            idx = int(b[i * 11 : (i + 1) * 11], 2)
            result.append(self.wordlist[idx])
        if (
            self.language == "japanese"
        ):  # Japanese must be joined by ideographic space.
            result_phrase = u"\u3000".join(result)
        else:
            result_phrase = " ".join(result)
        return result_phrase

    def check(self, mnemonic):
        """Return True if *mnemonic* has a valid word count and checksum."""
        mnemonic = self.normalize_string(mnemonic).split(" ")
        # list of valid mnemonic lengths
        if len(mnemonic) not in [12, 15, 18, 21, 24]:
            return False
        try:
            idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
            b = "".join(idx)
        except ValueError:
            # A word was not found in the wordlist.
            return False
        l = len(b)  # noqa: E741
        # Split into entropy bits (32/33) and checksum bits (1/33).
        d = b[: l // 33 * 32]
        h = b[-l // 33 :]
        # rstrip("L") handles Python 2's long-literal suffix from hex().
        nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip("L").zfill(l // 33 * 8))
        nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[: l // 33]
        return h == nh

    def expand_word(self, prefix):
        """Expand *prefix* to a full word when it matches exactly one entry."""
        if prefix in self.wordlist:
            return prefix
        else:
            matches = [word for word in self.wordlist if word.startswith(prefix)]
            if len(matches) == 1:  # matched exactly one word in the wordlist
                return matches[0]
            else:
                # exact match not found.
                # this is not a validation routine, just return the input
                return prefix

    def expand(self, mnemonic):
        """Expand every word prefix in a space-separated mnemonic."""
        return " ".join(map(self.expand_word, mnemonic.split(" ")))

    @classmethod
    def to_seed(cls, mnemonic, passphrase=""):
        """Derive the 64-byte BIP-39 seed via PBKDF2-HMAC-SHA512."""
        mnemonic = cls.normalize_string(mnemonic)
        passphrase = cls.normalize_string(passphrase)
        # BIP-39: the salt is the literal string "mnemonic" + passphrase.
        passphrase = "mnemonic" + passphrase
        mnemonic = mnemonic.encode("utf-8")
        passphrase = passphrase.encode("utf-8")
        stretched = hashlib.pbkdf2_hmac("sha512", mnemonic, passphrase, PBKDF2_ROUNDS)
        return stretched[:64]

    @classmethod
    def to_hd_master_key(cls, seed):
        """Derive the base58 BIP-32 mainnet master private key (xprv) from *seed*."""
        if len(seed) != 64:
            raise ValueError("Provided seed should have length of 64")

        # Compute HMAC-SHA512 of seed
        seed = hmac.new(b"Bitcoin seed", seed, digestmod=hashlib.sha512).digest()

        # Serialization format can be found at: https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#Serialization_format
        xprv = b"\x04\x88\xad\xe4"  # Version for private mainnet
        xprv += b"\x00" * 9  # Depth, parent fingerprint, and child number
        xprv += seed[32:]  # Chain code
        xprv += b"\x00" + seed[:32]  # Master key

        # Double hash using SHA256
        hashed_xprv = hashlib.sha256(xprv).digest()
        hashed_xprv = hashlib.sha256(hashed_xprv).digest()

        # Append 4 bytes of checksum
        xprv += hashed_xprv[:4]

        # Return base58
        return b58encode(xprv)
def main():
    """
    Read hex-encoded entropy from argv (or stdin when absent) and print the
    corresponding English mnemonic.
    """
    # The module-level `binascii` and `sys` imports suffice; the original
    # redundant function-local re-imports have been removed.
    if len(sys.argv) > 1:
        data = sys.argv[1]
    else:
        data = sys.stdin.readline().strip()
    data = binascii.unhexlify(data)
    m = Mnemonic("english")
    print(m.to_mnemonic(data))
if __name__ == "__main__":
main()
| 37.632353 | 132 | 0.560571 |
9599d177188925a22ba25af153f92ede3843c8d0 | 6,615 | py | Python | licenses/management/commands/publish.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | licenses/management/commands/publish.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | licenses/management/commands/publish.py | brylie/cc-licenses | 9367a268ef5d136fab194b09685f69e72564ddef | [
"MIT"
] | null | null | null | # Standard library
import os
from argparse import ArgumentParser
from shutil import rmtree
# Third-party
import git
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import BaseCommand, CommandError
from django.urls import reverse
# First-party/Local
from licenses.git_utils import commit_and_push_changes, setup_local_branch
from licenses.models import LegalCode, TranslationBranch
from licenses.utils import relative_symlink, save_url_as_static_file
def list_open_translation_branches():
    """
    Return the branch names of all incomplete (open) translation branches.
    """
    open_branches = TranslationBranch.objects.filter(complete=False)
    return list(open_branches.values_list("branch_name", flat=True))
class Command(BaseCommand):
    """
    Command to push the static files in the build directory to a specified
    branch in the cc-licenses-data repository.

    Arguments:
        branch_name - Branch name in cc-licenses-data to pull translations
            from and publish artifacts to.
        list_branches - A list of active branches in cc-licenses-data will
            be displayed.

    If no arguments are supplied all cc-licenses-data branches are checked
    and then updated.
    """

    def add_arguments(self, parser: ArgumentParser):
        """Register the command-line options for this management command."""
        parser.add_argument(
            "-b",
            "--branch_name",
            help="Translation branch name to pull translations from and push"
            " artifacts to. Use --list_branches to see available branch names."
            " With no option, all active branches are published.",
        )
        parser.add_argument(
            "-l",
            "--list_branches",
            action="store_true",
            help="A list of active translation branches will be displayed.",
        )
        parser.add_argument(
            "--nopush",
            action="store_true",
            help="Update the local branches, but don't push upstream.",
        )
        parser.add_argument(
            "--nogit",
            action="store_true",
            help="Update the local files without any attempt to manage them in"
            " git (implies --nopush)",
        )

    def _quiet(self, *args, **kwargs):
        # Intentional no-op; NOTE(review): not referenced in this file's
        # visible code — presumably used to silence an output callback.
        pass

    def run_django_distill(self):
        """Outputs static files into the output dir."""
        if not os.path.isdir(settings.STATIC_ROOT):
            e = "Static source directory does not exist, run collectstatic"
            raise CommandError(e)
        output_dir = self.output_dir
        # Start from a clean output directory every run.
        if os.path.isdir(output_dir):
            rmtree(output_dir)
        os.makedirs(output_dir)

        # Status pages: the overview plus one page per open translation branch.
        self.stdout.write(f"\n{self.output_dir}")
        save_url_as_static_file(output_dir, "/status/", "status/index.html")
        tbranches = TranslationBranch.objects.filter(complete=False)
        for tbranch_id in tbranches.values_list("id", flat=True):
            save_url_as_static_file(
                output_dir,
                f"/status/{tbranch_id}/",
                f"status/{tbranch_id}.html",
            )

        # Deed and legalcode pages (plus their symlink aliases) per license.
        legalcodes = LegalCode.objects.validgroups()
        for group in legalcodes.keys():
            self.stdout.write(f"\n{self.output_dir}")
            for legalcode in legalcodes[group]:
                # deed
                filepath, symlinks = legalcode.get_file_and_links("deed")
                save_url_as_static_file(
                    output_dir,
                    legalcode.deed_url,
                    filepath,
                )
                for symlink in symlinks:
                    relative_symlink(output_dir, filepath, symlink)
                # legalcode
                filepath, symlinks = legalcode.get_file_and_links("legalcode")
                save_url_as_static_file(
                    output_dir,
                    legalcode.license_url,
                    filepath,
                )
                for symlink in symlinks:
                    relative_symlink(output_dir, filepath, symlink)

        # Machine-readable metadata for all licenses.
        self.stdout.write(f"\n{self.output_dir}")
        save_url_as_static_file(
            output_dir, reverse("metadata"), "licenses/metadata.yaml"
        )

    def publish_branch(self, branch: str):
        """Workflow for publishing a single branch"""
        self.stdout.write(f"Publishing branch {branch}")
        with git.Repo(settings.TRANSLATION_REPOSITORY_DIRECTORY) as repo:
            setup_local_branch(repo, branch)
            self.run_django_distill()
            if repo.is_dirty(untracked_files=True):
                # Add any changes and new files
                commit_and_push_changes(
                    repo,
                    "Updated built HTML files",
                    self.relpath,
                    push=self.push,
                )
                # The commit should have left a clean working tree.
                if repo.is_dirty(untracked_files=True):
                    raise Exception(
                        "Something went wrong, the repo is still dirty"
                    )
            else:
                self.stdout.write(f"\n{branch} build dir is up to date.\n")

    def publish_all(self):
        """Workflow for checking branches and updating their build dir"""
        branch_list = list_open_translation_branches()
        self.stdout.write(
            f"\n\nChecking and updating build dirs for {len(branch_list)}"
            " translation branches\n\n"
        )
        for b in branch_list:
            self.publish_branch(b)

    def handle(self, *args, **options):
        """Validate configuration, then dispatch to the requested workflow."""
        self.options = options
        self.output_dir = os.path.abspath(settings.DISTILL_DIR)
        git_dir = os.path.abspath(settings.TRANSLATION_REPOSITORY_DIRECTORY)
        # The build output must live inside the data repo so it can be
        # committed and pushed from there.
        if not self.output_dir.startswith(git_dir):
            raise ImproperlyConfigured(
                f"In Django settings, DISTILL_DIR must be inside "
                f"TRANSLATION_REPOSITORY_DIRECTORY, "
                f"but DISTILL_DIR={self.output_dir} is outside "
                f"TRANSLATION_REPOSITORY_DIRECTORY={git_dir}."
            )
        self.relpath = os.path.relpath(self.output_dir, git_dir)
        self.push = not options["nopush"]

        if options.get("list_branches"):
            branches = list_open_translation_branches()
            self.stdout.write("\n\nWhich branch are we publishing to?\n")
            for b in branches:
                self.stdout.write(b)
        elif options.get("nogit"):
            self.run_django_distill()
        elif options.get("branch_name"):
            self.publish_branch(options["branch_name"])
        else:
            self.publish_all()
| 36.75 | 79 | 0.595767 |
c3ad0e3068b35644c9dc66887242271c0e3b9183 | 647 | py | Python | tests/unit/via/resources_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/via/resources_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/via/resources_test.py | mattdricker/via-1 | 5f4919518337540d183bc079abbd3ea502a0e281 | [
"BSD-2-Clause"
] | null | null | null | import pytest
from pyramid.httpexceptions import HTTPBadRequest
from via.resources import URLResource
class TestURLResource:
    def test_it_returns_url(self, pyramid_request):
        """A well-formed `url` query parameter is returned verbatim."""
        expected_url = "http://example.com"
        pyramid_request.params["url"] = expected_url

        resource = URLResource(pyramid_request)

        assert resource.url() == expected_url

    @pytest.mark.parametrize("params", ({}, {"urk": "foo"}, {"url": ""}))
    def test_it_raises_HTTPBadRequest_for_bad_urls(self, params, pyramid_request):
        """A missing, misnamed, or empty `url` parameter raises HTTPBadRequest."""
        pyramid_request.params = params

        resource = URLResource(pyramid_request)

        with pytest.raises(HTTPBadRequest):
            resource.url()
3b1ca119f0c54f256c452c89146f5d3cd73da5c0 | 2,538 | py | Python | ax/taskapp/celery.py | Lierian/axion | cc91bb601f5c9047f961ca19b7beec5e5c6546e7 | [
"MIT"
] | null | null | null | ax/taskapp/celery.py | Lierian/axion | cc91bb601f5c9047f961ca19b7beec5e5c6546e7 | [
"MIT"
] | null | null | null | ax/taskapp/celery.py | Lierian/axion | cc91bb601f5c9047f961ca19b7beec5e5c6546e7 | [
"MIT"
] | null | null | null |
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
app = Celery('ax')
class CeleryConfig(AppConfig):
name = 'ax.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
# Since raven is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
# @formatter:on
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
if hasattr(settings, 'OPBEAT'):
# Since opbeat is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
from opbeat.contrib.django.models import client as opbeat_client
from opbeat.contrib.django.models import logger as opbeat_logger
from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
from opbeat.contrib.celery import register_signal as opbeat_register_signal
# @formatter:on
try:
opbeat_register_signal(opbeat_client)
except Exception as e:
opbeat_logger.exception('Failed installing celery hook: %s' % e)
if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
opbeat_register_handlers()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
| 37.880597 | 99 | 0.680851 |
29d3f1a7960aada4e82773067c7a38c62c61b0f5 | 5,903 | py | Python | modules/storage/PrefabRestic.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | modules/storage/PrefabRestic.py | threefoldtech/jumpscale_prefab9 | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | 31 | 2018-07-31T15:40:07.000Z | 2019-02-20T11:07:15.000Z | modules/storage/PrefabRestic.py | threefoldtech/jumpscale_prefab | 75cb6267618d9087d4a9a7eaad121a14e497f07d | [
"Apache-2.0"
] | null | null | null | from jumpscale import j
app = j.tools.prefab._getBaseAppClass()
class PrefabRestic(app):
NAME = 'restic'
def _init(self):
self.BUILDDIR = self.core.replace("$BUILDDIR/restic")
self.DOWNLOAD_DEST = '{}/linux_amd64.bz2'.format(self.BUILDDIR)
self.FILE_NAME = '{}/linux_amd64'.format(self.BUILDDIR)
@property
def CODEDIR(self):
return "{}/src/github.com/restic/restic".format(self.prefab.runtimes.golang.GOPATH)
def reset(self):
"""
helper method to clean what this module generates.
"""
super().reset()
self.core.dir_remove(self.BUILDDIR)
self.core.dir_remove(self.CODEDIR)
def quick_install(self, install=True, reset=False):
if reset is False and (self.isInstalled() or self.doneGet('quick_install')):
return
if not self.prefab.core.file_exists(self.DOWNLOAD_DEST):
self.prefab.core.file_download('https://github.com/restic/restic/releases/download/v0.9.0/restic_0.9.0_linux_amd64.bz2', self.DOWNLOAD_DEST)
self.prefab.core.file_expand(self.DOWNLOAD_DEST)
self.prefab.core.run('chmod +x {}'.format(self.FILE_NAME))
self.doneSet("quick_install")
if install:
self.install(source=self.FILE_NAME)
def build(self, install=True, reset=False):
if reset is False and (self.isInstalled() or self.doneGet('build')):
return
if reset:
self.reset()
self.prefab.runtimes.golang.install()
# build
url = "https://github.com/restic/restic/"
self.prefab.tools.git.pullRepo(url, dest=self.CODEDIR, ssh=False, depth=1)
build_cmd = 'cd {dir}; go run build.go -k -v'.format(dir=self.CODEDIR)
self.prefab.core.run(build_cmd, profile=True)
self.doneSet("build")
if install:
self.install()
def install(self, source=None, reset=False):
"""
download, install, move files to appropriate places, and create relavent configs
"""
if self.doneGet("install") and not reset:
return
if source:
self.prefab.core.file_copy(self.FILE_NAME, '$BINDIR/restic' )
else:
self.prefab.core.file_copy(self.CODEDIR + '/restic', '$BINDIR')
self.doneSet("install")
def getRepository(self, path, password, repo_env=None):
"""
@param repo_env (dict) sets needed environemnt params to create/use repo
@return ResticRepository object. If the repo doesn't exist yet, it will
be created and initialized
"""
return ResticRepository(path, password, self.prefab, repo_env)
class ResticRepository:
"""This class represent a restic repository used for backup"""
def __init__(self, path, password, prefab, repo_env=None):
self.path = path
self.__password = password
self.repo_env = repo_env
self.prefab = prefab
if not self._exists():
self.initRepository()
def _exists(self):
rc, _, _ = self._run('$BINDIR/restic snapshots > /dev/null', die=False)
if rc > 0:
return False
return True
def _run(self, cmd, env=None, die=True, showout=True):
env_vars = {
'RESTIC_REPOSITORY': self.path,
'RESTIC_PASSWORD': self.__password
}
if self.repo_env:
env_vars.update(self.repo_env)
if env:
env_vars.update(env)
return self.prefab.core.run(cmd=cmd, env=env_vars, die=die, showout=showout)
def initRepository(self):
"""
initialize the repository at self.path location
"""
cmd = '$BINDIR/restic init'
self._run(cmd)
def snapshot(self, path, tag=None):
"""
@param path: directory/file to snapshot
@param tag: tag to add to the snapshot
"""
cmd = '$BINDIR/restic backup {} '.format(path)
if tag:
cmd += " --tag {}".format(tag)
self._run(cmd)
def restore_snapshot(self, snapshot_id, dest):
"""
@param snapshot_id: id of the snapshot to restore
@param dest: path where to restore the snapshot to
"""
cmd = '$BINDIR/restic restore --target {dest} {id} '.format(dest=dest, id=snapshot_id)
self._run(cmd)
def list_snapshots(self):
"""
@return: list of dict representing a snapshot
{ 'date': '2017-01-17 16:15:28',
'directory': '/optvar/cfg',
'host': 'myhost',
'id': 'ec853b5d',
'tags': 'backup1'
}
"""
cmd = '$BINDIR/restic snapshots'
_, out, _ = self._run(cmd, showout=False)
snapshots = []
for line in out.splitlines()[2:-2]:
ss = list(self._chunk(line))
snapshot = {
'id': ss[0],
'date': ' '.join(ss[1:3]),
'host': ss[3]
}
if len(ss) == 6:
snapshot['tags'] = ss[4]
snapshot['directory'] = ss[5]
else:
snapshot['tags'] = ''
snapshot['directory'] = ss[4]
snapshots.append(snapshot)
return snapshots
def check_repo_integrity(self):
"""
@return: True if integrity is ok else False
"""
cmd = '$BINDIR/restic check'
rc, _, _ = self._run(cmd)
if rc != 0:
return False
return True
def _chunk(self, line):
"""
passe line and yield each word separated by space
"""
word = ''
for c in line:
if c == ' ':
if word:
yield word
word = ''
continue
else:
word += c
if word:
yield word
| 30.117347 | 152 | 0.556158 |
d79b76623c321145bceb17bca87f518f545df7ff | 10,805 | py | Python | PPBottleApp.py | govtmirror/PriceHistoryAPI | acb9e28c37b59bddfa5c6f92411c37e95dac8347 | [
"CC0-1.0"
] | 5 | 2016-06-09T23:25:01.000Z | 2021-12-09T22:15:33.000Z | PPBottleApp.py | govtmirror/PriceHistoryAPI | acb9e28c37b59bddfa5c6f92411c37e95dac8347 | [
"CC0-1.0"
] | null | null | null | PPBottleApp.py | govtmirror/PriceHistoryAPI | acb9e28c37b59bddfa5c6f92411c37e95dac8347 | [
"CC0-1.0"
] | 9 | 2016-06-09T23:25:02.000Z | 2021-08-19T21:35:23.000Z | # This should actually be renamed so that it is not confused
# with the file of the same name in PricesPaidGUI
from bottle import Bottle, run, template,request,TEMPLATE_PATH,static_file
from bottle import response
import urllib
import urlparse
import json
import os
import PriceHistoryAuth.LogActivity
import sys
from StandardCSVReader import loadFromCSVString,getDictionaryFromStandard
from SearchApi import searchApiSolr,getP3ids
from SolrLodr import loadSolr
from ppApiConfig import PathToDataFiles,URLToSolr,LIMIT_NUM_MATCHING_TRANSACTIONS,\
CAS_SERVER,CAS_PROXY,CAS_RETURN_SERVICE_URL,CAS_LEVEL_OF_ASSURANCE,CAS_CREATE_SESSION_IF_AUTHENTICATED,CAS_LEVEL_OF_ASSURANCE_PREDICATE
app = Bottle()
import PriceHistoryAuth.auth
import PriceHistoryAuth.pycas
import logging
logger = logging.getLogger('PricesPaidTrans')
hdlr = logging.FileHandler('../logs/PricesPaidTrans.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.ERROR)
P3APISALT = None
def convertPSCToLegalPattern(str):
if (str is None) or (str == 'None') or (str == ''):
return '*';
else:
return str;
# this needs to be improved, but basically,
# we don't want them to do a blank search.
# it could be prevented at the GUI layer as well.
def convertSearchStringToLegalPattern(str):
if (str is None) or (str == 'None') or (str == ''):
return 'nothing at all, choose something';
else:
return str;
def processSearchRequest(user,password,search_string,
psc_pattern,clientData,numRows = LIMIT_NUM_MATCHING_TRANSACTIONS):
global P3APISALT
if (P3APISALT is None):
P3APISALT=os.environ.get("P3APISALT")
if (not PriceHistoryAuth.auth.does_authenticate(user,password,P3APISALT)):
dict = {0: {"status": "BadAuthentication"}}
logger.error('Bad Authentication Request '+ repr(user))
return dict;
search_string = convertSearchStringToLegalPattern(search_string);
psc_pattern = convertPSCToLegalPattern(psc_pattern);
if (numRows is None):
numRows = LIMIT_NUM_MATCHING_TRANSACTIONS;
result = searchApiSolr(URLToSolr,PathToDataFiles,search_string,psc_pattern,numRows)
if clientData:
result["clientdata"] = clientData
return result
def processSearchRequestSession(session,acsrf,search_string,
psc_pattern,clientData,numRows = LIMIT_NUM_MATCHING_TRANSACTIONS):
global P3APISALT
PriceHistoryAuth.LogActivity.logDebugInfo("processSearchRequestSession fired:"+repr(session))
if (P3APISALT is None):
P3APISALT=os.environ.get("P3APISALT")
# Here we need to use session and acsrf
if (not PriceHistoryAuth.auth.is_valid_acsrf(session,acsrf)):
PriceHistoryAuth.LogActivity.logDebugInfo("not valid:"+repr(session)+repr(acsrf))
dict = {0: {"status": "BadAuthentication"}}
logger.error('Bad Authentication Request '+ repr(session))
return dict;
search_string = convertSearchStringToLegalPattern(search_string);
psc_pattern = convertPSCToLegalPattern(psc_pattern);
if (numRows is None):
numRows = LIMIT_NUM_MATCHING_TRANSACTIONS;
result = searchApiSolr(URLToSolr,PathToDataFiles,search_string,psc_pattern,numRows)
if clientData:
result["clientdata"] = clientData
return result;
@app.route('/hello',method='GET')
def trivtest():
return "true"
# The problem is this is using Pythonism (the unicode string)
# when it shouldn't. I need to look into Bottle and
# understand do whatever it does to convert unicode strings
# to javascript strings...
def jsonp(request, dictionary):
if (request.query.callback):
return "%s(%s)" % (request.query.callback, json.dumps(dictionary))
return dictionary
@app.route('/',method='GET')
def apisolr():
user = request.query.get('p3username')
password = request.query.get('p3password')
clientData = request.query.get('clientdata')
numRows = request.query.get('numRows')
search_string = request.query.get('search_string')
psc_pattern = request.query.get('psc_pattern')
if (request.query.callback):
response.content_type = "application/javascript"
return jsonp(request,
processSearchRequest(user,password,
search_string,psc_pattern,clientData,numRows))
return processSearchRequest(user,password,
search_string,psc_pattern,clientData,numRows)
@app.route('/session',method='GET')
def apisolr():
session = request.query.get('p3session_id')
acsrf = request.query.get('p3acsrf')
numRows = request.query.get('numRows')
clientData = request.query.get('clientdata')
search_string = request.query.get('search_string')
psc_pattern = request.query.get('psc_pattern')
if (request.query.callback):
response.content_type = "application/javascript"
return jsonp(request,
processSearchRequestSession(session,acsrf,
search_string,psc_pattern,clientData,numRows))
return processSearchRequestSession(session,acsrf,
search_string,psc_pattern,clientData,numRows)
@app.route('/',method='POST')
def apisolr():
user = request.forms.get('username')
password = request.forms.get('password')
clientData = request.forms.get('clientdata')
search_string = request.forms.get('search_string')
psc_pattern = request.forms.get('psc_pattern')
max_results = request.forms.get('numRows')
logger.error('Normal post called '+ repr(user))
return processSearchRequest(user,password,search_string,psc_pattern,clientData,max_results)
def processFromIds(user,password,p3ids,numRows = LIMIT_NUM_MATCHING_TRANSACTIONS):
global P3APISALT
if (P3APISALT is None):
P3APISALT=os.environ.get("P3APISALT")
if (not PriceHistoryAuth.auth.does_authenticate(user,password,P3APISALT)):
dict = {0: {"status": "BadAuthentication"}}
logger.error('Bad Authentication Request '+ repr(user))
return dict;
return getP3ids(URLToSolr,PathToDataFiles,p3ids,numRows)
@app.route('/fromIds',method='POST')
def fromIds():
user = request.forms.get('username')
password = request.forms.get('password')
p3ids = request.forms.get('p3ids')
logger.error('fromIds post called '+ repr(user))
return processFromIds(user,password,p3ids)
@app.route('/AddCSVFile',method='POST')
def addCSVFile():
user = request.forms.get('username')
password = request.forms.get('password')
global P3APISALT
if (P3APISALT is None):
P3APISALT=os.environ.get("P3APISALT")
if (not PriceHistoryAuth.auth.does_authenticate(user,password,P3APISALT)):
dict = {0: {"status": "BadAuthentication"}}
logger.error('Bad Authentication Request '+ repr(user))
return dict;
csv_file = request.forms.get('csv_file')
filename = "SAMPLEUPLOAD"
trans = loadFromCSVString(csv_file,getDictionaryFromStandard,filename)
try:
loadSolr(filename,trans)
except:
return "Probably failed: "+repr(sys.exc_info()[0])
return "Might have added "+repr(len(trans))+" rows."
# This is a count to keep things straight
requestNumber = 0
# map
mapRequestToReturnURL = {}
@app.route('/ReturnSessionViaMax/<requestId:int>')
def returnSessionViaMax(requestId):
global mapRequestToReturnURL
PriceHistoryAuth.LogActivity.logDebugInfo("return ID:"+repr(requestId))
PriceHistoryAuth.LogActivity.logPageTurn("nosession","ReturnMaxLoginPage")
ticket = request.query['ticket']
PriceHistoryAuth.LogActivity.logDebugInfo("MAX AUTHENTICATED ticket :"+ticket)
amendedReturnURL = CAS_CREATE_SESSION_IF_AUTHENTICATED+"/"+repr(requestId)
status, id, cookie = PriceHistoryAuth.pycas.check_authenticated_p(CAS_LEVEL_OF_ASSURANCE_PREDICATE,ticket,CAS_SERVER,
amendedReturnURL, lifetime=None, secure=1, protocol=2, path="/", opt="")
maxAuthenticatedProperly = (status == PriceHistoryAuth.pycas.CAS_OK);
PriceHistoryAuth.LogActivity.logDebugInfo("MAX AUTHENTICATED WITH ID:"+id)
PriceHistoryAuth.LogActivity.logDebugInfo("ReturnSessionViaMax authenticated :"+repr(maxAuthenticatedProperly))
if (maxAuthenticatedProperly):
sendTokensBackTo = mapRequestToReturnURL[requestId]
response.status = 303
domain,path = urlparse.urlparse(CAS_RETURN_SERVICE_URL)[1:3]
secure=1
setCookieCommand = PriceHistoryAuth.pycas.make_pycas_cookie("gateway",domain,path,secure)
strip = setCookieCommand[12:]
# We will set this cookie to make it easier for the user
# to avoid multiple logins---but technically, this is not
# what is being used and the user, who is probably using the API,
# will want to ignore this.
response.set_header('Set-Cookie', strip)
ses_id = PriceHistoryAuth.auth.create_session_id()
acsrf = PriceHistoryAuth.auth.get_acsrf(ses_id)
response.add_header('Location',sendTokensBackTo+"?p3session_id="+ses_id+"&p3acsrf="+acsrf)
return "You will be redirected."+strip+sendTokensBackTo
else:
PriceHistoryAuth.LogActivity.logBadCredentials("Failed to Authenticate with Max")
return template('Login',message='Improper Credentials.',
footer_html=FOOTER_HTML,
extra_login_methods=EXTRA_LOGIN_METHODS,
goog_anal_script=GoogleAnalyticsInclusionScript)
@app.route('/GetTokensViaMax')
def getTokensViaMax():
PriceHistoryAuth.LogActivity.logPageTurn("nosession","GetTokensViaMax")
global requestNumber
global mapRequestToReturnURL
sendTokensBackTo = request.query['redirectbackto']
response.status = 303
domain,path = urlparse.urlparse(CAS_RETURN_SERVICE_URL)[1:3]
secure=1
setCookieCommand = PriceHistoryAuth.pycas.make_pycas_cookie("gateway",domain,path,secure)
strip = setCookieCommand[12:]
response.set_header('Set-Cookie', strip)
opt=""
# There is a danger that we might have multiple requested
# get crossed here because we are treating this "statelessly".
# Since we need to make sure that we go back to the proper
# redirect, we add the request number to the URL
amendedReturnURL = CAS_CREATE_SESSION_IF_AUTHENTICATED+"/"+repr(requestNumber)
mapRequestToReturnURL[requestNumber] = sendTokensBackTo
requestNumber = requestNumber + 1
location = PriceHistoryAuth.pycas.get_url_redirect_as_string(CAS_SERVER,amendedReturnURL,opt,secure)
response.set_header('Location',location)
return "You will be redirected."+strip+location
| 39.870849 | 139 | 0.717631 |
e2e1353a4431c254c4e1e43e38fab732cf0291af | 2,389 | py | Python | BotConfigurations.py | Peterjjones98/AMDStockCheck | 752def28d42533a34db7cb6a1951c31e34534677 | [
"BSD-2-Clause"
] | 1 | 2021-04-22T10:28:20.000Z | 2021-04-22T10:28:20.000Z | BotConfigurations.py | Peterjjones98/AMDStockCheck | 752def28d42533a34db7cb6a1951c31e34534677 | [
"BSD-2-Clause"
] | 2 | 2021-05-10T16:32:28.000Z | 2021-05-11T17:15:24.000Z | BotConfigurations.py | Peterjjones98/AMDStockCheck | 752def28d42533a34db7cb6a1951c31e34534677 | [
"BSD-2-Clause"
] | 1 | 2021-04-22T20:12:35.000Z | 2021-04-22T20:12:35.000Z |
###-------------START HERE--------------------###
"""
1. download Python : https://www.python.org/ftp/python/3.9.4/python-3.9.4-amd64.exe
make sure when you're installing it you select 'Add to .PATH' in the installation Wizzard.
2. Run CMD and enter these commands one after the other:
pip install -U discord.py
pip install requests
pip install lxml
pip install bs4
3. Create a discord channel you want your bot to hangout in.
4. Create an account at https://discord.com/developers
5. Create a New Application (Bot)
6. Once you've selected your Application go to: OAuth2 > Scopes **Select bot** > Bot permissions **Select Administrator**
7. Under Scopes you should see a new link.
Going to that link will take you to discord where you can select which channel you authorize your bot to opperate in.
8. After these steps yor bot is ready to go. Now fill out the intializers below.
"""
#Initializers------>>
#---------------------TIME DELAY------------------------------------
#This is the time (in seconds) the script will wait before sending an HTTP request to AMD's servers.
#Less time means the bot will be faster but AMD might flag your IP.
REFRESHDELAY = .5
#-------------------------TOKEN-------------------------------------
#This is the Authorization Token of your DiscordBot. This is found under your Discord Developer Portal > Select your Bot > Bot > Token
TOKEN = ""
#-------------------------GUILD--------------------------------------
#This is the ID of the discord channel you want the bot to message when it finds an item in stock.
#You can find this by going into the discord channel and right clicking the channel-name in the top left of the screen.
#There will be an option to 'Copy ID'
GUILD = ""
#-------------------------FILE LOCATION------------------------------
#this is the path of your links.txt file with the AMD links.
#Leave this default if the file is in the same dir as the script.
LINKFILE = "links.txt"
#------------------------OPTIONAL------------------------------------
#This will run the script as normal but it will print off the stock of each item.
#run this to make sure the bot is gathering information correctly and to see custom error messages.
DEBUG = True
| 38.532258 | 135 | 0.593554 |
0ebacd64593fe312fc45876cb9ca20469fdda15e | 656 | py | Python | main.py | viargentum/lucid-bot | 3c7279221193e09455b93a642507c08c140d3600 | [
"MIT"
] | 3 | 2021-05-12T02:18:55.000Z | 2021-07-27T23:44:03.000Z | main.py | viargentum/lucid-bot | 3c7279221193e09455b93a642507c08c140d3600 | [
"MIT"
] | 2 | 2021-03-23T18:01:52.000Z | 2021-03-23T23:58:51.000Z | main.py | viargentum/lucid-bot | 3c7279221193e09455b93a642507c08c140d3600 | [
"MIT"
] | 2 | 2021-03-23T14:40:30.000Z | 2021-03-23T18:04:34.000Z | #!/usr/bin/env python
import discord
from discord.ext import commands
from lucid_bot.config import config
from lucid_bot.utils import Utils
from lucid_bot.extension_config import extensions
intents: discord.Intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(
command_prefix=config["prefix"],
case_insensitive=False,
intents=intents,
)
bot.remove_command("help")
for extension in extensions:
optional_params = []
current_extension = extensions[extension]
time = Utils.time()
print(f"{time}Loading {extension.capitalize()}....")
bot.load_extension(current_extension)
bot.run(config["token"])
| 22.62069 | 56 | 0.75 |
673154d4419b0a613964a846ad47e73753a3846c | 5,957 | py | Python | tensorflow_datasets/text/imdb.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:27.000Z | 2021-05-10T10:41:27.000Z | tensorflow_datasets/text/imdb.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/text/imdb.py | sourcery-ai-bot/datasets | b623ab0abf3f03bacf6a7ba22c8d37bf76a4db28 | [
"Apache-2.0"
] | 1 | 2021-07-04T11:07:35.000Z | 2021-07-04T11:07:35.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IMDB movie reviews dataset."""
import os
import re
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
Large Movie Review Dataset.
This is a dataset for binary sentiment classification containing substantially \
more data than previous benchmark datasets. We provide a set of 25,000 highly \
polar movie reviews for training, and 25,000 for testing. There is additional \
unlabeled data for use as well.\
"""
_CITATION = """\
@InProceedings{maas-EtAl:2011:ACL-HLT2011,
author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
title = {Learning Word Vectors for Sentiment Analysis},
booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
month = {June},
year = {2011},
address = {Portland, Oregon, USA},
publisher = {Association for Computational Linguistics},
pages = {142--150},
url = {http://www.aclweb.org/anthology/P11-1015}
}
"""
_DOWNLOAD_URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
class IMDBReviewsConfig(tfds.core.BuilderConfig):
"""BuilderConfig for IMDBReviews."""
def __init__(self, *, text_encoder_config=None, **kwargs):
"""BuilderConfig for IMDBReviews.
Args:
text_encoder_config: `tfds.deprecated.text.TextEncoderConfig`,
configuration for the `tfds.deprecated.text.TextEncoder` used for the
IMDB `"text"` feature.
**kwargs: keyword arguments forwarded to super.
"""
super(IMDBReviewsConfig, self).__init__(
version=tfds.core.Version("1.0.0"),
release_notes={
"1.0.0": "New split API (https://tensorflow.org/datasets/splits)",
},
**kwargs)
self.text_encoder_config = (
text_encoder_config or tfds.deprecated.text.TextEncoderConfig())
class IMDBReviews(tfds.core.GeneratorBasedBuilder):
"""IMDB movie reviews dataset."""
BUILDER_CONFIGS = [
IMDBReviewsConfig(
name="plain_text",
description="Plain text",
),
IMDBReviewsConfig(
name="bytes",
description=("Uses byte-level text encoding with "
"`tfds.deprecated.text.ByteTextEncoder`"),
text_encoder_config=tfds.deprecated.text.TextEncoderConfig(
encoder=tfds.deprecated.text.ByteTextEncoder()),
),
IMDBReviewsConfig(
name="subwords8k",
description=("Uses `tfds.deprecated.text.SubwordTextEncoder` with 8k "
"vocab size"),
text_encoder_config=tfds.deprecated.text.TextEncoderConfig(
encoder_cls=tfds.deprecated.text.SubwordTextEncoder,
vocab_size=2**13),
),
IMDBReviewsConfig(
name="subwords32k",
description=("Uses `tfds.deprecated.text.SubwordTextEncoder` with "
"32k vocab size"),
text_encoder_config=tfds.deprecated.text.TextEncoderConfig(
encoder_cls=tfds.deprecated.text.SubwordTextEncoder,
vocab_size=2**15),
),
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"text": tfds.features.Text(
encoder_config=self.builder_config.text_encoder_config),
"label": tfds.features.ClassLabel(names=["neg", "pos"]),
}),
supervised_keys=("text", "label"),
homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
citation=_CITATION,
)
def _vocab_text_gen(self, archive):
for _, ex in self._generate_examples(
archive, os.path.join("aclImdb", "train")):
yield ex["text"]
def _split_generators(self, dl_manager):
arch_path = dl_manager.download(_DOWNLOAD_URL)
archive = lambda: dl_manager.iter_archive(arch_path)
# Generate vocabulary from training data if SubwordTextEncoder configured
self.info.features["text"].maybe_build_from_corpus(
self._vocab_text_gen(archive()))
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={"archive": archive(),
"directory": os.path.join("aclImdb", "train")}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={"archive": archive(),
"directory": os.path.join("aclImdb", "test")}),
tfds.core.SplitGenerator(
name=tfds.Split("unsupervised"),
gen_kwargs={"archive": archive(),
"directory": os.path.join("aclImdb", "train"),
"labeled": False}),
]
def _generate_examples(self, archive, directory, labeled=True):
"""Generate IMDB examples."""
# For labeled examples, extract the label from the path.
reg_path = "(?P<label>neg|pos)" if labeled else "unsup"
reg = re.compile(
os.path.join("^%s" % directory, reg_path, "").replace("\\", "\\\\"))
for path, imdb_f in archive:
res = reg.match(path)
if not res:
continue
text = imdb_f.read().strip()
label = res.groupdict()["label"] if labeled else -1
yield path, {
"text": text,
"label": label,
}
| 37 | 138 | 0.644284 |
6bf9a6ffd6bb5d6c7ef0575a18764f9056435dcf | 2,655 | py | Python | content/test/faceswap/plugins/train/model/dfl_h128.py | gwdgithubnom/gjgr | 581957a296b13a4231ea1e67ec62083b7da445bf | [
"MIT"
] | 3 | 2019-08-08T03:27:26.000Z | 2020-08-17T13:11:24.000Z | content/test/faceswap/plugins/train/model/dfl_h128.py | gwdgithubnom/gjgr | 581957a296b13a4231ea1e67ec62083b7da445bf | [
"MIT"
] | 6 | 2020-03-04T23:21:03.000Z | 2020-07-23T07:46:40.000Z | content/test/faceswap/plugins/train/model/dfl_h128.py | gwdgithubnom/gjgr | 581957a296b13a4231ea1e67ec62083b7da445bf | [
"MIT"
] | 2 | 2019-09-26T08:52:22.000Z | 2020-03-27T00:33:00.000Z | #!/usr/bin/env python3
""" DeepFakesLab H128 Model
Based on https://github.com/iperov/DeepFaceLab
"""
from keras.layers import Dense, Flatten, Input, Reshape
from keras.models import Model as KerasModel
from .original import logger, Model as OriginalModel
class Model(OriginalModel):
""" Low Memory version of Original Faceswap Model """
def __init__(self, *args, **kwargs):
logger.debug("Initializing %s: (args: %s, kwargs: %s",
self.__class__.__name__, args, kwargs)
self.configfile = kwargs.get("configfile", None)
kwargs["input_shape"] = (128, 128, 3)
kwargs["encoder_dim"] = 256 if self.config["lowmem"] else 512
super().__init__(*args, **kwargs)
logger.debug("Initialized %s", self.__class__.__name__)
def encoder(self):
""" DFL H128 Encoder """
input_ = Input(shape=self.input_shape)
var_x = input_
var_x = self.blocks.conv(var_x, 128)
var_x = self.blocks.conv(var_x, 256)
var_x = self.blocks.conv(var_x, 512)
var_x = self.blocks.conv(var_x, 1024)
var_x = Dense(self.encoder_dim)(Flatten()(var_x))
var_x = Dense(8 * 8 * self.encoder_dim)(var_x)
var_x = Reshape((8, 8, self.encoder_dim))(var_x)
var_x = self.blocks.upscale(var_x, self.encoder_dim)
return KerasModel(input_, var_x)
def decoder(self):
""" DFL H128 Decoder """
input_ = Input(shape=(16, 16, self.encoder_dim))
# Face
var_x = input_
var_x = self.blocks.upscale(var_x, self.encoder_dim)
var_x = self.blocks.upscale(var_x, self.encoder_dim // 2)
var_x = self.blocks.upscale(var_x, self.encoder_dim // 4)
var_x = self.blocks.conv2d(var_x, 3,
kernel_size=5,
padding="same",
activation="sigmoid",
name="face_out")
outputs = [var_x]
# Mask
if self.config.get("mask_type", None):
var_y = input_
var_y = self.blocks.upscale(var_y, self.encoder_dim)
var_y = self.blocks.upscale(var_y, self.encoder_dim // 2)
var_y = self.blocks.upscale(var_y, self.encoder_dim // 4)
var_y = self.blocks.conv2d(var_y, 1,
kernel_size=5,
padding="same",
activation="sigmoid",
name="mask_out")
outputs.append(var_y)
return KerasModel(input_, outputs=outputs)
| 40.227273 | 69 | 0.557815 |
04277ef993b4b526bfbdd93c65c68d58fb954546 | 21,646 | py | Python | venv/lib/python3.8/site-packages/statsmodels/graphics/tests/test_gofplots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | venv/lib/python3.8/site-packages/statsmodels/graphics/tests/test_gofplots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | venv/lib/python3.8/site-packages/statsmodels/graphics/tests/test_gofplots.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z | import numpy as np
import numpy.testing as nptest
from numpy.testing import assert_equal
import pytest
from scipy import stats
import statsmodels.api as sm
from statsmodels.graphics import gofplots
from statsmodels.graphics.gofplots import (
ProbPlot,
qqline,
qqplot,
qqplot_2samples,
)
from statsmodels.graphics.utils import _import_mpl
class BaseProbplotMixin:
    """Shared checks for the ProbPlot test classes below.

    Subclasses' ``setup`` must assign ``self.prbplt`` (a ProbPlot instance)
    and ``self.line`` (a qqline style string or None) before calling
    ``super().setup()``.
    """
    def setup(self):
        try:
            import matplotlib.pyplot as plt
            self.fig, self.ax = plt.subplots()
        except ImportError:
            # matplotlib is optional; the plot tests below are gated
            # behind the pytest.mark.matplotlib marker.
            pass
        # a second, independent sample for the two-sample comparisons
        self.other_array = np.random.normal(size=self.prbplt.data.shape)
        self.other_prbplot = ProbPlot(self.other_array)
        # keyword arguments forwarded to the matplotlib plot calls
        self.plot_options = dict(
            marker="d",
            markerfacecolor="cornflowerblue",
            markeredgecolor="white",
            alpha=0.5,
        )
    # --- smoke tests of the three plot kinds on a single sample ---
    @pytest.mark.matplotlib
    def test_qqplot(self, close_figures):
        self.prbplt.qqplot(ax=self.ax, line=self.line, **self.plot_options)
    @pytest.mark.matplotlib
    def test_ppplot(self, close_figures):
        self.prbplt.ppplot(ax=self.ax, line=self.line)
    @pytest.mark.matplotlib
    def test_probplot(self, close_figures):
        self.prbplt.probplot(ax=self.ax, line=self.line, **self.plot_options)
    @pytest.mark.matplotlib
    def test_probplot_exceed(self, close_figures):
        self.prbplt.probplot(
            ax=self.ax, exceed=True, line=self.line, **self.plot_options
        )
    # --- two-sample variants with a raw ndarray as ``other`` ---
    @pytest.mark.matplotlib
    def test_qqplot_other_array(self, close_figures):
        self.prbplt.qqplot(
            ax=self.ax,
            line=self.line,
            other=self.other_array,
            **self.plot_options,
        )
    @pytest.mark.matplotlib
    def test_ppplot_other_array(self, close_figures):
        self.prbplt.ppplot(
            ax=self.ax,
            line=self.line,
            other=self.other_array,
            **self.plot_options,
        )
    # probplot has no two-sample mode, hence the strict xfail
    @pytest.mark.xfail(strict=True)
    @pytest.mark.matplotlib
    def test_probplot_other_array(self, close_figures):
        self.prbplt.probplot(
            ax=self.ax,
            line=self.line,
            other=self.other_array,
            **self.plot_options,
        )
    # --- two-sample variants with another ProbPlot as ``other`` ---
    @pytest.mark.matplotlib
    def test_qqplot_other_prbplt(self, close_figures):
        self.prbplt.qqplot(
            ax=self.ax,
            line=self.line,
            other=self.other_prbplot,
            **self.plot_options,
        )
    @pytest.mark.matplotlib
    def test_ppplot_other_prbplt(self, close_figures):
        self.prbplt.ppplot(
            ax=self.ax,
            line=self.line,
            other=self.other_prbplot,
            **self.plot_options,
        )
    @pytest.mark.xfail(strict=True)
    @pytest.mark.matplotlib
    def test_probplot_other_prbplt(self, close_figures):
        self.prbplt.probplot(
            ax=self.ax,
            line=self.line,
            other=self.other_prbplot,
            **self.plot_options,
        )
    # --- custom axis-label overrides ---
    @pytest.mark.matplotlib
    def test_qqplot_custom_labels(self, close_figures):
        self.prbplt.qqplot(
            ax=self.ax,
            line=self.line,
            xlabel="Custom X-Label",
            ylabel="Custom Y-Label",
            **self.plot_options,
        )
    @pytest.mark.matplotlib
    def test_ppplot_custom_labels(self, close_figures):
        self.prbplt.ppplot(
            ax=self.ax,
            line=self.line,
            xlabel="Custom X-Label",
            ylabel="Custom Y-Label",
            **self.plot_options,
        )
    @pytest.mark.matplotlib
    def test_probplot_custom_labels(self, close_figures):
        self.prbplt.probplot(
            ax=self.ax,
            line=self.line,
            xlabel="Custom X-Label",
            ylabel="Custom Y-Label",
            **self.plot_options,
        )
    # --- matplotlib kwargs passed inline instead of via plot_options ---
    @pytest.mark.matplotlib
    def test_qqplot_pltkwargs(self, close_figures):
        self.prbplt.qqplot(
            ax=self.ax,
            line=self.line,
            marker="d",
            markerfacecolor="cornflowerblue",
            markeredgecolor="white",
            alpha=0.5,
        )
    @pytest.mark.matplotlib
    def test_ppplot_pltkwargs(self, close_figures):
        self.prbplt.ppplot(
            ax=self.ax,
            line=self.line,
            marker="d",
            markerfacecolor="cornflowerblue",
            markeredgecolor="white",
            alpha=0.5,
        )
    @pytest.mark.matplotlib
    def test_probplot_pltkwargs(self, close_figures):
        self.prbplt.probplot(
            ax=self.ax,
            line=self.line,
            marker="d",
            markerfacecolor="cornflowerblue",
            markeredgecolor="white",
            alpha=0.5,
        )
    def test_fit_params(self):
        # fit_params always ends with (..., loc, scale)
        assert self.prbplt.fit_params[-2] == self.prbplt.loc
        assert self.prbplt.fit_params[-1] == self.prbplt.scale
class TestProbPlotLongelyNoFit(BaseProbplotMixin):
    """Longley OLS residuals against a t(4) distribution, fit disabled."""
    def setup(self):
        np.random.seed(5)
        dataset = sm.datasets.longley.load()
        dataset.exog = sm.add_constant(dataset.exog, prepend=False)
        self.data = dataset
        self.mod_fit = sm.OLS(dataset.endog, dataset.exog).fit()
        residuals = self.mod_fit.resid
        self.prbplt = ProbPlot(residuals, dist=stats.t, distargs=(4,), fit=False)
        self.line = "r"
        super().setup()
class TestProbPlotLongelyWithFit(BaseProbplotMixin):
    """Longley OLS residuals against a t(4) distribution, fit enabled."""
    def setup(self):
        np.random.seed(5)
        dataset = sm.datasets.longley.load()
        dataset.exog = sm.add_constant(dataset.exog, prepend=False)
        self.data = dataset
        self.mod_fit = sm.OLS(dataset.endog, dataset.exog).fit()
        residuals = self.mod_fit.resid
        self.prbplt = ProbPlot(residuals, dist=stats.t, distargs=(4,), fit=True)
        self.line = "r"
        super().setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
    """Random normal sample with default ProbPlot options (no fit, no line)."""
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = ProbPlot(self.data)
        self.line = None
        # Zero-argument super(), consistent with the other subclasses in
        # this module (the old Py2-style super(Class, self) form was the
        # odd one out).
        super().setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
    """Random normal sample with fit=True and the 'q' reference line."""
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = ProbPlot(self.data, fit=True)
        self.line = "q"
        # Zero-argument super(), consistent with the other subclasses in
        # this module (the old Py2-style super(Class, self) form was the
        # odd one out).
        super().setup()
class TestProbPlotRandomNormalFullDist(BaseProbplotMixin):
    """Random normal sample checked against a fully specified (frozen) dist."""
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = ProbPlot(self.data, dist=stats.norm(loc=8.5, scale=3.0))
        self.line = "45"
        super().setup()
    def test_loc_set(self):
        # loc is taken from the frozen distribution
        assert self.prbplt.loc == 8.5
    def test_scale_set(self):
        # scale is taken from the frozen distribution
        assert self.prbplt.scale == 3.0
    def test_exceptions(self):
        # A frozen distribution cannot be combined with fit, distargs,
        # loc or scale -- each combination must raise ValueError.
        for bad_kwargs in (
            {"fit": True},
            {"distargs": (8.5, 3.0)},
            {"loc": 8.5},
            {"scale": 3.0},
        ):
            with pytest.raises(ValueError):
                ProbPlot(
                    self.data, dist=stats.norm(loc=8.5, scale=3.0), **bad_kwargs
                )
class TestCompareSamplesDifferentSize:
    """qqplot/ppplot comparisons between samples of unequal length."""
    def setup(self):
        np.random.seed(5)
        small = np.random.normal(loc=8.25, scale=3.25, size=37)
        large = np.random.normal(loc=8.25, scale=3.25, size=55)
        self.data1 = ProbPlot(small)
        self.data2 = ProbPlot(large)
    @pytest.mark.matplotlib
    def test_qqplot(self, close_figures):
        # qq interpolation only works from the smaller onto the larger
        # sample; the reverse direction must raise.
        self.data1.qqplot(other=self.data2)
        with pytest.raises(ValueError):
            self.data2.qqplot(other=self.data1)
    @pytest.mark.matplotlib
    def test_ppplot(self, close_figures):
        # ppplot is size-agnostic, so both directions succeed
        self.data1.ppplot(other=self.data2)
        self.data2.ppplot(other=self.data1)
class TestProbPlotRandomNormalLocScaleDist(BaseProbplotMixin):
    """Random normal sample with explicit loc/scale passed to ProbPlot."""
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = ProbPlot(self.data, loc=8, scale=3)
        self.line = "45"
        # Zero-argument super(), consistent with the other subclasses in
        # this module (the old Py2-style super(Class, self) form was the
        # odd one out).
        super().setup()
    def test_loc_set(self):
        assert self.prbplt.loc == 8
    def test_scale_set(self):
        assert self.prbplt.scale == 3
    def test_loc_set_in_dist(self):
        # loc/scale must be forwarded into the underlying distribution
        assert self.prbplt.dist.mean() == 8.0
    def test_scale_set_in_dist(self):
        # variance == scale ** 2
        assert self.prbplt.dist.var() == 9.0
class TestTopLevel:
    """Tests for the module-level qqplot / qqplot_2samples helpers."""
    def setup(self):
        dataset = sm.datasets.longley.load()
        dataset.exog = sm.add_constant(dataset.exog, prepend=False)
        self.data = dataset
        self.mod_fit = sm.OLS(dataset.endog, dataset.exog).fit()
        self.res = self.mod_fit.resid
        self.prbplt = ProbPlot(self.mod_fit.resid, dist=stats.t, distargs=(4,))
        self.other_array = np.random.normal(size=self.prbplt.data.shape)
        self.other_prbplot = ProbPlot(self.other_array)
    @pytest.mark.matplotlib
    def test_qqplot(self, close_figures):
        qqplot(self.res, line="r")
    @pytest.mark.matplotlib
    def test_qqplot_pltkwargs(self, close_figures):
        # plot kwargs must be forwarded to matplotlib without error
        style = dict(
            marker="d",
            markerfacecolor="cornflowerblue",
            markeredgecolor="white",
            alpha=0.5,
        )
        qqplot(self.res, line="r", **style)
    @pytest.mark.matplotlib
    def test_qqplot_2samples_prob_plot_objects(self, close_figures):
        # exercise every supported reference-line style with ProbPlot inputs
        for line in ("r", "q", "45", "s"):
            qqplot_2samples(self.prbplt, self.other_prbplot, line=line)
    @pytest.mark.matplotlib
    def test_qqplot_2samples_arrays(self, close_figures):
        # same as above, but with raw ndarray inputs
        for line in ("r", "q", "45", "s"):
            qqplot_2samples(self.res, self.other_array, line=line)
def test_invalid_dist_config(close_figures):
    # Regression test for GH 4226: distargs the distribution cannot
    # interpret must raise a TypeError naming the bad call signature.
    np.random.seed(5)
    dataset = sm.datasets.longley.load()
    dataset.exog = sm.add_constant(dataset.exog, prepend=False)
    residuals = sm.OLS(dataset.endog, dataset.exog).fit().resid
    with pytest.raises(TypeError, match=r"dist\(0, 1, 4, loc=0, scale=1\)"):
        ProbPlot(residuals, stats.t, distargs=(0, 1, 4))
@pytest.mark.matplotlib
def test_qqplot_unequal():
    # qqplot_2samples must be symmetric in its two (unequal-length) inputs:
    # swapping the arguments yields the same plotted data and artist count.
    rs = np.random.RandomState(0)
    sample_a = rs.standard_normal(100)
    sample_b = rs.standard_normal(200)
    fig_ab = qqplot_2samples(sample_a, sample_b)
    fig_ba = qqplot_2samples(sample_b, sample_a)
    ax_ab = fig_ab.get_axes()[0]
    ax_ba = fig_ba.get_axes()[0]
    x1, y1 = ax_ab.get_children()[0].get_data()
    x2, y2 = ax_ba.get_children()[0].get_data()
    np.testing.assert_allclose(x1, x2)
    np.testing.assert_allclose(y1, y2)
    assert len(ax_ab.get_children()) == len(ax_ba.get_children())
# NOTE(review): the three tests below appear to be accidental module-level
# copies of the TestTopLevel methods above.  As plain functions they declare
# a ``self`` parameter, which pytest treats as a fixture name; no such
# fixture exists, so these can never run as written.  They look like dead
# code left over from a refactor and are candidates for deletion -- TODO
# confirm against the upstream statsmodels test file before removing.
@pytest.mark.matplotlib
def test_qqplot(self, close_figures):
    qqplot(self.res, line="r")
@pytest.mark.matplotlib
def test_qqplot_2samples_prob_plot_obj(self, close_figures):
    # also tests all values for line
    for line in ["r", "q", "45", "s"]:
        # test with `ProbPlot` instances
        qqplot_2samples(self.prbplt, self.other_prbplot, line=line)
@pytest.mark.matplotlib
def test_qqplot_2samples_arrays(self, close_figures):
    # also tests all values for line
    for line in ["r", "q", "45", "s"]:
        # test with arrays
        qqplot_2samples(self.res, self.other_array, line=line)
class TestCheckDist:
    """gofplots._check_for must accept objects exposing the named method
    and raise AttributeError for objects that do not."""
    def test_good(self):
        # scipy's norm exposes both ppf and cdf
        for required in ("ppf", "cdf"):
            gofplots._check_for(stats.norm, required)
    def test_bad(self):
        # a plain string has neither method
        for required in ("ppf", "cdf"):
            with pytest.raises(AttributeError):
                gofplots._check_for("junk", required)
class TestDoPlot:
    """Tests for gofplots._do_plot, the plotting helper shared by all the
    ProbPlot plot methods."""
    def setup(self):
        try:
            import matplotlib.pyplot as plt
            self.fig, self.ax = plt.subplots()
        except ImportError:
            # matplotlib is optional; tests are gated by the marker
            pass
        self.x = [0.2, 0.6, 2.0, 4.5, 10.0, 50.0, 83.0, 99.1, 99.7]
        self.y = [1.2, 1.4, 1.7, 2.1, 3.2, 3.7, 4.5, 5.1, 6.3]
        # kwargs forwarded to ax.plot (step=False)
        self.full_options = {
            "marker": "s",
            "markerfacecolor": "cornflowerblue",
            "markeredgecolor": "firebrick",
            "markeredgewidth": 1.25,
            "linestyle": "--",
        }
        # kwargs forwarded to ax.step (step=True)
        self.step_options = {"linestyle": "-", "where": "mid"}
    @pytest.mark.matplotlib
    def test_baseline(self, close_figures):
        plt = _import_mpl()
        # with no ax given, _do_plot must create a fresh figure/axes pair
        fig, ax = gofplots._do_plot(self.x, self.y)
        assert isinstance(fig, plt.Figure)
        assert isinstance(ax, plt.Axes)
        assert self.fig is not fig
        assert self.ax is not ax
    @pytest.mark.matplotlib
    def test_with_ax(self, close_figures):
        plt = _import_mpl()
        # with an ax given, the same figure/axes must come back
        fig, ax = gofplots._do_plot(self.x, self.y, ax=self.ax)
        assert isinstance(fig, plt.Figure)
        assert isinstance(ax, plt.Axes)
        assert self.fig is fig
        assert self.ax is ax
    @pytest.mark.matplotlib
    def test_plot_full_options(self, close_figures):
        gofplots._do_plot(
            self.x,
            self.y,
            ax=self.ax,
            step=False,
            **self.full_options,
        )
    @pytest.mark.matplotlib
    def test_step_baseline(self, close_figures):
        gofplots._do_plot(
            self.x,
            self.y,
            ax=self.ax,
            step=True,
            **self.step_options,
        )
    @pytest.mark.matplotlib
    def test_step_full_options(self, close_figures):
        gofplots._do_plot(
            self.x,
            self.y,
            ax=self.ax,
            step=True,
            **self.full_options,
        )
    @pytest.mark.matplotlib
    def test_plot_qq_line(self, close_figures):
        gofplots._do_plot(self.x, self.y, ax=self.ax, line="r")
    @pytest.mark.matplotlib
    def test_step_qq_line(self, close_figures):
        gofplots._do_plot(self.x, self.y, ax=self.ax, step=True, line="r")
class TestQQLine:
    """Tests for the qqline reference-line helper across its four styles:
    '45' (identity), 'r' (regression), 's' (standardized), 'q' (quartiles).
    """
    def setup(self):
        np.random.seed(0)
        self.x = np.sort(np.random.normal(loc=2.9, scale=1.2, size=37))
        self.y = np.sort(np.random.normal(loc=3.0, scale=1.1, size=37))
        try:
            import matplotlib.pyplot as plt
            self.fig, self.ax = plt.subplots()
            self.ax.plot(self.x, self.y, "ko")
        except ImportError:
            # matplotlib is optional; tests are gated by the marker
            pass
        # explicit Line2D options used by the *_lineoptions tests
        self.lineoptions = {
            "linewidth": 2,
            "dashes": (10, 1, 3, 4),
            "color": "green",
        }
        self.fmt = "bo-"
    @pytest.mark.matplotlib
    def test_badline(self):
        # unknown line styles must raise
        with pytest.raises(ValueError):
            qqline(self.ax, "junk")
    # non-'45' styles need both x and y; every missing combination raises
    @pytest.mark.matplotlib
    def test_non45_no_x(self, close_figures):
        with pytest.raises(ValueError):
            qqline(self.ax, "s", y=self.y)
    @pytest.mark.matplotlib
    def test_non45_no_y(self, close_figures):
        with pytest.raises(ValueError):
            qqline(self.ax, "s", x=self.x)
    @pytest.mark.matplotlib
    def test_non45_no_x_no_y(self, close_figures):
        with pytest.raises(ValueError):
            qqline(self.ax, "s")
    @pytest.mark.matplotlib
    def test_45(self, close_figures):
        # drawing a line must add at least one artist to the axes
        nchildren = len(self.ax.get_children())
        qqline(self.ax, "45")
        assert len(self.ax.get_children()) > nchildren
    @pytest.mark.matplotlib
    def test_45_fmt(self, close_figures):
        qqline(self.ax, "45", fmt=self.fmt)
    @pytest.mark.matplotlib
    def test_45_fmt_lineoptions(self, close_figures):
        qqline(self.ax, "45", fmt=self.fmt, **self.lineoptions)
    @pytest.mark.matplotlib
    def test_r(self, close_figures):
        nchildren = len(self.ax.get_children())
        qqline(self.ax, "r", x=self.x, y=self.y)
        assert len(self.ax.get_children()) > nchildren
    @pytest.mark.matplotlib
    def test_r_fmt(self, close_figures):
        qqline(self.ax, "r", x=self.x, y=self.y, fmt=self.fmt)
    @pytest.mark.matplotlib
    def test_r_fmt_lineoptions(self, close_figures):
        qqline(
            self.ax, "r", x=self.x, y=self.y, fmt=self.fmt, **self.lineoptions
        )
    @pytest.mark.matplotlib
    def test_s(self, close_figures):
        nchildren = len(self.ax.get_children())
        qqline(self.ax, "s", x=self.x, y=self.y)
        assert len(self.ax.get_children()) > nchildren
    @pytest.mark.matplotlib
    def test_s_fmt(self, close_figures):
        qqline(self.ax, "s", x=self.x, y=self.y, fmt=self.fmt)
    @pytest.mark.matplotlib
    def test_s_fmt_lineoptions(self, close_figures):
        qqline(
            self.ax, "s", x=self.x, y=self.y, fmt=self.fmt, **self.lineoptions
        )
    @pytest.mark.matplotlib
    def test_q(self, close_figures):
        # the 'q' style additionally needs a theoretical distribution
        nchildren = len(self.ax.get_children())
        qqline(self.ax, "q", dist=stats.norm, x=self.x, y=self.y)
        assert len(self.ax.get_children()) > nchildren
    @pytest.mark.matplotlib
    def test_q_fmt(self, close_figures):
        qqline(self.ax, "q", dist=stats.norm, x=self.x, y=self.y, fmt=self.fmt)
    @pytest.mark.matplotlib
    def test_q_fmt_lineoptions(self, close_figures):
        qqline(
            self.ax,
            "q",
            dist=stats.norm,
            x=self.x,
            y=self.y,
            fmt=self.fmt,
            **self.lineoptions,
        )
class TestPlottingPosition:
    """Cross-check gofplots.plotting_pos against scipy's
    mstats.plotting_positions for the classic (alpha, beta) schemes."""
    def setup(self):
        self.N = 13
        self.data = np.arange(self.N)
    def do_test(self, alpha, beta):
        # statsmodels names the parameters (a, b); scipy (alpha, beta)
        ours = gofplots.plotting_pos(self.N, a=alpha, b=beta)
        theirs = stats.mstats.plotting_positions(
            self.data, alpha=alpha, beta=beta
        )
        nptest.assert_array_almost_equal(ours, theirs, decimal=5)
    @pytest.mark.matplotlib
    def test_weibull(self, close_figures):
        self.do_test(0, 0)
    @pytest.mark.matplotlib
    def test_lininterp(self, close_figures):
        self.do_test(0, 1)
    @pytest.mark.matplotlib
    def test_piecewise(self, close_figures):
        self.do_test(0.5, 0.5)
    @pytest.mark.matplotlib
    def test_approx_med_unbiased(self, close_figures):
        self.do_test(1.0 / 3.0, 1.0 / 3.0)
    @pytest.mark.matplotlib
    def test_cunnane(self, close_figures):
        self.do_test(0.4, 0.4)
def test_param_unpacking():
    # fit_params must always unpack to (shape..., loc, scale), no matter
    # whether the frozen beta received its arguments positionally or by
    # keyword.  Each expected vector is checked against every equivalent
    # spelling of the same frozen distribution.
    sample = np.empty(100)
    cases = [
        (
            np.array([2.0, 3, 0, 1]),
            [stats.beta(2, 3), stats.beta(2, b=3), stats.beta(a=2, b=3)],
        ),
        (
            np.array([2.0, 3, 4, 1]),
            [stats.beta(2, 3, 4), stats.beta(a=2, b=3, loc=4)],
        ),
        (
            np.array([2.0, 3, 4, 5]),
            [
                stats.beta(2, 3, 4, 5),
                stats.beta(2, 3, 4, scale=5),
                stats.beta(2, 3, loc=4, scale=5),
                stats.beta(2, b=3, loc=4, scale=5),
                stats.beta(a=2, b=3, loc=4, scale=5),
            ],
        ),
    ]
    for expected, frozen_dists in cases:
        for frozen in frozen_dists:
            assert_equal(ProbPlot(sample, frozen).fit_params, expected)
@pytest.mark.matplotlib
@pytest.mark.parametrize("labels", [{}, {"xlabel": "X", "ylabel": "Y"}])
@pytest.mark.parametrize("x_size", [30, 50])
@pytest.mark.parametrize("y_size", [30, 50])
@pytest.mark.parametrize("line", [None, "45", "s", "r", "q"])
def test_correct_labels(
    close_figures, reset_randomstate, line, x_size, y_size, labels
):
    # qqplot_2samples internally swaps the samples so the smaller one ends
    # up on the y axis; the labels (default "1st"/"2nd" or the custom
    # xlabel/ylabel) must follow their sample through that swap.
    rs = np.random.RandomState(9876554)
    x = rs.normal(loc=0, scale=0.1, size=x_size)
    y = rs.standard_t(3, size=y_size)
    pp_x = sm.ProbPlot(x)
    pp_y = sm.ProbPlot(y)
    fig = qqplot_2samples(pp_x, pp_y, line=line, **labels)
    ax = fig.get_axes()[0]
    x_label = ax.get_xlabel()
    y_label = ax.get_ylabel()
    if x_size < y_size:
        # samples were swapped: the 2nd/"Y" label lands on the x axis
        if not labels:
            assert "2nd" in x_label
            assert "1st" in y_label
        else:
            assert "Y" in x_label
            assert "X" in y_label
    else:
        # no swap (x_size >= y_size): labels stay on their own axes
        if not labels:
            assert "1st" in x_label
            assert "2nd" in y_label
        else:
            assert "X" in x_label
            assert "Y" in y_label
@pytest.mark.matplotlib
def test_axis_order(close_figures):
    # Whichever sample has the larger spread must end up on the axis with
    # the wider limits, regardless of argument order or sample sizes.
    def axis_ranges(fig):
        ax = fig.get_axes()[0]
        x_range = np.diff(ax.get_xlim())[0]
        y_range = np.diff(ax.get_ylim())[0]
        return x_range, y_range
    wide = np.random.normal(10, 1, (100,))
    narrow = np.random.normal(1, 0.01, (100,))
    x_range, y_range = axis_ranges(qqplot_2samples(wide, narrow, "x", "y"))
    assert y_range < x_range
    wide_long = np.random.normal(10, 1, (1000,))
    x_range, y_range = axis_ranges(qqplot_2samples(wide_long, narrow, "x", "y"))
    assert y_range < x_range
    narrow_long = np.random.normal(1, 0.01, (1000,))
    x_range, y_range = axis_ranges(qqplot_2samples(wide, narrow_long, "x", "y"))
    assert x_range < y_range
| 31.416546 | 79 | 0.607225 |
473f3be23fc36f62930ab57d450f2e09fa19ccd8 | 3,020 | py | Python | corehq/util/metrics/prometheus.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | corehq/util/metrics/prometheus.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | corehq/util/metrics/prometheus.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | from typing import List, Dict
from prometheus_client import Counter as PCounter
from prometheus_client import Gauge as PGauge
from prometheus_client import Histogram as PHistogram
from corehq.util.metrics.metrics import HqMetrics
class PrometheusMetrics(HqMetrics):
    """Prometheus Metrics Provider.

    Creates one ``prometheus_client`` collector per metric name on first
    use and caches it in ``self._metrics``; the client library forbids
    registering the same metric name twice.
    """
    def __init__(self):
        # metric name (with '.' replaced by '_') -> collector instance
        self._metrics = {}
    def _counter(self, name: str, value: float = 1, tags: Dict[str, str] = None, documentation: str = ''):
        """See https://prometheus.io/docs/concepts/metric_types/#counter"""
        self._get_metric(PCounter, name, tags, documentation).inc(value)
    def _gauge(self, name: str, value: float, tags: Dict[str, str] = None, documentation: str = ''):
        """See https://prometheus.io/docs/concepts/metric_types/#gauge"""
        # (docstring link fixed: it previously pointed at #histogram)
        self._get_metric(PGauge, name, tags, documentation).set(value)
    def _histogram(self, name: str, value: float, bucket_tag: str, buckets: List[int], bucket_unit: str = '',
                   tags: Dict[str, str] = None, documentation: str = ''):
        """
        A cumulative histogram with a base metric name of <name> exposes multiple time series
        during a scrape:
        * cumulative counters for the observation buckets, exposed as
          `<name>_bucket{le="<upper inclusive bound>"}`
        * the total sum of all observed values, exposed as `<name>_sum`
        * the count of events that have been observed, exposed as `<name>_count`
          (identical to `<name>_bucket{le="+Inf"}` above)
        For example
        ::
            h = metrics_histogram(
                'commcare.request_duration', 1.4,
                bucket_tag='duration', buckets=[1,2,3], bucket_units='ms',
                tags=tags
            )
            # resulting metrics
            # commcare_request_duration_bucket{...tags..., le="1.0"} 0.0
            # commcare_request_duration_bucket{...tags..., le="2.0"} 1.0
            # commcare_request_duration_bucket{...tags..., le="3.0"} 1.0
            # commcare_request_duration_bucket{...tags..., le="+Inf"} 1.0
            # commcare_request_duration_sum{...tags...} 1.4
            # commcare_request_duration_count{...tags...} 1.0
        See https://prometheus.io/docs/concepts/metric_types/#histogram"""
        self._get_metric(PHistogram, name, tags, documentation, buckets=buckets).observe(value)
    def _get_metric(self, metric_type, name, tags, documentation, **kwargs):
        """Return the cached collector for ``name`` (creating it on first
        use), with ``tags`` applied as Prometheus labels.

        :param metric_type: the collector class itself (PCounter, PGauge,
            or PHistogram), not an instance.
        """
        name = name.replace('.', '_')
        # The Prometheus client appends a '_total' suffix to counter
        # metrics, so strip a caller-supplied one to avoid '..._total_total'.
        # BUGFIX: this previously used isinstance(metric_type, PCounter),
        # which is always False because metric_type is the class object, so
        # the suffix was never stripped.
        if metric_type is PCounter and name.endswith('_total'):
            name = name[:-6]
        metric = self._metrics.get(name)
        if not metric:
            tags = tags or {}
            metric = metric_type(name, documentation, labelnames=tags.keys(), **kwargs)
            self._metrics[name] = metric
        else:
            # guard against one name being reused with a different type
            assert metric.__class__ == metric_type
        return metric.labels(**tags) if tags else metric
| 43.768116 | 109 | 0.630132 |
ce91baee5c4e29ee6e6db3a12f9628b3d76af10d | 8,853 | py | Python | cons3rt/models/abstract_composition_status.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | cons3rt/models/abstract_composition_status.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | cons3rt/models/abstract_composition_status.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | """
Copyright 2020 Jackpine Technologies Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# coding: utf-8
"""
cons3rt - Copyright Jackpine Technologies Corp.
NOTE: This file is auto-generated. Do not edit the file manually.
"""
import pprint
import re # noqa: F401
import six
from cons3rt.configuration import Configuration
# Package metadata.
# FIX: __license__ previously ended with a stray trailing comma
# ("'Apache 2.0',"), which made it a one-element tuple instead of a string;
# the comma is removed so tools reading __license__ see a plain str.
__author__ = 'Jackpine Technologies Corporation'
__copyright__ = 'Copyright 2020, Jackpine Technologies Corporation'
__license__ = 'Apache 2.0'
__version__ = '1.0.0'
__maintainer__ = 'API Support'
__email__ = 'support@cons3rt.com'
class AbstractCompositionStatus(object):
    """NOTE: This class is auto-generated. Do not edit the class manually.
    """
    # Generated OpenAPI model base class; concrete subclasses are selected
    # at deserialization time via the ``type`` discriminator field.
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'id': 'int',
        'name': 'str',
        'project_id': 'int',
        'stoppable': 'bool',
        'connectable': 'bool',
        'startable': 'bool',
        'type': 'str'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'project_id': 'projectId',
        'stoppable': 'stoppable',
        'connectable': 'connectable',
        'startable': 'startable',
        'type': 'type'
    }
    # Maps each possible value of the ``type`` discriminator to the name of
    # the concrete subclass that should be instantiated for it.
    discriminator_value_class_map = {
        'ActiveCompositionStatus': 'ActiveCompositionStatus',
        'InactiveCompositionStatus': 'InactiveCompositionStatus'
    }
    def __init__(self, id=None, name=None, project_id=None, stoppable=None, connectable=None, startable=None, type=None, local_vars_configuration=None):  # noqa: E501
        """AbstractCompositionStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._id = None
        self._name = None
        self._project_id = None
        self._stoppable = None
        self._connectable = None
        self._startable = None
        self._type = None
        self.discriminator = 'type'
        # Optional fields are only assigned when provided; ``type`` is
        # required (its setter rejects None below).
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if project_id is not None:
            self.project_id = project_id
        if stoppable is not None:
            self.stoppable = stoppable
        if connectable is not None:
            self.connectable = connectable
        if startable is not None:
            self.startable = startable
        self.type = type
    @property
    def id(self):
        """Gets the id of this AbstractCompositionStatus.  # noqa: E501
        :return: The id of this AbstractCompositionStatus.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this AbstractCompositionStatus.
        :param id: The id of this AbstractCompositionStatus.  # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def name(self):
        """Gets the name of this AbstractCompositionStatus.  # noqa: E501
        :return: The name of this AbstractCompositionStatus.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this AbstractCompositionStatus.
        :param name: The name of this AbstractCompositionStatus.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def project_id(self):
        """Gets the project_id of this AbstractCompositionStatus.  # noqa: E501
        :return: The project_id of this AbstractCompositionStatus.  # noqa: E501
        :rtype: int
        """
        return self._project_id
    @project_id.setter
    def project_id(self, project_id):
        """Sets the project_id of this AbstractCompositionStatus.
        :param project_id: The project_id of this AbstractCompositionStatus.  # noqa: E501
        :type: int
        """
        self._project_id = project_id
    @property
    def stoppable(self):
        """Gets the stoppable of this AbstractCompositionStatus.  # noqa: E501
        :return: The stoppable of this AbstractCompositionStatus.  # noqa: E501
        :rtype: bool
        """
        return self._stoppable
    @stoppable.setter
    def stoppable(self, stoppable):
        """Sets the stoppable of this AbstractCompositionStatus.
        :param stoppable: The stoppable of this AbstractCompositionStatus.  # noqa: E501
        :type: bool
        """
        self._stoppable = stoppable
    @property
    def connectable(self):
        """Gets the connectable of this AbstractCompositionStatus.  # noqa: E501
        :return: The connectable of this AbstractCompositionStatus.  # noqa: E501
        :rtype: bool
        """
        return self._connectable
    @connectable.setter
    def connectable(self, connectable):
        """Sets the connectable of this AbstractCompositionStatus.
        :param connectable: The connectable of this AbstractCompositionStatus.  # noqa: E501
        :type: bool
        """
        self._connectable = connectable
    @property
    def startable(self):
        """Gets the startable of this AbstractCompositionStatus.  # noqa: E501
        :return: The startable of this AbstractCompositionStatus.  # noqa: E501
        :rtype: bool
        """
        return self._startable
    @startable.setter
    def startable(self, startable):
        """Sets the startable of this AbstractCompositionStatus.
        :param startable: The startable of this AbstractCompositionStatus.  # noqa: E501
        :type: bool
        """
        self._startable = startable
    @property
    def type(self):
        """Gets the type of this AbstractCompositionStatus.  # noqa: E501
        :return: The type of this AbstractCompositionStatus.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this AbstractCompositionStatus.
        :param type: The type of this AbstractCompositionStatus.  # noqa: E501
        :type: str
        """
        # type is the discriminator and therefore mandatory
        if self.local_vars_configuration.client_side_validation and type is None:  # noqa: E501
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        self._type = type
    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        # Look up the discriminator value in ``data`` (keyed by its JSON
        # name) and map it to the concrete subclass name, or None.
        discriminator_key = self.attribute_map[self.discriminator]
        discriminator_value = data[discriminator_key]
        return self.discriminator_value_class_map.get(discriminator_value)
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # recursively serialize nested models, lists and dicts
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AbstractCompositionStatus):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, AbstractCompositionStatus):
            return True
        return self.to_dict() != other.to_dict()
| 29.02623 | 166 | 0.615272 |
f6455ec9a8a291b62f48f9ccbbe144f3a95b5ee8 | 8,854 | py | Python | info/modules/news/views.py | ZiYin-ss/python-Flask-News | 10a71d4317ee9c4e7bcb0525696b96e5f43327ad | [
"MIT"
] | null | null | null | info/modules/news/views.py | ZiYin-ss/python-Flask-News | 10a71d4317ee9c4e7bcb0525696b96e5f43327ad | [
"MIT"
] | null | null | null | info/modules/news/views.py | ZiYin-ss/python-Flask-News | 10a71d4317ee9c4e7bcb0525696b96e5f43327ad | [
"MIT"
] | null | null | null | from flask import render_template, jsonify, current_app, abort, session, g, request
from ... import db
from ...models import News, User, Comment, CommentLike
from ...utils.commons import user_login_data
from ...utils.response_code import RET
from . import news_blue
# News detail page
# Path:   /news/<int:news_id>
# Method: GET
# Params: news_id (path converter, like Django's dynamic URL segments --
#         the converted value is passed to the view function)
# Returns: rendered detail.html with user/news data dict
@news_blue.route('/<int:news_id>')
@user_login_data
def news_detail(news_id):
    """Render the detail page for one news item.

    Gathers: the news record, the top-10 most-clicked items for the
    sidebar, the comment list (flagging comments the current user liked),
    and the current user's collect/follow state.
    """
    # Look up the news item by primary key.
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    # The user id is stored in the session at login (same approach as the
    # index view).  NOTE(review): @user_login_data already populates
    # g.user, so this per-view lookup is redundant -- consider using
    # g.user directly; kept to preserve existing behaviour.
    user_id = session.get("user_id")
    user = None
    if user_id:
        try:
            user = User.query.get(user_id)
        except Exception as e:
            current_app.logger.error(e)
    # Top-10 hottest news (by click count) for the sidebar.  clicks is
    # incremented elsewhere, so this always reflects current counts.
    try:
        click_news = News.query.order_by(News.clicks.desc()).limit(10).all()
    except Exception as e:
        # FIX: this handler previously returned without logging the error,
        # unlike every other handler in this module.
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    news_list = []
    for item in click_news:
        news_list.append(item.to_dict())
    # Has the logged-in user collected this news item?
    is_collect = False
    # only when logged in and the item is in the user's collection
    if user:
        if news in user.collection_news:
            is_collect = True
    # All comments on this news item, newest first.
    try:
        comments = Comment.query.filter(Comment.news_id == news_id).order_by(Comment.create_time.desc()).all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    # Every like the current user has made (used to flag liked comments).
    try:
        commentlikes = []
        if g.user:
            commentlikes = CommentLike.query.filter(CommentLike.user_id == g.user.id).all()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    # ids of all comments the user has liked
    mylike_comment_ids = []
    for commentlike in commentlikes:
        mylike_comment_ids.append(commentlike.comment_id)
    comments_list = []
    for comment in comments:
        comm_dict = comment.to_dict()
        # is_like marks whether the current user liked this comment
        comm_dict["is_like"] = False
        if g.user and comment.id in mylike_comment_ids:
            comm_dict["is_like"] = True
        comments_list.append(comm_dict)
    # Does the logged-in user follow this news item's author?
    is_followed = False
    if g.user and news.user:
        if g.user in news.user.followers:
            is_followed = True
    # abort() immediately returns the given HTTP error code.
    # NOTE(review): this check runs only after ``news`` has already been
    # dereferenced above, so a missing record would raise earlier; the
    # check should ideally sit right after the query.  Behaviour kept.
    if not news:
        abort(404)
    # Render the template with everything gathered above.
    data = {
        "news_info": news.to_dict(),
        "news_list": news_list,
        "user_info": user.to_dict() if user else "",
        "is_collected": is_collect,
        "comments": comments_list,
        "is_followed": is_followed
    }
    return render_template("news/detail.html", data=data)
# Collect / uncollect a news item
# Path:   /news/news_collect
# Method: POST
# Params: news_id, action, g.user
# Returns: errno, errmsg
@news_blue.route('/news_collect', methods=['POST'])
@user_login_data
def news_collect():
    # 1. The user must be logged in (g.user is set by @user_login_data)
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="用户未登录")
    # 2. Fetch the request parameters
    news_id = request.json.get("news_id")
    action = request.json.get("action")
    # 3. Validate: neither parameter may be missing
    if not all([news_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不全")
    # 4. Validate the operation type
    if not action in ["collect", "cancel_collect"]:
        return jsonify(errno=RET.DATAERR, errmsg="操作类型有误")
    # 5. Load the news item by id
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    # 6. Make sure the item exists
    if not news:
        return jsonify(errno=RET.NODATA, errmsg="新闻不存在")
    # 7. Collect or un-collect depending on the action
    if action == "collect":
        if not news in g.user.collection_news:
            g.user.collection_news.append(news)
    else:
        if news in g.user.collection_news:
            # Removing from the relationship is a DB change.
            # NOTE(review): no explicit db.session.commit() here -- this
            # presumably relies on a commit-on-teardown setting; verify.
            g.user.collection_news.remove(news)
    # 8. Return the response
    return jsonify(errno=RET.OK, errmsg="操作成功")
# Post a comment on a news item
# Path:   /news/news_comment
# Method: POST
# Params: news_id, comment, parent_id, g.user
# Returns: errno, errmsg, comment dict
@news_blue.route("/news_comment", methods=['POST'])
@user_login_data
def news_comment():
    # must be logged in (g.user is set by @user_login_data)
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="用户未登录")
    news_id = request.json.get("news_id")
    content = request.json.get("comment")
    parent_id = request.json.get("parent_id")
    # news_id and content are required; parent_id is optional
    if not all([news_id, content]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不全")
    try:
        news = News.query.get(news_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="新闻获取失败")
    if not news:
        return jsonify(errno=RET.NODATA, errmsg="新闻不存在")
    # Build the comment object and set its fields
    comment = Comment()
    comment.user_id = g.user.id
    comment.news_id = news_id
    comment.content = content
    if parent_id:  # parent_id present means this is a reply to another comment
        comment.parent_id = parent_id
    # Persist the comment (comments are displayed via a relationship, so
    # the row must be committed before rendering).
    # NOTE(review): no rollback on failure -- verify the app's teardown
    # handler cleans up the session.
    try:
        db.session.add(comment)
        db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="评论失败")
    return jsonify(errno=RET.OK, errmsg="评论成功", data=comment.to_dict())
# Like / unlike a comment
# Path:   /news/comment_like
# Method: POST
# Params: comment_id, action, g.user
# Returns: errno, errmsg
@news_blue.route("/comment_like", methods=['POST'])
@user_login_data
def comment_like():
    # must be logged in
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="用户未登录")
    comment_id = request.json.get("comment_id")
    action = request.json.get("action")
    if not all([comment_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不全")
    if not action in ["add", "remove"]:
        return jsonify(errno=RET.DATAERR, errmsg="操作类型有误")
    try:
        comment = Comment.query.get(comment_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="获取评论失败")
    if not comment:
        return jsonify(errno=RET.NODATA, errmsg="评论不存在")
    # Like or unlike depending on the action
    try:
        if action == "add":
            # an existing (user_id, comment_id) row means already liked
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if not comment_like:
                comment_like = CommentLike()
                comment_like.user_id = g.user.id
                comment_like.comment_id = comment_id
                # persist the like row
                db.session.add(comment_like)
                db.session.commit()
                # bump the comment's like counter
                comment.like_count += 1
                db.session.commit()
        else:
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if comment_like:
                db.session.delete(comment_like)
                # decrement the like counter, never below zero
                if comment.like_count > 0:
                    comment.like_count -= 1
                db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="操作失败")
    return jsonify(errno=RET.OK, errmsg="操作成功")
# 关注与取消关注
# 请求路径: /news/followed_user
# 请求方式: POST
# 请求参数:user_id,action
# 返回值: errno, errmsg
@news_blue.route('/followed_user', methods=['POST'])
@user_login_data
def followed_user():
    """Follow ("follow") or unfollow ("unfollow") an author.

    Expects a JSON body with ``user_id`` (the author) and ``action``;
    requires a logged-in user (``g.user``).  Returns a JSON envelope
    with ``errno``/``errmsg``.
    """
    # Only authenticated users may follow authors.
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="用户未登录")
    # Extract and validate the request parameters.
    user_id = request.json.get("user_id")
    action = request.json.get("action")
    if not all([user_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="参数不全")
    if action not in ["follow", "unfollow"]:
        return jsonify(errno=RET.DATAERR, errmsg="操作类型有误")
    # The author (a User instance; the front end sends news.author.id)
    # must exist.
    try:
        author = User.query.get(user_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="作者获取失败")
    if not author:
        return jsonify(errno=RET.NODATA, errmsg="作者不存在")
    # Apply the follow / unfollow operation on the relationship.
    if action == "follow":
        if g.user not in author.followers:
            author.followers.append(g.user)
    else:
        # BUG FIX: the original tested "not in" here as well, so an
        # unfollow request could never remove an existing follower (and
        # would instead try to remove a non-follower and raise).
        if g.user in author.followers:
            author.followers.remove(g.user)
    return jsonify(errno=RET.OK, errmsg="操作成功")
6d04aed7757b6600715020803724dd4f2db217d3 | 728 | py | Python | plugin/utils.py | jfcherng-sublime/LSP-intelephense-patcher | 9be50080e6acc8ce25e14bbf5ca3c3615c938e0f | [
"MIT"
] | 3 | 2020-11-07T07:11:18.000Z | 2021-06-11T13:24:48.000Z | plugin/utils.py | jfcherng-sublime/ST-patcher-LSP-intelephense | 97520041a572c8e07bef59388935020257768307 | [
"MIT"
] | null | null | null | plugin/utils.py | jfcherng-sublime/ST-patcher-LSP-intelephense | 97520041a572c8e07bef59388935020257768307 | [
"MIT"
] | null | null | null | from typing import Iterable, Iterator, TypeVar, Union
import re
T = TypeVar("T")
def unique(it: Iterable[T], stable: bool = False) -> Iterator[T]:
    """
    Yield the distinct items of the given iterable.

    @param stable If True, returned items are guaranteed to keep their
                  original relative ordering.
    """
    from collections import OrderedDict

    if stable:
        # OrderedDict.fromkeys deduplicates while preserving first-seen order.
        collected = OrderedDict.fromkeys(it).keys()
    else:
        collected = set(it)
    return iter(collected)
def get_command_name(var: Union[type, str]) -> str:
    """
    Derive a snake_case command name from a class or a string.

    A trailing ``Command`` suffix is dropped, each capital letter becomes
    an underscore-separated segment, runs of underscores are collapsed,
    and the result is lower-cased.
    """
    base = var.__name__ if isinstance(var, type) else str(var)
    stripped = re.sub(r"Command$", "", base)
    snake = re.sub(r"([A-Z])", r"_\1", stripped)
    snake = re.sub(r"_{2,}", "_", snake)
    return snake.strip("_").lower()
| 26 | 98 | 0.65522 |
cf0f3642c6d83c8b5f76c697311922433a409e37 | 3,142 | py | Python | userbot/modules/user_id.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/user_id.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/user_id.py | oxyda-fox/XBot-Remix | 3d97bea5395b223fc89a8cc6cb699cc624ccc967 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00sf\x00\x00\x00d\x00d\x01l\x00m\x01Z\x01\x01\x00d\x00d\x02l\x02m\x03Z\x03\x01\x00d\x00d\x03l\x04m\x05Z\x05\x01\x00d\x00d\x04l\x06m\x07Z\x07\x01\x00d\x00d\x05l\x08m\tZ\tm\nZ\n\x01\x00e\x07d\x06d\x07d\x08\x8d\x02d\td\n\x84\x00\x83\x01Z\x0be\n\xa0\x0cd\x0bd\x0ci\x01\xa1\x01\x01\x00d\rS\x00)\x0e\xe9\x00\x00\x00\x00)\x01\xda\x06events)\x01\xda\x13YouBlockedUserError)\x01\xda\x1bUpdateNotifySettingsRequest)\x01\xda\x08register)\x02\xda\x03bot\xda\x08CMD_HELPTz\x11^\\.gid(?: |$)(.*))\x02Z\x08outgoingZ\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\n\x00\x00\x00\xc3\x00\x00\x00sN\x01\x00\x00|\x00j\x00r\nd\x00S\x00|\x00j\x01s$|\x00\xa0\x02d\x01\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x00\xa0\x03\xa1\x00I\x00d\x00H\x00}\x01|\x01j\x04sL|\x00\xa0\x02d\x02\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00d\x03}\x02|\x01j\x05}\x03|\x01j\x05j\x06rr|\x00\xa0\x02d\x04\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00|\x00\xa0\x02d\x05\xa1\x01I\x00d\x00H\x00\x01\x00t\x06\xa0\x07|\x02\xa1\x014\x00I\x00d\x00H\x00\x9a\xaa}\x04z4|\x04\xa0\x08t\tj\nd\x06d\x07d\x08\x8d\x02\xa1\x01}\x05t\x06\xa0\x0b|\x02|\x01\xa1\x02I\x00d\x00H\x00\x01\x00|\x05I\x00d\x00H\x00}\x05W\x00n8\x04\x00t\x0ck\n\x90\x01r\x02\x01\x00\x01\x00\x01\x00|\x00\xa0\rd\t\xa1\x01I\x00d\x00H\x00\x01\x00Y\x00W\x005\x00Q\x00I\x00d\x00H\x00R\x00\xa3\x00d\x00S\x00X\x00|\x05j\x04\xa0\x0ed\n\xa1\x01\x90\x01r$|\x00\xa0\x02d\x0b\xa1\x01I\x00d\x00H\x00\x01\x00n\x16|\x00\xa0\x02|\x05j\x0fj\x0f\x9b\x00\xa1\x01I\x00d\x00H\x00\x01\x00W\x005\x00Q\x00I\x00d\x00H\x00R\x00X\x00d\x00S\x00)\x0cNz\x15`Balas Di Teks Ajg!!`z\x1c```Balas Di Teks Goblok!!```z\n@getidsbotz\x15`Balas Di Teks Asu!!`z\x16`Membongkar ID.......`Ti\xb0p \x0b)\x02Z\x08incomingZ\nfrom_usersz7`Bunuh @getidsbot dulu bos, biar botnya bisa jalan -_-`Z\x07Forwardz `Profil Buriq Tidak Punya 
ID...`)\x10Z\x08fwd_fromZ\x0freply_to_msg_idZ\x04editZ\x11get_reply_message\xda\x04text\xda\x06senderr\x06\x00\x00\x00Z\x0cconversationZ\nwait_eventr\x02\x00\x00\x00Z\nNewMessageZ\x10forward_messagesr\x03\x00\x00\x00Z\x05reply\xda\nstartswith\xda\x07message)\x06Z\x05eventZ\rreply_messageZ\x04chatr\t\x00\x00\x00Z\x04convZ\x08response\xa9\x00r\x0c\x00\x00\x00\xda\x00\xda\x01_\x07\x00\x00\x00s4\x00\x00\x00\x00\x02\x06\x01\x04\x01\x06\x01\x10\x01\x04\x01\x0e\x01\x06\x01\x10\x01\x04\x01\x04\x01\x06\x01\x08\x01\x10\x01\x04\x01\x10\x01\x14\x01\x02\x01\x14\x01\x12\x01\x0e\x01\x10\x01\x10\x01\x18\x01\x0e\x01\x12\x02r\x0e\x00\x00\x00Z\x07get_uidz.`.gid`\nUsage: Reply in message to get user ID.N)\rZ\x08telethonr\x02\x00\x00\x00Z\x1ctelethon.errors.rpcerrorlistr\x03\x00\x00\x00Z\x1dtelethon.tl.functions.accountr\x04\x00\x00\x00Z\x0euserbot.eventsr\x05\x00\x00\x00Z\x07userbotr\x06\x00\x00\x00r\x07\x00\x00\x00r\x0e\x00\x00\x00\xda\x06updater\x0c\x00\x00\x00r\x0c\x00\x00\x00r\x0c\x00\x00\x00r\r\x00\x00\x00\xda\x08<module>\x01\x00\x00\x00s\x14\x00\x00\x00\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x10\x02\n\x01\n\x1e\x04\x01\x02\x01\x02\xfe'))
| 628.4 | 3,070 | 0.772438 |
c48c0d062a5d2ac81b0e2330a4922fb378d78526 | 735 | py | Python | Python/PickingNumber.py | WinrichSy/HackerRank-Solutions | ed928de50cbbbdf0aee471630f6c04f9a0f69a1f | [
"Apache-2.0"
] | null | null | null | Python/PickingNumber.py | WinrichSy/HackerRank-Solutions | ed928de50cbbbdf0aee471630f6c04f9a0f69a1f | [
"Apache-2.0"
] | null | null | null | Python/PickingNumber.py | WinrichSy/HackerRank-Solutions | ed928de50cbbbdf0aee471630f6c04f9a0f69a1f | [
"Apache-2.0"
] | null | null | null | #Picking Numbers
#https://www.hackerrank.com/challenges/picking-numbers/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'pickingNumbers' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY a as parameter.
#
def pickingNumbers(a):
    """
    Return the length of the longest sub-multiset of ``a`` in which any
    two elements differ by at most 1.

    Any such selection consists of every copy of some value ``v`` plus
    every copy of ``v + 1``, so it suffices to try each distinct value
    as the lower end.  Counting frequencies once with ``Counter`` makes
    this O(n), instead of the original O(n * m) repeated ``list.count``
    scans (n = len(a), m = number of distinct values).

    :param a: non-empty list of integers
    :return: size of the largest valid selection
    """
    from collections import Counter

    counts = Counter(a)
    return max(counts[v] + counts[v + 1] for v in counts)
if __name__ == '__main__':
    # Open the judge-provided output file first, then read the input:
    # n (unused by the solver) followed by the space-separated array.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        n = int(input().strip())
        a = list(map(int, input().rstrip().split()))
        fptr.write(str(pickingNumbers(a)) + '\n')
| 18.375 | 62 | 0.668027 |
9ce46e0d39465658d9099989ec3205caa81a1213 | 18,271 | py | Python | cavsim/pipes/pipe.py | DHaspel/cavsim | a23e344b47b970e1a90e04c071e06860935d1694 | [
"Apache-2.0"
] | null | null | null | cavsim/pipes/pipe.py | DHaspel/cavsim | a23e344b47b970e1a90e04c071e06860935d1694 | [
"Apache-2.0"
] | null | null | null | cavsim/pipes/pipe.py | DHaspel/cavsim | a23e344b47b970e1a90e04c071e06860935d1694 | [
"Apache-2.0"
] | null | null | null | #! /opt/conda/bin/python3
""" Pipe class implementing the actual pipe simulation calculations """
# Copyright 2019 FAU-iPAT (http://ipat.uni-erlangen.de/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
from .base_pipe import BasePipe
from ..base.connectors.connector import Connector
from ..measure import Measure
from ..base.channels.import_channel import ImportChannel
from ..base.channels.export_channel import ExportChannel
class Pipe(BasePipe):  # pylint: disable=too-many-instance-attributes
    """
    Pipe class implementing the pipe simulation calculations

    The pipe is discretized into inner grid points plus one boundary point
    on each side; pressure and velocity are advanced per timestep from the
    neighbouring values of the previous timestep (compatibility equations
    along the two characteristics), with steady (Darcy) and unsteady
    (Brunone) wall friction terms.
    """

    def __init__(
            self,
            diameter: float,
            length: float,
            wall_thickness: float,
            bulk_modulus: float,
            roughness: float,
            inner_points: Optional[int] = None,
    ) -> None:
        """
        Initialization of the class

        :param diameter: Diameter of the fluid volume [m]
        :param length: Length of the pipe [m]
        :param wall_thickness: Thickness of the wall [m]
        :param bulk_modulus: Bulk modulus of wall material [Pa]
        :param roughness: Roughness of the wall [m]
        :param inner_points: Minimal number of inner points for discretization
        :raises TypeError: Wrong type of at least one parameter
        :raises ValueError: Value of at least one parameter out of bounds
        """
        super(Pipe, self).__init__(diameter, length, wall_thickness, bulk_modulus, roughness)
        if inner_points is not None and not isinstance(inner_points, int):
            raise TypeError('Wrong type for parameter inner_points ({} != {})'.format(type(inner_points), int))
        if inner_points is not None and inner_points < 3:
            raise ValueError('Number of inner points ({}) needs to greater than 2!'.format(inner_points))
        # Register internal fields; the second argument of field_create is
        # presumably the number of stored time levels (current/last/older)
        # — TODO confirm against BasePipe.field_create.
        self._inner_points = inner_points
        self._pressure: np.ndarray = self.field_create('pressure', 3)
        self._velocity: np.ndarray = self.field_create('velocity', 3)
        self.field_create('reynolds', 3)
        self.field_create('brunone', 3)
        self.field_create('darcy_friction_factor', 3)
        self._friction_steady = self.field_create('friction_steady', 3)
        self._friction_unsteady_a = self.field_create('friction_unsteady_a', 3)
        self._friction_unsteady_b = self.field_create('friction_unsteady_b', 3)
        self._sos: np.ndarray = self.field_create('speed_of_sound', 3)
        # Create the left connector (exports read from the first inner
        # grid point, index 1; velocityMinus is exported with a sign flip
        # because "plus"/"minus" refer to the flow direction seen by the
        # neighbouring component)
        self._left: Connector = Connector(self, [
            ExportChannel(Measure.deltaX, lambda: self._delta_x),
            ImportChannel(Measure.boundaryPoint, False),
            ExportChannel(Measure.diameter, lambda: self.diameter),
            ExportChannel(Measure.length, lambda: self.length),
            ExportChannel(Measure.area, lambda: self.area),
            ImportChannel(Measure.pressureLast, False),
            ExportChannel(Measure.pressureCurrent, lambda: self._pressure[0, 1]),
            ExportChannel(Measure.pressureLast, lambda: self._pressure[1, 1]),
            ImportChannel(Measure.velocityPlusCurrent, False),
            ImportChannel(Measure.velocityPlusLast, False),
            ExportChannel(Measure.velocityMinusCurrent, lambda: -self._velocity[0, 1]),
            ExportChannel(Measure.velocityMinusLast, lambda: -self._velocity[1, 1]),
            ExportChannel(Measure.frictionCurrent, lambda: self._friction_steady[0, 1] + self._friction_unsteady_b[0, 1]),
            ExportChannel(Measure.frictionLast, lambda: self._friction_steady[1, 1] + self._friction_unsteady_b[1, 1]),
            ExportChannel(Measure.BPspeedOfSoundCurrent, lambda: self._sos[0, 0]),
            ExportChannel(Measure.BPspeedOfSoundLast, lambda: self._sos[1, 0]),
        ])
        # Create the right connector (mirror image: exports read from the
        # last inner grid point, index -2)
        self._right: Connector = Connector(self, [
            ExportChannel(Measure.deltaX, lambda: self._delta_x),
            ImportChannel(Measure.boundaryPoint, False),
            ExportChannel(Measure.diameter, lambda: self.diameter),
            ExportChannel(Measure.length, lambda: self.length),
            ExportChannel(Measure.area, lambda: self.area),
            ImportChannel(Measure.pressureLast, False),
            ExportChannel(Measure.pressureCurrent, lambda: self._pressure[0, -2]),
            ExportChannel(Measure.pressureLast, lambda: self._pressure[1, -2]),
            ImportChannel(Measure.velocityMinusCurrent, False),
            ImportChannel(Measure.velocityMinusLast, False),
            ExportChannel(Measure.velocityPlusCurrent, lambda: self._velocity[0, -2]),
            ExportChannel(Measure.velocityPlusLast, lambda: self._velocity[1, -2]),
            ExportChannel(Measure.frictionCurrent, lambda: self._friction_steady[0, -2] + self._friction_unsteady_a[0, -2]),
            ExportChannel(Measure.frictionLast, lambda: self._friction_steady[1, -2] + self._friction_unsteady_a[1, -2]),
            ExportChannel(Measure.BPspeedOfSoundCurrent, lambda: self._sos[0, -1]),
            ExportChannel(Measure.BPspeedOfSoundLast, lambda: self._sos[1, -1]),
        ])

    @property
    def left(self) -> Connector:
        """
        Left connector property

        :return: Left sided connector of the pipe
        """
        return self._left

    @property
    def right(self) -> Connector:
        """
        Right connector property

        :return: Right sided connector of the pipe
        """
        return self._right

    def get_max_delta_t(self) -> Optional[float]:
        """
        Method to return the maximum allowed timestep width for this component

        The bound follows from the CFL-like condition that the wave must
        not cross more than one grid cell per timestep at the norm speed
        of sound, with at least (inner_points) inner nodes.

        :return: Maximum allowed timestep width or None if any is suitable
        """
        n_min = self._inner_points if self._inner_points is not None else 3
        result = self.length / ((n_min + 1) * self.norm_speed_of_sound)
        return result

    def discretize(self, delta_t: float) -> None:
        """
        Method handling the discretization of the component (for a given timestep width)

        :param delta_t: Timestep width to discretize for
        :raises ValueError: Timestep too large to fit at least 3 inner points
        """
        self._delta_t = delta_t
        # Number of inner nodes so that delta_x ~= norm_speed_of_sound * delta_t
        nodes = int(np.ceil(self.length / (self.norm_speed_of_sound * delta_t)) - 1)
        if nodes < 3:
            raise ValueError('Timestep to large!')
        self._delta_x = self.length / float(nodes + 1)
        # +2 accounts for the two boundary points
        self.fields_resize(nodes + 2)

    def initialize(self) -> None:
        """
        Initialize the internal state of the component (after discretization was called)

        Velocity starts at zero and pressure at the fluid's norm pressure.
        """
        self.field('velocity')[:, :] = np.zeros(self.field('velocity').shape)[:, :]
        self.field('pressure')[:, :] = self.fluid.norm_pressure * np.ones(self.field('pressure').shape)[:, :]
        # Initialize derived properties; done twice, presumably to populate
        # both stored time levels — TODO confirm.
        for _ in range(2):
            self._calculate_reynolds()
            self._calculate_friction()
            self._calculate_speed_of_sound()
            self.fields_move()

    def prepare_next_timestep(self, delta_t: float, next_total_time: float) -> None:
        """
        Prepare the internal state for the next timestep to be calculated

        :param delta_t: Timestep width for the next timestep
        :param next_total_time: Total simulation time at the end of the next timestep
        """
        # Shift all internal fields (current values become "last" values)
        self.fields_move()

    def exchange_last_boundaries(self) -> None:
        """
        Exchange the boundary values from the last time steps

        Imported "minus" velocities are negated back into this pipe's sign
        convention.
        """
        # Exchange previous values with the left boundary
        self._pressure[1, 0] = self.left.value(Measure.pressureLast)
        self._velocity[1, 0] = self.left.value(Measure.velocityPlusLast)
        # Exchange previous values with the right boundary
        self._pressure[1, -1] = self.right.value(Measure.pressureLast)
        self._velocity[1, -1] = -self.right.value(Measure.velocityMinusLast)

    def finalize_current_timestep(self) -> None:
        """
        Method to perform final calculations at the end of the current timestep
        """
        # Exchange current values
        self._velocity[0, 0] = self.left.value(Measure.velocityPlusCurrent)
        self._velocity[0, -1] = -self.right.value(Measure.velocityMinusCurrent)
        # Calculate static values (derived fields for the next timestep)
        self._calculate_reynolds()
        self._calculate_friction()
        self._calculate_speed_of_sound()

    def prepare_next_inner_iteration(self, iteration: int) -> None:
        """
        Method to prepare the internal state for the next inner iteration of the current timestep

        No preparation is required for this component.

        :param iteration: Number of the next inner iteration to prepare for
        """

    def exchange_current_boundaries(self) -> None:
        """
        Exchange boundary values from the current time step

        No exchange is required here; current boundary values are pulled
        in finalize_current_timestep instead.
        """

    def calculate_next_inner_iteration(self, iteration: int) -> bool:
        """
        Method to do the calculations of the next inner iteration

        :param iteration: Number of the next inner iteration
        :return: Whether this component needs another inner iteration afterwards
        """
        self._calculate_pressure()
        self._calculate_velocity()
        # A single iteration suffices for this explicit scheme
        return False

    def _calculate_speed_of_sound(self) -> None:
        """
        Calculate the current speed of sound
        """
        pressure = self.field_wide_slice('pressure', 0)
        result = self.speed_of_sound(pressure=pressure, temperature=None)
        self.field_wide_slice('speed_of_sound', 0)[:] = result[:]

    def _calculate_reynolds(self) -> None:
        """
        Calculate the Reynolds number based on the values from the previous time step
        """
        # Get the input fields
        pressure = self.field_wide_slice('pressure', 0)
        velocity = self.field_wide_slice('velocity', 0)
        # Calculate fluid properties
        viscosity = self.fluid.viscosity(temperature=None, shear_rate=None)
        density = self.fluid.density(pressure=pressure, temperature=None)
        # Calculate the reynolds number (Re = rho * |v| * d / mu)
        result = (density * np.abs(velocity) * self.diameter) / viscosity
        # Store/return the calculated result
        self.field_wide_slice('reynolds', 0)[:] = result[:]

    def _calculate_darcy_friction_factor(self) -> None:
        """
        Calculates darcy's friction coefficient within the pipe

        Laminar flow (Re < 2100) uses the analytical 64/Re; turbulent flow
        uses a fixed-point iteration of the Colebrook-White equation.
        """
        # Get the input fields
        reynolds = self.field_wide_slice('reynolds', 0)
        result = np.ones(reynolds.shape)
        # Calculate the friction factor (low Re): lambda = 64 / Re
        selector = np.logical_and(reynolds > 0.0, reynolds < 2100.0)
        if np.sum(selector) > 0:
            local_reynolds = reynolds[selector]
            factor = 64.0 / local_reynolds
            result[selector] = factor
        # Calculate the friction factor (high Re): iterate Colebrook-White
        # until the factor changes by less than 1e-12
        selector = (reynolds >= 2100.0)
        if np.sum(selector) > 0:
            local_reynolds = reynolds[selector]
            factor = 10.0 * np.ones(local_reynolds.shape)
            error = np.ones(local_reynolds.shape)
            while np.any(error > 1e-12):
                term1 = self.roughness / (3.7 * self.diameter)
                term2 = 2.51 / (local_reynolds * np.sqrt(factor))
                temp = -2.0 * np.log10(term1 + term2)
                old_factor, factor = factor, np.square(1.0 / temp)
                error = np.abs(factor - old_factor)
            result[selector] = factor
        # Store/return the calculated result
        self.field_wide_slice('darcy_friction_factor', 0)[:] = result[:]

    def _calculate_friction_steady(self) -> None:
        """
        Calculate the steady friction using darcy's factor
        """
        # Get the input fields
        velocity = self.field_wide_slice('velocity', 0)
        friction_factor = self.field_wide_slice('darcy_friction_factor', 0)
        # Calculate the friction (lambda / 2d * |v| * v, sign-preserving)
        result = (friction_factor / (2.0 * self.diameter)) * np.abs(velocity) * velocity
        # Store/return the calculated result
        self.field_wide_slice('friction_steady', 0)[:] = result[:]

    def _calculate_friction(self) -> None:
        """
        Calculate the total friction (steady + unsteady)
        """
        # Calculate steady friction
        self._calculate_darcy_friction_factor()
        self._calculate_friction_steady()
        # Calculate unsteady friction (Brunone model, one term per
        # characteristic direction)
        self._calculate_brunone()
        self._calculate_unsteady_friction_a()
        self._calculate_unsteady_friction_b()

    def _calculate_pressure(self) -> None:
        """
        Calculate the pressure of the current time step

        Combines the compatibility equations along both characteristics
        using the left (index -1) and right (index +1) neighbours of the
        previous time step.
        """
        # Get the input fields
        pressure_center = self.field_slice('pressure', 1, 0)
        pressure_a = self.field_slice('pressure', 1, -1)
        pressure_b = self.field_slice('pressure', 1, +1)
        velocity_a = self.field_slice('velocity', 1, -1)
        velocity_b = self.field_slice('velocity', 1, +1)
        friction_a = self.field_slice('friction_steady', 1, -1) + self.field_slice('friction_unsteady_a', 1, -1)
        friction_b = self.field_slice('friction_steady', 1, +1) + self.field_slice('friction_unsteady_b', 1, +1)
        # Calculate fluid properties
        speed_of_sound = self.speed_of_sound(pressure=pressure_center, temperature=None)
        density = self.fluid.density(pressure=pressure_center, temperature=None)
        # Calculate the new pressure from the characteristic equations
        result = 0.5 * (
            (speed_of_sound * density * (velocity_a - velocity_b))
            + (pressure_a + pressure_b)
            + (self._delta_t * speed_of_sound * density * (friction_b - friction_a))
            # todo: height terms
        )
        # Store/return the calculated result
        self.field_slice('pressure', 0, 0)[:] = result[:]

    def _calculate_velocity(self) -> None:
        """
        Calculate the velocity of the current time step

        Counterpart of _calculate_pressure for the velocity component of
        the compatibility equations.
        """
        # Get the input fields
        pressure_center = self.field_slice('pressure', 1, 0)
        pressure_a = self.field_slice('pressure', 1, -1)
        pressure_b = self.field_slice('pressure', 1, +1)
        velocity_a = self.field_slice('velocity', 1, -1)
        velocity_b = self.field_slice('velocity', 1, +1)
        friction_a = self.field_slice('friction_steady', 1, -1) + self.field_slice('friction_unsteady_a', 1, -1)
        friction_b = self.field_slice('friction_steady', 1, +1) + self.field_slice('friction_unsteady_b', 1, +1)
        # Calculate fluid properties
        speed_of_sound = self.speed_of_sound(pressure=pressure_center, temperature=None)
        density = self.fluid.density(pressure=pressure_center, temperature=None)
        # Calculate the new velocity from the characteristic equations
        result = 0.5 * (
            (velocity_a + velocity_b)
            + ((1.0 / (speed_of_sound * density)) * (pressure_a - pressure_b))
            - (self._delta_t * (friction_a + friction_b))
            # todo: height terms
        )
        # Store/return the calculated result
        self.field_slice('velocity', 0, 0)[:] = result[:]

    def _calculate_brunone(self) -> None:
        """
        Calculate the Brunone factor for unsteady friction

        Below Re = 2320 a constant shear-decay coefficient is used;
        above, the coefficient is computed from the Reynolds number
        before being mapped to the Brunone factor k = sqrt(C*) / 2.
        """
        # Get the input fields
        reynolds = self.field_wide_slice('reynolds', 0)
        # Calculate the Brunone factor
        result = 0.000476 * np.ones(reynolds.shape)
        selector = (reynolds >= 2320.0)
        if np.sum(selector) > 0:
            local_reynolds = reynolds[selector]
            factor = 14.3 / np.power(local_reynolds, 0.05)
            factor = 7.41 / np.power(local_reynolds, np.log10(factor))
            result[selector] = factor
        result = np.sqrt(result) / 2.0
        # Store/return the calculated result
        self.field_wide_slice('brunone', 0)[:] = result[:]

    def _calculate_unsteady_friction_a(self) -> None:
        """
        Calculate the unsteady friction to left side

        Brunone-type term built from the local acceleration (dv/dt) and
        the convective gradient (dv/dx) towards the right neighbour.
        """
        # Get the input fields
        brunone = self.field_ext_slice('brunone', 0, 0)
        velocity_a = self.field_ext_slice('velocity', 0, 0)
        velocity_aa = self.field_ext_slice('velocity', 1, 0)
        velocity_p = self.field_ext_slice('velocity', 0, 1)
        pressure_a = self.field_ext_slice('pressure', 0, 0)
        # Calculate fluid properties
        speed_of_sound = self.speed_of_sound(pressure=pressure_a, temperature=None)
        # Calculate the friction
        vdt = (velocity_a - velocity_aa) / self._delta_t
        vdx = (velocity_p - velocity_a) / self._delta_x
        result = brunone * (vdt + (speed_of_sound * np.sign(velocity_a * vdx) * vdx))
        # Store/return the calculated result
        self.field_ext_slice('friction_unsteady_a', 0, 0)[:] = result[:]

    def _calculate_unsteady_friction_b(self) -> None:
        """
        Calculate the unsteady friction to right side

        Mirror image of _calculate_unsteady_friction_a using the left
        neighbour for the spatial gradient.
        """
        # Get the input fields
        brunone = self.field_ext_slice('brunone', 0, 1)
        velocity_b = self.field_ext_slice('velocity', 0, 1)
        velocity_bb = self.field_ext_slice('velocity', 1, 1)
        velocity_p = self.field_ext_slice('velocity', 0, 0)
        pressure_b = self.field_ext_slice('pressure', 0, 1)
        # Calculate fluid properties
        speed_of_sound = self.speed_of_sound(pressure=pressure_b, temperature=None)
        # Calculate the friction
        vdt = (velocity_b - velocity_bb) / self._delta_t
        vdx = (velocity_b - velocity_p) / self._delta_x
        result = brunone * (vdt + (speed_of_sound * np.sign(velocity_b * vdx) * vdx))
        # Store/return the calculated result
        self.field_ext_slice('friction_unsteady_b', 0, 1)[:] = result[:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.