hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fa9f14fb566694cd3137372d631c3c4bad4561 | 9,282 | py | Python | content/technical/api/tutorial/python_tutorial/tutorial.py | chris-jan-trapp/easydb-documentation | 7a597a755e11b460f39e0f154d7db038a4e02cb8 | [
"MIT"
] | 3 | 2017-09-06T14:31:13.000Z | 2021-01-06T17:14:30.000Z | content/technical/api/tutorial/python_tutorial/tutorial.py | chris-jan-trapp/easydb-documentation | 7a597a755e11b460f39e0f154d7db038a4e02cb8 | [
"MIT"
] | 10 | 2018-03-20T14:14:56.000Z | 2022-03-08T10:23:00.000Z | content/technical/api/tutorial/python_tutorial/tutorial.py | chris-jan-trapp/easydb-documentation | 7a597a755e11b460f39e0f154d7db038a4e02cb8 | [
"MIT"
] | 4 | 2019-02-21T15:01:14.000Z | 2022-03-08T10:23:14.000Z | import requests
import json
import os
import copy
import argparse
# Command line interface: server URL, free-form search terms, and an
# optional hand-written JSON criteria file that overrides the -s terms.
argparser = argparse.ArgumentParser(
    description="easydb session creation and search")
argparser.add_argument("-u", "--server_url", help="Url of the server")
argparser.add_argument("-s", "--search", default=[], nargs="*",
                       help="Search item, can accept multiple elements, comma separated")
argparser.add_argument("-j", "--json", default="",
                       help="Handwritten search criteria replaces search argument")
args = argparser.parse_args()
"""
Session class handles all Session API applications
"""
class Session:
    """Holds the easydb API endpoint URLs and per-session state.

    The constructor derives all API endpoint URLs from the server address.
    Session state (token, header, content, ...) is stored as plain
    attributes that the module-level functions below fill in as the session
    progresses.

    Improvement: the original wrapped every attribute in Java-style
    _get*/_set* methods plus property() declarations; plain attributes give
    callers the exact same read/write behavior with far less boilerplate.
    """

    def __init__(self, server, searchable, searchjson=""):
        # Prepend a scheme only when the caller did not supply one.
        prefix = "" if server.startswith("http") else "http://"
        base = prefix + server + "/api/v1"
        self.new_session = base + "/session"
        self.auth_session = base + "/session/authenticate"
        self.deauth_session = base + "/session/deauthenticate"
        self.search = base + "/search"
        self.plugin = base + "/plugin"
        self.server = base + "/plugin/base/server/status"
        self.searchable = searchable    # list of search strings (-s option)
        self.searchjson = searchjson    # optional JSON criteria file (-j option)
        # Session state, populated by start_session()/authenticate_session().
        self.session = ""
        self.token = ""
        self.header = ""
        self.content = ""
        self.password = ""
        self.login = ""
        self.plugins = ""
"""
Create new session using URL directed towards database
"""
def start_session(ezdb):
    """Open a new (unauthenticated) API session and cache it on ezdb.

    Stores the raw response, its headers, the session token, and the decoded
    JSON body on the Session object for later calls.
    """
    try:
        print("start session")
        r = requests.get(ezdb.new_session)
        check_status_code(r, True)  # terminates the script on non-200
    except requests.exceptions.ConnectionError as e:
        # server_url_error_message() terminates the script, so the code
        # below is only reached on a successful request.
        server_url_error_message(ezdb.new_session, e)
    ezdb.session = r
    ezdb.header = r.headers
    ezdb.token = getVal(r.json(), "token")  # token authorizes later calls
    ezdb.content = r.json()
"""
Retrieve the same session using Token and plain url
Compare instances to prove similarity
"""
def retrieve_current_session(ezdb):
    """Fetch the session again by token and verify it matches the cached one."""
    params = {"token": ezdb.token}
    print("retrieve current session, payload: %s" %
          json.dumps(params, indent=4))
    response = requests.get(ezdb.new_session, params=params)
    check_status_code(response, True)
    # Identical "instance" blocks prove we got the very same session back.
    same_instance = (getVal(response.json(), "instance") ==
                     getVal(ezdb.content, "instance"))
    if same_instance:
        print("retrieved correct session")
"""
Authenticate Session using authenticate url
login and password credentials required, or email instead of login
"""
def authenticate_session(ezdb):
    """Authenticate the session interactively with login and password.

    Prompts on stdin, stores the credentials on ezdb, and posts them
    together with the session token to the authenticate endpoint.
    """
    # NOTE(review): raw_input is Python 2 only; under Python 3 this would
    # need to be input().
    ezdb.login = raw_input("login: ")
    ezdb.password = raw_input("password: ")
    payload = {
        "token": ezdb.token,
        "login": ezdb.login,
        "password": ezdb.password
    }
    print("authenticate session, payload: %s" %
          json.dumps(payload, indent=4))
    r = requests.post(ezdb.auth_session, params=payload)
    check_status_code(r, True)  # terminates the script on non-200
"""
Deauthenticate session using deauthenticate url
"""
def deauthenticate_session(ezdb):
    """End the authenticated session; a failure is reported but not fatal."""
    body = {"token": ezdb.token}
    print("deauthenticate session, payload: %s" %
          json.dumps(body, indent=4))
    resp = requests.post(ezdb.deauth_session, params=body)
    # No exit_on_failure here: deauthentication is best effort.
    check_status_code(resp)
"""
Search database using search url and search criteria from json file
"""
def search(ezdb):
    """Run a search against the easydb /search endpoint.

    The criteria are read either from the file given with -j, or from a
    generated search.json built from the -s terms.  The response is dumped
    to searchResult.json and an equivalent curl command line is printed.

    Fixed: the criteria file handle was opened without ever being closed;
    it is now read inside a context manager.
    """
    tokenpayload = {
        "token": ezdb.token
    }
    if ezdb.searchjson != "":
        # Hand-written criteria file supplied on the command line.
        filename = ezdb.searchjson
        do_write_criteria = False
    else:
        # Generate search.json from the -s search terms.
        filename = "search.json"
        do_write_criteria = True
    if do_write_criteria:
        write_criteria(ezdb)
    _file = os.path.join(os.getcwd(), filename)
    if not os.path.isfile(_file):
        print(_file + " does not exist")
        exit(1)
    # Context manager closes the criteria file (the original leaked it).
    with open(_file) as f:
        data = json.load(f)
    print("search, payload: %s" % json.dumps(data, indent=4))
    r = requests.post(ezdb.search, params=tokenpayload, data=json.dumps(data))
    search_result = r.json()
    write_json(search_result, "searchResult.json")
    if "count" in search_result:
        print("search response: %s hit(s) found" % search_result["count"])
    print("search response was saved as searchResult.json\n")
    print(perform_curl_request(r.request))
def perform_curl_request(req):
    """Render a prepared HTTP request as an equivalent curl command line."""
    header_parts = " -H ".join(
        '"{0}: {1}"'.format(name, value) for name, value in req.headers.items()
    )
    return "curl -X {method} -H {headers} -d '{data}' '{uri}'".format(
        method=req.method,
        headers=header_parts,
        data=req.body,
        uri=req.url,
    )
def write_criteria(ezdb):
    """Build search.json from the -s terms using criteria_template.json.

    The first entry of the template's "search" list serves as a prototype;
    one copy per search term is appended with its "string" field replaced.
    """
    with open(os.path.join(os.getcwd(), "criteria_template.json"), "r") as jsonFile:
        data = json.load(jsonFile)
    tmp = data["search"][0]
    criteria = []
    for x in ezdb.searchable:
        tmp["string"] = x
        # copy.copy is shallow; safe here as long as only the top-level
        # "string" key differs between entries.
        criteria.append(copy.copy(tmp))
    data["search"] = criteria
    with open(os.path.join(os.getcwd(), "search.json"), "w") as jsonFile:
        jsonFile.write(json.dumps(data))
        print("generated search criteria, saved in " +
              str(os.path.abspath(jsonFile.name)))
"""
Print the Root Menu About
"""
def root_menu_about(ezdb):
    """Collect and pretty-print the "About" details of the connected server.

    Values are gathered from three places: the session's "instance" block,
    the HTTP headers of the session response, and the /plugin listing.
    Keys left at "" are skipped by pretty_printer.
    """
    aboutDetails = {
        "api": "",
        "server_version": "",
        "user-schema": "",
        "solution": "",
        "instance": "",
        "db-name": "",
        "Plugins": "",
        "Optionen": "",
        "last-modified": "",
        "Fivegit": "",
        "CUIgit": "",
        "Style": "",
        "server": ""
    }
    print(ezdb.header)
    instance = getVal(ezdb.content, "instance")
    for key, value in instance.items():
        if key in aboutDetails:
            aboutDetails[key] = value
        # Instance code is labelled as 'name' in dict
        if key == "name":
            aboutDetails["instance"] = value
    for key, value in ezdb.header.items():
        if key in aboutDetails:
            aboutDetails[key] = value
    # Get Plugins
    print("get plugins")
    r = requests.get(ezdb.plugin)
    ezdb.plugins = r.json()["plugins"]
    plgns = []
    for plg in ezdb.plugins:
        plgns.append(plg["name"])
    aboutDetails["Plugins"] = plgns
    # Get Server Info
    payload = {
        "token": ezdb.token
    }
    print("get server info")
    r = requests.get(ezdb.server, params=payload)
    # NOTE(review): the server-status response above is fetched but never
    # read; presumably the "server" field was meant to be filled from it.
    pretty_printer(aboutDetails)
"""
Helper Methods
"""
def getVal(data, str):
    """Return data[str], or None when the key is missing.

    Replaces the original linear scan over data.items(): dict keys are
    unique, so dict.get() yields the same value (and the same implicit
    None on a miss).  The parameter name `str` shadows the builtin but is
    kept for interface compatibility.
    """
    return data.get(str)
def write_json(data, name):
    """Dump `data` to the file `name` as indented (pretty-printed) JSON."""
    with open(name, "w") as sink:
        sink.write(json.dumps(data, indent=4))
def write_file(self, r, filename):
    """Stream a response body to `filename` in 128-byte chunks.

    NOTE(review): `self` is unused -- this module-level function looks like
    it was lifted from a class; the parameter is kept for compatibility.
    """
    with open(filename, "wb") as out:
        for piece in r.iter_content(chunk_size=128):
            out.write(piece)
def pretty_printer(dict):
    """Print a two-column "About | Information" table, skipping empty values.

    List values are joined with ", ".  The parameter name shadows the
    builtin `dict`; kept for interface compatibility.

    Fixed: the original used Python 2 print statements and iteritems(),
    which are a syntax error / AttributeError under Python 3 and were
    inconsistent with the print() calls used everywhere else in this file.
    """
    print("{:<20} {:<20}".format("About", "Information"))
    for k, v in dict.items():
        if v == "":
            continue  # suppress rows that were never filled in
        if isinstance(v, list):
            print("{:<20} {:<20}".format(k, ", ".join(v)))
            continue
        print("{:<20} {:<20}".format(k, v))
def check_status_code(response, exit_on_failure=False):
    """Report a non-200 response; optionally terminate the whole script."""
    if response.status_code == 200:
        return
    details = json.dumps(response.json(), indent=4)
    print("got status code %s: %s" %
          (response.status_code, details))
    if exit_on_failure:
        print("exit after unexpected status code")
        exit(1)
"""
error_message
"""
def server_url_error_message(str, err):
    """Report an unreachable/invalid server URL and terminate the script.

    Fixed: the original used Python 2 print statements and called
    sys.exit() without `sys` ever being imported in this module (NameError
    at runtime).  The parameter name `str` shadows the builtin but is kept
    for interface compatibility.
    """
    import sys  # local import: sys is not imported at module level
    print("URL is invalid")
    print("{0} raises {1}".format(str, err))
    sys.exit()
if __name__ == "__main__":
    # Wire the command line arguments into a Session and run the full
    # tutorial flow: create/authenticate the session, show the About info,
    # perform the search, then deauthenticate.
    ezdb = Session(args.server_url, args.search, args.json)
    print("\nCreate and authenticate session\n")
    start_session(ezdb)
    retrieve_current_session(ezdb)
    authenticate_session(ezdb)
    print("\nShow root menu\n")
    root_menu_about(ezdb)
    print("\nPerform search: %s\n" % ("from file %s" %
          args.json if args.json != "" else ("[%s]" % ", ".join(args.search))))
    search(ezdb)
    print("\nDeauthenticate session\n")
    deauthenticate_session(ezdb)
| 24.62069 | 107 | 0.612476 | import requests
import json
import os
import copy
import argparse
argparser = argparse.ArgumentParser(
description="easydb session creation and search")
argparser.add_argument("-u", "--server_url", help="Url of the server")
argparser.add_argument("-s", "--search", default=[], nargs="*",
help="Search item, can accept multiple elements, comma separated")
argparser.add_argument("-j", "--json", default="",
help="Handwritten search criteria replaces search argument")
args = argparser.parse_args()
"""
Session class handles all Session API applications
"""
class Session:
_session, _token, _header, _content, _plugins, _password, _login = "", "", "", "", "", "", ""
def __init__(self, server, searchable, searchjson=""):
http = "http://"
if server.startswith("http"):
http = ""
self.new_session = http + server + "/api/v1/session"
self.auth_session = http + server + "/api/v1/session/authenticate"
self.deauth_session = http + server + "/api/v1/session/deauthenticate"
self.search = http + server + "/api/v1/search"
self.plugin = http + server + "/api/v1/plugin"
self.server = http + server + "/api/v1/plugin/base/server/status"
self.searchable = searchable
self.searchjson = searchjson
def _setSession(self, session=None):
self._session = session
def _getSession(self):
return self._session
def _setHeader(self, header):
self._header = header
def _getHeader(self):
return self._header
def _setToken(self, token):
self._token = token
def _getToken(self):
return self._token
def _setContent(self, content):
self._content = content
def _getContent(self):
return self._content
def _setPassword(self, password):
self._password = password
def _getPassword(self):
return self._password
def _setLogin(self, login):
self._login = login
def _getLogin(self):
return self._login
def _setPlugins(self, plugins):
self._plugins = plugins
def _getPlugins(self):
return self._plugins
token = property(_getToken, _setToken)
header = property(_getHeader, _setHeader)
session = property(_getSession, _setSession)
content = property(_getContent, _setContent)
password = property(_getPassword, _setPassword)
login = property(_getLogin, _setLogin)
plugins = property(_getPlugins, _setPlugins)
"""
Create new session using URL directed towards database
"""
def start_session(ezdb):
try:
print("start session")
r = requests.get(ezdb.new_session)
check_status_code(r, True)
except requests.exceptions.ConnectionError as e:
server_url_error_message(ezdb.new_session, e)
ezdb.session = r
ezdb.header = r.headers
ezdb.token = getVal(r.json(), "token")
ezdb.content = r.json()
"""
Retrieve the same session using Token and plain url
Compare instances to prove similarity
"""
def retrieve_current_session(ezdb):
payload = {
"token": ezdb.token
}
print("retrieve current session, payload: %s" %
json.dumps(payload, indent=4))
r = requests.get(ezdb.new_session, params=payload)
check_status_code(r, True)
if getVal(r.json(), "instance") == getVal(ezdb.content, "instance"):
print("retrieved correct session")
"""
Authenticate Session using authenticate url
login and password credentials required, or email instead of login
"""
def authenticate_session(ezdb):
ezdb.login = raw_input("login: ")
ezdb.password = raw_input("password: ")
payload = {
"token": ezdb.token,
"login": ezdb.login,
"password": ezdb.password
}
print("authenticate session, payload: %s" %
json.dumps(payload, indent=4))
r = requests.post(ezdb.auth_session, params=payload)
check_status_code(r, True)
"""
Deauthenticate session using deauthenticate url
"""
def deauthenticate_session(ezdb):
payload = {
"token": ezdb.token
}
print("deauthenticate session, payload: %s" %
json.dumps(payload, indent=4))
r = requests.post(ezdb.deauth_session, params=payload)
check_status_code(r)
"""
Search database using search url and search criteria from json file
"""
def search(ezdb):
tokenpayload = {
"token": ezdb.token
}
if ezdb.searchjson != "":
filename = ezdb.searchjson
do_write_criteria = False
else:
filename = "search.json"
do_write_criteria = True
if do_write_criteria:
write_criteria(ezdb)
_file = os.path.join(os.getcwd(), filename)
if not os.path.isfile(_file):
print(_file + " does not exist")
exit(1)
f = open(_file)
data = json.load(f)
print("search, payload: %s" % json.dumps(data, indent=4))
r = requests.post(ezdb.search, params=tokenpayload, data=json.dumps(data))
search_result = r.json()
write_json(search_result, "searchResult.json")
if "count" in search_result:
print("search response: %s hit(s) found" % search_result["count"])
print("search response was saved as searchResult.json\n")
print(perform_curl_request(r.request))
def perform_curl_request(req):
command = "curl -X {method} -H {headers} -d '{data}' '{uri}'"
method = req.method
uri = req.url
data = req.body
headers = ['"{0}: {1}"'.format(k, v) for k, v in req.headers.items()]
headers = " -H ".join(headers)
return command.format(method=method, headers=headers, data=data, uri=uri)
def write_criteria(ezdb):
with open(os.path.join(os.getcwd(), "criteria_template.json"), "r") as jsonFile:
data = json.load(jsonFile)
tmp = data["search"][0]
criteria = []
for x in ezdb.searchable:
tmp["string"] = x
criteria.append(copy.copy(tmp))
data["search"] = criteria
with open(os.path.join(os.getcwd(), "search.json"), "w") as jsonFile:
jsonFile.write(json.dumps(data))
print("generated search criteria, saved in " +
str(os.path.abspath(jsonFile.name)))
"""
Print the Root Menu About
"""
def root_menu_about(ezdb):
aboutDetails = {
"api": "",
"server_version": "",
"user-schema": "",
"solution": "",
"instance": "",
"db-name": "",
"Plugins": "",
"Optionen": "",
"last-modified": "",
"Fivegit": "",
"CUIgit": "",
"Style": "",
"server": ""
}
print(ezdb.header)
instance = getVal(ezdb.content, "instance")
for key, value in instance.items():
if key in aboutDetails:
aboutDetails[key] = value
if key == "name":
aboutDetails["instance"] = value
for key, value in ezdb.header.items():
if key in aboutDetails:
aboutDetails[key] = value
print("get plugins")
r = requests.get(ezdb.plugin)
ezdb.plugins = r.json()["plugins"]
plgns = []
for plg in ezdb.plugins:
plgns.append(plg["name"])
aboutDetails["Plugins"] = plgns
payload = {
"token": ezdb.token
}
print("get server info")
r = requests.get(ezdb.server, params=payload)
pretty_printer(aboutDetails)
"""
Helper Methods
"""
def getVal(data, str):
for key, value in data.items():
if key == str:
return value
def write_json(data, name):
with open(name, "w") as outfile:
json.dump(data, outfile, indent=4)
def write_file(self, r, filename):
with open(filename, "wb") as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
def pretty_printer(dict):
print "{:<20} {:<20}".format("About", "Information")
for k, v in dict.iteritems():
if v == "":
continue
if isinstance(v, list):
print "{:<20} {:<20}".format(k, ", ".join(v))
continue
print "{:<20} {:<20}".format(k, v)
def check_status_code(response, exit_on_failure=False):
if response.status_code != 200:
print("got status code %s: %s" %
(response.status_code, json.dumps(response.json(), indent=4)))
if exit_on_failure:
print("exit after unexpected status code")
exit(1)
"""
error_message
"""
def server_url_error_message(str, err):
print "URL is invalid"
print "{0} raises {1}".format(str, err)
sys.exit()
if __name__ == "__main__":
ezdb = Session(args.server_url, args.search, args.json)
print("\nCreate and authenticate session\n")
start_session(ezdb)
retrieve_current_session(ezdb)
authenticate_session(ezdb)
print("\nShow root menu\n")
root_menu_about(ezdb)
print("\nPerform search: %s\n" % ("from file %s" %
args.json if args.json != "" else ("[%s]" % ", ".join(args.search))))
search(ezdb)
print("\nDeauthenticate session\n")
deauthenticate_session(ezdb)
| false | true |
f7fa9fbaf28434362c6e8685f38c9a0bcfbe30a0 | 6,084 | py | Python | qa/rpc-tests/proxy_test.py | Hser2bio/feirm | 4bb1c8e3bdecd9ea449f2148f6b204e1729c3afc | [
"MIT"
] | 4 | 2018-07-16T02:19:59.000Z | 2020-06-17T22:31:41.000Z | qa/rpc-tests/proxy_test.py | Hser2bio/feirm | 4bb1c8e3bdecd9ea449f2148f6b204e1729c3afc | [
"MIT"
] | 2 | 2018-02-15T02:32:36.000Z | 2019-02-06T21:51:35.000Z | qa/rpc-tests/proxy_test.py | Hser2bio/feirm | 4bb1c8e3bdecd9ea449f2148f6b204e1729c3afc | [
"MIT"
] | 10 | 2018-02-26T14:18:54.000Z | 2020-03-27T19:11:49.000Z | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
    """Exercise bitcoind's -proxy / -onion / -proxyrandomize handling.

    Three SOCKS5 servers are started (unauthenticated; Tor-style
    auth+unauth; auth+unauth on IPv6) and four nodes are configured to
    route through them in different combinations.
    """

    def __init__(self):
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        # ... one on IPv6 with similar configuration
        self.conf3 = Socks5Configuration()
        self.conf3.af = socket.AF_INET6
        self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
        self.conf3.unauth = True
        self.conf3.auth = True

        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        self.serv3 = Socks5Server(self.conf3)
        self.serv3.start()

    def setup_nodes(self):
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        return start_nodes(4, self.options.tmpdir, extra_args=[
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
            ])

    def node_test(self, node, proxies, auth):
        """addnode four destination kinds through `node` and assert that
        each connection arrives at the matching proxy as a SOCKS5
        DOMAINNAME request.  Returns the list of observed commands.

        Refactored: the original repeated the identical addnode/assert
        sequence four times; it is now driven by a case table.
        """
        rv = []
        # (destination passed to addnode, expected proxied addr, port):
        # outgoing IPv4, IPv6, onion, and generic DNS-name connections.
        cases = [
            ("15.61.23.23:1234", "15.61.23.23", 1234),
            ("[1233:3432:2434:2343:3234:2345:6546:4534]:5443",
             "1233:3432:2434:2343:3234:2345:6546:4534", 5443),
            ("youraddress.onion:4918", "youraddress.onion", 4918),
            ("node.noumenon:8333", "node.noumenon", 8333),
        ]
        for proxy, (dest, expect_addr, expect_port) in zip(proxies, cases):
            node.addnode(dest, "onetry")
            cmd = proxy.queue.get()
            assert(isinstance(cmd, Socks5Command))
            # Note: bitcoind's SOCKS5 implementation only sends atyp
            # DOMAINNAME, even if connecting directly to IPv4/IPv6
            assert_equal(cmd.atyp, AddressType.DOMAINNAME)
            assert_equal(cmd.addr, expect_addr)
            assert_equal(cmd.port, expect_port)
            if not auth:
                assert_equal(cmd.username, None)
                assert_equal(cmd.password, None)
            rv.append(cmd)
        return rv

    def run_test(self):
        """Run node_test over the four node/proxy configurations."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username, x.password) for x in rv)
        assert_equal(len(credentials), 4)
        # proxy on IPv6 localhost
        self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
    # Standalone entry point: run the proxy regression test directly.
    ProxyTest().main()
| 41.671233 | 145 | 0.652202 |
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
class ProxyTest(BitcoinTestFramework):
def __init__(self):
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
node.addnode("youraddress.onion:4918", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "youraddress.onion")
assert_equal(cmd.port, 4918)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| true | true |
f7fa9fc8ade4c08a30fb61a42306d9397cc5661a | 351 | py | Python | blueprints/AKS/scripts/store_kube_token.py | ahmadiesa-abu/cloudify-web-monitoring-plugin | 65dc04c4d183b097fb7428735955ad1ae6f9b58b | [
"Apache-2.0"
] | null | null | null | blueprints/AKS/scripts/store_kube_token.py | ahmadiesa-abu/cloudify-web-monitoring-plugin | 65dc04c4d183b097fb7428735955ad1ae6f9b58b | [
"Apache-2.0"
] | null | null | null | blueprints/AKS/scripts/store_kube_token.py | ahmadiesa-abu/cloudify-web-monitoring-plugin | 65dc04c4d183b097fb7428735955ad1ae6f9b58b | [
"Apache-2.0"
] | 1 | 2021-05-26T07:08:52.000Z | 2021-05-26T07:08:52.000Z | import base64
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.manager import get_rest_client
# Decode the base64-encoded kube token passed in via the operation inputs
# and publish it both as a Cloudify secret (overwriting any existing value)
# and on this node instance's runtime properties.
client = get_rest_client()
token = base64.b64decode(inputs['kube_token']).decode('utf-8')
client.secrets.create('kubernetes_token', token, update_if_exists=True)
ctx.instance.runtime_properties['token'] = token
| 29.25 | 72 | 0.809117 | import base64
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify.manager import get_rest_client
client = get_rest_client()
token = base64.b64decode(inputs['kube_token']).decode('utf-8')
client.secrets.create('kubernetes_token', token, update_if_exists=True)
ctx.instance.runtime_properties['token'] = token
| true | true |
f7faa38321b8877dffdf54f0383438cd5f8f1ca3 | 10,990 | py | Python | futu/quote/quote_response_handler.py | szmile2008/py-futu-api | efe4af5deedf7e030dfe3ee78817f89191821753 | [
"Apache-2.0"
] | 1 | 2019-09-01T08:49:46.000Z | 2019-09-01T08:49:46.000Z | futu/quote/quote_response_handler.py | faruto/py-futu-api | cb274d5ab5387dca190b739d161f2bc8eabe073d | [
"Apache-2.0"
] | null | null | null | futu/quote/quote_response_handler.py | faruto/py-futu-api | cb274d5ab5387dca190b739d161f2bc8eabe073d | [
"Apache-2.0"
] | 1 | 2022-03-26T08:59:12.000Z | 2022-03-26T08:59:12.000Z | # -*- coding: utf-8 -*-
import pandas as pd
from futu.common import RspHandlerBase
from futu.quote.quote_query import *
class StockQuoteHandlerBase(RspHandlerBase):
    """
    Asynchronously handles pushed quote updates for subscribed stocks.

    .. code:: python
        class StockQuoteTest(StockQuoteHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(StockQuoteTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("StockQuoteTest: error, msg: %s" % content)
                    return RET_ERROR, content
                print("StockQuoteTest ", content) # StockQuoteTest's own handling logic
                return RET_OK, content
    """

    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Unpack the pushed protobuf; (error_code, message) on failure,
        # (RET_OK, list of quote records) on success.
        ret_code, msg, quote_list = StockQuoteQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, quote_list

    def on_recv_rsp(self, rsp_pb):
        """
        Called when a real-time quote push is received; override this
        method in a derived class.
        Note: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: same as the return value of get_stock_quote
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content
        else:
            # Column layout matches the DataFrame returned by get_stock_quote.
            col_list = [
                'code', 'data_date', 'data_time', 'last_price', 'open_price',
                'high_price', 'low_price', 'prev_close_price', 'volume',
                'turnover', 'turnover_rate', 'amplitude', 'suspension',
                'listing_date', 'price_spread', 'dark_status', 'strike_price',
                'contract_size', 'open_interest', 'implied_volatility',
                'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
            ]
            quote_frame_table = pd.DataFrame(content, columns=col_list)

            return RET_OK, quote_frame_table
class OrderBookHandlerBase(RspHandlerBase):
    """
    Asynchronously handles pushed real-time order book updates.

    .. code:: python
        class OrderBookTest(OrderBookHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(OrderBookTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("OrderBookTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("OrderBookTest ", data)  # OrderBookTest's own handling logic
                return RET_OK, content
    """

    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        """Unpack the pushed protobuf into (RET_OK, order_book) or (error, msg)."""
        ret_code, msg, order_book = OrderBookQuery.unpack_rsp(rsp_pb)
        if ret_code == RET_OK:
            return RET_OK, order_book
        return ret_code, msg

    def on_recv_rsp(self, rsp_pb):
        """
        Called when a real-time order book push is received; override this
        method in a derived class.
        Note: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: same as the return value of get_order_book
        """
        status, payload = self.parse_rsp_pb(rsp_pb)
        if status == RET_OK:
            self.on_recv_log(payload)
        return status, payload
class CurKlineHandlerBase(RspHandlerBase):
    """
    Asynchronously handles pushed K-line (candlestick) data.

    .. code:: python
        class CurKlineTest(CurKlineHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(CurKlineTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("CurKlineTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("CurKlineTest ", data) # CurKlineTest's own handling logic
                return RET_OK, content
    """

    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Unpack the pushed protobuf; (error_code, message) on failure,
        # (RET_OK, list of K-line records) on success.
        ret_code, msg, kline_list = CurKlinePush.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, kline_list

    def on_recv_rsp(self, rsp_pb):
        """
        Called when real-time K-line data is pushed; override this method
        in a derived class.
        Note: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to handle this parameter directly
        :return: same as the return value of get_cur_kline
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content
        else:
            # Column layout matches the DataFrame returned by get_cur_kline.
            col_list = [
                'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
                'turnover', 'k_type'
            ]
            kline_frame_table = pd.DataFrame(content, columns=col_list)

            return RET_OK, kline_frame_table
class TickerHandlerBase(RspHandlerBase):
"""
异步处理推送的逐笔数据。
.. code:: python
class TickerTest(TickerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(TickerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("CurKlineTest: error, msg: %s" % data)
return RET_ERROR, data
print("TickerTest ", data) # TickerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, ticker_list = TickerQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, ticker_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时逐笔数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_rt_ticker的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
self.on_recv_log(content)
col_list = [
'code', 'time', 'price', 'volume', 'turnover',
"ticker_direction", 'sequence', 'type', 'push_data_type',
]
ticker_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, ticker_frame_table
class RTDataHandlerBase(RspHandlerBase):
"""
异步处理推送的分时数据。
.. code:: python
class RTDataTest(RTDataHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(RTDataTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("RTDataTest: error, msg: %s" % data)
return RET_ERROR, data
print("RTDataTest ", data) # RTDataTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, rt_data_list = RtDataQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, rt_data_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时逐笔数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_rt_data的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
"last_close", 'avg_price', 'turnover', 'volume'
]
rt_data_table = pd.DataFrame(content, columns=col_list)
return RET_OK, rt_data_table
class BrokerHandlerBase(RspHandlerBase):
"""
异步处理推送的经纪数据。
.. code:: python
class BrokerTest(BrokerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(BrokerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("BrokerTest: error, msg: %s" % data)
return RET_ERROR, data
print("BrokerTest ", data) # BrokerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, (stock_code, bid_content, ask_content) = BrokerQueueQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, (stock_code, bid_content, ask_content)
def on_recv_rsp(self, rsp_pb):
"""
在收到实时经纪数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 成功时返回(RET_OK, stock_code, [bid_frame_table, ask_frame_table]), 相关frame table含义见 get_broker_queue_ 的返回值说明
失败时返回(RET_ERROR, ERR_MSG, None)
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content, None
else:
self.on_recv_log(content)
stock_code, bid_content, ask_content = content
bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_content, columns=bid_list)
ask_frame_table = pd.DataFrame(ask_content, columns=ask_list)
return ret_code, stock_code, [bid_frame_table, ask_frame_table]
class KeepAliveHandlerBase(RspHandlerBase):
"""Base class for handling KeepAlive"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, alive_time = KeepAlive.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, alive_time
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class SysNotifyHandlerBase(RspHandlerBase):
"""sys notify"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, content = SysNotifyPush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
notify_type, sub_type, msg = content
return RET_OK, (notify_type, sub_type, msg)
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class AsyncHandler_InitConnect(RspHandlerBase):
""" AsyncHandler_TrdSubAccPush"""
def __init__(self, notify_obj=None):
self._notify_obj = notify_obj
super(AsyncHandler_InitConnect, self).__init__()
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(ret_code, msg, conn_info_map)
return ret_code, msg
#
# class OrderDetailHandlerBase(RspHandlerBase):
# def __init__(self):
# super(OrderDetailHandlerBase, self).__init__()
#
# def on_recv_rsp(self, rsp_pb):
# """receive response callback function"""
# ret_code, msg, data = OrderDetail.unpack_rsp(rsp_pb)
#
# if ret_code != RET_OK:
# return ret_code, msg
# else:
# return ret_code, data | 30.275482 | 121 | 0.590082 |
import pandas as pd
from futu.common import RspHandlerBase
from futu.quote.quote_query import *
class StockQuoteHandlerBase(RspHandlerBase):
"""
异步处理推送的订阅股票的报价。
.. code:: python
class StockQuoteTest(StockQuoteHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, content = super(StockQuoteTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("StockQuoteTest: error, msg: %s" % content)
return RET_ERROR, content
print("StockQuoteTest ", content) # StockQuoteTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, quote_list = StockQuoteQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, quote_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时报价推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_stock_quote的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'data_date', 'data_time', 'last_price', 'open_price',
'high_price', 'low_price', 'prev_close_price', 'volume',
'turnover', 'turnover_rate', 'amplitude', 'suspension',
'listing_date', 'price_spread', 'dark_status', 'strike_price',
'contract_size', 'open_interest', 'implied_volatility',
'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'
]
quote_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, quote_frame_table
class OrderBookHandlerBase(RspHandlerBase):
"""
异步处理推送的实时摆盘。
.. code:: python
class OrderBookTest(OrderBookHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(OrderBookTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("OrderBookTest: error, msg: %s" % data)
return RET_ERROR, data
print("OrderBookTest ", data) # OrderBookTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, order_book = OrderBookQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, order_book
def on_recv_rsp(self, rsp_pb):
"""
在收到实摆盘数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_order_book的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code == RET_OK:
self.on_recv_log(content)
return ret_code, content
class CurKlineHandlerBase(RspHandlerBase):
"""
异步处理推送的k线数据。
.. code:: python
class CurKlineTest(CurKlineHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(CurKlineTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("CurKlineTest: error, msg: %s" % data)
return RET_ERROR, data
print("CurKlineTest ", data) # CurKlineTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, kline_list = CurKlinePush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, kline_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时k线数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_cur_kline的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
'turnover', 'k_type'
]
kline_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, kline_frame_table
class TickerHandlerBase(RspHandlerBase):
"""
异步处理推送的逐笔数据。
.. code:: python
class TickerTest(TickerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(TickerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("CurKlineTest: error, msg: %s" % data)
return RET_ERROR, data
print("TickerTest ", data) # TickerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, ticker_list = TickerQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, ticker_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时逐笔数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_rt_ticker的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
self.on_recv_log(content)
col_list = [
'code', 'time', 'price', 'volume', 'turnover',
"ticker_direction", 'sequence', 'type', 'push_data_type',
]
ticker_frame_table = pd.DataFrame(content, columns=col_list)
return RET_OK, ticker_frame_table
class RTDataHandlerBase(RspHandlerBase):
"""
异步处理推送的分时数据。
.. code:: python
class RTDataTest(RTDataHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(RTDataTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("RTDataTest: error, msg: %s" % data)
return RET_ERROR, data
print("RTDataTest ", data) # RTDataTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, rt_data_list = RtDataQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, rt_data_list
def on_recv_rsp(self, rsp_pb):
"""
在收到实时逐笔数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 参见get_rt_data的返回值
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
col_list = [
'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
"last_close", 'avg_price', 'turnover', 'volume'
]
rt_data_table = pd.DataFrame(content, columns=col_list)
return RET_OK, rt_data_table
class BrokerHandlerBase(RspHandlerBase):
"""
异步处理推送的经纪数据。
.. code:: python
class BrokerTest(BrokerHandlerBase):
def on_recv_rsp(self, rsp_str):
ret_code, data = super(BrokerTest,self).on_recv_rsp(rsp_str)
if ret_code != RET_OK:
print("BrokerTest: error, msg: %s" % data)
return RET_ERROR, data
print("BrokerTest ", data) # BrokerTest自己的处理逻辑
return RET_OK, content
"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, (stock_code, bid_content, ask_content) = BrokerQueueQuery.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, (stock_code, bid_content, ask_content)
def on_recv_rsp(self, rsp_pb):
"""
在收到实时经纪数据推送后会回调到该函数,使用者需要在派生类中覆盖此方法
注意该回调是在独立子线程中
:param rsp_pb: 派生类中不需要直接处理该参数
:return: 成功时返回(RET_OK, stock_code, [bid_frame_table, ask_frame_table]), 相关frame table含义见 get_broker_queue_ 的返回值说明
失败时返回(RET_ERROR, ERR_MSG, None)
"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
if ret_code != RET_OK:
return ret_code, content, None
else:
self.on_recv_log(content)
stock_code, bid_content, ask_content = content
bid_list = [
'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
]
ask_list = [
'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
]
bid_frame_table = pd.DataFrame(bid_content, columns=bid_list)
ask_frame_table = pd.DataFrame(ask_content, columns=ask_list)
return ret_code, stock_code, [bid_frame_table, ask_frame_table]
class KeepAliveHandlerBase(RspHandlerBase):
"""Base class for handling KeepAlive"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, msg, alive_time = KeepAlive.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, msg
else:
return RET_OK, alive_time
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class SysNotifyHandlerBase(RspHandlerBase):
"""sys notify"""
@classmethod
def parse_rsp_pb(cls, rsp_pb):
ret_code, content = SysNotifyPush.unpack_rsp(rsp_pb)
if ret_code != RET_OK:
return ret_code, content
else:
notify_type, sub_type, msg = content
return RET_OK, (notify_type, sub_type, msg)
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, content = self.parse_rsp_pb(rsp_pb)
return ret_code, content
class AsyncHandler_InitConnect(RspHandlerBase):
""" AsyncHandler_TrdSubAccPush"""
def __init__(self, notify_obj=None):
self._notify_obj = notify_obj
super(AsyncHandler_InitConnect, self).__init__()
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(ret_code, msg, conn_info_map)
return ret_code, msg
| false | true |
f7faa3b7027a00cec360dd10e85f374e66cbb652 | 228 | py | Python | study/w3resource/exercises/python-basic/031 - 060/python-basic - 052.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | study/w3resource/exercises/python-basic/031 - 060/python-basic - 052.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | study/w3resource/exercises/python-basic/031 - 060/python-basic - 052.py | gustavomarquezinho/python | e36779aa5c4bfaf88c587f05db5bd447fd41e4a2 | [
"MIT"
] | null | null | null | # 052 - Write a Python program to print to stderr.
from __future__ import print_function
from sys import stderr
def printe(*pArgs, **pKwargs):
print(*pArgs, file=stderr, **pKwargs)
printe('abc', 'efg', 'xyzm', sep = '--') | 25.333333 | 50 | 0.688596 |
from __future__ import print_function
from sys import stderr
def printe(*pArgs, **pKwargs):
print(*pArgs, file=stderr, **pKwargs)
printe('abc', 'efg', 'xyzm', sep = '--') | true | true |
f7faa4e358ca07d08708d6decce3527c62206742 | 1,526 | py | Python | Server/ChatBot/venv/Lib/site-packages/pygubu/builder/widgets/tkscrolledframe.py | sozuer53/BBC | 31bb128cb1e1a19db955fd673d67cf0e92bac3a4 | [
"Apache-2.0"
] | 42 | 2018-12-12T01:00:59.000Z | 2022-03-27T07:32:29.000Z | pygubu/pygubu/builder/widgets/tkscrolledframe.py | GoopyAspirin/python-rsa | 1779b35ee0abe80b44be77fe2e26c7fc26765c1c | [
"MIT"
] | 13 | 2020-11-06T13:50:45.000Z | 2022-01-25T07:17:37.000Z | pygubu/pygubu/builder/widgets/tkscrolledframe.py | GoopyAspirin/python-rsa | 1779b35ee0abe80b44be77fe2e26c7fc26765c1c | [
"MIT"
] | 8 | 2020-11-14T04:30:26.000Z | 2021-01-16T17:55:19.000Z | from __future__ import unicode_literals
from pygubu.builder.builderobject import *
from pygubu.widgets.tkscrolledframe import TkScrolledFrame
class TKScrolledFrameBO(BuilderObject):
class_ = TkScrolledFrame
container = True
# maxchildren = 1
# allowed_children = ('tk.Frame', 'ttk.Frame' )
OPTIONS_STANDARD = ('borderwidth', 'cursor', 'highlightbackground',
'highlightcolor', 'highlightthickness',
'padx', 'pady', 'relief', 'takefocus')
OPTIONS_SPECIFIC = ('background', 'class_', 'container',
'height', 'width')
OPTIONS_CUSTOM = ('scrolltype', 'usemousewheel')
properties = OPTIONS_STANDARD + OPTIONS_SPECIFIC + OPTIONS_CUSTOM
ro_properties = ('class_', 'scrolltype')
def get_child_master(self):
return self.widget.innerframe
def configure(self, target=None):
super(TKScrolledFrameBO, self).configure(self.widget.innerframe)
def _set_property(self, target_widget, pname, value):
if pname in ('usemousewheel',):
super(TKScrolledFrameBO, self)._set_property(self.widget, pname, value)
else:
super(TKScrolledFrameBO, self)._set_property(target_widget, pname, value)
def layout(self, target=None):
self._grid_layout(self.widget, configure_rc=False)
self._grid_rc_layout(self.widget.innerframe)
register_widget('pygubu.builder.widgets.tkscrolledframe', TKScrolledFrameBO,
'ScrolledFrame', ('Pygubu Widgets', 'tk'))
| 39.128205 | 85 | 0.68021 | from __future__ import unicode_literals
from pygubu.builder.builderobject import *
from pygubu.widgets.tkscrolledframe import TkScrolledFrame
class TKScrolledFrameBO(BuilderObject):
class_ = TkScrolledFrame
container = True
OPTIONS_STANDARD = ('borderwidth', 'cursor', 'highlightbackground',
'highlightcolor', 'highlightthickness',
'padx', 'pady', 'relief', 'takefocus')
OPTIONS_SPECIFIC = ('background', 'class_', 'container',
'height', 'width')
OPTIONS_CUSTOM = ('scrolltype', 'usemousewheel')
properties = OPTIONS_STANDARD + OPTIONS_SPECIFIC + OPTIONS_CUSTOM
ro_properties = ('class_', 'scrolltype')
def get_child_master(self):
return self.widget.innerframe
def configure(self, target=None):
super(TKScrolledFrameBO, self).configure(self.widget.innerframe)
def _set_property(self, target_widget, pname, value):
if pname in ('usemousewheel',):
super(TKScrolledFrameBO, self)._set_property(self.widget, pname, value)
else:
super(TKScrolledFrameBO, self)._set_property(target_widget, pname, value)
def layout(self, target=None):
self._grid_layout(self.widget, configure_rc=False)
self._grid_rc_layout(self.widget.innerframe)
register_widget('pygubu.builder.widgets.tkscrolledframe', TKScrolledFrameBO,
'ScrolledFrame', ('Pygubu Widgets', 'tk'))
| true | true |
f7faa504a00ce7434456dc618e27c739991941e4 | 255 | py | Python | pycristoforo/tests/conftest.py | samdobson/PyCristoforo | 5d631494e95881ef13c8dbe0e93e89b71a860782 | [
"MIT"
] | 27 | 2019-08-02T19:20:07.000Z | 2022-03-19T02:15:22.000Z | pycristoforo/tests/conftest.py | samdobson/PyCristoforo | 5d631494e95881ef13c8dbe0e93e89b71a860782 | [
"MIT"
] | 14 | 2019-06-21T07:29:54.000Z | 2022-03-20T22:51:10.000Z | pycristoforo/tests/conftest.py | samdobson/PyCristoforo | 5d631494e95881ef13c8dbe0e93e89b71a860782 | [
"MIT"
] | 6 | 2020-03-14T10:32:19.000Z | 2021-06-11T00:10:12.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for pycristoforo.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://pytest.org/latest/plugins.html
"""
# import pytest
| 21.25 | 60 | 0.658824 | true | true | |
f7faa6847ba11bee709266225a83f23c1471678b | 1,499 | py | Python | django_tally/user_def/group_tally.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | django_tally/user_def/group_tally.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | django_tally/user_def/group_tally.py | CodeYellowBV/django-tally | a705821050da912fb8dabd56c41c040ea0a00a21 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.postgres import fields as pg_fields
from ..data import DBStored
from ..group import Group
from .tally import UserDefTallyBaseNonStored
from .lang import run, Env
from .lang.json import decode
class UserDefGroupTallyBaseNonStored(UserDefTallyBaseNonStored):
get_group = pg_fields.JSONField(
default=None,
blank=True, null=True,
)
def as_tally(self, **kwargs):
return super().as_tally(get_group=decode(self.get_group), **kwargs)
class UserTally(Group, UserDefTallyBaseNonStored.UserTally):
def __init__(self, get_group=None, **kwargs):
super(Group, self).__init__(**kwargs)
self._get_group = get_group
def get_group(self, value):
return run(
self._get_group,
Env(
env={'value': value},
base_env=self._env,
),
log=True,
)
class Meta:
abstract = True
class UserDefGroupTallyBase(UserDefGroupTallyBaseNonStored):
db_name = models.TextField(unique=True)
def as_tally(self):
return super().as_tally(db_name=self.db_name)
class UserTally(DBStored, UserDefGroupTallyBaseNonStored.UserTally):
def __init__(self, db_name=None, **kwargs):
super(DBStored, self).__init__(**kwargs)
self.db_name = db_name
self.ensure_data()
class Meta:
abstract = True
| 26.298246 | 75 | 0.631755 | from django.db import models
from django.contrib.postgres import fields as pg_fields
from ..data import DBStored
from ..group import Group
from .tally import UserDefTallyBaseNonStored
from .lang import run, Env
from .lang.json import decode
class UserDefGroupTallyBaseNonStored(UserDefTallyBaseNonStored):
get_group = pg_fields.JSONField(
default=None,
blank=True, null=True,
)
def as_tally(self, **kwargs):
return super().as_tally(get_group=decode(self.get_group), **kwargs)
class UserTally(Group, UserDefTallyBaseNonStored.UserTally):
def __init__(self, get_group=None, **kwargs):
super(Group, self).__init__(**kwargs)
self._get_group = get_group
def get_group(self, value):
return run(
self._get_group,
Env(
env={'value': value},
base_env=self._env,
),
log=True,
)
class Meta:
abstract = True
class UserDefGroupTallyBase(UserDefGroupTallyBaseNonStored):
db_name = models.TextField(unique=True)
def as_tally(self):
return super().as_tally(db_name=self.db_name)
class UserTally(DBStored, UserDefGroupTallyBaseNonStored.UserTally):
def __init__(self, db_name=None, **kwargs):
super(DBStored, self).__init__(**kwargs)
self.db_name = db_name
self.ensure_data()
class Meta:
abstract = True
| true | true |
f7faa7b3d23f7446d47189e1735febe215ffdf4a | 5,440 | py | Python | miqa/core/rest/session.py | girder/miqa | 756675481c6a11c02134acbde405bfafc9d06b87 | [
"Apache-2.0"
] | 1 | 2021-05-26T18:49:52.000Z | 2021-05-26T18:49:52.000Z | miqa/core/rest/session.py | girder/miqa | 756675481c6a11c02134acbde405bfafc9d06b87 | [
"Apache-2.0"
] | 8 | 2021-04-29T17:25:28.000Z | 2021-05-27T15:09:20.000Z | miqa/core/rest/session.py | girder/miqa | 756675481c6a11c02134acbde405bfafc9d06b87 | [
"Apache-2.0"
] | null | null | null | import os
from pathlib import Path
import re
from drf_yasg.utils import no_body, swagger_auto_schema
from jsonschema import validate
from jsonschema.exceptions import ValidationError as JSONValidationError
from rest_framework import serializers, status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from miqa.core.conversion.csv_to_json import csvContentToJsonObject
from miqa.core.models import Experiment, Image, Scan, ScanNote, Session, Site
from miqa.core.models.scan import ScanDecision
from miqa.core.schema.data_import import schema
class SessionSerializer(serializers.ModelSerializer):
class Meta:
model = Session
fields = ['id', 'name']
class SessionSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = Session
fields = ['importpath', 'exportpath']
importpath = serializers.CharField(source='import_path')
exportpath = serializers.CharField(source='export_path')
class SessionViewSet(ReadOnlyModelViewSet):
queryset = Session.objects.all()
permission_classes = [AllowAny]
serializer_class = SessionSerializer
@swagger_auto_schema(
method='GET',
responses={200: SessionSettingsSerializer()},
)
@swagger_auto_schema(
method='PUT',
request_body=SessionSettingsSerializer(),
responses={200: SessionSettingsSerializer()},
)
@action(detail=True, url_path='settings', url_name='settings', methods=['GET', 'PUT'])
def settings_(self, request, **kwargs):
session: Session = self.get_object()
if request.method == 'GET':
serializer = SessionSettingsSerializer(instance=session)
elif request.method == 'PUT':
serializer = SessionSettingsSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
session.import_path = serializer.data['importpath']
session.export_path = serializer.data['exportpath']
session.save()
return Response(serializer.data)
@swagger_auto_schema(
request_body=no_body,
responses={204: 'Import succeeded.'},
)
@action(detail=True, url_path='import', url_name='import', methods=['POST'])
def import_(self, request, **kwargs):
session: Session = self.get_object()
with open(session.import_path) as fd:
csv_content = fd.read()
try:
json_content = csvContentToJsonObject(csv_content)
validate(json_content, schema) # TODO this should be an internal error
except (JSONValidationError, Exception) as e:
raise ValidationError({'error': f'Invalid CSV file: {str(e)}'})
data_root = Path(json_content['data_root'])
sites = {
site['name']: Site.objects.get_or_create(
name=site['name'], defaults={'creator': request.user}
)[0]
for site in json_content['sites']
}
Experiment.objects.filter(
session=session
).delete() # cascades to scans -> images, scan_notes
experiments = {
e['id']: Experiment(name=e['id'], note=e['note'], session=session)
for e in json_content['experiments']
}
Experiment.objects.bulk_create(experiments.values())
scans = []
images = []
notes = []
for scan_json in json_content['scans']:
experiment = experiments[scan_json['experiment_id']]
site = sites[scan_json['site_id']]
scan = Scan(
scan_id=scan_json['id'],
scan_type=scan_json['type'],
decision=ScanDecision.from_rating(scan_json['decision']),
experiment=experiment,
site=site,
)
scans.append(scan)
# TODO import notes
# if scan_json['note']:
# notes_json = json.loads(unquote(scan_json['note']))
# for note_json in notes_json:
# scan_note = ScanNote(
# **note_json,
# scan=scan,
# )
# # This forces the modified field to use the value we give it
# scan_note.update_modified = False
# notes.append(scan_note)
if 'images' in scan_json:
# TODO implement this
raise Exception('use image_pattern for now')
elif 'image_pattern' in scan_json:
image_pattern = re.compile(scan_json['image_pattern'])
image_dir = data_root / scan_json['path']
for image_file in os.listdir(image_dir):
if image_pattern.fullmatch(image_file):
images.append(
Image(
name=image_file,
raw_path=image_dir / image_file,
scan=scan,
)
)
Scan.objects.bulk_create(scans)
Image.objects.bulk_create(images)
ScanNote.objects.bulk_create(notes)
return Response(status=status.HTTP_204_NO_CONTENT)
| 37.260274 | 90 | 0.605882 | import os
from pathlib import Path
import re
from drf_yasg.utils import no_body, swagger_auto_schema
from jsonschema import validate
from jsonschema.exceptions import ValidationError as JSONValidationError
from rest_framework import serializers, status
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet
from miqa.core.conversion.csv_to_json import csvContentToJsonObject
from miqa.core.models import Experiment, Image, Scan, ScanNote, Session, Site
from miqa.core.models.scan import ScanDecision
from miqa.core.schema.data_import import schema
class SessionSerializer(serializers.ModelSerializer):
class Meta:
model = Session
fields = ['id', 'name']
class SessionSettingsSerializer(serializers.ModelSerializer):
class Meta:
model = Session
fields = ['importpath', 'exportpath']
importpath = serializers.CharField(source='import_path')
exportpath = serializers.CharField(source='export_path')
class SessionViewSet(ReadOnlyModelViewSet):
queryset = Session.objects.all()
permission_classes = [AllowAny]
serializer_class = SessionSerializer
@swagger_auto_schema(
method='GET',
responses={200: SessionSettingsSerializer()},
)
@swagger_auto_schema(
method='PUT',
request_body=SessionSettingsSerializer(),
responses={200: SessionSettingsSerializer()},
)
@action(detail=True, url_path='settings', url_name='settings', methods=['GET', 'PUT'])
def settings_(self, request, **kwargs):
session: Session = self.get_object()
if request.method == 'GET':
serializer = SessionSettingsSerializer(instance=session)
elif request.method == 'PUT':
serializer = SessionSettingsSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
session.import_path = serializer.data['importpath']
session.export_path = serializer.data['exportpath']
session.save()
return Response(serializer.data)
@swagger_auto_schema(
request_body=no_body,
responses={204: 'Import succeeded.'},
)
@action(detail=True, url_path='import', url_name='import', methods=['POST'])
def import_(self, request, **kwargs):
session: Session = self.get_object()
with open(session.import_path) as fd:
csv_content = fd.read()
try:
json_content = csvContentToJsonObject(csv_content)
validate(json_content, schema)
except (JSONValidationError, Exception) as e:
raise ValidationError({'error': f'Invalid CSV file: {str(e)}'})
data_root = Path(json_content['data_root'])
sites = {
site['name']: Site.objects.get_or_create(
name=site['name'], defaults={'creator': request.user}
)[0]
for site in json_content['sites']
}
Experiment.objects.filter(
session=session
).delete()
experiments = {
e['id']: Experiment(name=e['id'], note=e['note'], session=session)
for e in json_content['experiments']
}
Experiment.objects.bulk_create(experiments.values())
scans = []
images = []
notes = []
for scan_json in json_content['scans']:
experiment = experiments[scan_json['experiment_id']]
site = sites[scan_json['site_id']]
scan = Scan(
scan_id=scan_json['id'],
scan_type=scan_json['type'],
decision=ScanDecision.from_rating(scan_json['decision']),
experiment=experiment,
site=site,
)
scans.append(scan)
json:
raise Exception('use image_pattern for now')
elif 'image_pattern' in scan_json:
image_pattern = re.compile(scan_json['image_pattern'])
image_dir = data_root / scan_json['path']
for image_file in os.listdir(image_dir):
if image_pattern.fullmatch(image_file):
images.append(
Image(
name=image_file,
raw_path=image_dir / image_file,
scan=scan,
)
)
Scan.objects.bulk_create(scans)
Image.objects.bulk_create(images)
ScanNote.objects.bulk_create(notes)
return Response(status=status.HTTP_204_NO_CONTENT)
| true | true |
f7faa7c94e31717dd3bfb2b1c2c2f3bbb745911d | 8,409 | py | Python | salt/states/modjk_worker.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2021-04-05T19:46:35.000Z | 2021-04-05T19:46:35.000Z | salt/states/modjk_worker.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | salt/states/modjk_worker.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2020-09-30T16:09:48.000Z | 2020-09-30T16:09:48.000Z | # -*- coding: utf-8 -*-
'''
Manage modjk workers
====================
Send commands to a :strong:`modjk` load balancer via the peer system.
This module can be used with the :ref:`prereq <requisites-prereq>`
requisite to remove/add the worker from the load balancer before
deploying/restarting service.
Mandatory Settings:
- The minion needs to have permission to publish the :strong:`modjk.*`
functions (see :ref:`here <peer>` for information on configuring
peer publishing permissions)
- The modjk load balancer must be configured as stated in the :strong:`modjk`
execution module :mod:`documentation <salt.modules.modjk>`
'''
from __future__ import absolute_import
import salt.utils
def __virtual__():
'''
Check if we have peer access ?
'''
return True
def _send_command(cmd,
                  worker,
                  lbn,
                  target,
                  profile='default',
                  tgt_type='glob'):
    '''
    Publish a modjk command to the targeted load balancer minions.

    The calling minion must be allowed to publish the ``modjk.*``
    functions via the peer system.

    cmd
        One of:

        worker_stop - won't get any traffic from the lbn
        worker_activate - activate the worker
        worker_disable - will get traffic only for current sessions

    Returns a dict with the keys:

    code
        True only when every answering minion reported success
    msg
        Human-readable outcome message
    minions
        Minions that answered; on partial failure, only the ones that
        returned False
    '''
    ret = {
        'code': False,
        'msg': 'OK',
        'minions': [],
    }

    # Send the command to target
    func = 'modjk.{0}'.format(cmd)
    args = [worker, lbn, profile]
    response = __salt__['publish.publish'](target, func, args, tgt_type)

    # Get errors and list of affected minions
    errors = []
    minions = []

    for minion in response:
        minions.append(minion)
        if not response[minion]:
            errors.append(minion)

    # parse response
    if not response:
        # Nobody answered the publish at all
        ret['msg'] = 'no servers answered the published command {0}'.format(
            cmd
        )
        return ret
    elif errors:
        # Fixed message typo ("return False" -> "returned False")
        ret['msg'] = 'the following minions returned False'
        ret['minions'] = errors
        return ret
    else:
        # Fixed message typo ("commad" -> "command")
        ret['code'] = True
        ret['msg'] = 'the command was published successfully'
        ret['minions'] = minions
        return ret
def _worker_status(target,
                   worker,
                   activation,
                   profile='default',
                   tgt_type='glob'):
    '''
    Check whether ``worker`` is in the ``activation`` state on every
    targeted load balancer.

    Returns a dictionary with the keys:

    result
        False when no server answered the published command
    errors
        Balancers that could not find the worker
    wrong_state
        Balancers on which the worker is not in the ``activation`` state
    '''
    ret = {
        'result': True,
        'errors': [],
        'wrong_state': [],
    }

    status = __salt__['publish.publish'](
        target, 'modjk.worker_status', [worker, profile], tgt_type
    )

    # No balancer answered the publish at all
    if not status:
        ret['result'] = False
        return ret

    # Sort each answering balancer into the error / wrong-state buckets
    for balancer, state in status.items():
        if not state:
            ret['errors'].append(balancer)
        elif state['activation'] != activation:
            ret['wrong_state'].append(balancer)

    return ret
def _talk2modjk(name, lbn, target, action, profile='default', tgt_type='glob'):
    '''
    Shared implementation behind the stop/disable/activate states.
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Map each state action onto the modjk activation code it produces
    action_map = {
        'worker_stop': 'STP',
        'worker_disable': 'DIS',
        'worker_activate': 'ACT',
    }

    # Work out which balancers (if any) still need the change
    status = _worker_status(
        target, name, action_map[action], profile, tgt_type
    )
    if not status['result']:
        ret['result'] = False
        ret['comment'] = ('no servers answered the published command '
                          'modjk.worker_status')
        return ret
    if status['errors']:
        ret['result'] = False
        ret['comment'] = ('the following balancers could not find the '
                          'worker {0}: {1}'.format(name, status['errors']))
        return ret
    if not status['wrong_state']:
        # Nothing to do anywhere
        ret['comment'] = ('the worker is in the desired activation state on '
                          'all the balancers')
        return ret

    ret['comment'] = ('the action {0} will be sent to the balancers '
                      '{1}'.format(action, status['wrong_state']))
    ret['changes'] = {action: status['wrong_state']}

    # In test mode just report the pending change without applying it
    if __opts__['test']:
        ret['result'] = None
        return ret

    # Apply the change on the balancers that need it
    response = _send_command(action, name, lbn, target, profile, tgt_type)
    ret['comment'] = response['msg']
    ret['result'] = response['code']
    return ret
def stop(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
    '''
    Stop the named worker on the lbn load balancers at the targeted
    minions.  A stopped worker receives no traffic at all from the lbn.

    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.stop:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    '''
    # Deprecation shim: honour the legacy expr_form argument until the
    # Fluorine release, when both it and this shim go away.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.'
        )
        tgt_type = expr_form

    return _talk2modjk(name, lbn, target, 'worker_stop', profile, tgt_type)
def activate(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
    '''
    Activate the named worker on the lbn load balancers at the targeted
    minions so it receives traffic again.

    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.activate:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    '''
    # Deprecation shim: honour the legacy expr_form argument until the
    # Fluorine release, when both it and this shim go away.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.'
        )
        tgt_type = expr_form

    return _talk2modjk(name, lbn, target, 'worker_activate', profile, tgt_type)
def disable(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
    '''
    Disable the named worker on the lbn load balancers at the targeted
    minions.  A disabled worker keeps serving its current sessions but
    is given no new ones.

    .. versionchanged:: Nitrogen
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Example:

    .. code-block:: yaml

        disable-before-deploy:
          modjk_worker.disable:
            - name: {{ grains['id'] }}
            - lbn: application
            - target: 'roles:balancer'
            - tgt_type: grain
    '''
    # Deprecation shim: honour the legacy expr_form argument until the
    # Fluorine release, when both it and this shim go away.
    if expr_form is not None:
        salt.utils.warn_until(
            'Fluorine',
            'the target type should be passed using the \'tgt_type\' '
            'argument instead of \'expr_form\'. Support for using '
            '\'expr_form\' will be removed in Salt Fluorine.'
        )
        tgt_type = expr_form

    return _talk2modjk(name, lbn, target, 'worker_disable', profile, tgt_type)
| 30.467391 | 84 | 0.593769 |
from __future__ import absolute_import
import salt.utils
def __virtual__():
return True
def _send_command(cmd,
worker,
lbn,
target,
profile='default',
tgt_type='glob'):
ret = {
'code': False,
'msg': 'OK',
'minions': [],
}
func = 'modjk.{0}'.format(cmd)
args = [worker, lbn, profile]
response = __salt__['publish.publish'](target, func, args, tgt_type)
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
if not response:
ret['msg'] = 'no servers answered the published command {0}'.format(
cmd
)
return ret
elif len(errors) > 0:
ret['msg'] = 'the following minions return False'
ret['minions'] = errors
return ret
else:
ret['code'] = True
ret['msg'] = 'the commad was published successfully'
ret['minions'] = minions
return ret
def _worker_status(target,
worker,
activation,
profile='default',
tgt_type='glob'):
ret = {
'result': True,
'errors': [],
'wrong_state': [],
}
args = [worker, profile]
status = __salt__['publish.publish'](
target, 'modjk.worker_status', args, tgt_type
)
if not status:
ret['result'] = False
return ret
for balancer in status:
if not status[balancer]:
ret['errors'].append(balancer)
elif status[balancer]['activation'] != activation:
ret['wrong_state'].append(balancer)
return ret
def _talk2modjk(name, lbn, target, action, profile='default', tgt_type='glob'):
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
action_map = {
'worker_stop': 'STP',
'worker_disable': 'DIS',
'worker_activate': 'ACT',
}
status = _worker_status(
target, name, action_map[action], profile, tgt_type
)
if not status['result']:
ret['result'] = False
ret['comment'] = ('no servers answered the published command '
'modjk.worker_status')
return ret
if status['errors']:
ret['result'] = False
ret['comment'] = ('the following balancers could not find the '
'worker {0}: {1}'.format(name, status['errors']))
return ret
if not status['wrong_state']:
ret['comment'] = ('the worker is in the desired activation state on '
'all the balancers')
return ret
else:
ret['comment'] = ('the action {0} will be sent to the balancers '
'{1}'.format(action, status['wrong_state']))
ret['changes'] = {action: status['wrong_state']}
if __opts__['test']:
ret['result'] = None
return ret
response = _send_command(action, name, lbn, target, profile, tgt_type)
ret['comment'] = response['msg']
ret['result'] = response['code']
return ret
def stop(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
return _talk2modjk(name, lbn, target, 'worker_stop', profile, tgt_type)
def activate(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
return _talk2modjk(name, lbn, target, 'worker_activate', profile, tgt_type)
def disable(name, lbn, target, profile='default', tgt_type='glob', expr_form=None):
if expr_form is not None:
salt.utils.warn_until(
'Fluorine',
'the target type should be passed using the \'tgt_type\' '
'argument instead of \'expr_form\'. Support for using '
'\'expr_form\' will be removed in Salt Fluorine.'
)
tgt_type = expr_form
return _talk2modjk(name, lbn, target, 'worker_disable', profile, tgt_type)
| true | true |
f7faa840392864362dd4dafe8279ea33bdfe883c | 1,689 | py | Python | Chap 2/Chap-2-Proj-Bowling.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | Chap 2/Chap-2-Proj-Bowling.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | Chap 2/Chap-2-Proj-Bowling.py | dwhickox/NCHS-Programming-1-Python-Programs | 96eba3826585a81a015740f59329c7a06afc9db7 | [
"MIT"
] | null | null | null | #David Hickox
#jan 24 17
#Bowling Prgm
#Calculates the Bowling averages and totals
#variables
# kim1-3, holds the scores for kim
# kourt1-3, holds the scores for kourtney
# poptart1-3, holds Mr. Hayes' scores
# epstein1-3, holds Mrs. Epstein's scores
# kimave, kim's average
# kourtave, kourtney's average
# poptartave, Mr. Hayes' average
# epsteinave, Mrs. Epstein's average
# kimney, the team scores for kim and kourtney
# popstein, the team scores for Mr. Hayes and Mrs. Epstein
#imports date time and curency handeling because i hate string formating (this takes the place of #$%.2f"%)
import locale
locale.setlocale( locale.LC_ALL, '' )
#use locale.currency() for currency formating
print("Welcome to the Bowling Program\n")
#initializes all the values
kim1 = 101
kim2 = 126
kim3 = 132
kourt1 = 135
kourt2 = 117
kourt3 = 123
poptart1 = 199
poptart2 = 218
poptart3 = 221
epstein1 = 220
epstein2 = 197
epstein3 = 236
# the maths for the averages
kimave = (kim1+kim2+kim3)/3
kourtave = (kourt1+kourt2+kourt3)/3
poptartave = (poptart1+poptart2+poptart3)/3
epsteinave = (epstein1+epstein2+epstein3)/3
#maths for team scores
kimney = kim1+kim2+kim3+kourt1+kourt2+kourt3
popstein = poptart1+poptart2+poptart3+epstein1+epstein2+epstein3
#prints the results in a well formated mannor
print ("PLAYER\t\t\t\tAVERAGE")
print ("Kim Kardashian\t\t\t%.3f"%kimave)
print ("Kourtney Kardashian\t\t%.3f"%kourtave)
print ("Mr. Hayes\t\t\t%.3f"%poptartave)
print ("Mrs. Epstein\t\t\t%.3f"%epsteinave)
print ("\nTEAM\t\t\t\t SCORE")
print ("Kim and Kourtney Kardashian\t",kimney)
print ("Mr. Hayes and Mrs. Epstein\t",popstein)
#waits for the user to end the program
input("\nPress Enter to Exit")
| 29.631579 | 108 | 0.745411 |
# epstein1-3, holds Mrs. Epstein's scores
# kourtave, kourtney's average
# epsteinave, Mrs. Epstein's average
cale
locale.setlocale( locale.LC_ALL, '' )
#use locale.currency() for currency formating
print("Welcome to the Bowling Program\n")
#initializes all the values
kim1 = 101
kim2 = 126
kim3 = 132
kourt1 = 135
kourt2 = 117
kourt3 = 123
poptart1 = 199
poptart2 = 218
poptart3 = 221
epstein1 = 220
epstein2 = 197
epstein3 = 236
# the maths for the averages
kimave = (kim1+kim2+kim3)/3
kourtave = (kourt1+kourt2+kourt3)/3
poptartave = (poptart1+poptart2+poptart3)/3
epsteinave = (epstein1+epstein2+epstein3)/3
#maths for team scores
kimney = kim1+kim2+kim3+kourt1+kourt2+kourt3
popstein = poptart1+poptart2+poptart3+epstein1+epstein2+epstein3
#prints the results in a well formated mannor
print ("PLAYER\t\t\t\tAVERAGE")
print ("Kim Kardashian\t\t\t%.3f"%kimave)
print ("Kourtney Kardashian\t\t%.3f"%kourtave)
print ("Mr. Hayes\t\t\t%.3f"%poptartave)
print ("Mrs. Epstein\t\t\t%.3f"%epsteinave)
print ("\nTEAM\t\t\t\t SCORE")
print ("Kim and Kourtney Kardashian\t",kimney)
print ("Mr. Hayes and Mrs. Epstein\t",popstein)
#waits for the user to end the program
input("\nPress Enter to Exit")
| true | true |
f7faa87c8956ffaf946e44fe4188181df5f38082 | 7,935 | py | Python | src/test/python/org/o3project/odenos/core/component/network/flow/test_flow.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 26 | 2015-02-18T10:22:50.000Z | 2020-06-18T05:07:54.000Z | src/test/python/org/o3project/odenos/core/component/network/flow/test_flow.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 45 | 2015-02-20T00:40:45.000Z | 2021-12-14T21:07:57.000Z | src/test/python/org/o3project/odenos/core/component/network/flow/test_flow.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 30 | 2015-02-19T02:00:35.000Z | 2017-02-18T15:28:09.000Z | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.flow import Flow
import unittest
class FlowTest(unittest.TestCase):
    '''
    Unit tests for Flow construction, properties, setters and
    packing/unpacking.

    The original test methods repeated the same eight-field
    ``assertEqual`` ladder five times; that ladder now lives in the
    private ``_assert_body`` helper.  Test method names are unchanged.
    '''

    # Baseline attribute values shared by all fixtures
    Type = "BasicFlow"
    Version = "v01"
    Flow_id = "Id01"
    Owner = "Owner"
    Enabled = True
    Priority = 65535
    Status = "none"
    Attributes = {"bandwidth": 10, "req_bandwidth": 11,
                  "latency": 10, "req_latency": 11}

    def setUp(self):
        # Fresh Flow instance for every test
        self.target = Flow(self.Type, self.Version, self.Flow_id, self.Owner,
                           self.Enabled, self.Priority, self.Status,
                           self.Attributes)

    def tearDown(self):
        self.target = None

    def _assert_body(self, body, type_, version, flow_id, owner,
                     enabled, priority, status, attributes):
        # Compare every field of a body/packed dict in one place
        self.assertEqual(body[self.target.TYPE], type_)
        self.assertEqual(body[self.target.VERSION], version)
        self.assertEqual(body[self.target.FLOW_ID], flow_id)
        self.assertEqual(body[self.target.OWNER], owner)
        self.assertEqual(body[self.target.ENABLED], enabled)
        self.assertEqual(body[self.target.PRIORITY], priority)
        self.assertEqual(body[self.target.STATUS], status)
        self.assertEqual(body[self.target.ATTRIBUTES], attributes)

    def test_constructor(self):
        self._assert_body(self.target._body, self.Type, self.Version,
                          self.Flow_id, self.Owner, self.Enabled,
                          self.Priority, self.Status, self.Attributes)

    def test_type_property(self):
        self.assertEqual(self.target.type, self.Type)

    def test_version_property(self):
        self.assertEqual(self.target.version, self.Version)

    def test_flow_id_property(self):
        self.assertEqual(self.target.flow_id, self.Flow_id)

    def test_owner_property(self):
        self.assertEqual(self.target.owner, self.Owner)

    def test_enabled_property(self):
        self.assertEqual(self.target.enabled, self.Enabled)

    def test_enabled_setter(self):
        self.assertEqual(self.target._body[self.target.ENABLED],
                         self.Enabled)
        self.target.enabled = False
        self.assertEqual(self.target._body[self.target.ENABLED], False)

    def test_priority_property(self):
        self.assertEqual(self.target.priority, self.Priority)

    def test_priority_setter(self):
        self.assertEqual(self.target._body[self.target.PRIORITY],
                         self.Priority)
        self.target.priority = 0
        self.assertEqual(self.target._body[self.target.PRIORITY], 0)

    def test_status_property(self):
        self.assertEqual(self.target.status, self.Status)

    def test_status_setter(self):
        self.assertEqual(self.target._body[self.target.STATUS],
                         self.Status)
        self.target.status = "establishing"
        self.assertEqual(self.target._body[self.target.STATUS],
                         "establishing")

    def test_attributes_property(self):
        self.assertEqual(self.target.attributes, self.Attributes)

    def test_create_from_packed_Version_NotNone(self):
        attributes = {"bandwidth": 12, "req_bandwidth": 13,
                      "latency": 12, "req_latency": 13}
        value = {"type": "OFPFlow", "version": "v02",
                 "flow_id": "Id02", "owner": "Owner2",
                 "enabled": False, "priority": 1,
                 "status": "established", "attributes": attributes}

        result = self.target.create_from_packed(value)

        self._assert_body(result._body, "OFPFlow", "v02", "Id02", "Owner2",
                          False, 1, "established", attributes)

    def test_create_from_packed_Version_None(self):
        # When "version" is absent from the packed data it unpacks as None
        attributes = {"bandwidth": 12, "req_bandwidth": 13,
                      "latency": 12, "req_latency": 13}
        value = {"type": "OFPFlow",
                 "flow_id": "Id02", "owner": "Owner2",
                 "enabled": False, "priority": 1,
                 "status": "established", "attributes": attributes}

        result = self.target.create_from_packed(value)

        self._assert_body(result._body, "OFPFlow", None, "Id02", "Owner2",
                          False, 1, "established", attributes)

    def test_packed_object(self):
        self._assert_body(self.target.packed_object(), self.Type,
                          self.Version, self.Flow_id, self.Owner,
                          self.Enabled, self.Priority, self.Status,
                          self.Attributes)
# Allow the suite to be run directly: ``python test_flow.py``
if __name__ == '__main__':
    unittest.main()
| 41.984127 | 77 | 0.558916 |
from org.o3project.odenos.core.component.network.flow.flow import Flow
import unittest
class FlowTest(unittest.TestCase):
Type = "BasicFlow"
Version = "v01"
Flow_id = "Id01"
Owner = "Owner"
Enabled = True
Priority = 65535
Status = "none"
Attributes = {"bandwidth": 10, "req_bandwidth": 11,
"latency": 10, "req_latency": 11}
def setUp(self):
self.target = Flow(self.Type, self.Version, self.Flow_id, self.Owner,
self.Enabled, self.Priority, self.Status,
self.Attributes)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.TYPE],
self.Type)
self.assertEqual(self.target._body[self.target.VERSION],
self.Version)
self.assertEqual(self.target._body[self.target.FLOW_ID],
self.Flow_id)
self.assertEqual(self.target._body[self.target.OWNER],
self.Owner)
self.assertEqual(self.target._body[self.target.ENABLED],
self.Enabled)
self.assertEqual(self.target._body[self.target.PRIORITY],
self.Priority)
self.assertEqual(self.target._body[self.target.STATUS],
self.Status)
self.assertEqual(self.target._body[self.target.ATTRIBUTES],
self.Attributes)
def test_type_property(self):
self.assertEqual(self.target.type, self.Type)
def test_version_property(self):
self.assertEqual(self.target.version, self.Version)
def test_flow_id_property(self):
self.assertEqual(self.target.flow_id, self.Flow_id)
def test_owner_property(self):
self.assertEqual(self.target.owner, self.Owner)
def test_enabled_property(self):
self.assertEqual(self.target.enabled, self.Enabled)
def test_enabled_setter(self):
self.assertEqual(self.target._body[self.target.ENABLED],
self.Enabled)
self.target.enabled = False
self.assertEqual(self.target._body[self.target.ENABLED],
False)
def test_priority_property(self):
self.assertEqual(self.target.priority, self.Priority)
def test_priority_setter(self):
self.assertEqual(self.target._body[self.target.PRIORITY],
self.Priority)
self.target.priority = 0
self.assertEqual(self.target._body[self.target.PRIORITY],
0)
def test_status_property(self):
self.assertEqual(self.target.status, self.Status)
def test_status_setter(self):
self.assertEqual(self.target._body[self.target.STATUS],
self.Status)
self.target.status = "establishing"
self.assertEqual(self.target._body[self.target.STATUS],
"establishing")
def test_attributes_property(self):
self.assertEqual(self.target.attributes, self.Attributes)
def test_create_from_packed_Version_NotNone(self):
Type2 = "OFPFlow"
Version2 = "v02"
Flow_id2 = "Id02"
Owner2 = "Owner2"
Enabled2 = False
Priority2 = 1
Status2 = "established"
Attributes2 = {"bandwidth": 12, "req_bandwidth": 13,
"latency": 12, "req_latency": 13}
self.value = {"type": Type2, "version": Version2,
"flow_id": Flow_id2, "owner": Owner2,
"enabled": Enabled2, "priority": Priority2,
"status": Status2, "attributes": Attributes2}
self.result = self.target.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.TYPE],
Type2)
self.assertEqual(self.result._body[self.target.VERSION],
Version2)
self.assertEqual(self.result._body[self.target.FLOW_ID],
Flow_id2)
self.assertEqual(self.result._body[self.target.OWNER],
Owner2)
self.assertEqual(self.result._body[self.target.ENABLED],
Enabled2)
self.assertEqual(self.result._body[self.target.PRIORITY],
Priority2)
self.assertEqual(self.result._body[self.target.STATUS],
Status2)
self.assertEqual(self.result._body[self.target.ATTRIBUTES],
Attributes2)
def test_create_from_packed_Version_None(self):
Type2 = "OFPFlow"
Flow_id2 = "Id02"
Owner2 = "Owner2"
Enabled2 = False
Priority2 = 1
Status2 = "established"
Attributes2 = {"bandwidth": 12, "req_bandwidth": 13,
"latency": 12, "req_latency": 13}
self.value = {"type": Type2,
"flow_id": Flow_id2, "owner": Owner2,
"enabled": Enabled2, "priority": Priority2,
"status": Status2, "attributes": Attributes2}
self.result = self.target.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.TYPE],
Type2)
self.assertEqual(self.result._body[self.target.VERSION],
None)
self.assertEqual(self.result._body[self.target.FLOW_ID],
Flow_id2)
self.assertEqual(self.result._body[self.target.OWNER],
Owner2)
self.assertEqual(self.result._body[self.target.ENABLED],
Enabled2)
self.assertEqual(self.result._body[self.target.PRIORITY],
Priority2)
self.assertEqual(self.result._body[self.target.STATUS],
Status2)
self.assertEqual(self.result._body[self.target.ATTRIBUTES],
Attributes2)
def test_packed_object(self):
self.result = self.target.packed_object()
self.assertEqual(self.result[self.target.TYPE],
self.Type)
self.assertEqual(self.result[self.target.VERSION],
self.Version)
self.assertEqual(self.result[self.target.FLOW_ID],
self.Flow_id)
self.assertEqual(self.result[self.target.OWNER],
self.Owner)
self.assertEqual(self.result[self.target.ENABLED],
self.Enabled)
self.assertEqual(self.result[self.target.PRIORITY],
self.Priority)
self.assertEqual(self.result[self.target.STATUS],
self.Status)
self.assertEqual(self.result[self.target.ATTRIBUTES],
self.Attributes)
if __name__ == '__main__':
unittest.main()
| true | true |
f7faa989d90b294b105b860f6bf0d66bb6d3a8fe | 768 | py | Python | BasicPythonPrograms/PythonInheritance.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonInheritance.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | BasicPythonPrograms/PythonInheritance.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
] | null | null | null | class Person:
def __init__(self,fname,lname):
self.firstname=fname #proerties
self.lastname=lname
def printname(self): #Method
print(self.firstname,self.lastname)
class Student(Person):
    '''Person subclass demonstrating inheritance.'''

    def __init__(self, fname, lname):
        # Delegate to the parent exactly once.  The original called both
        # Person.__init__(self, ...) and super().__init__(...), running
        # the parent initializer twice for no benefit.
        super().__init__(fname, lname)
# Demonstrate the classes: build one Person and one Student, then print
# both names via the (inherited) printname method.
x = Person("Pushkar", "Baviskar")
x1 = Student("Manasi", "Pushkar")
x.printname()
x1.printname()

# Read and display a text file.  read() consumes the whole file, so the
# readline() that follows prints an empty line.
f = open("E:\Github profile\PythonProgramming\BasicPythonPrograms\Pushkar.txt", "r")
print(f.read())
print(f.readline())
f.close()
| 34.909091 | 87 | 0.700521 | class Person:
def __init__(self,fname,lname):
self.firstname=fname
self.lastname=lname
def printname(self):
print(self.firstname,self.lastname)
class Student(Person):
def __init__(self, fname, lname):
Person.__init__(self, fname, lname)
super().__init__(fname,lname)
x=Person("Pushkar", "Baviskar")
x1=Student("Manasi", "Pushkar")
x.printname()
x1.printname()
f=open("E:\Github profile\PythonProgramming\BasicPythonPrograms\Pushkar.txt","r")
print(f.read())
print(f.readline())
f.close()
| true | true |
f7faa9cd76e458ca86b43c3b1502fade86146a38 | 587 | py | Python | visitingController.py | C1626152/bfrp | 2d43f90ba08709446c70453b758c42bb41e5946b | [
"MIT"
] | 1 | 2019-03-05T11:36:17.000Z | 2019-03-05T11:36:17.000Z | visitingController.py | C1626152/bfrp | 2d43f90ba08709446c70453b758c42bb41e5946b | [
"MIT"
] | 1 | 2019-03-12T00:09:09.000Z | 2019-03-12T00:09:09.000Z | visitingController.py | C1626152/bfrp | 2d43f90ba08709446c70453b758c42bb41e5946b | [
"MIT"
] | 2 | 2019-02-14T20:35:34.000Z | 2019-03-05T11:45:54.000Z | import microbit
import radio
# turn radio on
radio.on()
# set channel, power to max, assign group
radio.config(channel=7, power=10, group=1)
tX = radio.send()
rX = radio.receive()
dict currentBlock = []
# While loop to send beacon signal
while rX == False:
tX = "SYN"
if rX == "ACK":
# Placeholder in use here
tX = "someHashcode"
# Wrong, work out how to actually validate for a 32bit string
elif type(rX) == str && len(rX) == 32:
# Needs code to validate
# store new block in dict obj
currentBlock = rX
else:
# Ensure that the script returns to sending signal code
return
| 20.241379 | 61 | 0.698467 | import microbit
import radio
radio.on()
radio.config(channel=7, power=10, group=1)
tX = radio.send()
rX = radio.receive()
dict currentBlock = []
while rX == False:
tX = "SYN"
if rX == "ACK":
tX = "someHashcode"
elif type(rX) == str && len(rX) == 32:
currentBlock = rX
else:
return
| false | true |
f7faaa3340fb48ef829d4e5d6b48af6f8a62c2c9 | 1,010 | py | Python | examples/2_docking/pose_prediction/run.py | jurgjn/hotspots | ef1fc6dd957a2f23094e10c05c702b5ed94a59ac | [
"MIT"
] | 24 | 2019-02-14T00:02:13.000Z | 2022-03-26T02:27:52.000Z | examples/2_docking/pose_prediction/run.py | jurgjn/hotspots | ef1fc6dd957a2f23094e10c05c702b5ed94a59ac | [
"MIT"
] | 27 | 2019-02-06T12:18:27.000Z | 2020-10-30T14:26:08.000Z | examples/2_docking/pose_prediction/run.py | jurgjn/hotspots | ef1fc6dd957a2f23094e10c05c702b5ed94a59ac | [
"MIT"
] | 12 | 2019-02-13T20:38:56.000Z | 2022-03-09T01:20:54.000Z | from __future__ import print_function
import os
#####################################################################
# input data
prot = "1m17" # protein to dock into
ligand = "AQ4" # ligand to dock
ref = "1m17" # reference structure for comparison
#####################################################################
# run docking
# hotspot-guided GOLD
if not os.path.exists("./hotspot_guided_docking"):
os.mkdir("./hotspot_guided_docking")
os.system("""python ./docking.py "./hotspot_guided_docking" "{}" "{}" """.format(prot, ligand))
# default GOLD
if not os.path.exists("./default_docking"):
os.mkdir("./default_docking")
os.system("""python ./docking.py "./default_docking" "{}" "{}" -hs False """.format(prot, ligand))
# comparision
os.system("""python ./comparision.py "{}" "{}" "{}" "hotspot_guided_docking/results.mol2" """.format(prot, ref, ligand))
os.system("""python ./comparision.py "{}" "{}" "{}" "default_docking/results.mol2" """.format(prot, ref, ligand))
| 36.071429 | 120 | 0.573267 | from __future__ import print_function
import os
| true | true |
f7faaa4287c34b67f0176d4206a4ebfccc2e4186 | 23,027 | py | Python | sdk/python/pulumi_azure/keyvault/certifiate.py | adnang/pulumi-azure | 32360d2f1e41e27d7fdd6522cb26d65e531f279f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/keyvault/certifiate.py | adnang/pulumi-azure | 32360d2f1e41e27d7fdd6522cb26d65e531f279f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/keyvault/certifiate.py | adnang/pulumi-azure | 32360d2f1e41e27d7fdd6522cb26d65e531f279f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
warnings.warn("azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate", DeprecationWarning)
class Certifiate(pulumi.CustomResource):
certificate: pulumi.Output[dict]
"""
A `certificate` block as defined below, used to Import an existing certificate.
* `contents` (`str`) - The base64-encoded certificate contents. Changing this forces a new resource to be created.
* `password` (`str`) - The password associated with the certificate. Changing this forces a new resource to be created.
"""
certificate_data: pulumi.Output[str]
"""
The raw Key Vault Certificate data represented as a hexadecimal string.
"""
certificate_policy: pulumi.Output[dict]
"""
A `certificate_policy` block as defined below.
* `issuerParameters` (`dict`) - A `issuer_parameters` block as defined below.
* `name` (`str`) - The name of the Certificate Issuer. Possible values include `Self` (for self-signed certificate), or `Unknown` (for a certificate issuing authority like `Let's Encrypt` and Azure direct supported ones). Changing this forces a new resource to be created.
* `keyProperties` (`dict`) - A `key_properties` block as defined below.
* `exportable` (`bool`) - Is this Certificate Exportable? Changing this forces a new resource to be created.
* `key_size` (`float`) - The size of the Key used in the Certificate. Possible values include `2048` and `4096`. Changing this forces a new resource to be created.
* `key_type` (`str`) - Specifies the Type of Key, such as `RSA`. Changing this forces a new resource to be created.
* `reuseKey` (`bool`) - Is the key reusable? Changing this forces a new resource to be created.
* `lifetimeActions` (`list`) - A `lifetime_action` block as defined below.
* `action` (`dict`) - A `action` block as defined below.
* `actionType` (`str`) - The Type of action to be performed when the lifetime trigger is triggerec. Possible values include `AutoRenew` and `EmailContacts`. Changing this forces a new resource to be created.
* `trigger` (`dict`) - A `trigger` block as defined below.
* `daysBeforeExpiry` (`float`) - The number of days before the Certificate expires that the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `lifetime_percentage`.
* `lifetimePercentage` (`float`) - The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `days_before_expiry`.
* `secretProperties` (`dict`) - A `secret_properties` block as defined below.
* `content_type` (`str`) - The Content-Type of the Certificate, such as `application/x-pkcs12` for a PFX or `application/x-pem-file` for a PEM. Changing this forces a new resource to be created.
* `x509CertificateProperties` (`dict`) - A `x509_certificate_properties` block as defined below.
* `extendedKeyUsages` (`list`) - A list of Extended/Enhanced Key Usages. Changing this forces a new resource to be created.
* `keyUsages` (`list`) - A list of uses associated with this Key. Possible values include `cRLSign`, `dataEncipherment`, `decipherOnly`, `digitalSignature`, `encipherOnly`, `keyAgreement`, `keyCertSign`, `keyEncipherment` and `nonRepudiation` and are case-sensitive. Changing this forces a new resource to be created.
* `subject` (`str`) - The Certificate's Subject. Changing this forces a new resource to be created.
* `subjectAlternativeNames` (`dict`) - A `subject_alternative_names` block as defined below.
* `dnsNames` (`list`) - A list of alternative DNS names (FQDNs) identified by the Certificate. Changing this forces a new resource to be created.
* `emails` (`list`) - A list of email addresses identified by this Certificate. Changing this forces a new resource to be created.
* `upns` (`list`) - A list of User Principal Names identified by the Certificate. Changing this forces a new resource to be created.
* `validityInMonths` (`float`) - The Certificates Validity Period in Months. Changing this forces a new resource to be created.
"""
key_vault_id: pulumi.Output[str]
"""
The ID of the Key Vault where the Certificate should be created.
"""
name: pulumi.Output[str]
"""
Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created.
"""
secret_id: pulumi.Output[str]
"""
The ID of the associated Key Vault Secret.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
thumbprint: pulumi.Output[str]
"""
The X509 Thumbprint of the Key Vault Certificate represented as a hexadecimal string.
"""
version: pulumi.Output[str]
"""
The current version of the Key Vault Certificate.
"""
warnings.warn("azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate", DeprecationWarning)
def __init__(__self__, resource_name, opts=None, certificate=None, certificate_policy=None, key_vault_id=None, name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Key Vault Certificate.
## Example Usage (Generating a new certificate)
```python
import pulumi
import pulumi_azure as azure
current = azure.core.get_client_config()
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_key_vault = azure.keyvault.KeyVault("exampleKeyVault",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
tenant_id=current.tenant_id,
sku_name="standard",
access_policy=[{
"tenantId": current.tenant_id,
"objectId": current.object_id,
"certificatePermissions": [
"create",
"delete",
"deleteissuers",
"get",
"getissuers",
"import",
"list",
"listissuers",
"managecontacts",
"manageissuers",
"setissuers",
"update",
],
"keyPermissions": [
"backup",
"create",
"decrypt",
"delete",
"encrypt",
"get",
"import",
"list",
"purge",
"recover",
"restore",
"sign",
"unwrapKey",
"update",
"verify",
"wrapKey",
],
"secretPermissions": [
"backup",
"delete",
"get",
"list",
"purge",
"recover",
"restore",
"set",
],
}],
tags={
"environment": "Production",
})
example_certificate = azure.keyvault.Certificate("exampleCertificate",
key_vault_id=example_key_vault.id,
certificate_policy={
"issuer_parameters": {
"name": "Self",
},
"key_properties": {
"exportable": True,
"keySize": 2048,
"keyType": "RSA",
"reuseKey": True,
},
"lifetime_action": [{
"action": {
"actionType": "AutoRenew",
},
"trigger": {
"daysBeforeExpiry": 30,
},
}],
"secret_properties": {
"contentType": "application/x-pkcs12",
},
"x509_certificate_properties": {
"extendedKeyUsages": ["1.3.6.1.5.5.7.3.1"],
"keyUsages": [
"cRLSign",
"dataEncipherment",
"digitalSignature",
"keyAgreement",
"keyCertSign",
"keyEncipherment",
],
"subject_alternative_names": {
"dnsNames": [
"internal.contoso.com",
"domain.hello.world",
],
},
"subject": "CN=hello-world",
"validityInMonths": 12,
},
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] certificate: A `certificate` block as defined below, used to Import an existing certificate.
:param pulumi.Input[dict] certificate_policy: A `certificate_policy` block as defined below.
:param pulumi.Input[str] key_vault_id: The ID of the Key Vault where the Certificate should be created.
:param pulumi.Input[str] name: Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **certificate** object supports the following:
* `contents` (`pulumi.Input[str]`) - The base64-encoded certificate contents. Changing this forces a new resource to be created.
* `password` (`pulumi.Input[str]`) - The password associated with the certificate. Changing this forces a new resource to be created.
The **certificate_policy** object supports the following:
* `issuerParameters` (`pulumi.Input[dict]`) - A `issuer_parameters` block as defined below.
* `name` (`pulumi.Input[str]`) - The name of the Certificate Issuer. Possible values include `Self` (for self-signed certificate), or `Unknown` (for a certificate issuing authority like `Let's Encrypt` and Azure direct supported ones). Changing this forces a new resource to be created.
* `keyProperties` (`pulumi.Input[dict]`) - A `key_properties` block as defined below.
* `exportable` (`pulumi.Input[bool]`) - Is this Certificate Exportable? Changing this forces a new resource to be created.
* `key_size` (`pulumi.Input[float]`) - The size of the Key used in the Certificate. Possible values include `2048` and `4096`. Changing this forces a new resource to be created.
* `key_type` (`pulumi.Input[str]`) - Specifies the Type of Key, such as `RSA`. Changing this forces a new resource to be created.
* `reuseKey` (`pulumi.Input[bool]`) - Is the key reusable? Changing this forces a new resource to be created.
* `lifetimeActions` (`pulumi.Input[list]`) - A `lifetime_action` block as defined below.
* `action` (`pulumi.Input[dict]`) - A `action` block as defined below.
* `actionType` (`pulumi.Input[str]`) - The Type of action to be performed when the lifetime trigger is triggerec. Possible values include `AutoRenew` and `EmailContacts`. Changing this forces a new resource to be created.
* `trigger` (`pulumi.Input[dict]`) - A `trigger` block as defined below.
* `daysBeforeExpiry` (`pulumi.Input[float]`) - The number of days before the Certificate expires that the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `lifetime_percentage`.
* `lifetimePercentage` (`pulumi.Input[float]`) - The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `days_before_expiry`.
* `secretProperties` (`pulumi.Input[dict]`) - A `secret_properties` block as defined below.
* `content_type` (`pulumi.Input[str]`) - The Content-Type of the Certificate, such as `application/x-pkcs12` for a PFX or `application/x-pem-file` for a PEM. Changing this forces a new resource to be created.
* `x509CertificateProperties` (`pulumi.Input[dict]`) - A `x509_certificate_properties` block as defined below.
* `extendedKeyUsages` (`pulumi.Input[list]`) - A list of Extended/Enhanced Key Usages. Changing this forces a new resource to be created.
* `keyUsages` (`pulumi.Input[list]`) - A list of uses associated with this Key. Possible values include `cRLSign`, `dataEncipherment`, `decipherOnly`, `digitalSignature`, `encipherOnly`, `keyAgreement`, `keyCertSign`, `keyEncipherment` and `nonRepudiation` and are case-sensitive. Changing this forces a new resource to be created.
* `subject` (`pulumi.Input[str]`) - The Certificate's Subject. Changing this forces a new resource to be created.
* `subjectAlternativeNames` (`pulumi.Input[dict]`) - A `subject_alternative_names` block as defined below.
* `dnsNames` (`pulumi.Input[list]`) - A list of alternative DNS names (FQDNs) identified by the Certificate. Changing this forces a new resource to be created.
* `emails` (`pulumi.Input[list]`) - A list of email addresses identified by this Certificate. Changing this forces a new resource to be created.
* `upns` (`pulumi.Input[list]`) - A list of User Principal Names identified by the Certificate. Changing this forces a new resource to be created.
* `validityInMonths` (`pulumi.Input[float]`) - The Certificates Validity Period in Months. Changing this forces a new resource to be created.
"""
pulumi.log.warn("Certifiate is deprecated: azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate'] = certificate
if certificate_policy is None:
raise TypeError("Missing required property 'certificate_policy'")
__props__['certificate_policy'] = certificate_policy
if key_vault_id is None:
raise TypeError("Missing required property 'key_vault_id'")
__props__['key_vault_id'] = key_vault_id
__props__['name'] = name
__props__['tags'] = tags
__props__['certificate_data'] = None
__props__['secret_id'] = None
__props__['thumbprint'] = None
__props__['version'] = None
super(Certifiate, __self__).__init__(
'azure:keyvault/certifiate:Certifiate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, certificate=None, certificate_data=None, certificate_policy=None, key_vault_id=None, name=None, secret_id=None, tags=None, thumbprint=None, version=None):
"""
Get an existing Certifiate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] certificate: A `certificate` block as defined below, used to Import an existing certificate.
:param pulumi.Input[str] certificate_data: The raw Key Vault Certificate data represented as a hexadecimal string.
:param pulumi.Input[dict] certificate_policy: A `certificate_policy` block as defined below.
:param pulumi.Input[str] key_vault_id: The ID of the Key Vault where the Certificate should be created.
:param pulumi.Input[str] name: Specifies the name of the Key Vault Certificate. Changing this forces a new resource to be created.
:param pulumi.Input[str] secret_id: The ID of the associated Key Vault Secret.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] thumbprint: The X509 Thumbprint of the Key Vault Certificate represented as a hexadecimal string.
:param pulumi.Input[str] version: The current version of the Key Vault Certificate.
The **certificate** object supports the following:
* `contents` (`pulumi.Input[str]`) - The base64-encoded certificate contents. Changing this forces a new resource to be created.
* `password` (`pulumi.Input[str]`) - The password associated with the certificate. Changing this forces a new resource to be created.
The **certificate_policy** object supports the following:
* `issuerParameters` (`pulumi.Input[dict]`) - A `issuer_parameters` block as defined below.
* `name` (`pulumi.Input[str]`) - The name of the Certificate Issuer. Possible values include `Self` (for self-signed certificate), or `Unknown` (for a certificate issuing authority like `Let's Encrypt` and Azure direct supported ones). Changing this forces a new resource to be created.
* `keyProperties` (`pulumi.Input[dict]`) - A `key_properties` block as defined below.
* `exportable` (`pulumi.Input[bool]`) - Is this Certificate Exportable? Changing this forces a new resource to be created.
* `key_size` (`pulumi.Input[float]`) - The size of the Key used in the Certificate. Possible values include `2048` and `4096`. Changing this forces a new resource to be created.
* `key_type` (`pulumi.Input[str]`) - Specifies the Type of Key, such as `RSA`. Changing this forces a new resource to be created.
* `reuseKey` (`pulumi.Input[bool]`) - Is the key reusable? Changing this forces a new resource to be created.
* `lifetimeActions` (`pulumi.Input[list]`) - A `lifetime_action` block as defined below.
* `action` (`pulumi.Input[dict]`) - A `action` block as defined below.
* `actionType` (`pulumi.Input[str]`) - The Type of action to be performed when the lifetime trigger is triggerec. Possible values include `AutoRenew` and `EmailContacts`. Changing this forces a new resource to be created.
* `trigger` (`pulumi.Input[dict]`) - A `trigger` block as defined below.
* `daysBeforeExpiry` (`pulumi.Input[float]`) - The number of days before the Certificate expires that the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `lifetime_percentage`.
* `lifetimePercentage` (`pulumi.Input[float]`) - The percentage at which during the Certificates Lifetime the action associated with this Trigger should run. Changing this forces a new resource to be created. Conflicts with `days_before_expiry`.
* `secretProperties` (`pulumi.Input[dict]`) - A `secret_properties` block as defined below.
* `content_type` (`pulumi.Input[str]`) - The Content-Type of the Certificate, such as `application/x-pkcs12` for a PFX or `application/x-pem-file` for a PEM. Changing this forces a new resource to be created.
* `x509CertificateProperties` (`pulumi.Input[dict]`) - A `x509_certificate_properties` block as defined below.
* `extendedKeyUsages` (`pulumi.Input[list]`) - A list of Extended/Enhanced Key Usages. Changing this forces a new resource to be created.
* `keyUsages` (`pulumi.Input[list]`) - A list of uses associated with this Key. Possible values include `cRLSign`, `dataEncipherment`, `decipherOnly`, `digitalSignature`, `encipherOnly`, `keyAgreement`, `keyCertSign`, `keyEncipherment` and `nonRepudiation` and are case-sensitive. Changing this forces a new resource to be created.
* `subject` (`pulumi.Input[str]`) - The Certificate's Subject. Changing this forces a new resource to be created.
* `subjectAlternativeNames` (`pulumi.Input[dict]`) - A `subject_alternative_names` block as defined below.
* `dnsNames` (`pulumi.Input[list]`) - A list of alternative DNS names (FQDNs) identified by the Certificate. Changing this forces a new resource to be created.
* `emails` (`pulumi.Input[list]`) - A list of email addresses identified by this Certificate. Changing this forces a new resource to be created.
* `upns` (`pulumi.Input[list]`) - A list of User Principal Names identified by the Certificate. Changing this forces a new resource to be created.
* `validityInMonths` (`pulumi.Input[float]`) - The Certificates Validity Period in Months. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["certificate"] = certificate
__props__["certificate_data"] = certificate_data
__props__["certificate_policy"] = certificate_policy
__props__["key_vault_id"] = key_vault_id
__props__["name"] = name
__props__["secret_id"] = secret_id
__props__["tags"] = tags
__props__["thumbprint"] = thumbprint
__props__["version"] = version
return Certifiate(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 64.682584 | 343 | 0.642507 |
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
warnings.warn("azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate", DeprecationWarning)
class Certifiate(pulumi.CustomResource):
certificate: pulumi.Output[dict]
certificate_data: pulumi.Output[str]
certificate_policy: pulumi.Output[dict]
key_vault_id: pulumi.Output[str]
name: pulumi.Output[str]
secret_id: pulumi.Output[str]
tags: pulumi.Output[dict]
thumbprint: pulumi.Output[str]
version: pulumi.Output[str]
warnings.warn("azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate", DeprecationWarning)
def __init__(__self__, resource_name, opts=None, certificate=None, certificate_policy=None, key_vault_id=None, name=None, tags=None, __props__=None, __name__=None, __opts__=None):
pulumi.log.warn("Certifiate is deprecated: azure.keyvault.Certifiate has been deprecated in favor of azure.keyvault.Certificate")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate'] = certificate
if certificate_policy is None:
raise TypeError("Missing required property 'certificate_policy'")
__props__['certificate_policy'] = certificate_policy
if key_vault_id is None:
raise TypeError("Missing required property 'key_vault_id'")
__props__['key_vault_id'] = key_vault_id
__props__['name'] = name
__props__['tags'] = tags
__props__['certificate_data'] = None
__props__['secret_id'] = None
__props__['thumbprint'] = None
__props__['version'] = None
super(Certifiate, __self__).__init__(
'azure:keyvault/certifiate:Certifiate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, certificate=None, certificate_data=None, certificate_policy=None, key_vault_id=None, name=None, secret_id=None, tags=None, thumbprint=None, version=None):
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["certificate"] = certificate
__props__["certificate_data"] = certificate_data
__props__["certificate_policy"] = certificate_policy
__props__["key_vault_id"] = key_vault_id
__props__["name"] = name
__props__["secret_id"] = secret_id
__props__["tags"] = tags
__props__["thumbprint"] = thumbprint
__props__["version"] = version
return Certifiate(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true | true |
f7faaa467764b3cb87d3c87d86a6e208de610871 | 4,012 | py | Python | examples/test_demo_site.py | johnhiggs/SeleniumBase | 2cbb156e1351bc4ab36a2975c000a406c6ee8f3a | [
"MIT"
] | 3 | 2020-06-30T19:12:01.000Z | 2020-07-03T05:22:25.000Z | examples/test_demo_site.py | johnhiggs/SeleniumBase | 2cbb156e1351bc4ab36a2975c000a406c6ee8f3a | [
"MIT"
] | null | null | null | examples/test_demo_site.py | johnhiggs/SeleniumBase | 2cbb156e1351bc4ab36a2975c000a406c6ee8f3a | [
"MIT"
] | null | null | null | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
    """End-to-end tour of the SeleniumBase demo page.

    Exercises the core BaseCase API — assertions, typing, clicking,
    hovering, sliders, selects, iframes, and link handling — against
    https://seleniumbase.io/demo_page/.
    """

    def test_demo_site(self):
        """Walk through every widget on the demo page, asserting state
        before and after each interaction (the test is order-dependent)."""
        self.open("https://seleniumbase.io/demo_page/")
        # Assert the title of the current web page
        self.assert_title("Web Testing Page")
        # Assert that the element is visible on the page
        self.assert_element("tbody#tbodyId")
        # Assert that the text appears within a given element
        self.assert_text("Demo Page", "h1")
        # Type/update text in text fields on the page
        self.type("#myTextInput", "This is Automated")
        self.type("textarea.area1", "Testing Time!\n")
        self.type('[name="preText2"]', "Typing Text!")
        # Verify that a hover dropdown link changes page text
        self.assert_text("Automation Practice", "h3")
        self.hover_and_click("#myDropdown", "#dropOption2")
        self.assert_text("Link Two Selected", "h3")
        # Verify that a button click changes text on the page
        self.assert_text("This Text is Green", "#pText")
        self.click("#myButton")
        self.assert_text("This Text is Purple", "#pText")
        # Assert that the given SVG is visible on the page
        self.assert_element('svg[name="svgName"]')
        # Verify that a slider control updates a progress bar
        self.assert_element('progress[value="50"]')
        self.press_right_arrow("#myslider", times=5)
        self.assert_element('progress[value="100"]')
        # Verify that a "select" option updates a meter bar
        self.assert_element('meter[value="0.25"]')
        self.select_option_by_text("#mySelect", "Set to 75%")
        self.assert_element('meter[value="0.75"]')
        # Assert an element located inside an iFrame
        self.assert_false(self.is_element_visible("img"))
        self.switch_to_frame("#myFrame1")
        self.assert_true(self.is_element_visible("img"))
        self.switch_to_default_content()
        # Assert text located inside an iFrame
        self.assert_false(self.is_text_visible("iFrame Text"))
        self.switch_to_frame("#myFrame2")
        self.assert_true(self.is_text_visible("iFrame Text"))
        self.switch_to_default_content()
        # Verify that clicking a radio button selects it
        self.assert_false(self.is_selected("#radioButton2"))
        self.click("#radioButton2")
        self.assert_true(self.is_selected("#radioButton2"))
        # Verify that clicking a checkbox makes it selected
        self.assert_false(self.is_selected("#checkBox1"))
        self.click("#checkBox1")
        self.assert_true(self.is_selected("#checkBox1"))
        # Verify clicking on multiple elements with one call
        self.assert_false(self.is_selected("#checkBox2"))
        self.assert_false(self.is_selected("#checkBox3"))
        self.assert_false(self.is_selected("#checkBox4"))
        self.click_visible_elements("input.checkBoxClassB")
        self.assert_true(self.is_selected("#checkBox2"))
        self.assert_true(self.is_selected("#checkBox3"))
        self.assert_true(self.is_selected("#checkBox4"))
        # Verify that clicking an iFrame checkbox selects it
        self.assert_false(self.is_element_visible(".fBox"))
        self.switch_to_frame("#myFrame3")
        self.assert_true(self.is_element_visible(".fBox"))
        self.assert_false(self.is_selected(".fBox"))
        self.click(".fBox")
        self.assert_true(self.is_selected(".fBox"))
        self.switch_to_default_content()
        # Assert link text
        self.assert_link_text("seleniumbase.com")
        self.assert_link_text("SeleniumBase on GitHub")
        self.assert_link_text("seleniumbase.io")
        # Click link text
        self.click_link_text("SeleniumBase Demo Page")
        # Assert exact text
        self.assert_exact_text("Demo Page", "h1")
        # Assert no broken links (Can be slow if many links)
        # self.assert_no_404_errors()
        # Assert no JavaScript errors (Can also detect 404s)
        self.assert_no_js_errors()
| 39.333333 | 62 | 0.66002 | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
def test_demo_site(self):
self.open("https://seleniumbase.io/demo_page/")
self.assert_title("Web Testing Page")
self.assert_element("tbody#tbodyId")
self.assert_text("Demo Page", "h1")
self.type("#myTextInput", "This is Automated")
self.type("textarea.area1", "Testing Time!\n")
self.type('[name="preText2"]', "Typing Text!")
self.assert_text("Automation Practice", "h3")
self.hover_and_click("#myDropdown", "#dropOption2")
self.assert_text("Link Two Selected", "h3")
self.assert_text("This Text is Green", "#pText")
self.click("#myButton")
self.assert_text("This Text is Purple", "#pText")
self.assert_element('svg[name="svgName"]')
self.assert_element('progress[value="50"]')
self.press_right_arrow("#myslider", times=5)
self.assert_element('progress[value="100"]')
self.assert_element('meter[value="0.25"]')
self.select_option_by_text("#mySelect", "Set to 75%")
self.assert_element('meter[value="0.75"]')
self.assert_false(self.is_element_visible("img"))
self.switch_to_frame("#myFrame1")
self.assert_true(self.is_element_visible("img"))
self.switch_to_default_content()
self.assert_false(self.is_text_visible("iFrame Text"))
self.switch_to_frame("#myFrame2")
self.assert_true(self.is_text_visible("iFrame Text"))
self.switch_to_default_content()
self.assert_false(self.is_selected("#radioButton2"))
self.click("#radioButton2")
self.assert_true(self.is_selected("#radioButton2"))
self.assert_false(self.is_selected("#checkBox1"))
self.click("#checkBox1")
self.assert_true(self.is_selected("#checkBox1"))
self.assert_false(self.is_selected("#checkBox2"))
self.assert_false(self.is_selected("#checkBox3"))
self.assert_false(self.is_selected("#checkBox4"))
self.click_visible_elements("input.checkBoxClassB")
self.assert_true(self.is_selected("#checkBox2"))
self.assert_true(self.is_selected("#checkBox3"))
self.assert_true(self.is_selected("#checkBox4"))
self.assert_false(self.is_element_visible(".fBox"))
self.switch_to_frame("#myFrame3")
self.assert_true(self.is_element_visible(".fBox"))
self.assert_false(self.is_selected(".fBox"))
self.click(".fBox")
self.assert_true(self.is_selected(".fBox"))
self.switch_to_default_content()
self.assert_link_text("seleniumbase.com")
self.assert_link_text("SeleniumBase on GitHub")
self.assert_link_text("seleniumbase.io")
self.click_link_text("SeleniumBase Demo Page")
self.assert_exact_text("Demo Page", "h1")
self.assert_no_js_errors()
| true | true |
f7faaab4227cd17ca2d0de0c99bfceff25010f0c | 754 | py | Python | pi-thon.py | Cydox/pi-thon | b9a3bb23bcbf0ee6b0caf68f9a6d73bcebce2b94 | [
"MIT"
] | null | null | null | pi-thon.py | Cydox/pi-thon | b9a3bb23bcbf0ee6b0caf68f9a6d73bcebce2b94 | [
"MIT"
] | null | null | null | pi-thon.py | Cydox/pi-thon | b9a3bb23bcbf0ee6b0caf68f9a6d73bcebce2b94 | [
"MIT"
] | null | null | null | import os
# Pi memorisation trainer (Python 2 syntax: `print` statements, raw_input).
# Reads reference digits from pi-decimals.txt, scores a from-memory attempt,
# then drills the user one additional digit at a time.
f = open("pi-decimals.txt")
lines = f.readlines()
f.close()

# Concatenate every line of the reference file into one digit string,
# stripping CR/LF so line breaks in the file don't matter.
pi = ""
for i in range(len(lines)):
    pi += lines[i].replace("\r", "").replace("\n", "")

answer = raw_input ("Enter pi from memory: ")
# Find the first position where the attempt diverges from the reference.
# NOTE(review): if the whole attempt matches, the loop ends without breaking
# and `i` is len(answer) - 1, so the score below under-counts by one; an
# attempt longer than the reference string would raise IndexError. Confirm
# whether the "- 1" is meant to discount a leading "3." prefix.
for i in range(len(answer)):
    if not answer[i] == pi[i]:
        break
print "Your score from memory:", i - 1
raw_input ("After pressing enter you will put in one more digit at a time")
os.system("clear")
# Drill loop: show the known prefix plus one new digit, then ask the user
# to retype the whole prefix from memory.
while True:
    print pi[:i + 2]
    raw_input("press enter...")
    os.system("clear")
    answer = raw_input("")
    if answer == pi[:i+2]:
        os.system("clear")
        i += 1
    else:
        print "Wrong! correct answer:", pi[:i + 2]
        print "Your score is :", i
        # NOTE(review): `input` shadows the builtin of the same name.
        input = raw_input("Continue? (Y/n) ")
        if input == "n":
            break
        else:
            # Restart the drill near the start of the digit string.
            i = 2
            os.system("clear")
| 20.378378 | 75 | 0.606101 | import os
f = open("pi-decimals.txt")
lines = f.readlines()
f.close()
pi = ""
for i in range(len(lines)):
pi += lines[i].replace("\r", "").replace("\n", "")
answer = raw_input ("Enter pi from memory: ")
for i in range(len(answer)):
if not answer[i] == pi[i]:
break
print "Your score from memory:", i - 1
raw_input ("After pressing enter you will put in one more digit at a time")
os.system("clear")
while True:
print pi[:i + 2]
raw_input("press enter...")
os.system("clear")
answer = raw_input("")
if answer == pi[:i+2]:
os.system("clear")
i += 1
else:
print "Wrong! correct answer:", pi[:i + 2]
print "Your score is :", i
input = raw_input("Continue? (Y/n) ")
if input == "n":
break
else:
i = 2
os.system("clear")
| false | true |
f7faab0687606af0a5b876ef8dd2381a4cbd1153 | 3,980 | py | Python | svg.py | cornelius/beautiful-labels | 23d8f0ab08a77c785f348dd883518b372ffbc725 | [
"MIT"
] | null | null | null | svg.py | cornelius/beautiful-labels | 23d8f0ab08a77c785f348dd883518b372ffbc725 | [
"MIT"
] | 1 | 2019-04-12T12:41:36.000Z | 2019-04-12T12:41:36.000Z | svg.py | cornelius/beautiful-labels | 23d8f0ab08a77c785f348dd883518b372ffbc725 | [
"MIT"
] | 2 | 2019-02-22T17:36:06.000Z | 2020-02-14T20:09:25.000Z | class Tag:
    def __init__(self, doc, name):
        """Remember the owning Document and the element name to close on exit."""
        self.doc = doc
        self.name = name
    def __enter__(self):
        # Entering the element's scope: children render one indent level deeper.
        self.doc.level += 1
def __exit__(self, type, value, traceback):
self.doc.level -= 1
if self.doc.open_tag:
self.doc.out += "/>\n"
self.open_tag = False
self.doc.indent()
self.doc.out += "</" + self.name + ">\n"
class Document:
    """Builds an XML/SVG document incrementally as text in `self.out`.

    Elements are emitted lazily: tag() writes "<name ..." and leaves it
    "open" (open_tag=True); the *next* call decides how to finish it —
    a sibling at the same depth self-closes it ("/>"), while nested
    content terminates the opening bracket (">").
    """
    def __init__(self):
        self.level = 0         # current nesting depth (drives indentation)
        self.last_level = 0    # depth at which the pending open tag was written
        self.out = ""          # accumulated document text
        self.open_tag = False  # True while "<name ..." still awaits ">" or "/>"
        self.text('<?xml version="1.0" standalone="no"?>')
        self.text('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">')
        self.text("<!--\n  This file was generated by `beautiful-labels`. Don't edit it directly.\n-->")

    def indent(self):
        # Two spaces of indentation per nesting level.
        self.out += self.level * '  '

    def tag(self, name, attributes=None):
        """Start element `name`; returns a Tag usable as a context manager.

        Called without `with`, the element stays pending and is self-closed
        by the next sibling or by the parent Tag's __exit__.
        """
        if self.open_tag:
            # Finish the previously opened element before starting this one.
            if self.last_level == self.level:
                # Same depth -> it had no children: self-close it.
                self.out += "/>\n"
            else:
                # We are deeper now -> it has children: end its open bracket.
                self.out += ">\n"
            self.open_tag = False
        self.indent()
        self.out += "<" + name
        if attributes:
            self.out += ' ' + attributes
        if name == "svg":
            # The root element carries the SVG and xlink namespace declarations.
            self.out += ' xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"'
        self.open_tag = True
        self.last_level = self.level
        return Tag(self, name)

    def text(self, text):
        """Write a raw line of text/markup at the current indent level."""
        if self.open_tag:
            # Text content implies the pending element has children.
            self.out += ">\n"
            self.open_tag = False
        self.indent()
        self.out += text + "\n"
def calculate_lines(repo):
    """Group each category's labels into display rows of at most four.

    Returns a list of ``(header, labels)`` tuples.  Only the first row of a
    category carries the category name; continuation rows use ``""`` so the
    category is rendered once in the left column.
    """
    rows = []
    for category in repo.all_categories():
        labels = list(repo.labels_for_category(category))
        for start in range(0, len(labels), 4):
            header = category if start == 0 else ""
            rows.append((header, labels[start:start + 4]))
    return rows
def text_color(background_color):
    """Return ``"black"`` or ``"white"`` for readable text on a hex background.

    Generalized to accept a six-digit hex color with or without a leading
    ``"#"`` (existing callers pass bare hex strings, which behave as before).
    """
    hex_color = background_color.lstrip('#')
    (r, g, b) = tuple(int(hex_color[i:i + 2], 16) for i in (0, 2, 4))
    # See https://www.w3.org/TR/AERT/#color-contrast for details about the formula
    brightness = (0.299 * r + 0.587 * g + 0.114 * b)
    if brightness > 186:
        return "black"
    else:
        return "white"
def write_text(doc, text, size=20, fill="black", x="0", y="0"):
    """Emit a positioned <text> element wrapped in a styled <g> group."""
    group_attrs = 'font-size="{}" font-family="arial" fill="{}"'.format(size, fill)
    position_attrs = 'x="{}" y="{}"'.format(x, y)
    with doc.tag('g', group_attrs):
        with doc.tag('text', position_attrs):
            doc.text(text)
def write_rect(doc, x=0, y=0, width=10, height=10, fill="black"):
    """Emit a self-closing rounded <rect> (corner radius fixed at 5)."""
    attrs = 'x="{}" y="{}" width="{}" height="{}" fill="{}" rx="5"'.format(
        x, y, width, height, fill
    )
    doc.tag('rect', attrs)
def write_svg(org, repo, filename, label_font_size=14):
    """Render the repository's labels as a color-swatch grid SVG at *filename*.

    Layout: a title row, then one 60px row per group of up to four labels,
    with the category name (if any) in the left column.
    """
    rows = calculate_lines(repo)
    row_height = 60
    canvas_w = 840
    canvas_h = 100 + len(rows) * row_height
    doc = Document()
    with doc.tag('svg', 'width="%s" height="%s"' % (canvas_w, canvas_h)):
        # Light-grey background covering the whole canvas.
        doc.tag('rect', 'x="0" y="0" width="%s" height="%s" fill="#eee"' % (canvas_w, canvas_h))
        write_text(doc, "Labels for %s/%s" % (org, repo.repo), size=25, fill="#444", x=40, y=50)
        row_y = 120
        for category, row_labels in rows:
            if category:
                write_text(doc, category, size=25, fill="#777", x=40, y=row_y)
            col_x = 200
            for label in row_labels:
                color = str(label["color"])
                write_rect(doc, x=col_x, y=row_y - 30, width=130, height=40, fill="#" + color)
                write_text(doc, label["name"], size=label_font_size,
                           fill=text_color(color), x=col_x + 13, y=row_y - 4)
                col_x += 150
            row_y += row_height
    with open(str(filename), "w") as out_file:
        out_file.write(doc.out)
| 34.310345 | 132 | 0.546734 | class Tag:
def __init__(self, doc, name):
self.doc = doc
self.name = name
def __enter__(self):
self.doc.level += 1
def __exit__(self, type, value, traceback):
self.doc.level -= 1
if self.doc.open_tag:
self.doc.out += "/>\n"
self.open_tag = False
self.doc.indent()
self.doc.out += "</" + self.name + ">\n"
class Document:
def __init__(self):
self.level = 0
self.last_level = 0
self.out = ""
self.open_tag = False
self.text('<?xml version="1.0" standalone="no"?>')
self.text('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">')
self.text("<!--\n This file was generated by `beautiful-labels`. Don't edit it directly.\n-->")
def indent(self):
self.out += self.level * ' '
def tag(self, name, attributes=None):
if self.open_tag:
if self.last_level == self.level:
self.out += "/>\n"
else:
self.out += ">\n"
self.open_tag = False
self.indent()
self.out += "<" + name
if attributes:
self.out += ' ' + attributes
if name == "svg":
self.out += ' xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"'
self.open_tag = True
self.last_level = self.level
return Tag(self, name)
def text(self, text):
if self.open_tag:
self.out += ">\n"
self.open_tag = False
self.indent()
self.out += text + "\n"
def calculate_lines(repo):
lines = []
for category in repo.all_categories():
line_header = category
line_labels = []
for label in repo.labels_for_category(category):
line_labels.append(label)
if len(line_labels) == 4:
lines.append((line_header, line_labels.copy()))
line_labels.clear()
line_header = ""
if line_labels:
lines.append((line_header, line_labels))
return lines
def text_color(background_color):
(r,g,b) = tuple(int(background_color[i:i+2], 16) for i in (0, 2 ,4))
# See https://www.w3.org/TR/AERT/#color-contrast for details about the formula
brightness = (0.299*r + 0.587*g + 0.114*b)
if brightness > 186:
return "black"
else:
return "white"
def write_text(doc, text, size=20, fill="black", x="0", y="0"):
with doc.tag('g', 'font-size="%s" font-family="arial" fill="%s"' % (size, fill)):
with doc.tag('text', 'x="%s" y="%s"' % (x, y)):
doc.text(text)
def write_rect(doc, x=0, y=0, width=10, height=10, fill="black"):
doc.tag('rect', 'x="%s" y="%s" width="%s" height="%s" fill="%s" rx="5"' % (x, y, width, height, fill))
def write_svg(org, repo, filename, label_font_size=14):
lines = calculate_lines(repo)
line_height = 60
image_width = 840
image_height = 100 + len(lines) * line_height
doc = Document()
with doc.tag('svg', 'width="%s" height="%s"' % (image_width, image_height)):
doc.tag('rect', 'x="0" y="0" width="%s" height="%s" fill="#eee"' % (image_width, image_height))
write_text(doc, "Labels for %s/%s" % (org, repo.repo), size=25, fill="#444", x=40, y=50)
line_y = 120
for category, labels_line in lines:
if category:
write_text(doc, category, size=25, fill="#777", x=40, y=line_y)
label_x = 200
for label in labels_line:
write_rect(doc, x=label_x, y=line_y-30, width=130, height=40, fill="#" + str(label["color"]))
write_text(doc, label["name"], size=label_font_size, fill=text_color(str(label["color"])), x=label_x+13, y=line_y-4)
label_x += 150
line_y += line_height
with open(str(filename), "w") as file:
file.write(doc.out)
| true | true |
f7faabf273c6a4e6495383d367209960633a73f2 | 285 | py | Python | program_data/clean_404.py | yifan-zhou19/ggnn_graph_classification | 37bca1315fdf95933f52b4cd504ce89a768a86df | [
"MIT"
] | 19 | 2019-02-26T05:43:39.000Z | 2022-02-21T04:14:46.000Z | program_data/clean_404.py | yifan-zhou19/ggnn_graph_classification | 37bca1315fdf95933f52b4cd504ce89a768a86df | [
"MIT"
] | null | null | null | program_data/clean_404.py | yifan-zhou19/ggnn_graph_classification | 37bca1315fdf95933f52b4cd504ce89a768a86df | [
"MIT"
] | 8 | 2018-12-25T02:05:41.000Z | 2022-02-23T07:50:48.000Z | import os
import os

# Directory of scraped program files to scan (relative to the working directory).
path = "github_cpp_program_data"

def clean_404(target_dir=path):
    """Delete every file under ``target_dir`` whose text contains "404".

    The scraped corpus contains files that are really HTTP 404 error pages;
    this removes them.  NOTE(review): the check matches "404" anywhere in the
    file, so legitimate sources containing that substring are removed too —
    confirm this is acceptable.

    Returns the list of removed file paths.
    """
    removed = []
    # topdown=False walks leaves first, mirroring the original traversal.
    for root, dirs, files in os.walk(target_dir, topdown=False):
        for filename in files:  # renamed from ``file`` to avoid shadowing the builtin
            file_path = os.path.join(root, filename)
            # errors="ignore": scraped files are not guaranteed to be valid
            # UTF-8; previously one undecodable file crashed the whole scan.
            with open(file_path, "r", errors="ignore") as f:
                data = f.read()
            if "404" in data:
                print(file_path)
                os.remove(file_path)
                removed.append(file_path)
    return removed

if __name__ == "__main__":
    clean_404()
| 23.75 | 54 | 0.670175 | import os
path = "github_cpp_program_data"
for root, dirs, files in os.walk(path, topdown=False):
for file in files:
file_path = os.path.join(root, file)
with open(file_path, "r") as f:
data = str(f.read())
if "404" in data:
print(file_path)
os.remove(file_path)
| true | true |
f7faacf8b915b685fa246da285be95cd96ee9acd | 6,967 | py | Python | model/vgg19/model4_val5.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | 3 | 2018-05-06T15:15:21.000Z | 2018-05-13T12:31:42.000Z | model/vgg19/model4_val5.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | model/vgg19/model4_val5.py | wan-h/JD-AI-Fashion-Challenge | 817f693672f418745e3a4c89a0417a3165b08130 | [
"MIT"
] | null | null | null | """
Based on model 2, with real-crop data augmentation added.
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[32, 32, 32],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.001, 0.0001, 0.00001],
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build and compile a DenseNet169-based multi-label classifier.

    Args:
        freeze_layers: -1 freezes the entire base model; a float in (0, 1)
            freezes that fraction of the base layers; an int >= 1 freezes that
            many layers counted from the input side.
        lr: Adam learning rate.
        output_dim: number of independent sigmoid outputs (one per label).
        weights: initial weights for the base model ("imagenet" or None when
            weights are loaded from a checkpoint afterwards).

    Returns:
        A compiled ``keras.Model``.
    """
    # NOTE(review): despite the vgg19 directory name, this builds DenseNet169.
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # Independent per-label probabilities (multi-label), hence sigmoid + binary_crossentropy.
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            # A fraction selects that share of the base model's layers.
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for layer_index in range(freeze_layers):
            # Bug fix: was ``.train_layer = False`` — a nonexistent attribute,
            # so partial freezing silently froze nothing; Keras uses ``.trainable``.
            base_model.layers[layer_index].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr))
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged fine-tuning schedule defined by ``model_config``.

    Each stage ``i`` has its own learning rate, freeze depth and epoch budget;
    stages after the first rebuild the model without pretrained weights and
    resume from the checkpoint written by the previous stage.  Evaluation runs
    asynchronously on a daemon worker thread fed through a queue.
    """
    # Background evaluation: a daemon thread consumes checkpoints from the queue.
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    start = time.time()
    model_config.save_log("####### start train model")
    # Resume support: skip stages already completed in a previous run.
    init_stage = model_config.get_init_stage()
    model_config.save_log("####### init stage is %d" % init_stage)
    for i in range(init_stage, len(model_config.epoch)):
        # NOTE(review): "%2f" is probably meant to be "%.2f" — confirm before editing.
        model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        # Cyclic LR oscillates between base lr and 5x base lr, half an epoch per step.
        clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                          step_size=model_config.get_steps_per_epoch(i) / 2)
        # Augmented training generator; ``real_transform`` presumably enables the
        # real-crop augmentation this model variant adds — confirm in data_loader.
        train_flow = data_loader.KerasGenerator(model_config=model_config,
                                                featurewise_center=True,
                                                featurewise_std_normalization=True,
                                                width_shift_range=0.15,
                                                height_shift_range=0.1,
                                                horizontal_flip=True,
                                                real_transform=True,
                                                rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
                                                                                  target_size=model_config.image_size,
                                                                                  batch_size=
                                                                                  model_config.train_batch_size[i],
                                                                                  shuffle=True,
                                                                                  label_position=model_config.label_position)
        if i == 0:
            # First stage: fresh ImageNet-initialised model.
            model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
            model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
                              output_dim=len(model_config.label_position))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                workers=16,
                                verbose=1,
                                callbacks=[checkpoint, clr])
        else:
            # Later stages: build the architecture without pretrained weights
            # and load a saved checkpoint instead.
            model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
                              lr=model_config.lr[i], weights=None)
            if i == init_stage:
                # Resuming mid-stage: continue from the epoch recorded in model_config.
                model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
                model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
                model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
                    model_config.initial_epoch, model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.initial_epoch,
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
            else:
                # Normal stage transition: continue from the last epoch of stage i-1.
                model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
                model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
                model_config.save_log(
                    "####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.epoch[i - 1],
                                    workers=16,
                                    verbose=1,
                                    callbacks=[checkpoint, clr])
    model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
    model_config.save_log("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| 51.227941 | 129 | 0.519736 | import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=224,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[32, 32, 32],
val_batch_size=256,
predict_batch_size=256,
epoch=[1, 4, 10],
lr=[0.001, 0.0001, 0.00001],
freeze_layers=[-1, 0.6, 5])
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
input_shape=model_config.image_shape, pooling="avg")
x = base_model.output
x = Dense(256, use_bias=False)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
predictions = Dense(units=output_dim, activation='sigmoid')(x)
model = keras.Model(inputs=base_model.input, outputs=predictions)
if freeze_layers == -1:
print("freeze all basic layers, lr=%f" % lr)
for layer in base_model.layers:
layer.trainable = False
else:
if freeze_layers < 1:
freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
for layer in range(freeze_layers):
base_model.layers[layer].train_layer = False
print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
model.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.Adam(lr=lr))
print("basic model have %d layers" % len(base_model.layers))
return model
def train():
evaluate_queue = queue.Queue()
evaluate_task = keras_util.EvaluateTask(evaluate_queue)
evaluate_task.setDaemon(True)
evaluate_task.start()
checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
start = time.time()
model_config.save_log("####### start train model")
init_stage = model_config.get_init_stage()
model_config.save_log("####### init stage is %d" % init_stage)
for i in range(init_stage, len(model_config.epoch)):
model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
step_size=model_config.get_steps_per_epoch(i) / 2)
train_flow = data_loader.KerasGenerator(model_config=model_config,
featurewise_center=True,
featurewise_std_normalization=True,
width_shift_range=0.15,
height_shift_range=0.1,
horizontal_flip=True,
real_transform=True,
rescale=1. / 256).flow_from_files(model_config.train_files, mode="fit",
target_size=model_config.image_size,
batch_size=
model_config.train_batch_size[i],
shuffle=True,
label_position=model_config.label_position)
if i == 0:
model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
output_dim=len(model_config.label_position))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
lr=model_config.lr[i], weights=None)
if i == init_stage:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
model_config.initial_epoch, model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.initial_epoch,
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
else:
model_config.save_log("####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
model_config.save_log(
"####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
model.fit_generator(generator=train_flow,
steps_per_epoch=model_config.get_steps_per_epoch(i),
epochs=model_config.epoch[i],
initial_epoch=model_config.epoch[i - 1],
workers=16,
verbose=1,
callbacks=[checkpoint, clr])
model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
model_config.save_log("####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
| true | true |
f7faad658eda1d9df5d5b8de9498b459d238904e | 59 | py | Python | settings.py | tburrows13/perudo-online | b88c5c9aa0957abd99f87d1653216edded1130b9 | [
"MIT"
] | 1 | 2018-06-08T16:56:48.000Z | 2018-06-08T16:56:48.000Z | settings.py | tburrows13/perudo-online | b88c5c9aa0957abd99f87d1653216edded1130b9 | [
"MIT"
] | null | null | null | settings.py | tburrows13/perudo-online | b88c5c9aa0957abd99f87d1653216edded1130b9 | [
"MIT"
] | 1 | 2020-10-22T13:12:11.000Z | 2020-10-22T13:12:11.000Z | server_ip = "localhost"
server_port = 20006
MAX_PLAYERS = 6 | 19.666667 | 23 | 0.779661 | server_ip = "localhost"
server_port = 20006
MAX_PLAYERS = 6 | true | true |
f7faad77d6b11163642289c028ca1c22d9ca22a1 | 16,330 | py | Python | tests/rule_based_profiler/parameter_builder/test_parameter_container.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | tests/rule_based_profiler/parameter_builder/test_parameter_container.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | tests/rule_based_profiler/parameter_builder/test_parameter_container.py | andyjessen/great_expectations | 74f7f2aa7b51144f34156ed49490dae4edaa5cb7 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.types import (
Domain,
ParameterContainer,
ParameterNode,
build_parameter_container,
build_parameter_container_for_variables,
get_fully_qualified_parameter_names,
get_parameter_values_for_fully_qualified_parameter_names,
)
def test_build_parameter_container(
    parameters_with_different_depth_level_values,
    multi_part_name_parameter_container,
):
    """Building from flat parameter values reproduces the hand-built container.

    Both arguments are pytest fixtures: the raw multi-depth parameter values
    and the ParameterContainer they are expected to produce.
    """
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=parameter_container,
        parameter_values=parameters_with_different_depth_level_values,
    )
    # Equality relies on ParameterContainer's structural __eq__.
    assert parameter_container == multi_part_name_parameter_container
def test_get_fully_qualified_parameter_names(
    parameters_with_different_depth_level_values,
):
    """Every leaf parameter name (plus $variables and $mean) is reported for a domain."""
    container = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=container,
        parameter_values=parameters_with_different_depth_level_values,
    )
    domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=None,
        details=None,
        rule_name="my_rule",
    )
    # Variables are supplied as a ParameterContainer as well.
    variables = build_parameter_container_for_variables(
        variables_configs={
            "my_int": 9,
            "my_float": 3.38,
            "my_string": "hello",
        }
    )
    parameters = {domain.id: container}
    expected_names = [
        "$variables",
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format",
        "$parameter.date_strings.yyyy_mm_dd_date_format",
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format",
        "$parameter.date_strings.mm_yyyy_dd_date_format",
        "$parameter.date_strings.tolerances.max_abs_error_time_milliseconds",
        "$parameter.date_strings.tolerances.max_num_conversion_attempts",
        "$parameter.tolerances.mostly",
        "$parameter.tolerances.financial.usd",
        "$parameter.monthly_taxi_fairs.mean_values",
        "$parameter.daily_taxi_fairs.mean_values",
        "$parameter.weekly_taxi_fairs.mean_values",
        "$mean",
    ]
    actual_names = get_fully_qualified_parameter_names(
        domain=domain,
        variables=variables,
        parameters=parameters,
    )
    # Ordering is not guaranteed, so compare as sorted multisets.
    assert len(actual_names) == len(expected_names)
    assert sorted(actual_names) == sorted(expected_names)
def test_get_parameter_values_for_fully_qualified_parameter_names(
parameters_with_different_depth_level_values,
):
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
build_parameter_container(
parameter_container=parameter_container,
parameter_values=parameters_with_different_depth_level_values,
)
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs=None,
details=None,
rule_name="my_rule",
)
# Convert variables argument to ParameterContainer
variables: ParameterContainer = build_parameter_container_for_variables(
variables_configs={
"my_int": 9,
"my_float": 3.38,
"my_string": "hello",
}
)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
# fmt: off
expected_parameter_values_for_fully_qualified_parameter_names: Dict[str, ParameterNode] = {
"$variables": {
"my_int": 9,
"my_float": 3.38,
"my_string": "hello",
},
"$parameter.weekly_taxi_fairs.mean_values": {
"value": [
{
"sunday": 71.43,
"monday": 74.35,
"tuesday": 42.3,
"wednesday": 42.3,
"thursday": 82.2,
"friday": 78.78,
"saturday": 91.39,
},
{
"sunday": 81.43,
"monday": 84.35,
"tuesday": 52.3,
"wednesday": 43.3,
"thursday": 22.2,
"friday": 98.78,
"saturday": 81.39,
},
{
"sunday": 61.43,
"monday": 34.35,
"tuesday": 82.3,
"wednesday": 72.3,
"thursday": 22.2,
"friday": 38.78,
"saturday": 51.39,
},
{
"sunday": 51.43,
"monday": 64.35,
"tuesday": 72.3,
"wednesday": 82.3,
"thursday": 22.2,
"friday": 98.78,
"saturday": 31.39,
},
{
"sunday": 72.43,
"monday": 77.35,
"tuesday": 46.3,
"wednesday": 47.3,
"thursday": 88.2,
"friday": 79.78,
"saturday": 93.39,
},
{
"sunday": 72.43,
"monday": 73.35,
"tuesday": 41.3,
"wednesday": 49.3,
"thursday": 80.2,
"friday": 78.78,
"saturday": 93.39,
},
{
"sunday": 74.43,
"monday": 78.35,
"tuesday": 49.3,
"wednesday": 43.3,
"thursday": 88.2,
"friday": 72.78,
"saturday": 97.39,
},
{
"sunday": 73.43,
"monday": 72.35,
"tuesday": 40.3,
"wednesday": 40.3,
"thursday": 89.2,
"friday": 77.78,
"saturday": 90.39,
},
{
"sunday": 72.43,
"monday": 73.35,
"tuesday": 45.3,
"wednesday": 44.3,
"thursday": 89.2,
"friday": 77.78,
"saturday": 96.39,
},
{
"sunday": 75.43,
"monday": 74.25,
"tuesday": 42.33,
"wednesday": 42.23,
"thursday": 82.21,
"friday": 78.76,
"saturday": 91.37,
},
{
"sunday": 71.43,
"monday": 74.37,
"tuesday": 42.3,
"wednesday": 42.32,
"thursday": 82.23,
"friday": 78.77,
"saturday": 91.49,
},
{
"sunday": 71.63,
"monday": 74.37,
"tuesday": 42.2,
"wednesday": 42.1,
"thursday": 82.29,
"friday": 78.79,
"saturday": 91.39,
},
{
"sunday": 71.42,
"monday": 74.33,
"tuesday": 42.33,
"wednesday": 42.34,
"thursday": 82.25,
"friday": 78.77,
"saturday": 91.69,
},
{
"sunday": 71.44,
"monday": 72.35,
"tuesday": 42.33,
"wednesday": 42.31,
"thursday": 82.29,
"friday": 78.68,
"saturday": 91.49,
},
{
"sunday": 71.44,
"monday": 74.32,
"tuesday": 42.32,
"wednesday": 42.32,
"thursday": 82.29,
"friday": 78.77,
"saturday": 91.49,
},
{
"sunday": 71.44,
"monday": 74.33,
"tuesday": 42.21,
"wednesday": 42.31,
"thursday": 82.27,
"friday": 78.74,
"saturday": 91.49,
},
{
"sunday": 71.33,
"monday": 74.25,
"tuesday": 42.31,
"wednesday": 42.03,
"thursday": 82.02,
"friday": 78.08,
"saturday": 91.38,
},
{
"sunday": 71.41,
"monday": 74.31,
"tuesday": 42.39,
"wednesday": 42.93,
"thursday": 82.92,
"friday": 78.75,
"saturday": 91.49,
},
{
"sunday": 72.43,
"monday": 73.35,
"tuesday": 42.3,
"wednesday": 32.3,
"thursday": 52.2,
"friday": 88.78,
"saturday": 81.39,
},
{
"sunday": 71.43,
"monday": 74.35,
"tuesday": 32.3,
"wednesday": 92.3,
"thursday": 72.2,
"friday": 74.78,
"saturday": 51.39,
},
{
"sunday": 72.43,
"monday": 64.35,
"tuesday": 52.3,
"wednesday": 42.39,
"thursday": 82.28,
"friday": 78.77,
"saturday": 91.36,
},
{
"sunday": 81.43,
"monday": 94.35,
"tuesday": 62.3,
"wednesday": 52.3,
"thursday": 92.2,
"friday": 88.78,
"saturday": 51.39,
},
{
"sunday": 21.43,
"monday": 34.35,
"tuesday": 42.34,
"wednesday": 62.3,
"thursday": 52.2,
"friday": 98.78,
"saturday": 81.39,
},
{
"sunday": 71.33,
"monday": 74.25,
"tuesday": 42.13,
"wednesday": 42.93,
"thursday": 82.82,
"friday": 78.78,
"saturday": 91.39,
},
{
"sunday": 72.43,
"monday": 73.35,
"tuesday": 44.3,
"wednesday": 45.3,
"thursday": 86.2,
"friday": 77.78,
"saturday": 98.39,
},
{
"sunday": 79.43,
"monday": 78.35,
"tuesday": 47.3,
"wednesday": 46.3,
"thursday": 85.2,
"friday": 74.78,
"saturday": 93.39,
},
{
"sunday": 71.42,
"monday": 74.31,
"tuesday": 42.0,
"wednesday": 42.1,
"thursday": 82.23,
"friday": 65.78,
"saturday": 91.26,
},
{
"sunday": 91.43,
"monday": 84.35,
"tuesday": 42.37,
"wednesday": 42.36,
"thursday": 82.25,
"friday": 78.74,
"saturday": 91.32,
},
{
"sunday": 71.33,
"monday": 74.45,
"tuesday": 42.35,
"wednesday": 42.36,
"thursday": 82.27,
"friday": 26.78,
"saturday": 71.39,
},
{
"sunday": 71.53,
"monday": 73.35,
"tuesday": 43.32,
"wednesday": 42.23,
"thursday": 82.32,
"friday": 78.18,
"saturday": 91.49,
},
{
"sunday": 71.53,
"monday": 74.25,
"tuesday": 52.3,
"wednesday": 52.3,
"thursday": 81.23,
"friday": 78.78,
"saturday": 78.39,
},
],
"details": {
"confidence": "high",
},
},
"$parameter.tolerances.mostly": 0.91,
"$parameter.tolerances.financial.usd": 1.0,
"$parameter.monthly_taxi_fairs.mean_values": {
"value": [
2.3,
9.8,
42.3,
8.1,
38.5,
53.7,
71.43,
16.34,
49.43,
74.35,
51.98,
46.42,
20.01,
69.44,
65.32,
8.83,
55.79,
82.2,
36.93,
83.78,
31.13,
76.93,
67.67,
25.12,
58.04,
79.78,
90.91,
15.26,
61.65,
78.78,
12.99,
],
"details": {
"confidence": "low",
},
},
"$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format": {
"value": "%Y-%m-%d %H:%M:%S %Z",
"details": {
"confidence": 0.78,
},
},
"$parameter.date_strings.yyyy_mm_dd_date_format": {
"value": "%Y-%m-%d",
"details": {
"confidence": 0.78,
},
},
"$parameter.date_strings.tolerances.max_num_conversion_attempts": 5,
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds": 100,
"$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format": {
"value": "%m-%Y-%d %H:%M:%S %Z",
"details": {
"confidence": 0.78,
},
},
"$parameter.date_strings.mm_yyyy_dd_date_format": {
"value": "%m-%Y-%d",
"details": {
"confidence": 0.78,
},
},
"$parameter.daily_taxi_fairs.mean_values": {
"value": {
"sunday": 71.43,
"monday": 74.35,
"tuesday": 42.3,
"wednesday": 42.3,
"thursday": 82.2,
"friday": 78.78,
"saturday": 91.39,
},
"details": {
"confidence": "medium",
},
},
"$mean": 0.65,
}
# fmt: on
parameter_values_for_fully_qualified_parameter_names: Dict[
str, ParameterNode
] = get_parameter_values_for_fully_qualified_parameter_names(
domain=domain,
variables=variables,
parameters=parameters,
)
assert (
parameter_values_for_fully_qualified_parameter_names
== expected_parameter_values_for_fully_qualified_parameter_names
)
| 32.791165 | 95 | 0.395407 | from typing import Dict, List
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.types import (
Domain,
ParameterContainer,
ParameterNode,
build_parameter_container,
build_parameter_container_for_variables,
get_fully_qualified_parameter_names,
get_parameter_values_for_fully_qualified_parameter_names,
)
def test_build_parameter_container(
parameters_with_different_depth_level_values,
multi_part_name_parameter_container,
):
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
build_parameter_container(
parameter_container=parameter_container,
parameter_values=parameters_with_different_depth_level_values,
)
assert parameter_container == multi_part_name_parameter_container
def test_get_fully_qualified_parameter_names(
parameters_with_different_depth_level_values,
):
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
build_parameter_container(
parameter_container=parameter_container,
parameter_values=parameters_with_different_depth_level_values,
)
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN,
domain_kwargs=None,
details=None,
rule_name="my_rule",
)
variables: ParameterContainer = build_parameter_container_for_variables(
variables_configs={
"my_int": 9,
"my_float": 3.38,
"my_string": "hello",
}
)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
expected_fully_qualified_parameter_names: List[str] = [
"$variables",
"$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format",
"$parameter.date_strings.yyyy_mm_dd_date_format",
"$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format",
"$parameter.date_strings.mm_yyyy_dd_date_format",
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds",
"$parameter.date_strings.tolerances.max_num_conversion_attempts",
"$parameter.tolerances.mostly",
"$parameter.tolerances.financial.usd",
"$parameter.monthly_taxi_fairs.mean_values",
"$parameter.daily_taxi_fairs.mean_values",
"$parameter.weekly_taxi_fairs.mean_values",
"$mean",
]
fully_qualified_parameter_names: List[str] = get_fully_qualified_parameter_names(
domain=domain,
variables=variables,
parameters=parameters,
)
assert len(fully_qualified_parameter_names) == len(
expected_fully_qualified_parameter_names
)
assert sorted(fully_qualified_parameter_names) == sorted(
expected_fully_qualified_parameter_names
)
def test_get_parameter_values_for_fully_qualified_parameter_names(
    parameters_with_different_depth_level_values,
):
    """End-to-end check of get_parameter_values_for_fully_qualified_parameter_names().

    Builds a ParameterContainer from the fixture's multi-level parameter
    values, attaches it to a COLUMN Domain, and verifies that every
    fully-qualified name ("$variables", "$parameter....", "$mean") resolves
    to exactly the node/value recorded in the fixture.
    """
    parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
    build_parameter_container(
        parameter_container=parameter_container,
        parameter_values=parameters_with_different_depth_level_values,
    )
    domain: Domain = Domain(
        domain_type=MetricDomainTypes.COLUMN,
        domain_kwargs=None,
        details=None,
        rule_name="my_rule",
    )
    variables: ParameterContainer = build_parameter_container_for_variables(
        variables_configs={
            "my_int": 9,
            "my_float": 3.38,
            "my_string": "hello",
        }
    )
    parameters: Dict[str, ParameterContainer] = {
        domain.id: parameter_container,
    }
    # Expected resolved value for every fully-qualified parameter name; must
    # mirror the fixture "parameters_with_different_depth_level_values" exactly.
    expected_parameter_values_for_fully_qualified_parameter_names: Dict[str, ParameterNode] = {
        "$variables": {
            "my_int": 9,
            "my_float": 3.38,
            "my_string": "hello",
        },
        "$parameter.weekly_taxi_fairs.mean_values": {
            "value": [
                {
                    "sunday": 71.43,
                    "monday": 74.35,
                    "tuesday": 42.3,
                    "wednesday": 42.3,
                    "thursday": 82.2,
                    "friday": 78.78,
                    "saturday": 91.39,
                },
                {
                    "sunday": 81.43,
                    "monday": 84.35,
                    "tuesday": 52.3,
                    "wednesday": 43.3,
                    "thursday": 22.2,
                    "friday": 98.78,
                    "saturday": 81.39,
                },
                {
                    "sunday": 61.43,
                    "monday": 34.35,
                    "tuesday": 82.3,
                    "wednesday": 72.3,
                    "thursday": 22.2,
                    "friday": 38.78,
                    "saturday": 51.39,
                },
                {
                    "sunday": 51.43,
                    "monday": 64.35,
                    "tuesday": 72.3,
                    "wednesday": 82.3,
                    "thursday": 22.2,
                    "friday": 98.78,
                    "saturday": 31.39,
                },
                {
                    "sunday": 72.43,
                    "monday": 77.35,
                    "tuesday": 46.3,
                    "wednesday": 47.3,
                    "thursday": 88.2,
                    "friday": 79.78,
                    "saturday": 93.39,
                },
                {
                    "sunday": 72.43,
                    "monday": 73.35,
                    "tuesday": 41.3,
                    "wednesday": 49.3,
                    "thursday": 80.2,
                    "friday": 78.78,
                    "saturday": 93.39,
                },
                {
                    "sunday": 74.43,
                    "monday": 78.35,
                    "tuesday": 49.3,
                    "wednesday": 43.3,
                    "thursday": 88.2,
                    "friday": 72.78,
                    "saturday": 97.39,
                },
                {
                    "sunday": 73.43,
                    "monday": 72.35,
                    "tuesday": 40.3,
                    "wednesday": 40.3,
                    "thursday": 89.2,
                    "friday": 77.78,
                    "saturday": 90.39,
                },
                {
                    "sunday": 72.43,
                    "monday": 73.35,
                    "tuesday": 45.3,
                    "wednesday": 44.3,
                    "thursday": 89.2,
                    "friday": 77.78,
                    "saturday": 96.39,
                },
                {
                    "sunday": 75.43,
                    "monday": 74.25,
                    "tuesday": 42.33,
                    "wednesday": 42.23,
                    "thursday": 82.21,
                    "friday": 78.76,
                    "saturday": 91.37,
                },
                {
                    "sunday": 71.43,
                    "monday": 74.37,
                    "tuesday": 42.3,
                    "wednesday": 42.32,
                    "thursday": 82.23,
                    "friday": 78.77,
                    "saturday": 91.49,
                },
                {
                    "sunday": 71.63,
                    "monday": 74.37,
                    "tuesday": 42.2,
                    "wednesday": 42.1,
                    "thursday": 82.29,
                    "friday": 78.79,
                    "saturday": 91.39,
                },
                {
                    "sunday": 71.42,
                    "monday": 74.33,
                    "tuesday": 42.33,
                    "wednesday": 42.34,
                    "thursday": 82.25,
                    "friday": 78.77,
                    "saturday": 91.69,
                },
                {
                    "sunday": 71.44,
                    "monday": 72.35,
                    "tuesday": 42.33,
                    "wednesday": 42.31,
                    "thursday": 82.29,
                    "friday": 78.68,
                    "saturday": 91.49,
                },
                {
                    "sunday": 71.44,
                    "monday": 74.32,
                    "tuesday": 42.32,
                    "wednesday": 42.32,
                    "thursday": 82.29,
                    "friday": 78.77,
                    "saturday": 91.49,
                },
                {
                    "sunday": 71.44,
                    "monday": 74.33,
                    "tuesday": 42.21,
                    "wednesday": 42.31,
                    "thursday": 82.27,
                    "friday": 78.74,
                    "saturday": 91.49,
                },
                {
                    "sunday": 71.33,
                    "monday": 74.25,
                    "tuesday": 42.31,
                    "wednesday": 42.03,
                    "thursday": 82.02,
                    "friday": 78.08,
                    "saturday": 91.38,
                },
                {
                    "sunday": 71.41,
                    "monday": 74.31,
                    "tuesday": 42.39,
                    "wednesday": 42.93,
                    "thursday": 82.92,
                    "friday": 78.75,
                    "saturday": 91.49,
                },
                {
                    "sunday": 72.43,
                    "monday": 73.35,
                    "tuesday": 42.3,
                    "wednesday": 32.3,
                    "thursday": 52.2,
                    "friday": 88.78,
                    "saturday": 81.39,
                },
                {
                    "sunday": 71.43,
                    "monday": 74.35,
                    "tuesday": 32.3,
                    "wednesday": 92.3,
                    "thursday": 72.2,
                    "friday": 74.78,
                    "saturday": 51.39,
                },
                {
                    "sunday": 72.43,
                    "monday": 64.35,
                    "tuesday": 52.3,
                    "wednesday": 42.39,
                    "thursday": 82.28,
                    "friday": 78.77,
                    "saturday": 91.36,
                },
                {
                    "sunday": 81.43,
                    "monday": 94.35,
                    "tuesday": 62.3,
                    "wednesday": 52.3,
                    "thursday": 92.2,
                    "friday": 88.78,
                    "saturday": 51.39,
                },
                {
                    "sunday": 21.43,
                    "monday": 34.35,
                    "tuesday": 42.34,
                    "wednesday": 62.3,
                    "thursday": 52.2,
                    "friday": 98.78,
                    "saturday": 81.39,
                },
                {
                    "sunday": 71.33,
                    "monday": 74.25,
                    "tuesday": 42.13,
                    "wednesday": 42.93,
                    "thursday": 82.82,
                    "friday": 78.78,
                    "saturday": 91.39,
                },
                {
                    "sunday": 72.43,
                    "monday": 73.35,
                    "tuesday": 44.3,
                    "wednesday": 45.3,
                    "thursday": 86.2,
                    "friday": 77.78,
                    "saturday": 98.39,
                },
                {
                    "sunday": 79.43,
                    "monday": 78.35,
                    "tuesday": 47.3,
                    "wednesday": 46.3,
                    "thursday": 85.2,
                    "friday": 74.78,
                    "saturday": 93.39,
                },
                {
                    "sunday": 71.42,
                    "monday": 74.31,
                    "tuesday": 42.0,
                    "wednesday": 42.1,
                    "thursday": 82.23,
                    "friday": 65.78,
                    "saturday": 91.26,
                },
                {
                    "sunday": 91.43,
                    "monday": 84.35,
                    "tuesday": 42.37,
                    "wednesday": 42.36,
                    "thursday": 82.25,
                    "friday": 78.74,
                    "saturday": 91.32,
                },
                {
                    "sunday": 71.33,
                    "monday": 74.45,
                    "tuesday": 42.35,
                    "wednesday": 42.36,
                    "thursday": 82.27,
                    "friday": 26.78,
                    "saturday": 71.39,
                },
                {
                    "sunday": 71.53,
                    "monday": 73.35,
                    "tuesday": 43.32,
                    "wednesday": 42.23,
                    "thursday": 82.32,
                    "friday": 78.18,
                    "saturday": 91.49,
                },
                {
                    "sunday": 71.53,
                    "monday": 74.25,
                    "tuesday": 52.3,
                    "wednesday": 52.3,
                    "thursday": 81.23,
                    "friday": 78.78,
                    "saturday": 78.39,
                },
            ],
            "details": {
                "confidence": "high",
            },
        },
        "$parameter.tolerances.mostly": 0.91,
        "$parameter.tolerances.financial.usd": 1.0,
        "$parameter.monthly_taxi_fairs.mean_values": {
            "value": [
                2.3,
                9.8,
                42.3,
                8.1,
                38.5,
                53.7,
                71.43,
                16.34,
                49.43,
                74.35,
                51.98,
                46.42,
                20.01,
                69.44,
                65.32,
                8.83,
                55.79,
                82.2,
                36.93,
                83.78,
                31.13,
                76.93,
                67.67,
                25.12,
                58.04,
                79.78,
                90.91,
                15.26,
                61.65,
                78.78,
                12.99,
            ],
            "details": {
                "confidence": "low",
            },
        },
        "$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format": {
            "value": "%Y-%m-%d %H:%M:%S %Z",
            "details": {
                "confidence": 0.78,
            },
        },
        "$parameter.date_strings.yyyy_mm_dd_date_format": {
            "value": "%Y-%m-%d",
            "details": {
                "confidence": 0.78,
            },
        },
        "$parameter.date_strings.tolerances.max_num_conversion_attempts": 5,
        "$parameter.date_strings.tolerances.max_abs_error_time_milliseconds": 100,
        "$parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format": {
            "value": "%m-%Y-%d %H:%M:%S %Z",
            "details": {
                "confidence": 0.78,
            },
        },
        "$parameter.date_strings.mm_yyyy_dd_date_format": {
            "value": "%m-%Y-%d",
            "details": {
                "confidence": 0.78,
            },
        },
        "$parameter.daily_taxi_fairs.mean_values": {
            "value": {
                "sunday": 71.43,
                "monday": 74.35,
                "tuesday": 42.3,
                "wednesday": 42.3,
                "thursday": 82.2,
                "friday": 78.78,
                "saturday": 91.39,
            },
            "details": {
                "confidence": "medium",
            },
        },
        "$mean": 0.65,
    }
    # Exercise the API under test and compare against the full expectation.
    parameter_values_for_fully_qualified_parameter_names: Dict[
        str, ParameterNode
    ] = get_parameter_values_for_fully_qualified_parameter_names(
        domain=domain,
        variables=variables,
        parameters=parameters,
    )
    assert (
        parameter_values_for_fully_qualified_parameter_names
        == expected_parameter_values_for_fully_qualified_parameter_names
    )
| true | true |
f7faade87f6260f80c525f74f167ed204f2a0750 | 18,601 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/15-sender_receiver_35.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/15-sender_receiver_35.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/15-sender_receiver_35.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare a state variable and its primed (next-state) companion.

    Returns the pair ``(current, next)`` as MathSAT constant terms; names
    starting with an underscore are reserved and rejected.
    """
    assert not name.startswith("_"), name

    def _mk_const(symb_name: str):
        # declare-then-wrap is the MathSAT idiom for a 0-ary constant term
        decl = msat_declare_function(menv, symb_name, c_type)
        return msat_make_constant(menv, decl)

    return _mk_const(name), _mk_const(name_next(name))
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enum of ``enum_size`` values over ceil(log2(n)) booleans.

    Returns ``(b_vars, vals, x_vals)`` where ``b_vars`` holds the
    (current, next) pairs of the boolean carrier variables, and
    ``vals[i]`` / ``x_vals[i]`` are the current-/next-state predicates
    identifying the i-th enum value.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Fixed-width binary encoding of enum_val.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # Positive (curr, next) literal pair for '1' bits, negated pair for
        # '0' bits; reversed() so index 0 corresponds to the LSB.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 - arg1`` as ``arg0 + (arg1 * -1)``."""
    minus_one = msat_make_number(menv, "-1")
    return msat_make_plus(menv, arg0, msat_make_times(menv, arg1, minus_one))
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 < arg1`` as ``not (arg0 >= arg1)``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 >= arg1`` via the primitive ``leq`` with swapped operands."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode ``arg0 > arg1`` as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode the implication ``arg0 -> arg1`` as ``(not arg0) | arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols that must diverge over time: the rational ``delta``."""
    rational = msat_get_rational_type(menv)
    delta_decl = msat_declare_function(menv, delta_name, rational)
    return frozenset([msat_make_constant(menv, delta_decl)])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the sender/receiver timed transition system and the LTL property.

    Returns ``(curr2next, init, trans, ltl)`` where the property is
    ``(G F !s.stutter) -> G (s.wait_ack -> F s.send)``.
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # Shared channels: receiver->sender and sender->receiver, plus time delta.
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r): channels are frozen while time elapses.
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for the symbolic transition-system components.

    Stores the component name, the MathSAT environment, the LTL encoder and
    the curr->next symbol map; ``init`` and ``trans`` start out as ``true``
    and are strengthened by subclasses.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        # Subclasses conjoin their own constraints onto these.
        top = msat_make_true(menv)
        self.init = top
        self.trans = top

    def _symb(self, v_name, v_type):
        # Prefix with the component name to keep symbols of different
        # modules from clashing.
        return decl_consts(self.menv, "{}_{}".format(self.name, v_name), v_type)

    def _enum(self, v_name: str, enum_size: int):
        # Same prefixing convention as _symb, but for enum encodings.
        return make_enum(self.menv, "{}_{}".format(self.name, v_name), enum_size)
class Sender(Module):
    """Sender of the timed sender/receiver protocol.

    Two locations (``send`` / ``wait_ack``) encoded in one boolean; keeps a
    message counter ``msg_id``, a clock ``c`` and a retransmission
    ``timeout``.  Writes message ids to ``out_c`` and reads acks from
    ``in_c``; ``evt`` distinguishes a real move from a stutter step.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        # Event aliases: evt true = move, false = stutter.
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        # Location aliases: loc true = send, false = wait_ack.
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        # NOTE: this replaces the `true` trans inherited from Module; every
        # constraint below is conjoined onto it.
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete transition: a real move with no time elapse.
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c > timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    """Receiver of the timed sender/receiver protocol.

    Two locations (``wait`` / ``work``) encoded in one boolean; reads
    message ids from ``in_c`` and echoes acknowledgements on ``out_c``.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        # Location aliases: loc true = wait, false = work.
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c  (frozen while time elapses)
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        # Discrete transition: no time elapse.
        disc_t = msat_make_equal(menv, delta, zero)
        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c  (acknowledge the received message)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of ranking-function hints for this benchmark instance.

    Each Hint is a small automaton (Locations with progress conditions) over
    a subset of the system symbols.

    NOTE(review): ``s2r`` below is bound to the symbol named "r2s", i.e. it
    aliases ``r2s``; given the metadata places this file in a "wrong hints"
    benchmark family, this appears deliberate — confirm before changing.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("r2s", types.INT)  # NOTE(review): name "r2s", not "s2r" — see docstring
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    # delta >= 0, progress: delta' = 1
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r1))
    hint = Hint("h_delta1", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_l alternates between its two locations.
    loc0 = Location(env, s_l)
    loc0.set_progress(1, mgr.Not(x_s_l))
    loc1 = Location(env, mgr.Not(s_l))
    loc1.set_progress(0, x_s_l)
    hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    # s_evt alternates between its two locations.
    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    # delta stays at 0.
    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # r_l stays true.
    loc0 = Location(env, r_l)
    loc0.set_progress(0, x_r_l)
    hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_evt stays true.
    loc0 = Location(env, s_evt)
    loc0.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_c >= 0, progress: s_c' = s_c + 1
    loc0 = Location(env, mgr.GE(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
    hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s2r >= 0, progress: s2r' = 1
    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i1))
    hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_timeout stays at 0.
    loc0 = Location(env, mgr.Equals(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
    hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # delta >= 0, progress: delta' = delta + 1
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
    hint = Hint("h_delta2", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_c stays at 0.
    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s2r stays at 0.
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_timeout >= 0, progress: s_timeout' = s_timeout + 1
    loc0 = Location(env, mgr.GE(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
    hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # r_l alternates between its two locations.
    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)
    # s_l stays true.
    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    return frozenset(res)
| 38.273663 | 89 | 0.576582 | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of loop-acceleration hints for this benchmark.

    Each ``Hint`` owns a subset of the model symbols and pairs one or more
    ``Location`` regions (predicates over the current state) with a
    progress formula over the corresponding next-state symbols.

    Bug fixed: ``s2r`` was created with the name ``"r2s"``, so it aliased
    the ``r2s`` symbol and the ``h_s2r0``/``h_s2r1`` hints silently
    constrained ``r2s`` instead of ``s2r``.

    :param env: pysmt environment whose formula manager builds all terms.
    :return: frozen set of the constructed hints.
    """
    assert isinstance(env, PysmtEnv)

    mgr = env.formula_manager
    # Current-state symbols of the sender/receiver model.
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    s2r = mgr.Symbol("s2r", types.INT)  # fixed: was mgr.Symbol("r2s", ...)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])

    # Next-state symbols actually referenced by the hints below.
    # (x_r2s and x_s_msg_id were computed but never used; removed.)
    x_delta = symb_to_next(mgr, delta)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)

    res = []
    # Shared numeric constants.
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)

    # delta >= 0 with delta' = 1.
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r1))
    hint = Hint("h_delta1", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_l alternates between the two locations.
    loc0 = Location(env, s_l)
    loc0.set_progress(1, mgr.Not(x_s_l))
    loc1 = Location(env, mgr.Not(s_l))
    loc1.set_progress(0, x_s_l)
    hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    # s_evt alternates.
    loc0 = Location(env, s_evt)
    loc0.set_progress(1, mgr.Not(x_s_evt))
    loc1 = Location(env, mgr.Not(s_evt))
    loc1.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    # delta stuck at 0.
    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # r_l held true.
    loc0 = Location(env, r_l)
    loc0.set_progress(0, x_r_l)
    hint = Hint("h_r_l0", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_evt held true.
    loc0 = Location(env, s_evt)
    loc0.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_c non-negative and incremented each step.
    loc0 = Location(env, mgr.GE(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
    hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s2r non-negative with s2r' = 1.
    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i1))
    hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_timeout stuck at 0.
    loc0 = Location(env, mgr.Equals(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
    hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # delta non-negative and incremented each step.
    loc0 = Location(env, mgr.GE(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
    hint = Hint("h_delta2", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_c stuck at 0.
    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s2r stuck at 0.
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # s_timeout non-negative and incremented each step.
    loc0 = Location(env, mgr.GE(s_timeout, r0))
    loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
    hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    # r_l alternates.
    loc0 = Location(env, r_l)
    loc0.set_progress(1, mgr.Not(x_r_l))
    loc1 = Location(env, mgr.Not(r_l))
    loc1.set_progress(0, x_r_l)
    hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
    hint.set_locs([loc0, loc1])
    res.append(hint)

    # s_l held true.
    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)

    return frozenset(res)
| true | true |
f7faae0860a8a5881c97468472d89213b45e7587 | 807 | py | Python | connections/singletonConnection.py | Link-Hawks/Edgar | af734f212a7ec3d476ff67e5fc253e47ca597f8b | [
"MIT"
] | null | null | null | connections/singletonConnection.py | Link-Hawks/Edgar | af734f212a7ec3d476ff67e5fc253e47ca597f8b | [
"MIT"
] | null | null | null | connections/singletonConnection.py | Link-Hawks/Edgar | af734f212a7ec3d476ff67e5fc253e47ca597f8b | [
"MIT"
] | 2 | 2019-04-01T06:37:25.000Z | 2019-05-23T01:03:29.000Z | from os import system
from pymongo import MongoClient
class SingletonConnection(object):
    """Process-wide singleton around a MongoDB client.

    The first call to :meth:`get_connection` opens one ``MongoClient`` to
    127.0.0.1:27017 and every later call reuses it.

    Fixed: removed the no-op ``senha_sudo = senha_sudo`` self-assignment in
    :meth:`_start_mongo_service`.
    """

    __cliente = None  # cached MongoClient shared by all callers

    @classmethod
    def get_connection(cls):
        """Return the shared MongoClient, creating it on first use."""
        if cls.__cliente is None:
            cls.__cliente = MongoClient('127.0.0.1', 27017)
        return cls.__cliente

    @classmethod
    def get_banco(cls, nome_banco):
        """Return the database named *nome_banco* on the shared client."""
        db = cls.get_connection()[nome_banco]
        return db

    @classmethod
    def get_collection(cls, nome_collection, nome_banco):
        """Return collection *nome_collection* from database *nome_banco*."""
        collection = cls.get_banco(nome_banco)[nome_collection]
        return collection

    @classmethod
    def _start_mongo_service(cls, senha_sudo):
        """Start the mongodb systemd service via ``sudo``.

        :param senha_sudo: sudo password piped to ``sudo -S``.
        """
        command_start_mongo_service = "systemctl start mongodb"
        # SECURITY: senha_sudo is interpolated into a shell command line, so
        # a crafted password allows shell injection, and the password is
        # visible in the process list.  Prefer subprocess.run with
        # input=senha_sudo and shell=False, or a sudoers rule for this unit.
        system(f'echo "{senha_sudo}" | sudo -S {command_start_mongo_service}')
| 26.9 | 78 | 0.684015 | from os import system
from pymongo import MongoClient
class SingletonConnection(object):
__cliente = None
@classmethod
def get_connection(cls):
if cls.__cliente is None:
cls.__cliente = MongoClient('127.0.0.1', 27017)
return cls.__cliente
@classmethod
def get_banco(cls, nome_banco):
db = cls.get_connection()[nome_banco]
return db
@classmethod
def get_collection(cls, nome_collection, nome_banco):
collection = cls.get_banco(nome_banco)[nome_collection]
return collection
@classmethod
def _start_mongo_service(cls, senha_sudo):
command_start_mongo_service = "systemctl start mongodb"
senha_sudo = senha_sudo
system(f'echo "{senha_sudo}" | sudo -S {command_start_mongo_service}')
| true | true |
f7faae9a2f8f7c1837b8bcf3604b4e880e6dbe20 | 2,018 | py | Python | resize.py | hwine/mg-resize-images-for-social | 15077d0e482717f422d96f9c5ec95aa0a975e33c | [
"MIT"
] | null | null | null | resize.py | hwine/mg-resize-images-for-social | 15077d0e482717f422d96f9c5ec95aa0a975e33c | [
"MIT"
] | 2 | 2022-03-07T19:20:40.000Z | 2022-03-07T22:30:18.000Z | resize.py | hwine/mg-resize-images-for-social | 15077d0e482717f422d96f9c5ec95aa0a975e33c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
from collections import namedtuple
from gooey import Gooey, GooeyParser
from PIL import Image
"""
Instagram can host a single image, or a grouping of images that we create a "gallery" image in the size for Instagram - 1080 x 1080 px
Facebook image size - 1200 x630 px
Facebook event - for the sale listing on the calendar - 1920 x 1005 pixels
Event Brite Image - 2160 x 1080px
Twitter image - so many sizes to choose from on this platform - I can use the 1080 x 1080 square.
"""
# One target image size per platform; `hint` is the filename suffix appended
# by generate_files() (e.g. photo-ig.jpg).
Site = namedtuple("Site", "name hint max_x max_y")

sites = [
    Site("Instagram", "ig", 1080, 1080),
    Site("Facebook", "fb", 1200, 630),  # fixed typo: was "Facebood"
    # NOTE(review): the module docstring says Facebook events use
    # 1920 x 1005 px, but this entry caps height at 630 -- confirm which
    # value is intended.
    Site("FB_event", "fbe", 1920, 630),
    Site("EventBrite", "eb", 2160, 1080),
    Site("Twitter", "tw", 1080, 1080)
]
def resize_image(image, specs):
    """Scale *image* down to fit inside ``specs.max_x`` x ``specs.max_y``.

    The aspect ratio is preserved by scaling both axes by the tighter of
    the two per-axis ratios.  Images that already fit are returned
    untouched.

    Fixed off-by-one: the original used strict ``<``, so an image exactly
    at the target size was needlessly passed through ``resize()``; the
    bound is now inclusive.

    :param image: PIL-style image exposing ``.size`` and ``.resize()``.
    :param specs: a ``Site`` (or anything with ``max_x``/``max_y``/``hint``).
    :return: ``(image_or_resized_copy, specs.hint)``.
    """
    cur_x, cur_y = image.size
    if cur_x <= specs.max_x and cur_y <= specs.max_y:
        return image, specs.hint
    # we want to keep the aspect ratio
    x_ratio = float(specs.max_x) / float(cur_x)
    y_ratio = float(specs.max_y) / float(cur_y)
    ratio = min(x_ratio, y_ratio)
    new_x = int(cur_x * ratio)
    new_y = int(cur_y * ratio)
    resized_image = image.resize((new_x, new_y))
    return resized_image, specs.hint
def generate_files(fname):
    """Write one resized copy of *fname* for every entry in ``sites``.

    Each output is saved next to the input as ``<base>-<hint>.jpg``.
    """
    base, _ = os.path.splitext(fname)
    for site_spec in sites:
        source = Image.open(fname)
        scaled, hint = resize_image(source, site_spec)
        scaled.save(f"{base}-{hint}.jpg")
@Gooey(show_restart_button=False,
       return_to_config=True,
       clear_before_run=True)
def parse_args(argv=None):
    """Collect the input filename through the Gooey GUI.

    Fixed: *argv* was accepted but ignored -- ``parse_args()`` always read
    ``sys.argv``.  It is now forwarded to the parser (``None`` keeps the
    old sys.argv behaviour).

    :param argv: optional argument list to parse instead of ``sys.argv``.
    :return: the parsed namespace (``.Filename``).
    """
    parser = GooeyParser(description="Resize images for Social Media")
    parser.add_argument('Filename', widget="FileChooser")
    args = parser.parse_args(argv)
    return args
def main(argv=None):
    """Program driver: parse CLI arguments and resize the chosen file."""
    cli_args = parse_args(argv or sys.argv[1:])
    if cli_args.Filename:
        generate_files(cli_args.Filename)


if __name__ == "__main__":
    main(sys.argv[1:])
| 30.119403 | 138 | 0.667988 |
import os
import sys
from collections import namedtuple
from gooey import Gooey, GooeyParser
from PIL import Image
Site = namedtuple("Site", "name hint max_x max_y")
sites = [
Site("Instagram", "ig", 1080, 1080),
Site("Facebood", "fb", 1200, 630),
Site("FB_event", "fbe", 1920, 630),
Site("EventBrite", "eb", 2160, 1080),
Site("Twitter", "tw", 1080, 1080)
]
def resize_image(image, specs):
cur_x, cur_y = image.size
if cur_x < specs.max_x and cur_y < specs.max_y:
return image, specs.hint
x_ratio = float(specs.max_x) / float(cur_x)
y_ratio = float(specs.max_y) / float(cur_y)
ratio = min(x_ratio, y_ratio)
new_x = int(cur_x * ratio)
new_y = int(cur_y * ratio)
resized_image = image.resize((new_x, new_y))
return resized_image, specs.hint
def generate_files(fname):
base, _ = os.path.splitext(fname)
for site in sites:
im = Image.open(fname)
new_im, hint = resize_image(im, site)
new_fname = f"{base}-{hint}.jpg"
new_im.save(new_fname)
@Gooey(show_restart_button=False,
return_to_config=True,
clear_before_run=True)
def parse_args(argv=None):
parser = GooeyParser(description="Resize images for Social Media")
parser.add_argument('Filename', widget="FileChooser")
args = parser.parse_args()
return args
def main(argv=None):
if not argv:
argv = sys.argv[1:]
args = parse_args(argv)
if args.Filename:
generate_files(args.Filename)
if __name__ == "__main__":
main(sys.argv[1:])
| true | true |
f7faafa42f04eb2bc9058050fa2eb4c9acc14640 | 1,775 | py | Python | tethysext/atcore/models/resource_workflow_steps/form_input_rws.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | 3 | 2020-11-05T23:50:47.000Z | 2021-02-26T21:43:29.000Z | tethysext/atcore/models/resource_workflow_steps/form_input_rws.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | 7 | 2020-10-29T16:53:49.000Z | 2021-05-07T19:46:47.000Z | tethysext/atcore/models/resource_workflow_steps/form_input_rws.py | Aquaveo/tethysext-atcore | 7a83ccea24fdbbe806f12154f938554dd6c8015f | [
"BSD-3-Clause"
] | null | null | null | """
********************************************************************************
* Name: form_input_rws.py
* Author: glarsen, mlebaron
* Created On: October 17, 2019
* Copyright: (c) Aquaveo 2019
********************************************************************************
"""
from tethysext.atcore.models.app_users import ResourceWorkflowStep
class FormInputRWS(ResourceWorkflowStep):
    """Workflow step that collects form input from a user.

    Options:
        form_title(str): Title shown at the top of the form. Defaults to the step name.
        status_label(str): Custom label for the status select field. Defaults to "Status".
        param_class(dict): A param class describing the form fields.
        renderer(str): Renderer option, either 'django' or 'bokeh'. Defaults to 'django'.
    """  # noqa: #501
    CONTROLLER = 'tethysext.atcore.controllers.resource_workflows.workflow_views.FormInputWV'
    TYPE = 'form_input_resource_workflow_step'

    __mapper_args__ = {
        'polymorphic_identity': TYPE
    }

    @property
    def default_options(self):
        """Extend the base step options with the form-specific defaults."""
        options = super().default_options
        options['form_title'] = None
        options['status_label'] = None
        options['param_class'] = {}
        options['renderer'] = 'django'
        return options

    def init_parameters(self, *args, **kwargs):
        """Return the initial parameter dictionary for this step."""
        form_values = {
            'help': 'Values from form',
            'value': {},
            'required': True
        }
        resource_name = {
            'help': 'The name of the resource',
            'value': '',
            'required': True
        }
        return {'form-values': form_values, 'resource_name': resource_name}
| 32.87037 | 104 | 0.540282 | from tethysext.atcore.models.app_users import ResourceWorkflowStep
class FormInputRWS(ResourceWorkflowStep):
CONTROLLER = 'tethysext.atcore.controllers.resource_workflows.workflow_views.FormInputWV'
TYPE = 'form_input_resource_workflow_step'
__mapper_args__ = {
'polymorphic_identity': TYPE
}
@property
def default_options(self):
default_options = super().default_options
default_options.update({
'form_title': None,
'status_label': None,
'param_class': {},
'renderer': 'django'
})
return default_options
def init_parameters(self, *args, **kwargs):
return {
'form-values': {
'help': 'Values from form',
'value': {},
'required': True
},
'resource_name': {
'help': 'The name of the resource',
'value': '',
'required': True
}
}
| true | true |
f7faafb2fccf9b24e7da2a4d923d2cd8ed2b0334 | 666 | py | Python | log/migrations/0003_auto_20190209_1009.py | zyayoung/lab-item-tracking | 6d0ee000114300d6693ec078f974b9a6ef4dfe40 | [
"MIT"
] | 4 | 2019-01-14T15:44:22.000Z | 2019-01-16T16:07:19.000Z | log/migrations/0003_auto_20190209_1009.py | zyayoung/lab-item-tracking | 6d0ee000114300d6693ec078f974b9a6ef4dfe40 | [
"MIT"
] | 2 | 2019-02-01T00:50:20.000Z | 2019-02-22T15:15:54.000Z | log/migrations/0003_auto_20190209_1009.py | zyayoung/lab-item-tracking | 6d0ee000114300d6693ec078f974b9a6ef4dfe40 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2019-02-09 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces the `obj` field on the `log` model with a plain integer `_id`
    # and adds an optional free-text `category`.
    # NOTE(review): swapping `obj` for a bare IntegerField drops any
    # DB-level referential integrity the old field may have had -- confirm
    # this is intentional.

    dependencies = [
        ('log', '0002_auto_20190209_0933'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='log',
            name='obj',
        ),
        migrations.AddField(
            model_name='log',
            name='_id',
            field=models.IntegerField(default=0, verbose_name='ID'),
        ),
        migrations.AddField(
            model_name='log',
            name='category',
            # verbose_name '类型' is Chinese for "type/category"; it is a
            # runtime string and is left untranslated.
            field=models.TextField(blank=True, null=True, verbose_name='类型'),
        ),
    ]
| 23.785714 | 77 | 0.551051 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('log', '0002_auto_20190209_0933'),
]
operations = [
migrations.RemoveField(
model_name='log',
name='obj',
),
migrations.AddField(
model_name='log',
name='_id',
field=models.IntegerField(default=0, verbose_name='ID'),
),
migrations.AddField(
model_name='log',
name='category',
field=models.TextField(blank=True, null=True, verbose_name='类型'),
),
]
| true | true |
f7faafe986c505534c2ecfa2be83b48562cd299a | 867 | py | Python | test/get_value.py | seriesdb/seriesdb-client-python | bdd341af6e409f65394f728de5ed6e37b9860d78 | [
"MIT"
] | null | null | null | test/get_value.py | seriesdb/seriesdb-client-python | bdd341af6e409f65394f728de5ed6e37b9860d78 | [
"MIT"
] | null | null | null | test/get_value.py | seriesdb/seriesdb-client-python | bdd341af6e409f65394f728de5ed6e37b9860d78 | [
"MIT"
] | null | null | null | import asyncio
import traceback
import struct
import pycommons.logger
from seriesdb.client import Client
logger = pycommons.logger.get_instance(__name__)
count = 0
async def get_value():
loop = asyncio.get_event_loop()
client = Client("localhost:8888", loop=loop)
global count
while True:
try:
table = "huobi.btc.usdt.1m"
key = struct.pack('>I', 1)
value = await client.get_value(table, key)
logger.info("Received value: %s", value)
count += 1
if count % 1000 == 0:
logger.info("count: %s", count)
except Exception:
logger.error("Failed to check: %s", traceback.format_exc())
await asyncio.sleep(1)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.create_task(get_value())
loop.run_forever()
| 24.083333 | 71 | 0.611303 | import asyncio
import traceback
import struct
import pycommons.logger
from seriesdb.client import Client
logger = pycommons.logger.get_instance(__name__)
count = 0
async def get_value():
loop = asyncio.get_event_loop()
client = Client("localhost:8888", loop=loop)
global count
while True:
try:
table = "huobi.btc.usdt.1m"
key = struct.pack('>I', 1)
value = await client.get_value(table, key)
logger.info("Received value: %s", value)
count += 1
if count % 1000 == 0:
logger.info("count: %s", count)
except Exception:
logger.error("Failed to check: %s", traceback.format_exc())
await asyncio.sleep(1)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.create_task(get_value())
loop.run_forever()
| true | true |
f7fab11858d4b7e1ee32d1c78df0882a18aea998 | 1,491 | py | Python | src/wavestate/model/optics/alm/substrates.py | wavestate/wavestate-model | d5e9cd3bd7352e07cc789b40a4d9452975b27237 | [
"Apache-2.0"
] | null | null | null | src/wavestate/model/optics/alm/substrates.py | wavestate/wavestate-model | d5e9cd3bd7352e07cc789b40a4d9452975b27237 | [
"Apache-2.0"
] | null | null | null | src/wavestate/model/optics/alm/substrates.py | wavestate/wavestate-model | d5e9cd3bd7352e07cc789b40a4d9452975b27237 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
from ...base import FrequencyKey
# Refractive indices by material.  Each inner dict maps a FrequencyKey
# (laser line and harmonic order) to the index of refraction n; the
# "Sellmeier" entry is a placeholder for the dispersion formula quoted in
# the comment below it.
substrates = dict(
    fused_silica={
        FrequencyKey({"Nd1064": 1}): 1.4496,
        FrequencyKey({"Nd1064": 2}): 1.4607,
        FrequencyKey({"1550": 1}): 1.440,
        FrequencyKey({"1550": 2}): 1.4538,
        # https://refractiveindex.info/?shelf=glass&book=fused_silica&page=Malitson
        # n^2-1=\frac{0.6961663λ^2}{λ^2-0.0684043^2}+\frac{0.4079426λ^2}{λ^2-0.1162414^2}+\frac{0.8974794λ^2}{λ^2-9.896161^2}
        "Sellmeier": None,
    },
    silicon={
        FrequencyKey({"1550": 1}): 3.4850,
        FrequencyKey({"1550": 2}): 3.6950,
    },
    BK7={
        FrequencyKey({"Nd1064": 1}): 1.5066,
        FrequencyKey({"Nd1064": 2}): 1.5195,
    },
    # Vacuum: n = 1 exactly at every wavelength.
    vacuum={
        FrequencyKey({"Nd1064": 1}): 1.0,
        FrequencyKey({"Nd1064": 2}): 1.0,
        FrequencyKey({"1550": 1}): 1.0,
        FrequencyKey({"1550": 2}): 1.0,
    },
    nitrogen={
        FrequencyKey({"Nd1064": 1}): 1.0002952,
        FrequencyKey({"Nd1064": 2}): 1.0002994,
    },
    # Periodically poled KTP (nonlinear crystal).
    PPKTP={
        FrequencyKey({"Nd1064": 1}): 1.8302,
        FrequencyKey({"Nd1064": 2}): 1.7779,
    },
)
| 31.723404 | 125 | 0.587525 |
from ...base import FrequencyKey
substrates = dict(
fused_silica={
FrequencyKey({"Nd1064": 1}): 1.4496,
FrequencyKey({"Nd1064": 2}): 1.4607,
FrequencyKey({"1550": 1}): 1.440,
FrequencyKey({"1550": 2}): 1.4538,
"Sellmeier": None,
},
silicon={
FrequencyKey({"1550": 1}): 3.4850,
FrequencyKey({"1550": 2}): 3.6950,
},
BK7={
FrequencyKey({"Nd1064": 1}): 1.5066,
FrequencyKey({"Nd1064": 2}): 1.5195,
},
vacuum={
FrequencyKey({"Nd1064": 1}): 1.0,
FrequencyKey({"Nd1064": 2}): 1.0,
FrequencyKey({"1550": 1}): 1.0,
FrequencyKey({"1550": 2}): 1.0,
},
nitrogen={
FrequencyKey({"Nd1064": 1}): 1.0002952,
FrequencyKey({"Nd1064": 2}): 1.0002994,
},
PPKTP={
FrequencyKey({"Nd1064": 1}): 1.8302,
FrequencyKey({"Nd1064": 2}): 1.7779,
},
)
| true | true |
f7fab265f4bdb814cf259fcfe5376acd8f3f8679 | 8,812 | py | Python | Code/hashtable.py | omarsagoo/CS-1.3-Core-Data-Structures | fd48ca53910bcebb3d2d4d48c56ca5e176fb0246 | [
"MIT"
] | null | null | null | Code/hashtable.py | omarsagoo/CS-1.3-Core-Data-Structures | fd48ca53910bcebb3d2d4d48c56ca5e176fb0246 | [
"MIT"
] | 6 | 2020-02-14T18:35:53.000Z | 2020-03-09T20:14:06.000Z | Code/hashtable.py | omarsagoo/CS-1.3-Core-Data-Structures | fd48ca53910bcebb3d2d4d48c56ca5e176fb0246 | [
"MIT"
] | 1 | 2020-05-15T22:09:15.000Z | 2020-05-15T22:09:15.000Z | #!python
from linkedlist import LinkedList
class HashTable(object):
    """Hash table mapping keys to values using separate chaining.

    Collisions are resolved by storing entries in per-bucket linked lists.
    The table automatically doubles its bucket count when the load factor
    exceeds 0.75, keeping average bucket chains short.

    Fixed: ``_resize``'s shrink branch compared with ``is 0`` (identity,
    not equality -- implementation-defined for ints) and computed
    ``len(self.buckets) / 2``, a float that crashes ``range()``; it now
    uses ``== 0`` and integer division.
    """

    def __init__(self, init_size=8):
        """Initialize this hash table with the given initial size."""
        self.buckets = [LinkedList() for _ in range(init_size)]
        self.size = 0  # Number of key-value entries

    def __str__(self):
        """Return a formatted string representation of this hash table."""
        items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]
        return '{' + ', '.join(items) + '}'

    def __repr__(self):
        """Return a string representation of this hash table."""
        return 'HashTable({!r})'.format(self.items())

    def _bucket_index(self, key):
        """Return the bucket index where the given key would be stored."""
        return hash(key) % len(self.buckets)

    def load_factor(self):
        """Return the load factor, the ratio of number of entries to buckets.

        Best and worst case running time: O(1) -- both quantities are
        already stored."""
        return self.size / len(self.buckets)

    def keys(self):
        """Return a list of all keys in this hash table.

        Best and worst case running time: O(n + b) for n entries across
        b buckets -- every entry is visited exactly once."""
        all_keys = []
        for bucket in self.buckets:
            for key, value in bucket.items():
                all_keys.append(key)
        return all_keys

    def values(self):
        """Return a list of all values in this hash table.

        Best and worst case running time: O(n + b) for n entries across
        b buckets -- every entry is visited exactly once."""
        all_values = []
        for bucket in self.buckets:
            for key, value in bucket.items():
                all_values.append(value)
        return all_values

    def items(self):
        """Return a list of all entries (key-value pairs) in this hash table.

        Best and worst case running time: O(n + b) for n entries across
        b buckets."""
        all_items = []
        for bucket in self.buckets:
            all_items.extend(bucket.items())
        return all_items

    def length(self):
        """Return the number of key-value entries by traversing its buckets.

        Best and worst case running time: O(n + b) -- each bucket's length
        is counted by walking its chain."""
        item_count = 0
        for bucket in self.buckets:
            item_count += bucket.length()
        return item_count

    def contains(self, key):
        """Return True if this hash table contains the given key, or False.

        Best case running time: O(1) when the key's bucket chain is short.
        Worst case running time: O(n) when all keys collide in one bucket."""
        index = self._bucket_index(key)
        bucket = self.buckets[index]
        entry = bucket.find(lambda key_value: key_value[0] == key)
        return entry is not None  # True or False

    def get(self, key):
        """Return the value associated with the given key, or raise KeyError.

        Best case running time: O(1) when the key's bucket chain is short.
        Worst case running time: O(n) when all keys collide in one bucket."""
        index = self._bucket_index(key)
        bucket = self.buckets[index]
        entry = bucket.find(lambda key_value: key_value[0] == key)
        if entry is not None:  # Found: entry is a (key, value) tuple
            return entry[1]
        raise KeyError('Key not found: {}'.format(key))

    def set(self, key, value):
        """Insert or update the given key with its associated value.

        Best case running time: O(1) when the key's bucket chain is short.
        Worst case running time: O(n) for a full-collision bucket, or when
        an insertion triggers a resize (amortized O(1))."""
        index = self._bucket_index(key)
        bucket = self.buckets[index]
        entry = bucket.find(lambda key_value: key_value[0] == key)
        if entry is not None:
            # Updating an existing key: drop the old entry first.
            bucket.delete(entry)
            self.size -= 1
        bucket.append((key, value))
        self.size += 1
        # Grow when the load factor exceeds the 0.75 threshold.
        if self.load_factor() > 0.75:
            self._resize()

    def delete(self, key):
        """Delete the given key and its associated value, or raise KeyError.

        Best case running time: O(1) when the key's bucket chain is short.
        Worst case running time: O(n) when all keys collide in one bucket."""
        index = self._bucket_index(key)
        bucket = self.buckets[index]
        entry = bucket.find(lambda key_value: key_value[0] == key)
        if entry is not None:
            bucket.delete(entry)
            self.size -= 1
        else:
            raise KeyError('Key not found: {}'.format(key))

    def _resize(self, new_size=None):
        """Resize this hash table's buckets and rehash all key-value entries.

        Called automatically when the load factor exceeds 0.75 after an
        insertion.  Pass ``new_size=0`` to halve the bucket count instead.

        Best and worst case running time: O(n + b) to collect and rehash
        every entry.  Space usage: O(n) for the temporary entry list."""
        if new_size is None:
            new_size = len(self.buckets) * 2  # Grow: double the bucket count
        elif new_size == 0:  # was `is 0`
            new_size = len(self.buckets) // 2  # was `/ 2`: float broke range()
        old_entries = self.items()
        self.size = 0
        self.buckets = [LinkedList() for _ in range(new_size)]
        # Re-inserting rehashes each entry into its new bucket index.
        for key, value in old_entries:
            self.set(key, value)
def test_hash_table():
    """Smoke-test HashTable by printing the result of each operation.

    Starts at 4 buckets so the fourth insertion pushes the load factor
    past 0.75 and exercises the automatic resize.
    """
    ht = HashTable(4)
    print('HashTable: ' + str(ht))

    print('Setting entries:')
    ht.set('I', 1)
    print('set(I, 1): ' + str(ht))
    ht.set('V', 5)
    print('set(V, 5): ' + str(ht))
    print('size: ' + str(ht.size))
    print('length: ' + str(ht.length()))
    print('buckets: ' + str(len(ht.buckets)))
    print('load_factor: ' + str(ht.load_factor()))
    ht.set('X', 10)
    print('set(X, 10): ' + str(ht))
    ht.set('L', 50)  # Should trigger resize
    print('set(L, 50): ' + str(ht))
    print('size: ' + str(ht.size))
    print('length: ' + str(ht.length()))
    print('buckets: ' + str(len(ht.buckets)))
    print('load_factor: ' + str(ht.load_factor()))

    print('Getting entries:')
    print('get(I): ' + str(ht.get('I')))
    print('get(V): ' + str(ht.get('V')))
    print('get(X): ' + str(ht.get('X')))
    print('get(L): ' + str(ht.get('L')))
    print('contains(X): ' + str(ht.contains('X')))
    print('contains(Z): ' + str(ht.contains('Z')))

    print('Deleting entries:')
    ht.delete('I')
    print('delete(I): ' + str(ht))
    ht.delete('V')
    print('delete(V): ' + str(ht))
    ht.delete('X')
    print('delete(X): ' + str(ht))
    ht.delete('L')
    print('delete(L): ' + str(ht))
    print('contains(X): ' + str(ht.contains('X')))
    print('size: ' + str(ht.size))
    print('length: ' + str(ht.length()))
    print('buckets: ' + str(len(ht.buckets)))
    print('load_factor: ' + str(ht.load_factor()))


if __name__ == '__main__':
    test_hash_table()
| 41.17757 | 80 | 0.601339 |
from linkedlist import LinkedList
class HashTable(object):
def __init__(self, init_size=8):
self.buckets = [LinkedList() for i in range(init_size)]
self.size = 0
def __str__(self):
items = ['{!r}: {!r}'.format(key, val) for key, val in self.items()]
return '{' + ', '.join(items) + '}'
def __repr__(self):
return 'HashTable({!r})'.format(self.items())
def _bucket_index(self, key):
return hash(key) % len(self.buckets)
def load_factor(self):
return self.size / len(self.buckets)
def keys(self):
all_keys = []
for bucket in self.buckets:
for key, value in bucket.items():
all_keys.append(key)
return all_keys
def values(self):
all_values = []
for bucket in self.buckets:
for key, value in bucket.items():
all_values.append(value)
return all_values
def items(self):
all_items = []
for bucket in self.buckets:
all_items.extend(bucket.items())
return all_items
def length(self):
item_count = 0
for bucket in self.buckets:
item_count += bucket.length()
return item_count
def contains(self, key):
index = self._bucket_index(key)
bucket = self.buckets[index]
entry = bucket.find(lambda key_value: key_value[0] == key)
return entry is not None
def get(self, key):
index = self._bucket_index(key)
bucket = self.buckets[index]
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None:
assert isinstance(entry, tuple)
assert len(entry) == 2
return entry[1]
else: # Not found
raise KeyError('Key not found: {}'.format(key))
def set(self, key, value):
# Find the bucket the given key belongs in
index = self._bucket_index(key)
bucket = self.buckets[index]
# Find the entry with the given key in that bucket, if one exists
# Check if an entry with the given key exists in that bucket
# print(index, bucket, (key, value))
# print((key, value), self.load_factor())
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None: # Found
# In this case, the given key's value is being updated
bucket.delete(entry)
self.size -= 1
bucket.append((key, value))
self.size += 1
if self.load_factor() > 0.75:
self._resize()
def delete(self, key):
index = self._bucket_index(key)
bucket = self.buckets[index]
entry = bucket.find(lambda key_value: key_value[0] == key)
if entry is not None:
bucket.delete(entry)
self.size -= 1
else:
raise KeyError('Key not found: {}'.format(key))
def _resize(self, new_size=None):
if new_size is None:
new_size = len(self.buckets) * 2
elif new_size is 0:
new_size = len(self.buckets) / 2
temp_list = self.items()
self.size = 0
self.buckets = [LinkedList() for i in range(new_size)]
for key, value in temp_list:
self.set(key, value)
def test_hash_table():
ht = HashTable(4)
print('HashTable: ' + str(ht))
print('Setting entries:')
ht.set('I', 1)
print('set(I, 1): ' + str(ht))
ht.set('V', 5)
print('set(V, 5): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
ht.set('X', 10)
print('set(X, 10): ' + str(ht))
ht.set('L', 50)
print('set(L, 50): ' + str(ht))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
print('Getting entries:')
print('get(I): ' + str(ht.get('I')))
print('get(V): ' + str(ht.get('V')))
print('get(X): ' + str(ht.get('X')))
print('get(L): ' + str(ht.get('L')))
print('contains(X): ' + str(ht.contains('X')))
print('contains(Z): ' + str(ht.contains('Z')))
print('Deleting entries:')
ht.delete('I')
print('delete(I): ' + str(ht))
ht.delete('V')
print('delete(V): ' + str(ht))
ht.delete('X')
print('delete(X): ' + str(ht))
ht.delete('L')
print('delete(L): ' + str(ht))
print('contains(X): ' + str(ht.contains('X')))
print('size: ' + str(ht.size))
print('length: ' + str(ht.length()))
print('buckets: ' + str(len(ht.buckets)))
print('load_factor: ' + str(ht.load_factor()))
if __name__ == '__main__':
test_hash_table()
| true | true |
f7fab2882ba44013b1ca7273273e6b041c1e46c3 | 1,301 | py | Python | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | 2 | 2019-12-31T16:49:36.000Z | 2021-02-17T09:47:41.000Z | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | null | null | null | costor_server/storage/api/views/authcheck.py | rphi/costor | 081de65778d404cf7a22c5524bf89a146fa8326b | [
"CNRI-Python"
] | null | null | null | from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.exceptions import APIException
from rest_framework.decorators import parser_classes
from django.shortcuts import get_object_or_404
from manager.models import Agent
@api_view(['GET'])
@permission_classes([permissions.AllowAny])
def auth_check(request):
if not request.user.is_authenticated:
raise APIException(
detail="You aren't authenticated.",
code=403
)
#print(request.GET)
if 'agent' not in request.GET:
return Response(f'Authenticated as {request.user.username} with no agent')
agent = Agent.objects.filter(name=request.GET['agent'])
if not agent.exists():
raise APIException(
detail="Can't find that agent",
code=404
)
agent = agent.first()
if request.user not in agent.users.all():
raise APIException(
detail=f'Authenticated as {request.user.username} but you don\'t have permission for agent {agent.name}',
code=403
)
return Response(f'Authenticated as {request.user.username} for agent {agent.name}')
| 30.97619 | 117 | 0.704074 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import MultiPartParser
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.exceptions import APIException
from rest_framework.decorators import parser_classes
from django.shortcuts import get_object_or_404
from manager.models import Agent
@api_view(['GET'])
@permission_classes([permissions.AllowAny])
def auth_check(request):
if not request.user.is_authenticated:
raise APIException(
detail="You aren't authenticated.",
code=403
)
#print(request.GET)
if 'agent' not in request.GET:
return Response(f'Authenticated as {request.user.username} with no agent')
agent = Agent.objects.filter(name=request.GET['agent'])
if not agent.exists():
raise APIException(
detail="Can't find that agent",
code=404
)
agent = agent.first()
if request.user not in agent.users.all():
raise APIException(
detail=f'Authenticated as {request.user.username} but you don\'t have permission for agent {agent.name}',
code=403
)
return Response(f'Authenticated as {request.user.username} for agent {agent.name}')
| true | true |
f7fab2df7376188532f564c19c48e06c3e9af63f | 3,804 | py | Python | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_free_resource_usages_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_free_resource_usages_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_free_resource_usages_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListFreeResourceUsagesRequest:
    """Request model for the ListFreeResourceUsages API (huaweicloud SDK).

    ``openapi_types`` maps attribute names to their declared types and
    ``attribute_map`` maps attribute names to the JSON keys used on the wire.
    """

    sensitive_list = []

    openapi_types = {
        'x_language': 'str',
        'body': 'ListFreeResourceUsagesReq'
    }

    attribute_map = {
        'x_language': 'X-Language',
        'body': 'body'
    }

    def __init__(self, x_language=None, body=None):
        """Build the request; both attributes are optional."""
        self._x_language = None
        self._body = None
        self.discriminator = None
        if x_language is not None:
            self.x_language = x_language
        if body is not None:
            self.body = body

    @property
    def x_language(self):
        """Response language: zh_CN (default) or en_US."""
        return self._x_language

    @x_language.setter
    def x_language(self, x_language):
        """Set the response language (zh_CN or en_US)."""
        self._x_language = x_language

    @property
    def body(self):
        """Request body (a ListFreeResourceUsagesReq instance)."""
        return self._body

    @body.setter
    def body(self, body):
        """Set the request body (a ListFreeResourceUsagesReq instance)."""
        self._body = body

    def to_dict(self):
        """Recursively convert the model to a plain dict, masking sensitive fields."""
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                # only scalar leaves are masked, mirroring the generated template
                result[name] = "****" if name in self.sensitive_list else value
        return result

    def to_str(self):
        """Serialize the model to a JSON string."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """Debug representation — same as to_str()."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal attributes."""
        return isinstance(other, ListFreeResourceUsagesRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 27.171429 | 80 | 0.571767 |
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListFreeResourceUsagesRequest:
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'ListFreeResourceUsagesReq'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
return self._x_language
@x_language.setter
def x_language(self, x_language):
self._x_language = x_language
@property
def body(self):
return self._body
@body.setter
def body(self, body):
self._body = body
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ListFreeResourceUsagesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7fab2e625c0265f8763f47d55ab85af7c523b95 | 898 | py | Python | tests/metrics/test_silhouette_metrics.py | gokceneraslan/scib | 91cfe2e4872230d8806c8f9ad5a0c251f268fdc4 | [
"MIT"
] | 1 | 2021-04-06T09:28:09.000Z | 2021-04-06T09:28:09.000Z | tests/metrics/test_silhouette_metrics.py | qqdb/scib | 7d11d7959baaebc3ad588356407a78ac2c3271f4 | [
"MIT"
] | null | null | null | tests/metrics/test_silhouette_metrics.py | qqdb/scib | 7d11d7959baaebc3ad588356407a78ac2c3271f4 | [
"MIT"
] | null | null | null | from tests.common import *
def test_silhouette(adata_pca):
    """The scaled silhouette of celltype labels on the PCA embedding lies in [0, 1]."""
    score = scIB.me.silhouette(adata_pca, group_key='celltype', embed='X_pca', scale=True)
    LOGGER.info(f"score: {score}")
    assert 0 <= score <= 1
def test_silhouette_batch(adata_pca):
    """The mean per-group batch silhouette on the PCA embedding lies in [0, 1]."""
    _, sil_df = scIB.me.silhouette_batch(adata_pca,
                                         batch_key='batch',
                                         group_key='celltype',
                                         embed='X_pca',
                                         scale=True,
                                         verbose=False)
    score = sil_df['silhouette_score'].mean()
    LOGGER.info(f"score: {score}")
    assert 0 <= score <= 1
def test_isolated_labels_silhouette(adata_pca):
    """The silhouette-based isolated-label score on the PCA embedding lies in [0, 1]."""
    score = scIB.me.isolated_labels(adata_pca,
                                    label_key='celltype',
                                    batch_key='batch',
                                    embed='X_pca',
                                    cluster=False,
                                    verbose=True)
    LOGGER.info(f"score: {score}")
    assert 0 <= score <= 1
| 21.902439 | 47 | 0.58686 | from tests.common import *
def test_silhouette(adata_pca):
score = scIB.me.silhouette(
adata_pca,
group_key='celltype',
embed='X_pca',
scale=True
)
LOGGER.info(f"score: {score}")
assert 0 <= score <= 1
def test_silhouette_batch(adata_pca):
_, sil = scIB.me.silhouette_batch(
adata_pca,
batch_key='batch',
group_key='celltype',
embed='X_pca',
scale=True,
verbose=False
)
score = sil['silhouette_score'].mean()
LOGGER.info(f"score: {score}")
assert 0 <= score <= 1
def test_isolated_labels_silhouette(adata_pca):
score = scIB.me.isolated_labels(
adata_pca,
label_key='celltype',
batch_key='batch',
embed='X_pca',
cluster=False,
verbose=True
)
LOGGER.info(f"score: {score}")
assert score <= 1
assert score >= 0
| true | true |
f7fab3a6960181d694c6e03a208191c3808a326f | 49,550 | py | Python | BinanceWatch/storage/BinanceDataBase.py | jontstaz/BinanceWatch | 824daf452164fa4970bffe6e7639fe2bd594f857 | [
"MIT"
] | 2 | 2021-05-02T11:07:44.000Z | 2021-05-06T13:00:36.000Z | BinanceWatch/storage/BinanceDataBase.py | jontstaz/BinanceWatch | 824daf452164fa4970bffe6e7639fe2bd594f857 | [
"MIT"
] | null | null | null | BinanceWatch/storage/BinanceDataBase.py | jontstaz/BinanceWatch | 824daf452164fa4970bffe6e7639fe2bd594f857 | [
"MIT"
] | null | null | null | import datetime
from typing import Optional
from BinanceWatch.storage.DataBase import DataBase, SQLConditionEnum
from BinanceWatch.storage import tables
from BinanceWatch.utils.time_utils import datetime_to_millistamp
class BinanceDataBase(DataBase):
"""
Handles the recording of the binance account in a local database
"""
    def __init__(self, name: str = 'binance_db'):
        """Open (or create) the local database that caches the Binance account history.

        :param name: name of the database, forwarded to the DataBase base class
        """
        super().__init__(name)
def add_universal_transfer(self, transfer_id: int, transfer_type: str, transfer_time: int, asset: str,
amount: float, auto_commit: bool = True):
"""
add a universal transfer to the database
:param transfer_id: id of the transfer
:type transfer_id: int
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:param transfer_time: millistamp of the operation
:type transfer_time: int
:param asset: asset that got transferred
:type asset: str
:param amount: amount transferred
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
row = (transfer_id, transfer_type, transfer_time, asset, amount)
self.add_row(table, row, auto_commit=auto_commit)
def get_universal_transfers(self, transfer_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return universal transfers stored in the database. Transfer type, Asset type and time filters can be used
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: Optional[str]
:param asset: fetch only interests in this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1206491332, # transfer id
'MAIN_MARGIN', # transfer type
1589121841000, # time
'BNB', # asset
10.594112), # amount
]
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = []
if transfer_type is not None:
conditions_list.append((table.trfType,
SQLConditionEnum.equal,
transfer_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_universal_transfer_time(self, transfer_type: str) -> int:
"""
return the latest time when a universal transfer was made
If None, return the millistamp corresponding to 2017/01/01
:param transfer_type: enum of the transfer type (ex: 'MAIN_MARGIN')
:type transfer_type: str
:return: millistamp
:rtype: int
"""
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = [(table.trfType,
SQLConditionEnum.equal,
transfer_type)]
selection = f"MAX({table.trfTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_margin_interest(self, margin_type: str, interest_time: int, asset: str, interest: float,
interest_type: str, auto_commit: bool = True):
"""
add a repay to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type: str
:param interest_time: millistamp of the operation
:type interest_time: int
:param asset: asset that got repaid
:type asset: str
:param interest: amount of interest accrued
:type interest: float
:param interest_type: one of (PERIODIC, ON_BORROW, PERIODIC_CONVERTED, ON_BORROW_CONVERTED)
:type interest_type: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (interest_time, asset, interest, interest_type)
self.add_row(table, row, auto_commit=auto_commit)
def get_margin_interests(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return margin interests stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param asset: fetch only interests in this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
1559415215400, # time
'BNB', # asset
0.51561, # interest
'PERIODIC_CONVERTED'), # interest type
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_margin_interest_time(self, margin_type: str, asset: Optional[str] = None):
"""
return the latest time when a margin interest was accured on a defined asset or on all assets
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset charged as interest
:type asset: Optional[str]
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_repay(self, margin_type: str, tx_id: int, repay_time: int, asset: str, principal: float,
interest: float, auto_commit: bool = True):
"""
add a repay to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
:param repay_time: millitstamp of the operation
:type repay_time: int
:param asset: asset that got repaid
:type asset: str
:param principal: principal amount repaid for the loan
:type principal: float
:param interest: amount of interest repaid for the loan
:type interest:
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, repay_time, asset, principal, interest)
self.add_row(table, row, auto_commit=auto_commit)
def get_repays(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return repays stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param asset: fetch only repays of this asset
:type asset: Optional[str]
:param start_time: fetch only repays after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only repays before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462, # principal
0.51561), # interest
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_repay_time(self, asset: str, margin_type: str) -> int:
"""
return the latest time when a repay was made on a defined asset
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset repaid
:type asset: str
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.repayTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_loan(self, margin_type: str, tx_id: int, loan_time: int, asset: str, principal: float,
auto_commit: bool = True):
"""
add a loan to the database
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param tx_id: binance id for the transaction (uniqueness?)
:type tx_id: int
:param loan_time: millitstamp of the operation
:type loan_time: int
:param asset: asset that got loaned
:type asset: str
:param principal: amount of loaned asset
:type principal: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, loan_time, asset, principal)
self.add_row(table, row, auto_commit=auto_commit)
def get_loans(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return loans stored in the database. Asset type and time filters can be used
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:param asset: fetch only loans of this asset
:type asset: Optional[str]
:param start_time: fetch only loans after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only loans before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8289451654, # transaction id
1559415215400, # time
'USDT', # asset
145.5491462), # amount
]
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_loan_time(self, asset: str, margin_type: str) -> int:
"""
return the latest time when an loan was made on a defined asset
If None, return the millistamp corresponding to 2017/01/01
:param asset: name of the asset loaned
:type asset: str
:param margin_type: either 'cross' or 'isolated'
:type margin_type:
:return: millistamp
:rtype: int
"""
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.loanTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_redemption(self, redemption_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending redemption to the database
:param redemption_time: millitstamp of the operation
:type redemption_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset redeemed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (redemption_time, lending_type, asset, amount)
self.add_row(tables.LENDING_REDEMPTION_TABLE, row, auto_commit=auto_commit)
def get_lending_redemptions(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending redemptions stored in the database. Asset type and time filters can be used
:param lending_type: fetch only redemptions from this lending type
:type lending_type: Optional[str]
:param asset: fetch only redemptions from this asset
:type asset: Optional[str]
:param start_time: fetch only redemptions after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only redemptions before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_redemption_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when an lending redemption was made.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.redemptionTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_purchase(self, purchase_id: int, purchase_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add a lending purchase to the database
:param purchase_id: id of the purchase
:type purchase_id: int
:param purchase_time: millitstamp of the operation
:type purchase_time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset lent
:type asset: str
:param amount: amount of asset lent
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (purchase_id, purchase_time, lending_type, asset, amount)
self.add_row(tables.LENDING_PURCHASE_TABLE, row, auto_commit=auto_commit)
def get_lending_purchases(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending purchases stored in the database. Asset type and time filters can be used
:param lending_type: fetch only purchases from this lending type
:type lending_type: Optional[str]
:param asset: fetch only purchases from this asset
:type asset: Optional[str]
:param start_time: fetch only purchases after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only purchases before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(58516828, # purchase id
1612841562000, # time
'DAILY', # lending type
'LTC', # asset
1.89151684), # amount
]
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_purchase_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when an lending purchase was made.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.purchaseTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_interest(self, time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
"""
add an lending interest to the database
:param time: millitstamp of the operation
:type time: int
:param lending_type: either 'DAILY', 'ACTIVITY' or 'CUSTOMIZED_FIXED'
:type lending_type: str
:param asset: asset that was received
:type asset: str
:param amount: amount of asset received
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (time, lending_type, asset, amount)
self.add_row(tables.LENDING_INTEREST_TABLE, row, auto_commit=auto_commit)
def get_lending_interests(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
"""
return lending interests stored in the database. Asset type and time filters can be used
:param lending_type: fetch only interests from this lending type
:type lending_type: Optional[str]
:param asset: fetch only interests from this asset
:type asset: Optional[str]
:param start_time: fetch only interests after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only interests before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(1619846515000, # time
'DAILY', # lending type
'DOT', # asset
0.00490156) # amount
]
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_interest_time(self, lending_type: Optional[str] = None) -> int:
"""
return the latest time when an interest was received.
If None, return the millistamp corresponding to 2017/01/01
:param lending_type: type of lending
:type lending_type: str
:return: millistamp
:rtype: int
"""
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,
auto_commit: bool = True):
"""
add dust operation to the database
:param tran_id: id of the transaction (non unique)
:type tran_id: str
:param time: millitstamp of the operation
:type time: int
:param asset: asset that got converted to BNB
:type asset: str
:param asset_amount: amount of asset that got converted
:type asset_amount: float
:param bnb_amount: amount received from the conversion
:type bnb_amount: float
:param bnb_fee: fee amount in BNB
:type bnb_fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)
self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)
def get_spot_dusts(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dusts stored in the database. Asset type and time filters can be used
:param asset: fetch only dusts from this asset
:type asset: Optional[str]
:param start_time: fetch only dusts after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dusts before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(82156485284, # transaction id
1605489113400, # time
'TRX', # asset
102.78415879, # asset amount
0.09084498, # bnb amount
0.00171514), # bnb fee
]
"""
conditions_list = []
table = tables.SPOT_DUST_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def add_dividend(self, div_id: int, div_time: int, asset: str, amount: float, auto_commit: bool = True):
"""
add a dividend to the database
:param div_id: dividend id
:type div_id: int
:param div_time: millistamp of dividend reception
:type div_time: int
:param asset: name of the dividend unit
:type asset: str
:param amount: amount of asset distributed
:type amount: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (div_id, div_time, asset, amount)
self.add_row(tables.SPOT_DIVIDEND_TABLE, row, auto_commit=auto_commit)
def get_spot_dividends(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return dividends stored in the database. Asset type and time filters can be used
:param asset: fetch only dividends of this asset
:type asset: Optional[str]
:param start_time: fetch only dividends after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only dividends before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(8945138941, # dividend id
1594513589000, # time
'TRX', # asset
0.18745654), # amount
]
"""
conditions_list = []
table = tables.SPOT_DIVIDEND_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_dividend_time(self) -> int:
"""
fetch the latest time a dividend has been distributed on the spot account. If None is found,
return the millistamp corresponding to 2017/1/1
:return:
"""
table = tables.SPOT_DIVIDEND_TABLE
selection = f"MAX({table.divTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,
auto_commit: bool = True):
"""
add a withdraw to the database
:param withdraw_id: binance if of the withdraw
:type withdraw_id: str
:param tx_id: transaction id
:type tx_id: str
:param apply_time: millistamp when the withdraw was requested
:type apply_time: int
:param asset: name of the token
:type asset: str
:param amount: amount of token withdrawn
:type amount: float
:param fee: amount of the asset paid for the withdraw
:type fee: float
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (withdraw_id, tx_id, apply_time, asset, amount, fee)
self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)
def get_spot_withdraws(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return withdraws stored in the database. Asset type and time filters can be used
:param asset: fetch only withdraws of this asset
:type asset: Optional[str]
:param start_time: fetch only withdraws after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only withdraws before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('84984dcqq5z11gyjfa', # withdraw id
'aazd8949vredqs56dz', # transaction id
1599138389000, # withdraw time
'XTZ', # asset
57.0194, # amount
0.5), # fee
]
"""
conditions_list = []
table = tables.SPOT_WITHDRAW_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_withdraw_time(self) -> int:
"""
fetch the latest time a withdraw has been made on the spot account. If None is found, return the millistamp
corresponding to 2017/1/1
:return:
"""
table = tables.SPOT_WITHDRAW_TABLE
selection = f"MAX({table.applyTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):
"""
add a deposit to the database
:param tx_id: transaction id
:type tx_id: str
:param insert_time: millistamp when the deposit arrived on binance
:type insert_time: int
:param amount: amount of token deposited
:type amount: float
:param asset: name of the token
:type asset: str
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (tx_id, insert_time, asset, amount)
self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)
def get_spot_deposits(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
"""
return deposits stored in the database. Asset type and time filters can be used
:param asset: fetch only deposits of this asset
:type asset: Optional[str]
:param start_time: fetch only deposits after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only deposits before this millistamp
:type end_time: Optional[int]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
('azdf5e6a1d5z', # transaction id
1589479004000, # deposit time
'LTC', # asset
14.25), # amount
]
"""
conditions_list = []
table = tables.SPOT_DEPOSIT_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_deposit_time(self) -> int:
"""
fetch the latest time a deposit has been made on the spot account. If None is found, return the millistamp
corresponding to 2017/1/1
:return: last deposit millistamp
:rtype: int
"""
table = tables.SPOT_DEPOSIT_TABLE
selection = f"MAX({table.insertTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_trade(self, trade_type: str, trade_id: int, trade_time: int, asset: str, ref_asset: str, qty: float,
price: float, fee: float, fee_asset: str, is_buyer: bool, auto_commit=True):
"""
add a trade to the database
:param trade_type: type trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:param trade_id: id of the trade (binance id, unique per trading pair)
:type trade_id: int
:param trade_time: millistamp of the trade
:type trade_time: int
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param qty: quantity of asset exchanged
:type qty: float
:param price: price of the asset regarding the ref_asset
:type price: float
:param fee: amount kept by the exchange
:type fee: float
:param fee_asset: token unit for the fee
:type fee_asset: str
:param is_buyer: if the trade is a buy or a sell
:type is_buyer: bool
:param auto_commit: if the database should commit the change made, default True
:type auto_commit: bool
:return: None
:rtype: None
"""
row = (trade_id, trade_time, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
self.add_row(table, row, auto_commit)
def get_trades(self, trade_type: str, start_time: Optional[int] = None, end_time: Optional[int] = None,
asset: Optional[str] = None, ref_asset: Optional[str] = None):
"""
return trades stored in the database. asset type, ref_asset type and time filters can be used
:param trade_type: type trade executed
:type trade_type: string, must be one of ('spot', 'cross_margin')
:param start_time: fetch only trades after this millistamp
:type start_time: Optional[int]
:param end_time: fetch only trades before this millistamp
:type end_time: Optional[int]
:param asset: fetch only trades with this asset
:type asset: Optional[str]
:param ref_asset: fetch only trades with this ref_asset
:type ref_asset: Optional[str]
:return: The raw rows selected as saved in the database
:rtype: List[Tuple]
.. code-block:: python
[
(384518832, # trade_id
1582892988052, # trade time
'BTC', # asset
'USDT', # ref asset
0.0015, # asset quantity
9011.2, # asset price to ref asset
0.01425, # fee
'USDT', # fee asset
0), # is_buyer
]
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
conditions_list = []
if start_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.lower,
end_time))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if ref_asset is not None:
conditions_list.append((table.refAsset,
SQLConditionEnum.equal,
ref_asset))
return self.get_conditions_rows(table, conditions_list=conditions_list, order_list=[table.tdTime])
def get_max_trade_id(self, asset: str, ref_asset: str, trade_type: str) -> int:
"""
return the latest trade id for a trading pair. If none is found, return -1
:param asset: name of the asset in the trading pair (ex 'BTC' for 'BTCUSDT')
:type asset: string
:param ref_asset: name of the reference asset in the trading pair (ex 'USDT' for 'BTCUSDT')
:type ref_asset: string
:param trade_type: type trade executed
:type trade_type: string, must be one of {'spot', 'cross_margin'}
:return: latest trade id
:rtype: int
"""
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of {'spot', 'cross_margin'} but {trade_type} was received")
selection = f"MAX({table.tradeId})"
conditions_list = [
(table.asset,
SQLConditionEnum.equal,
asset),
(table.refAsset,
SQLConditionEnum.equal,
ref_asset)
]
result = self.get_conditions_rows(table, selection=selection, conditions_list=conditions_list)
try:
result = result[0][0]
except IndexError:
return -1
if result is None:
return -1
return result
| 41.395155 | 118 | 0.565308 | import datetime
from typing import Optional
from BinanceWatch.storage.DataBase import DataBase, SQLConditionEnum
from BinanceWatch.storage import tables
from BinanceWatch.utils.time_utils import datetime_to_millistamp
class BinanceDataBase(DataBase):
def __init__(self, name: str = 'binance_db'):
super().__init__(name)
def add_universal_transfer(self, transfer_id: int, transfer_type: str, transfer_time: int, asset: str,
amount: float, auto_commit: bool = True):
table = tables.UNIVERSAL_TRANSFER_TABLE
row = (transfer_id, transfer_type, transfer_time, asset, amount)
self.add_row(table, row, auto_commit=auto_commit)
def get_universal_transfers(self, transfer_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = []
if transfer_type is not None:
conditions_list.append((table.trfType,
SQLConditionEnum.equal,
transfer_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.trfTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_universal_transfer_time(self, transfer_type: str) -> int:
table = tables.UNIVERSAL_TRANSFER_TABLE
conditions_list = [(table.trfType,
SQLConditionEnum.equal,
transfer_type)]
selection = f"MAX({table.trfTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_margin_interest(self, margin_type: str, interest_time: int, asset: str, interest: float,
interest_type: str, auto_commit: bool = True):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (interest_time, asset, interest, interest_type)
self.add_row(table, row, auto_commit=auto_commit)
def get_margin_interests(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_margin_interest_time(self, margin_type: str, asset: Optional[str] = None):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_INTEREST_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_repay(self, margin_type: str, tx_id: int, repay_time: int, asset: str, principal: float,
interest: float, auto_commit: bool = True):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, repay_time, asset, principal, interest)
self.add_row(table, row, auto_commit=auto_commit)
def get_repays(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.repayTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_repay_time(self, asset: str, margin_type: str) -> int:
if margin_type == 'cross':
table = tables.CROSS_MARGIN_REPAY_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.repayTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_loan(self, margin_type: str, tx_id: int, loan_time: int, asset: str, principal: float,
auto_commit: bool = True):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
row = (tx_id, loan_time, asset, principal)
self.add_row(table, row, auto_commit=auto_commit)
def get_loans(self, margin_type: str, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = []
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.loanTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_loan_time(self, asset: str, margin_type: str) -> int:
if margin_type == 'cross':
table = tables.CROSS_MARGIN_LOAN_TABLE
elif margin_type == 'isolated':
raise NotImplementedError
else:
raise ValueError(f"margin type should be 'cross' or 'isolated' but {margin_type} was received")
conditions_list = [(table.asset,
SQLConditionEnum.equal,
asset)]
selection = f"MAX({table.loanTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_redemption(self, redemption_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
row = (redemption_time, lending_type, asset, amount)
self.add_row(tables.LENDING_REDEMPTION_TABLE, row, auto_commit=auto_commit)
def get_lending_redemptions(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.redemptionTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_redemption_time(self, lending_type: Optional[str] = None) -> int:
conditions_list = []
table = tables.LENDING_REDEMPTION_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.redemptionTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_purchase(self, purchase_id: int, purchase_time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
row = (purchase_id, purchase_time, lending_type, asset, amount)
self.add_row(tables.LENDING_PURCHASE_TABLE, row, auto_commit=auto_commit)
def get_lending_purchases(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.purchaseTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_purchase_time(self, lending_type: Optional[str] = None) -> int:
conditions_list = []
table = tables.LENDING_PURCHASE_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.purchaseTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_lending_interest(self, time: int, lending_type: str, asset: str, amount: float,
auto_commit: bool = True):
row = (time, lending_type, asset, amount)
self.add_row(tables.LENDING_INTEREST_TABLE, row, auto_commit=auto_commit)
def get_lending_interests(self, lending_type: Optional[str] = None, asset: Optional[str] = None,
start_time: Optional[int] = None, end_time: Optional[int] = None):
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.interestTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_lending_interest_time(self, lending_type: Optional[str] = None) -> int:
conditions_list = []
table = tables.LENDING_INTEREST_TABLE
if lending_type is not None:
conditions_list.append((table.lendingType,
SQLConditionEnum.equal,
lending_type))
selection = f"MAX({table.interestTime})"
result = self.get_conditions_rows(table,
selection=selection,
conditions_list=conditions_list)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_dust(self, tran_id: str, time: int, asset: str, asset_amount: float, bnb_amount: float, bnb_fee: float,
auto_commit: bool = True):
row = (tran_id, time, asset, asset_amount, bnb_amount, bnb_fee)
self.add_row(tables.SPOT_DUST_TABLE, row, auto_commit=auto_commit)
def get_spot_dusts(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
conditions_list = []
table = tables.SPOT_DUST_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.dustTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def add_dividend(self, div_id: int, div_time: int, asset: str, amount: float, auto_commit: bool = True):
row = (div_id, div_time, asset, amount)
self.add_row(tables.SPOT_DIVIDEND_TABLE, row, auto_commit=auto_commit)
def get_spot_dividends(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
conditions_list = []
table = tables.SPOT_DIVIDEND_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.divTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_dividend_time(self) -> int:
table = tables.SPOT_DIVIDEND_TABLE
selection = f"MAX({table.divTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,
auto_commit: bool = True):
row = (withdraw_id, tx_id, apply_time, asset, amount, fee)
self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)
def get_spot_withdraws(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
conditions_list = []
table = tables.SPOT_WITHDRAW_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.applyTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_withdraw_time(self) -> int:
table = tables.SPOT_WITHDRAW_TABLE
selection = f"MAX({table.applyTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):
row = (tx_id, insert_time, asset, amount)
self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)
def get_spot_deposits(self, asset: Optional[str] = None, start_time: Optional[int] = None,
end_time: Optional[int] = None):
conditions_list = []
table = tables.SPOT_DEPOSIT_TABLE
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if start_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.insertTime,
SQLConditionEnum.lower,
end_time))
return self.get_conditions_rows(table, conditions_list=conditions_list)
def get_last_spot_deposit_time(self) -> int:
table = tables.SPOT_DEPOSIT_TABLE
selection = f"MAX({table.insertTime})"
result = self.get_conditions_rows(table,
selection=selection)
default = datetime_to_millistamp(datetime.datetime(2017, 1, 1, tzinfo=datetime.timezone.utc))
try:
result = result[0][0]
except IndexError:
return default
if result is None:
return default
return result
def add_trade(self, trade_type: str, trade_id: int, trade_time: int, asset: str, ref_asset: str, qty: float,
price: float, fee: float, fee_asset: str, is_buyer: bool, auto_commit=True):
row = (trade_id, trade_time, asset, ref_asset, qty, price, fee, fee_asset, int(is_buyer))
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
self.add_row(table, row, auto_commit)
def get_trades(self, trade_type: str, start_time: Optional[int] = None, end_time: Optional[int] = None,
asset: Optional[str] = None, ref_asset: Optional[str] = None):
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of ('spot', 'cross_margin') but {trade_type} was received")
conditions_list = []
if start_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.greater_equal,
start_time))
if end_time is not None:
conditions_list.append((table.tdTime,
SQLConditionEnum.lower,
end_time))
if asset is not None:
conditions_list.append((table.asset,
SQLConditionEnum.equal,
asset))
if ref_asset is not None:
conditions_list.append((table.refAsset,
SQLConditionEnum.equal,
ref_asset))
return self.get_conditions_rows(table, conditions_list=conditions_list, order_list=[table.tdTime])
def get_max_trade_id(self, asset: str, ref_asset: str, trade_type: str) -> int:
if trade_type == 'spot':
table = tables.SPOT_TRADE_TABLE
elif trade_type == 'cross_margin':
table = tables.CROSS_MARGIN_TRADE_TABLE
else:
raise ValueError(f"trade type should be one of {'spot', 'cross_margin'} but {trade_type} was received")
selection = f"MAX({table.tradeId})"
conditions_list = [
(table.asset,
SQLConditionEnum.equal,
asset),
(table.refAsset,
SQLConditionEnum.equal,
ref_asset)
]
result = self.get_conditions_rows(table, selection=selection, conditions_list=conditions_list)
try:
result = result[0][0]
except IndexError:
return -1
if result is None:
return -1
return result
| true | true |
f7fab899a40af0955de3abfc841878e7f2bed3cc | 12,469 | py | Python | InputConfiguration/translator.py | ashleygw/video-manipulation | 351a904c5f10168e43f4afd4375c15d584b68d61 | [
"MIT"
] | 2 | 2018-08-30T02:59:26.000Z | 2019-04-02T19:23:57.000Z | InputConfiguration/translator.py | ashleygw/video-manipulation | 351a904c5f10168e43f4afd4375c15d584b68d61 | [
"MIT"
] | 24 | 2019-03-02T00:09:58.000Z | 2019-04-29T18:39:42.000Z | InputConfiguration/translator.py | ashleygw/video-manipulation | 351a904c5f10168e43f4afd4375c15d584b68d61 | [
"MIT"
] | 2 | 2018-09-11T17:03:41.000Z | 2018-10-02T23:09:40.000Z | # File to translate .pde sketch files to quad objects
# ready for our program.
# Cleanup infile
# Reorganize the file
# Indenting?
# conversion from cc[] to correct mappin -
# Translated from midi lines -
# Added absolute path
# Processing writes to map.csv
# Args input
# global variables
# better multiline in global comments
# writebufferlines
# added the midi hashmap
# removed drawing in setup
# fixed finding cc arrays and replacing with params
# If code is in controllerChange it will be ignored - Oh well.
import sys
import re
import os
from pathlib import Path
mypath = Path().absolute()
#https://stackoverflow.com/questions/68633/regex-that-will-match-a-java-method-declaration
func = re.compile(r'^[ \t]*(?:(?:public|protected|private)\s+)?(?:(static|final|native|synchronized|abstract|threadsafe|transient|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))\s+){0,}(?!return)\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})\s+\b\w+\b\s*\(\s*(?:\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})(\.\.\.)?\s+(\w+)\b(?![>\[])\s*(?:,\s+\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})(\.\.\.)?\s+(\w+)\b(?![>\[])\s*){0,})?\s*\)(?:\s*throws [\w.]+(\s*,\s*[\w.]+))?')
# Regex matching the literal token "cc" (the Midi controller-change array).
midiInput = re.compile(r'(cc)')
# Java primitive type names, used to spot variable declarations.
javaPrimitives = {"byte", "short", "int", "long", "float", "double", "char", "boolean"}
# Processing / Java class names that may also begin a global declaration.
processingAdditions = {"PImage","PVector","Capture","Movie","String","PFont","PApplet","PGraphics","Array","ArrayList","DoubleDict","DoubleList","HashMap","IntDict","IntList","Table","TableRow","BufferedReader","PrintWriter","PShader","PFont","AudioIn","Amplitude"}
# Types whose declarations are dropped from the generated class.
typesToIgnore = {"Midi"}
# Characters that may legally precede a keyword; used to reject matches that
# are really the tail of a longer identifier (e.g. "this" inside "doThis").
validPrePrimitive = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-*+_~"
with open("processingPrimitives.txt",'r') as f:
    read = f.read()
    # Reverse sort ensures that longer words are checked first
    processingPrimitives = sorted(read.split("\n"),reverse = True)
# Loading substitutions for the cc array.
# This file is generated by the pde file.
MidiMap = {}
# Map.csv maps raw cc indices to parameter names; it is produced by the
# Processing sketch itself, so it may legitimately be absent.
try:
    with open("Map.csv","r") as f:
        mapFile = f.read()
    mapFile = mapFile.split("\n")
    for line in mapFile:
        t = line.split(",")
        if len(t) == 2:
            b,a = t
            MidiMap[a] = b
    foundMidiMap = True
except OSError:
    # BUG FIX: this was a bare `except:`, which also swallowed unrelated
    # errors (KeyboardInterrupt included). Only a missing/unreadable file
    # is an expected condition here.
    print("No mapping file found! Double check Param mapping!")
    foundMidiMap = False
#Read input file to string
infile = ""
# The sketch to translate is passed as the single CLI argument; fall back to
# a default filename next to this script when it is missing.
if len(sys.argv) == 2:
    fileToOpen = mypath / sys.argv[1]
else:
    print("Did not get path argument.")
    fileToOpen = mypath / "tempClass.pde"
print("Opening file: " + str(fileToOpen))
with open(fileToOpen,"r") as f:
    infile = f.read()
# All later passes work on the sketch as a list of raw lines.
infile = infile.split("\n")
def findMidiInput(string):
    """Return a regex match when the line references the Midi ``cc`` array, else None."""
    return midiInput.search(string)
def isGlobalVariable(string, scope):
    """True when *string* looks like a top-level variable declaration (not a
    function, not a Midi ``cc`` reference) at file scope."""
    declares_known_type = string.startswith(tuple(javaPrimitives.union(processingAdditions)))
    return declares_known_type and scope == 0 and not containsFunction(string) and "cc" not in string
# Current nesting depth of the emitted Java code; mutated by the writers.
indentLevel = 0
def indent():
    """Return the tab prefix for the current emission depth."""
    global indentLevel
    return indentLevel * "\t"
def writeGlobalComments(f):
    """
    Currently just writes the comments until code is reached.
    Totally might break - Untested Multiline functionality.
    """
    # True while inside an unterminated /* ... */ block.
    inBody = False
    for line in infile:
        if line.startswith("/*"):
            f.write(line + "\n")
            if "*/" not in line:
                inBody = True
            continue
        if "*/" in line:
            f.write(line + "\n")
            inBody = False
        if inBody:
            f.write(line + "\n")
        elif len(removeComments(line)) > 1:
            # First line with real code content: stop copying the header.
            f.write("\n")
            return
        else:
            f.write(line + "\n")
def containsFunction(string):
    """Return a match object when *string* contains a Java-style method declaration."""
    return func.search(string)
def removeComments(string):
    """Strip ``//`` line comments and complete ``/* ... */`` spans from one line.

    An unterminated ``/*`` (or a ``*/`` appearing before ``/*``) is left in
    place, since the block comment does not open and close on this line.
    String will not include newlines, so ``/*xxxx*/`` is the only multiline
    form we will ever see here.
    """
    slash_pos = string.find("//")
    if slash_pos >= 0:
        string = string[:slash_pos]
    while True:
        start = string.find("/*")
        if start == -1:
            break
        end = string.find("*/")
        if end <= start:
            # No matching close marker after the open marker on this line.
            break
        string = string[:start] + string[end + 2:]
    return string
def writeImports(f):
    """Copy every top-level ``import`` line of the sketch into the output."""
    import_lines = [line for line in infile if line.startswith("import")]
    if import_lines:
        f.write("//IMPORTS FOLLOW - Double check to make sure these are necessary!\n")
        for imp in import_lines:
            f.write(imp + "\n")
        f.write("\n")
def writeGlobalFunctions(f):
    # Copies every sketch function except setup/draw/controllerChange into the
    # generated class as a private method, tracking brace depth to find each
    # function's extent.
    inFunction = False
    scopeDepth = 0
    for line in infile:
        noComments = removeComments(line)
        if "{" in noComments:
            scopeDepth += 1
        if "}" in noComments:
            scopeDepth -= 1
        # A function may start at depth 0, or on the very line that opens it.
        if not inFunction and (scopeDepth == 0 or (scopeDepth == 1 and "{" in noComments)):
            if containsFunction(noComments):
                if not( "void setup()" in noComments or "void draw()" in noComments or "void controllerChange" in noComments):
                    inFunction = True
                    writeBufferline(f,"private " + line)
                    #f.write("\tprivate " + line + "\n")
        elif inFunction:
            if scopeDepth == 0: #Found a }
                writeBufferline(f,line)
                f.write("\n")
                inFunction = False
            else:
                writeBufferline(f,line)
def writeClass(f):
    # Emits the generated QuadObject subclass: fields, a constructor built
    # from setup(), runSketch() built from draw(), then all other functions.
    global indentLevel
    f.write("public class OutputQuad extends QuadObject{\n")
    indentLevel += 1
    writeFields(f)
    writeConstructor(f)
    writeRunSketch(f)
    indentLevel -= 1
    writeGlobalFunctions(f)
    f.write("}\n")
def writeFields(f):
    """Emit the sketch's global variables as private fields of the class,
    preceded by the Midi parameter-name map."""
    f.write(indent() + "private HashMap<String, Integer> map = MidiMapper.getSpecialButtons();\n")
    for var_type, var_tokens in findGlobals():
        declaration = var_type + " " + " ".join(var_tokens)
        f.write(indent() + "private " + declaration + "\n")
    f.write("\n")
def getSetup():
    """Return the body lines of the sketch's ``void setup()`` block.

    Brace depth is tracked relative to setup's opening brace (the line that
    contains ``void setup()`` itself is skipped and not counted), so the body
    ends when the depth drops below zero, i.e. on setup's closing ``}``.
    """
    # Cleanup vs. the original: the unused setupStartIndex/setupEndIndex
    # locals are gone, and removeComments() is called once per line instead
    # of three times.
    setupLines = []
    inSetup = False
    scopeDepth = 0
    for line in infile:
        stripped = removeComments(line)
        if "void setup()" in stripped:
            inSetup = True
            continue
        if "{" in stripped:
            scopeDepth += 1
        if "}" in stripped:
            scopeDepth -= 1
            if scopeDepth < 0:
                # Closing brace of setup() itself.
                break
        if inSetup:
            setupLines.append(line)
    return setupLines
def replaceThis(line):
    # Rewrites the Java keyword "this" to "app" everywhere outside of a
    # trailing // comment, so setup() code targets the passed-in PApplet.
    # NOTE(review): after a replacement the scan index is advanced by 10
    # (resp. 2, plus the 1 at loop end); "this" -> "app" shrinks the line by
    # one character, so these skips look larger than strictly needed — they
    # can only skip text, not corrupt it. Confirm intent before tightening.
    index = 0
    newLine = line[:]
    while index < len(newLine):
        split = newLine[index:]
        if split.startswith("//"):
            # Rest of the line is a comment; nothing more to replace.
            return newLine
        elif split.startswith("this"):
            #Check last character
            if index > 0: #Reverse flow protection
                if not newLine[index-1] in validPrePrimitive:
                    newLine = newLine[:index] + "app" + newLine[index+4:]
                    index+=10
            else:
                newLine = newLine[:index] + "app" + newLine[index+4:]
                index+=2
        index+=1
    return newLine
# Drawing calls must not run in setup(): they would crash the host program
# regardless of what this code does, so those calls are dropped when building
# the constructor. "this" is rewritten to the passed-in PApplet "app".
ignoreInSetup = ["fullScreen","noStroke","colorMode","size","background"]
def writeConstructor(f):
    """Emit the class constructor, built from the sketch's setup() body."""
    f.write(indent() + "OutputQuad(PApplet app, PGraphics buffer){\n")
    for line in getSetup():
        stripped = removeComments(line)
        if any(banned in stripped for banned in ignoreInSetup):
            continue
        if "this" in stripped:
            line = replaceThis(stripped)
        f.write(indent() + line + "\n")
    f.write(indent() + "}\n")
    f.write("\n")
def getDraw():
    """Return the body lines of the sketch's ``void draw()`` block.

    Mirrors getSetup(): brace depth is counted from draw's opening brace (the
    ``void draw()`` line itself is skipped), so the body ends when the depth
    drops below zero on draw's closing ``}``.
    """
    # Cleanup vs. the original: unused drawStartIndex/drawEndIndex locals
    # removed; removeComments() called once per line instead of three times.
    drawLines = []
    inDraw = False
    scopeDepth = 0
    for line in infile:
        stripped = removeComments(line)
        if "void draw()" in stripped:
            inDraw = True
            continue
        if "{" in stripped:
            scopeDepth += 1
        if "}" in stripped:
            scopeDepth -= 1
            if scopeDepth < 0:
                # Closing brace of draw() itself.
                break
        if inDraw:
            drawLines.append(line)
    return drawLines
def findAll(line, substring):
    """Return every index at which *substring* occurs in *line*.

    Overlapping occurrences are included, since the search resumes one
    character after each hit.
    """
    hits = []
    pos = line.find(substring)
    while pos != -1:
        hits.append(pos)
        pos = line.find(substring, pos + 1)
    return hits
def updateLine(line):
    # Adds tempBuffer. to necessary function calls.
    # Prefixes each Processing primitive call with "tempBuffer." so drawing
    # goes to the off-screen buffer. After an insertion the index advances by
    # 10 here plus 1 at the loop end — i.e. len("tempBuffer.") — so the
    # freshly inserted prefix is never rescanned.
    index = 0
    newLine = line[:]
    while index < len(newLine):
        split = newLine[index:]
        if split.startswith("//"):
            # Rest of the line is a comment; leave it untouched.
            return newLine
        for keyword in processingPrimitives:
            if split.startswith(keyword):
                #Check last character
                if index > 0: #Reverse flow protection
                    if not newLine[index-1] in validPrePrimitive:
                        newLine = newLine[:index] + "tempBuffer." + newLine[index:]
                        index+=10
                else:
                    newLine = newLine[:index] + "tempBuffer." + newLine[index:]
                    index+=10
        index+=1
    return newLine
def writeBufferline(f, line):
    """Write one line of sketch body code, redirected at the temp buffer.

    Cleanup: the original computed an unused removeComments() result and
    carried a commented-out keyword scan; updateLine() does all the work.
    """
    f.write("\t" + updateLine(line) + "\n")
def writeRunSketch(f):
    """Emit runSketch(), built from the sketch's draw() body.

    All drawing is wrapped in tempBuffer.beginDraw()/endDraw() so the sketch
    renders into the off-screen buffer rather than the main surface.
    """
    global indentLevel
    f.write(indent() + "@Override\n")
    # BUG FIX: the generated signature previously read "Arraylist<Float>",
    # which is not a Java type — the produced class could not compile.
    f.write(indent() + "protected void runSketch(ArrayList<Float> params){\n")
    indentLevel += 1
    f.write(indent() + "tempBuffer.beginDraw();\n")
    for line in getDraw():
        writeBufferline(f, line)
    f.write(indent() + "tempBuffer.endDraw();\n")
    indentLevel -= 1
    f.write(indent() + "}\n")
    f.write("\n")
def findGlobals():
    """Collect the sketch's top-level variable declarations.

    :return: list of (type-token, remaining-tokens) pairs, one per global.
    """
    scope = 0
    globalsToAdd = []
    for line in infile:
        noComments = removeComments(line).lstrip()
        if "{" in noComments:
            scope += 1
        if "}" in noComments:
            scope -= 1
        #Get all possible keywords/separate Midi and global structs
        if isGlobalVariable(noComments, scope):
            # BUG FIX: split the comment-stripped text, not the raw line, so a
            # trailing // comment does not leak into the generated field.
            tokens = noComments.split()
            globalsToAdd.append((tokens[0], tokens[1:]))
    return globalsToAdd
def replaceCC(IRFile):
    # Second pass over the generated file: rewrites cc[<n>] accesses into
    # params.get(...) lookups and deletes the Midi object initialisation.
    with open("output.pde","w") as f:
        for line in IRFile:
            if findMidiInput(removeComments(line)):
                startIndex = line.find("cc")
                # rParen is relative to startIndex; the absolute position of
                # the closing bracket is rParen + startIndex.
                rParen = line[startIndex:].find("]")
                indexIn = line[startIndex+3:rParen+startIndex] #3 offsets cc[
                #Replace indexIn with Map value
                if foundMidiMap:
                    # Named-parameter lookup via the Map.csv substitutions.
                    if indexIn.isdigit() and indexIn in MidiMap:
                        indexIn = '"' + MidiMap[indexIn] + '"'
                    composite = line[:startIndex] + "params.get(map.get(" + indexIn + "))" + line[rParen + startIndex + 1:]
                else:
                    composite = line[:startIndex] + "params.get(" + indexIn + ")" + line[rParen + startIndex + 1:]
                f.write(composite + " // Replaced: " + line + "\n")
            elif "new Midi" in removeComments(line):
                f.write("\t\t//Deleted new Midi initialization\n")
            else:
                f.write(line + '\n')
def generateNewFile():
    """Write the translated sketch to output.pde.

    First pass emits the class; a second pass (replaceCC) rewrites cc[]
    accesses into params.get(...) lookups.
    """
    with open("output.pde","w") as f:
        writeGlobalComments(f)
        writeImports(f)
        writeClass(f)
    #Second pass to replace Midi cc array
    with open("output.pde","r") as f:
        IRFile = f.read().split("\n")
    # FIX: rewrite the file only after the read handle is closed — replaceCC
    # reopens output.pde for writing, which previously happened while the
    # "r+" handle was still open.
    replaceCC(IRFile)
generateNewFile()
import sys
import re
import os
from pathlib import Path
mypath = Path().absolute()
func = re.compile(r'^[ \t]*(?:(?:public|protected|private)\s+)?(?:(static|final|native|synchronized|abstract|threadsafe|transient|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))\s+){0,}(?!return)\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})\s+\b\w+\b\s*\(\s*(?:\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})(\.\.\.)?\s+(\w+)\b(?![>\[])\s*(?:,\s+\b([\w.]+)\b(?:|(?:<[?\w\[\] ,&]+>)|(?:<[^<]*<[?\w\[\] ,&]+>[^>]*>)|(?:<[^<]*<[^<]*<[?\w\[\] ,&]+>[^>]*>[^>]*>))((?:\[\]){0,})(\.\.\.)?\s+(\w+)\b(?![>\[])\s*){0,})?\s*\)(?:\s*throws [\w.]+(\s*,\s*[\w.]+))?')
midiInput = re.compile(r'(cc)')
javaPrimitives = {"byte", "short", "int", "long", "float", "double", "char", "boolean"}
processingAdditions = {"PImage","PVector","Capture","Movie","String","PFont","PApplet","PGraphics","Array","ArrayList","DoubleDict","DoubleList","HashMap","IntDict","IntList","Table","TableRow","BufferedReader","PrintWriter","PShader","PFont","AudioIn","Amplitude"}
typesToIgnore = {"Midi"}
validPrePrimitive = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-*+_~"
with open("processingPrimitives.txt",'r') as f:
read = f.read()
processingPrimitives = sorted(read.split("\n"),reverse = True)
MidiMap = {}
try:
with open("Map.csv","r") as f:
mapFile = f.read()
mapFile = mapFile.split("\n")
for line in mapFile:
t = line.split(",")
if len(t) == 2:
b,a = t
MidiMap[a] = b
foundMidiMap = True
except:
print("No mapping file found! Double check Param mapping!")
foundMidiMap = False
infile = ""
if len(sys.argv) == 2:
fileToOpen = mypath / sys.argv[1]
else:
print("Did not get path argument.")
fileToOpen = mypath / "tempClass.pde"
print("Opening file: " + str(fileToOpen))
with open(fileToOpen,"r") as f:
infile = f.read()
infile = infile.split("\n")
def findMidiInput(string):
m = midiInput.search(string)
return m
def isGlobalVariable(string,scope):
return string.startswith(tuple(javaPrimitives.union(processingAdditions))) and scope == 0 and not containsFunction(string) and not "cc" in string
indentLevel = 0
def indent():
global indentLevel
return "\t" * indentLevel
def writeGlobalComments(f):
inBody = False
for line in infile:
if line.startswith("/*"):
f.write(line + "\n")
if "*/" not in line:
inBody = True
continue
if "*/" in line:
f.write(line + "\n")
inBody = False
if inBody:
f.write(line + "\n")
elif len(removeComments(line)) > 1:
f.write("\n")
return
else:
f.write(line + "\n")
def containsFunction(string):
m = func.search(string)
return m
def removeComments(string):
lineComment = string.find("//")
if lineComment != -1:
string = string[:lineComment]
while "/*" in string:
sindex = string.find("/*")
eindex = string.find("*/")
if sindex < eindex:
string = string[:sindex] + string[eindex+2:]
else:
break
return string
def writeImports(f):
toImport = []
for line in infile:
if line.startswith("import"):
toImport.append(line)
if len(toImport) > 0:
f.write("//IMPORTS FOLLOW - Double check to make sure these are necessary!\n")
for imp in toImport:
f.write(imp + "\n")
f.write("\n")
def writeGlobalFunctions(f):
inFunction = False
scopeDepth = 0
for line in infile:
noComments = removeComments(line)
if "{" in noComments:
scopeDepth += 1
if "}" in noComments:
scopeDepth -= 1
if not inFunction and (scopeDepth == 0 or (scopeDepth == 1 and "{" in noComments)):
if containsFunction(noComments):
if not( "void setup()" in noComments or "void draw()" in noComments or "void controllerChange" in noComments):
inFunction = True
writeBufferline(f,"private " + line)
elif inFunction:
if scopeDepth == 0:
writeBufferline(f,line)
f.write("\n")
inFunction = False
else:
writeBufferline(f,line)
def writeClass(f):
global indentLevel
f.write("public class OutputQuad extends QuadObject{\n")
indentLevel += 1
writeFields(f)
writeConstructor(f)
writeRunSketch(f)
indentLevel -= 1
writeGlobalFunctions(f)
f.write("}\n")
def writeFields(f):
allGlobals = findGlobals()
f.write(indent() + "private HashMap<String, Integer> map = MidiMapper.getSpecialButtons();\n")
for globalVariable in allGlobals:
line = globalVariable[0] + " " + " ".join(globalVariable[1])
f.write(indent() + "private " + line + "\n")
f.write("\n")
def getSetup():
setupStartIndex = -1
setupEndIndex = -1
setupLines = []
inSetup = False
scopeDepth = 0
for i,line in enumerate(infile):
if "void setup()" in removeComments(line[:]):
setupStartIndex = i+1
inSetup = True
continue
if "{" in removeComments(line[:]):
scopeDepth += 1
if "}" in removeComments(line[:]):
scopeDepth -= 1
if scopeDepth < 0:
inSetup = False
setupEndIndex = i
break
if inSetup:
setupLines.append(line)
return setupLines
def replaceThis(line):
index = 0
newLine = line[:]
while index < len(newLine):
split = newLine[index:]
if split.startswith("//"):
return newLine
elif split.startswith("this"):
if index > 0:
if not newLine[index-1] in validPrePrimitive:
newLine = newLine[:index] + "app" + newLine[index+4:]
index+=10
else:
newLine = newLine[:index] + "app" + newLine[index+4:]
index+=2
index+=1
return newLine
ignoreInSetup = ["fullScreen","noStroke","colorMode","size","background"]
def writeConstructor(f):
f.write(indent() + "OutputQuad(PApplet app, PGraphics buffer){\n")
constructorBody = getSetup()
for line in constructorBody:
rc = removeComments(line)
if any(ignore in rc for ignore in ignoreInSetup):
continue
if "this" in rc:
line = replaceThis(rc)
f.write(indent() + line + "\n")
f.write(indent() + "}\n")
f.write("\n")
def getDraw():
drawStartIndex = -1
drawEndIndex = -1
drawLines = []
inDraw = False
scopeDepth = 0
for i,line in enumerate(infile):
if "void draw()" in removeComments(line[:]):
drawStartIndex = i+1
inDraw = True
continue
if "{" in removeComments(line[:]):
scopeDepth += 1
if "}" in removeComments(line[:]):
scopeDepth -= 1
if scopeDepth < 0:
inDraw = False
drawEndIndex = i
break
if inDraw:
drawLines.append(line)
return drawLines
def findAll(line,substring):
indexes = []
lastIndex = 0
while lastIndex != -1:
lastIndex = line.find(substring,lastIndex,len(line))
if lastIndex == -1:
break
indexes.append(lastIndex)
lastIndex += 1
return indexes
def updateLine(line):
index = 0
newLine = line[:]
while index < len(newLine):
split = newLine[index:]
if split.startswith("//"):
return newLine
for keyword in processingPrimitives:
if split.startswith(keyword):
if index > 0:
if not newLine[index-1] in validPrePrimitive:
newLine = newLine[:index] + "tempBuffer." + newLine[index:]
index+=10
else:
newLine = newLine[:index] + "tempBuffer." + newLine[index:]
index+=10
index+=1
return newLine
def writeBufferline(f,line):
rc = removeComments(line)
newstr = updateLine(line)
f.write("\t" + newstr + "\n")
def writeRunSketch(f):
global indentLevel
f.write(indent() + "@Override\n")
f.write(indent() + "protected void runSketch(Arraylist<Float> params){\n")
indentLevel += 1
f.write(indent() + "tempBuffer.beginDraw();\n")
runSketchBody = getDraw()
for line in runSketchBody:
writeBufferline(f,line)
f.write(indent() + "tempBuffer.endDraw();\n")
indentLevel -= 1
f.write(indent() + "}\n")
f.write("\n")
def findGlobals():
scope = 0
globalsToAdd = []
for line in infile:
noComments = removeComments(line)
noComments = noComments.lstrip()
if "{" in noComments:
scope+=1
if "}" in noComments:
scope -= 1
if isGlobalVariable(noComments,scope):
globalsToAdd.append((line.split()[0], line.split()[1:]))
return globalsToAdd
def replaceCC(IRFile):
with open("output.pde","w") as f:
for line in IRFile:
if findMidiInput(removeComments(line)):
startIndex = line.find("cc")
rParen = line[startIndex:].find("]")
indexIn = line[startIndex+3:rParen+startIndex]
if foundMidiMap:
if indexIn.isdigit() and indexIn in MidiMap:
indexIn = '"' + MidiMap[indexIn] + '"'
composite = line[:startIndex] + "params.get(map.get(" + indexIn + "))" + line[rParen + startIndex + 1:]
else:
composite = line[:startIndex] + "params.get(" + indexIn + ")" + line[rParen + startIndex + 1:]
f.write(composite + " // Replaced: " + line + "\n")
elif "new Midi" in removeComments(line):
f.write("\t\t//Deleted new Midi initialization\n")
else:
f.write(line + '\n')
def generateNewFile():
with open("output.pde","w+") as f:
writeGlobalComments(f)
writeImports(f)
writeClass(f)
with open("output.pde","r+") as f:
IRFile = f.read()
IRFile = IRFile.split("\n")
replaceCC(IRFile)
generateNewFile() | true | true |
f7fab910bb8a6c6b379d8f8353b85660680173e9 | 1,148 | py | Python | manager/app.py | W-DEJONG/Id-manager | f1fa147f7915da67f545dd1c4dc3abae0a0ac0cc | [
"BSD-3-Clause"
] | 1 | 2022-02-01T10:45:30.000Z | 2022-02-01T10:45:30.000Z | manager/app.py | W-DEJONG/Id-manager | f1fa147f7915da67f545dd1c4dc3abae0a0ac0cc | [
"BSD-3-Clause"
] | null | null | null | manager/app.py | W-DEJONG/Id-manager | f1fa147f7915da67f545dd1c4dc3abae0a0ac0cc | [
"BSD-3-Clause"
] | null | null | null | import os
from flask import Flask, redirect, url_for
from manager import config
from manager.models import db
from manager.oauth2 import config_oauth
from manager.routes import auth, oauth, api, admin
from manager.auth import csrf, login_manager
def create_app(test_config=None):
    """Application factory for the id-manager service.

    :param test_config: optional mapping that overrides the default
        configuration (used by the test suite); when None, an optional
        instance ``manager.cfg`` file is consulted instead.
    :return: the configured Flask application
    """
    app = Flask(__name__,
                instance_path=os.environ.get('MANAGER_INSTANCE_PATH'),
                instance_relative_config=True)
    app.config.from_object(config)
    if test_config is None:
        # The instance config file is optional outside of tests.
        app.config.from_pyfile('manager.cfg', silent=True)
    else:
        app.config.from_mapping(test_config)

    # Wire up extensions.
    db.init_app(app)
    csrf.init_app(app)
    login_manager.init_app(app)
    config_oauth(app)

    # Register the application's blueprints.
    for blueprint in (auth.bp, oauth.bp, api.bp, admin.bp):
        app.register_blueprint(blueprint)

    @app.route('/')
    def home():
        return redirect(url_for('auth.home'))

    @app.route('/_health')
    def health():
        return {'status': 'healthy'}

    return app
| 25.511111 | 70 | 0.684669 | import os
from flask import Flask, redirect, url_for
from manager import config
from manager.models import db
from manager.oauth2 import config_oauth
from manager.routes import auth, oauth, api, admin
from manager.auth import csrf, login_manager
def create_app(test_config=None):
app = Flask(__name__,
instance_path=os.environ.get('MANAGER_INSTANCE_PATH'),
instance_relative_config=True)
app.config.from_object(config)
if test_config is None:
app.config.from_pyfile('manager.cfg', silent=True)
else:
app.config.from_mapping(test_config)
db.init_app(app)
csrf.init_app(app)
login_manager.init_app(app)
config_oauth(app)
app.register_blueprint(auth.bp)
app.register_blueprint(oauth.bp)
app.register_blueprint(api.bp)
app.register_blueprint(admin.bp)
@app.route('/')
def home():
return redirect(url_for('auth.home'))
@app.route('/_health')
def health():
return {'status': 'healthy'}
return app
| true | true |
f7fabb36d75130c3b7807e2c5dc6e6309ef4522b | 248 | py | Python | Three/manage.py | LyCq/sanjiliandong | ec0d44073f9b3aa27dee0b2fd23e905ec10c6b7c | [
"MIT"
] | null | null | null | Three/manage.py | LyCq/sanjiliandong | ec0d44073f9b3aa27dee0b2fd23e905ec10c6b7c | [
"MIT"
] | null | null | null | Three/manage.py | LyCq/sanjiliandong | ec0d44073f9b3aa27dee0b2fd23e905ec10c6b7c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Three.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.545455 | 69 | 0.770161 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Three.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
f7fabcd5fedaf3b33fd1f53a6606bb7851beda75 | 939 | py | Python | nova/console/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 7 | 2017-06-19T19:37:00.000Z | 2019-06-16T02:06:14.000Z | nova/console/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | null | null | null | nova/console/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 6 | 2015-06-20T16:07:28.000Z | 2020-08-19T14:57:59.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nova.console` -- Console Proxy to set up VM console access
(i.e. with xvp)
=====================================================
.. automodule:: nova.console
:platform: Unix
:synopsis: Wrapper around console proxies such as xvp to set up
multitenant VM console access
.. moduleauthor:: Monsyne Dragon <mdragon@rackspace.com>
"""
| 39.125 | 78 | 0.667732 | true | true | |
f7fabcf67885a460bbc8b45b494d2ecdc3128a16 | 6,294 | py | Python | src/c_json.py | McSCert/C2Flowchart | fbbeba9c4ef477bef8e096a76895e728698ef6cd | [
"BSD-3-Clause"
] | null | null | null | src/c_json.py | McSCert/C2Flowchart | fbbeba9c4ef477bef8e096a76895e728698ef6cd | [
"BSD-3-Clause"
] | null | null | null | src/c_json.py | McSCert/C2Flowchart | fbbeba9c4ef477bef8e096a76895e728698ef6cd | [
"BSD-3-Clause"
] | null | null | null | #------------------------------------------------------------------------------
# pycparser: c_json.py
#
# by Michael White (@mypalmike)
#
# This example includes functions to serialize and deserialize an ast
# to and from json format. Serializing involves walking the ast and converting
# each node from a python Node object into a python dict. Deserializing
# involves the opposite conversion, walking the tree formed by the
# dict and converting each dict into the specific Node object it represents.
# The dict itself is serialized and deserialized using the python json module.
#
# The dict representation is a fairly direct transformation of the object
# attributes. Each node in the dict gets one metadata field referring to the
# specific node class name, _nodetype. Each local attribute (i.e. not linking
# to child nodes) has a string value or array of string values. Each child
# attribute is either another dict or an array of dicts, exactly as in the
# Node object representation. The "coord" attribute, representing the
# node's location within the source code, is serialized/deserialized from
# a Coord object into a string of the format "filename:line[:column]".
#
# Example TypeDecl node, with IdentifierType child node, represented as a dict:
# "type": {
# "_nodetype": "TypeDecl",
# "coord": "c_files/funky.c:8",
# "declname": "o",
# "quals": [],
# "type": {
# "_nodetype": "IdentifierType",
# "coord": "c_files/funky.c:8",
# "names": [
# "char"
# ]
# }
# }
#------------------------------------------------------------------------------
from __future__ import print_function
import json
import sys
import re
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_ast
from pycparser.plyparser import Coord
# Matches child attribute names of the form "name[index]".
RE_CHILD_ARRAY = re.compile(r'(.*)\[(.*)\]')
# Matches dunder attributes, which are implementation details, not AST data.
RE_INTERNAL_ATTR = re.compile('__.*__')
class CJsonError(Exception):
    """Raised when the dict representation of an AST is internally inconsistent."""
    pass
def memodict(fn):
    """ Fast memoization decorator for a function taking a single argument """
    class _Memo(dict):
        def __missing__(self, key):
            # Compute once, store, and return; later calls hit the dict.
            result = fn(key)
            self[key] = result
            return result
    return _Memo().__getitem__
@memodict
def child_attrs_of(klass):
    """
    Given a Node class, get a set of child attrs.
    Memoized to avoid highly repetitive string manipulation
    """
    slot_attrs = {name for name in klass.__slots__ if not RE_INTERNAL_ATTR.match(name)}
    # Child attributes are all slots minus the node's own local attributes.
    return slot_attrs - set(klass.attr_names)
def to_dict(node):
    """ Recursively convert an ast into dict representation.

    Each node dict carries a '_nodetype' key with the Node class name and a
    'coord' key of the form "filename:line[:column]" (or None).
    """
    klass = node.__class__
    result = {}
    # Metadata
    result['_nodetype'] = klass.__name__
    # Local node attributes
    for attr in klass.attr_names:
        result[attr] = getattr(node, attr)
    # Coord object
    if node.coord:
        result['coord'] = str(node.coord)
    else:
        result['coord'] = None
    # Child attributes
    for child_name, child in node.children():
        # Child strings are either simple (e.g. 'value') or arrays (e.g. 'block_items[1]')
        match = RE_CHILD_ARRAY.match(child_name)
        if match:
            array_name, array_index = match.groups()
            array_index = int(array_index)
            # arrays come in order, so we verify and append.
            result[array_name] = result.get(array_name, [])
            if array_index != len(result[array_name]):
                raise CJsonError('Internal ast error. Array {} out of order. '
                    'Expected index {}, got {}'.format(
                    array_name, len(result[array_name]), array_index))
            result[array_name].append(to_dict(child))
        else:
            result[child_name] = to_dict(child)
    # Any child attributes that were missing need "None" values in the json.
    for child_attr in child_attrs_of(klass):
        if child_attr not in result:
            result[child_attr] = None
    return result
def to_json(node, **kwargs):
    """ Convert ast node to a json string; extra kwargs go to json.dumps. """
    as_dict = to_dict(node)
    return json.dumps(as_dict, **kwargs)
def file_to_dict(filename):
    """ Load C file into dict representation of ast """
    return to_dict(parse_file(filename, use_cpp=True))
def file_to_json(filename, **kwargs):
    """ Load C file into json string representation of ast """
    return to_json(parse_file(filename, use_cpp=True), **kwargs)
def _parse_coord(coord_str):
    """ Parse coord string (file:line[:column]) into Coord object. """
    if coord_str is None:
        return None
    parts = coord_str.split(':')
    # Pad so that a missing line and/or column comes through as None.
    parts += [None] * 3
    return Coord(parts[0], parts[1], parts[2])
def _convert_to_obj(value):
    """
    Convert one value from the dict representation into object form.
    Note: Mutually recursive with from_dict.
    """
    kind = type(value)
    if kind == dict:
        return from_dict(value)
    if kind == list:
        return [_convert_to_obj(item) for item in value]
    # String (or other scalar): already in its final form.
    return value
def from_dict(node_dict):
    """ Recursively build an ast from dict representation.

    The input dict is left unmodified — the previous implementation popped
    '_nodetype' out of it, destroying the caller's data on every call.
    """
    class_name = node_dict['_nodetype']
    klass = getattr(c_ast, class_name)
    # Create a new dict containing the key-value pairs which we can pass
    # to node constructors.
    objs = {}
    for key, value in node_dict.items():
        if key == '_nodetype':
            # Metadata, not a constructor argument.
            continue
        if key == 'coord':
            objs[key] = _parse_coord(value)
        else:
            objs[key] = _convert_to_obj(value)
    # Use keyword parameters, which works thanks to beautifully consistent
    # ast Node initializers.
    return klass(**objs)
def from_json(ast_json):
    """ Build an ast from json string representation """
    parsed = json.loads(ast_json)
    return from_dict(parsed)
##MAIN
def createJSON(file):
    """Parse the given C file and dump its AST as JSON to tmp/json.txt.

    Round-trips the AST through the dict representation (file_to_dict ->
    from_dict) before serializing, exactly as the original did.
    """
    print(file)
    ast_dict = file_to_dict(file)
    ast = from_dict(ast_dict)
    # BUG FIX: the result used to be bound to a local named "json", shadowing
    # the imported json module inside this function.
    json_text = to_json(ast, sort_keys=True, indent=4)
    with open('tmp/json.txt', 'w') as outfile:
        outfile.write(json_text)
if len(sys.argv) == 2:
    createJSON(sys.argv[1])
| 30.259615 | 90 | 0.634731 |
# a Coord object into a string of the format "filename:line[:column]".
#
# Example TypeDecl node, with IdentifierType child node, represented as a dict:
# "type": {
# "_nodetype": "TypeDecl",
# "coord": "c_files/funky.c:8",
# "declname": "o",
# "quals": [],
# "type": {
# "_nodetype": "IdentifierType",
# "coord": "c_files/funky.c:8",
# "names": [
# "char"
# ]
# }
# }
#------------------------------------------------------------------------------
from __future__ import print_function
import json
import sys
import re
# This is not required if you've installed pycparser into
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_ast
from pycparser.plyparser import Coord
RE_CHILD_ARRAY = re.compile(r'(.*)\[(.*)\]')
RE_INTERNAL_ATTR = re.compile('__.*__')
class CJsonError(Exception):
pass
def memodict(fn):
class memodict(dict):
def __missing__(self, key):
ret = self[key] = fn(key)
return ret
return memodict().__getitem__
@memodict
def child_attrs_of(klass):
non_child_attrs = set(klass.attr_names)
all_attrs = set([i for i in klass.__slots__ if not RE_INTERNAL_ATTR.match(i)])
return all_attrs - non_child_attrs
def to_dict(node):
klass = node.__class__
result = {}
result['_nodetype'] = klass.__name__
for attr in klass.attr_names:
result[attr] = getattr(node, attr)
if node.coord:
result['coord'] = str(node.coord)
else:
result['coord'] = None
for child_name, child in node.children():
match = RE_CHILD_ARRAY.match(child_name)
if match:
array_name, array_index = match.groups()
array_index = int(array_index)
result[array_name] = result.get(array_name, [])
if array_index != len(result[array_name]):
raise CJsonError('Internal ast error. Array {} out of order. '
'Expected index {}, got {}'.format(
array_name, len(result[array_name]), array_index))
result[array_name].append(to_dict(child))
else:
result[child_name] = to_dict(child)
for child_attr in child_attrs_of(klass):
if child_attr not in result:
result[child_attr] = None
return result
def to_json(node, **kwargs):
return json.dumps(to_dict(node), **kwargs)
def file_to_dict(filename):
ast = parse_file(filename, use_cpp=True)
return to_dict(ast)
def file_to_json(filename, **kwargs):
ast = parse_file(filename, use_cpp=True)
return to_json(ast, **kwargs)
def _parse_coord(coord_str):
if coord_str is None:
return None
vals = coord_str.split(':')
vals.extend([None] * 3)
filename, line, column = vals[:3]
return Coord(filename, line, column)
def _convert_to_obj(value):
value_type = type(value)
if value_type == dict:
return from_dict(value)
elif value_type == list:
return [_convert_to_obj(item) for item in value]
else:
return value
def from_dict(node_dict):
class_name = node_dict.pop('_nodetype')
klass = getattr(c_ast, class_name)
objs = {}
for key, value in node_dict.items():
if key == 'coord':
objs[key] = _parse_coord(value)
else:
objs[key] = _convert_to_obj(value)
return klass(**objs)
def from_json(ast_json):
    """Deserialize a JSON string produced by to_json() back into an AST."""
    node_dict = json.loads(ast_json)
    return from_dict(node_dict)
def createJSON(file):
    """Parse C source *file*, round-trip its AST through a dict, and write
    the pretty-printed, key-sorted JSON to tmp/json.txt.

    Fixes: the original line lacked the ``def`` keyword (a SyntaxError),
    and bound a local named ``json``, shadowing the json module inside
    the function.
    """
    print(file)
    ast_dict = file_to_dict(file)
    ast = from_dict(ast_dict)
    json_text = to_json(ast, sort_keys=True, indent=4)
    with open('tmp/json.txt', 'w+') as outfile:
        outfile.write(json_text)
# Script entry point: expects exactly one argument, the C file to convert.
# NOTE(review): this runs at import time; consider guarding with
# ``if __name__ == '__main__':`` so importing the module has no side effects.
if len(sys.argv) == 2:
    createJSON(sys.argv[1])
| true | true |
f7fabe374638f660af55150179e4ad424ca381a0 | 2,163 | py | Python | pages/views.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 911 | 2015-01-03T22:16:06.000Z | 2022-03-31T23:56:22.000Z | pages/views.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 1,342 | 2015-01-02T16:14:45.000Z | 2022-03-28T08:01:20.000Z | pages/views.py | Manny27nyc/pythondotorg | 257c96d3a94755451a5a5cdcd2abad1e27ea299b | [
"Apache-2.0"
] | 551 | 2015-01-04T02:17:31.000Z | 2022-03-23T11:59:25.000Z | import re
from django.http import HttpResponsePermanentRedirect
from django.urls import reverse
from django.views.generic import DetailView
from downloads.models import Release
from .models import Page
class PageView(DetailView):
    """Serve a CMS ``Page`` looked up by its URL path.

    Staff users can preview unpublished pages; everyone else only sees
    published ones.  Legacy ``/download/releases/X.Y.Z/`` URLs are
    permanently redirected to the canonical release page (see #956).
    """
    template_name = 'pages/default.html'
    template_name_field = 'template_name'
    context_object_name = 'page'
    # Look pages up by their full "path" rather than the default "slug".
    slug_url_kwarg = 'path'
    slug_field = 'path'

    def get_template_names(self):
        """Prefer the template stored on the page, falling back to the default."""
        candidates = [self.template_name]
        if self.object and self.template_name_field:
            page_template = getattr(self.object, self.template_name_field, None)
            if page_template:
                candidates.insert(0, page_template)
        return candidates

    def get_queryset(self):
        # Staff may preview drafts; regular visitors see published pages only.
        if self.request.user.is_staff:
            return Page.objects.all()
        return Page.objects.published()

    @property
    def content_type(self):
        return self.object.content_type

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['in_pages_app'] = True
        return context

    def get(self, request, *args, **kwargs):
        # Redirect '/download/releases/X.Y.Z/' to
        # '/downloads/release/python-XYZ/' unless the release already has a
        # 'release_page' pointing back at the legacy URL.  See #956.
        matched = re.match(r'/download/releases/([\d.]+)/$', self.request.path)
        if matched is not None:
            release_slug = 'python-{}'.format(matched.group(1).replace('.', ''))
            try:
                Release.objects.get(slug=release_slug, release_page__isnull=True)
            except Release.DoesNotExist:
                pass
            else:
                return HttpResponsePermanentRedirect(
                    reverse(
                        'download:download_release_detail',
                        kwargs={'release_slug': release_slug},
                    )
                )
        return super().get(request, *args, **kwargs)
| 32.772727 | 81 | 0.607952 | import re
from django.http import HttpResponsePermanentRedirect
from django.urls import reverse
from django.views.generic import DetailView
from downloads.models import Release
from .models import Page
class PageView(DetailView):
template_name = 'pages/default.html'
template_name_field = 'template_name'
context_object_name = 'page'
slug_url_kwarg = 'path'
slug_field = 'path'
def get_template_names(self):
names = [self.template_name]
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
return names
def get_queryset(self):
if self.request.user.is_staff:
return Page.objects.all()
else:
return Page.objects.published()
@property
def content_type(self):
return self.object.content_type
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['in_pages_app'] = True
return context
def get(self, request, *args, **kwargs):
# 'release_page' (which points to the former URL) field set.
# See #956 for details.
matched = re.match(r'/download/releases/([\d.]+)/$', self.request.path)
if matched is not None:
release_slug = 'python-{}'.format(matched.group(1).replace('.', ''))
try:
Release.objects.get(slug=release_slug, release_page__isnull=True)
except Release.DoesNotExist:
pass
else:
return HttpResponsePermanentRedirect(
reverse(
'download:download_release_detail',
kwargs={'release_slug': release_slug},
)
)
return super().get(request, *args, **kwargs)
| true | true |
f7fabe70cfd106d437b61b034e291f1d10d4b11c | 2,738 | py | Python | tests/test_pypi_api.py | yeraydiazdiaz/pypimod | e456bc30f8c4d21158b80bd46eadd853e420aea6 | [
"Apache-2.0"
] | 2 | 2019-10-06T21:40:08.000Z | 2019-10-07T00:18:03.000Z | tests/test_pypi_api.py | yeraydiazdiaz/pypimod | e456bc30f8c4d21158b80bd46eadd853e420aea6 | [
"Apache-2.0"
] | 8 | 2019-10-26T16:12:04.000Z | 2021-12-13T20:27:11.000Z | tests/test_pypi_api.py | yeraydiazdiaz/pypimod | e456bc30f8c4d21158b80bd46eadd853e420aea6 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
import httpx
import pendulum
import pytest
from pypimod.sources import pypi_api
from pypimod import exceptions
@pytest.mark.asyncio
async def test_pypi_api(mocker, pypi_api_httpx):
    """Happy path: all summary fields are extracted from the JSON payload."""
    fake_response = mocker.Mock(spec=httpx.Response)
    fake_response.json.return_value = pypi_api_httpx
    fake_client = mocker.Mock(spec=httpx.AsyncClient)
    fake_client.get.return_value = fake_response

    summary = await pypi_api.get_project_summary("httpx", client=fake_client)

    expected_keys = {
        "name",
        "summary",
        "version",
        "author",
        "author_email",
        "project_urls",
        "last_release_datetime",
    }
    assert set(summary.keys()) == expected_keys
    assert summary["name"] == "httpx"
    info = pypi_api_httpx["info"]
    for field in ("summary", "version", "author", "author_email", "project_urls"):
        assert summary[field] == info[field]
    assert summary["last_release_datetime"] == pendulum.parse("2019-10-10T14:20:49")
@pytest.mark.asyncio
async def test_pypi_api_creates_client_if_none_is_passed(mocker, pypi_api_httpx):
    """When no client is given, an httpx.AsyncClient context is entered."""
    fake_response = mocker.Mock(spec=httpx.Response)
    fake_response.json.return_value = pypi_api_httpx
    fake_client = mocker.Mock(spec=httpx.AsyncClient)
    fake_client.get.return_value = fake_response
    patched_aenter = mocker.patch.object(
        httpx.AsyncClient, "__aenter__", return_value=fake_client,
    )

    await pypi_api.get_project_summary("httpx")

    assert patched_aenter.called
@pytest.mark.asyncio
async def test_pypi_api_invalid_release(mocker, pypi_api_httpx):
    """A version with no release files yields a None release datetime."""
    payload = deepcopy(pypi_api_httpx)
    payload["releases"][payload["info"]["version"]] = []
    fake_response = mocker.Mock(spec=httpx.Response)
    fake_response.json.return_value = payload
    fake_client = mocker.Mock(spec=httpx.AsyncClient)
    fake_client.get.return_value = fake_response

    summary = await pypi_api.get_project_summary("httpx", client=fake_client)

    assert summary["last_release_datetime"] is None
@pytest.mark.asyncio
async def test_pypi_api_http_error(mocker, pypi_api_httpx):
    """HTTP errors from PyPI are wrapped in PyPIAPIError."""
    fake_response = mocker.Mock(spec=httpx.Response, status_code=404)
    fake_response.raise_for_status.side_effect = httpx.exceptions.HTTPError(
        response=fake_response
    )
    fake_client = mocker.Mock(spec=httpx.AsyncClient)
    fake_client.get.return_value = fake_response

    with pytest.raises(exceptions.PyPIAPIError):
        await pypi_api.get_project_summary("httpx", client=fake_client)
| 35.558442 | 84 | 0.741052 | from copy import deepcopy
import httpx
import pendulum
import pytest
from pypimod.sources import pypi_api
from pypimod import exceptions
@pytest.mark.asyncio
async def test_pypi_api(mocker, pypi_api_httpx):
mock_response = mocker.Mock(spec=httpx.Response)
mock_response.json.return_value = pypi_api_httpx
mock_client = mocker.Mock(spec=httpx.AsyncClient)
mock_client.get.return_value = mock_response
summary = await pypi_api.get_project_summary("httpx", client=mock_client)
assert set(summary.keys()) == {
"name",
"summary",
"version",
"author",
"author_email",
"project_urls",
"last_release_datetime",
}
assert summary["name"] == "httpx"
assert summary["summary"] == pypi_api_httpx["info"]["summary"]
assert summary["version"] == pypi_api_httpx["info"]["version"]
assert summary["author"] == pypi_api_httpx["info"]["author"]
assert summary["author_email"] == pypi_api_httpx["info"]["author_email"]
assert summary["project_urls"] == pypi_api_httpx["info"]["project_urls"]
assert summary["last_release_datetime"] == pendulum.parse("2019-10-10T14:20:49")
@pytest.mark.asyncio
async def test_pypi_api_creates_client_if_none_is_passed(mocker, pypi_api_httpx):
mock_response = mocker.Mock(spec=httpx.Response)
mock_response.json.return_value = pypi_api_httpx
mock_client = mocker.Mock(spec=httpx.AsyncClient)
mock_client.get.return_value = mock_response
mock_aenter = mocker.patch.object(
httpx.AsyncClient, "__aenter__", return_value=mock_client,
)
_ = await pypi_api.get_project_summary("httpx")
assert mock_aenter.called
@pytest.mark.asyncio
async def test_pypi_api_invalid_release(mocker, pypi_api_httpx):
no_release_response = deepcopy(pypi_api_httpx)
no_release_response["releases"][no_release_response["info"]["version"]] = []
mock_response = mocker.Mock(spec=httpx.Response)
mock_response.json.return_value = no_release_response
mock_client = mocker.Mock(spec=httpx.AsyncClient)
mock_client.get.return_value = mock_response
summary = await pypi_api.get_project_summary("httpx", client=mock_client)
assert summary["last_release_datetime"] is None
@pytest.mark.asyncio
async def test_pypi_api_http_error(mocker, pypi_api_httpx):
mock_response = mocker.Mock(spec=httpx.Response, status_code=404)
mock_response.raise_for_status.side_effect = httpx.exceptions.HTTPError(
response=mock_response
)
mock_client = mocker.Mock(spec=httpx.AsyncClient)
mock_client.get.return_value = mock_response
with pytest.raises(exceptions.PyPIAPIError):
await pypi_api.get_project_summary("httpx", client=mock_client)
| true | true |
f7fabfe16e9c9e7dede5970aa44ab235d080576c | 26,402 | py | Python | tests/test_omegacn7500.py | alextingle/minimalmodbus | 2b78659bca938b4d07cb503413bcbe1354aa47b7 | [
"Apache-2.0"
] | null | null | null | tests/test_omegacn7500.py | alextingle/minimalmodbus | 2b78659bca938b4d07cb503413bcbe1354aa47b7 | [
"Apache-2.0"
] | null | null | null | tests/test_omegacn7500.py | alextingle/minimalmodbus | 2b78659bca938b4d07cb503413bcbe1354aa47b7 | [
"Apache-2.0"
] | 2 | 2017-01-04T22:44:03.000Z | 2019-07-03T15:07:00.000Z | #!/usr/bin/env python
#
# Copyright 2011 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg <pyhys@users.sourceforge.net>
test_omegacn7500: Unittests for omegacn7500
Uses a dummy serial port from the module :py:mod:`dummy_serial`.
"""
__author__ = "Jonas Berg"
__email__ = "pyhys@users.sourceforge.net"
__license__ = "Apache License, Version 2.0"
import sys
import unittest
import omegacn7500
import dummy_serial
class TestCalculateRegisterAddress(unittest.TestCase):
    """Tests for omegacn7500._calculateRegisterAddress().

    Verifies the register-address arithmetic for each register type, and
    that out-of-range or wrongly-typed pattern/step numbers are rejected.
    """
    # Each row: registertype, patternnumber, stepnumber, expected address.
    knownValues=[
    ('setpoint', 0, 0, 8192), # registertype, patternnumber, stepnumber, knownresult
    ('setpoint', 1, 0, 8200),
    ('time', 0, 0, 8320),
    ('time', 0, 1, 8321),
    ('time', 1, 0, 8328),
    ('actualstep', 0, None, 4160),
    ('actualstep', 0, 0, 4160),
    ('actualstep', 1, None, 4161),
    ('actualstep', 1, 0, 4161),
    ('actualstep', 1, 5, 4161), # Stepnumber should have no effect.
    ('cycles', 0, None, 4176),
    ('cycles', 1, None, 4177),
    ('linkpattern', 0, None, 4192),
    ('linkpattern', 1, None, 4193),
    ]
    def testKnownValues(self):
        for registertype, patternnumber, stepnumber, knownresult in self.knownValues:
            resultvalue = omegacn7500._calculateRegisterAddress(registertype, patternnumber, stepnumber)
            self.assertEqual(resultvalue, knownresult)
    def testWrongValues(self):
        # Unknown register type, and pattern/step numbers outside 0-7.
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 'ABC', 0, 0)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 'setpoint', -1, 0)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 'setpoint', 8, 0)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 'setpoint', 0, -1)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 'setpoint', 0, 8)
    def testWrongType(self):
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 0, 0, 0) # Note: Raises value error
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, 1.0, 0, 0)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, None, 0, 0)
        self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress, ['setpoint'], 0, 0)
        self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress, 'setpoint', 0.0, 0)
        self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress, 'setpoint', [0], 0)
        self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress, 'setpoint', None, 0)
        self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress, 'setpoint', 0, 0.0)
        self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress, 'setpoint', 0, [0])
class TestCheckPatternNumber(unittest.TestCase):
    """Tests for omegacn7500._checkPatternNumber (valid range 0-7, int only)."""

    def testKnownResults(self):
        # Valid pattern numbers must pass silently.
        for pattern in (0, 3, 7):
            omegacn7500._checkPatternNumber(pattern)

    def testWrongValue(self):
        # Integers outside 0-7 must be rejected.
        for pattern in (-1, 8, 99, 12345):
            self.assertRaises(ValueError, omegacn7500._checkPatternNumber, pattern)

    def testWrongType(self):
        # Non-integers must be rejected.
        for pattern in ('1', 1.0, [1], None):
            self.assertRaises(TypeError, omegacn7500._checkPatternNumber, pattern)
class TestCheckStepNumber(unittest.TestCase):
    """Tests for omegacn7500._checkStepNumber (valid range 0-7, int only)."""

    def testKnownResults(self):
        # Valid step numbers must pass silently.
        for step in (0, 3, 7):
            omegacn7500._checkStepNumber(step)

    def testWrongValue(self):
        # Integers outside 0-7 must be rejected.
        for step in (-1, 8, 99, 12345):
            self.assertRaises(ValueError, omegacn7500._checkStepNumber, step)

    def testWrongType(self):
        # Non-integers must be rejected.
        for step in ('1', 1.0, [1], None):
            self.assertRaises(TypeError, omegacn7500._checkStepNumber, step)
class TestCheckSetpointValue(unittest.TestCase):
    """Tests for omegacn7500._checkSetpointValue(setpointvalue, maxvalue)."""

    def testKnownResults(self):
        # In-range setpoints (int and float) must pass silently.
        omegacn7500._checkSetpointValue(900, 1000)
        omegacn7500._checkSetpointValue(900.0, 1000.0)

    def testWrongValue(self):
        # Setpoints above the maximum, or involving negatives, are rejected.
        for setpoint, maximum in ((900, 800), (900.0, 800.0), (-100, 800), (900, -800)):
            self.assertRaises(ValueError, omegacn7500._checkSetpointValue, setpoint, maximum)

    def testWrongType(self):
        # Non-numeric arguments (either position) are rejected.
        for setpoint, maximum in (
            ('900', 1000), ([900], 1000), (None, 1000),
            (900, '1000'), (900, [1000]), (900, None),
        ):
            self.assertRaises(TypeError, omegacn7500._checkSetpointValue, setpoint, maximum)
class TestCheckTimeValue(unittest.TestCase):
    """Tests for omegacn7500._checkTimeValue(timevalue, maxvalue)."""

    def testKnownResults(self):
        # In-range time values (int and float) must pass silently.
        omegacn7500._checkTimeValue(75, 99)
        omegacn7500._checkTimeValue(75.0, 99.0)

    def testWrongValue(self):
        # Times above the maximum, or involving negatives, are rejected.
        for timevalue, maximum in (
            (75, 10), (-5, 10), (-75, 10),
            (75.0, 10.0), (-5.0, 10.0), (-75.0, 10.0),
            (5, -10), (75, -10),
            (5.0, -10.0), (75.0, -10.0),
        ):
            self.assertRaises(ValueError, omegacn7500._checkTimeValue, timevalue, maximum)

    def testWrongType(self):
        # Non-numeric arguments (either position) are rejected.
        for timevalue, maximum in (
            ('75', 99), ([75], 99), (None, 99),
            (75, '99'), (75, [99]), (75, None),
        ):
            self.assertRaises(TypeError, omegacn7500._checkTimeValue, timevalue, maximum)
###########################################
# Communication using a dummy serial port #
###########################################
class TestDummyCommunication_Slave1(unittest.TestCase):
    """Testing using dummy communication, with data recorded for slaveaddress = 1

    Most of the tests are for making sure that the communication details are OK.
    For some examples of testing the methods for argument value errors or
    argument type errors, see the :meth:`.testSetControlModeWithWrongValue` and
    :meth:`.testSetControlModeWithWrongValueType` methods.
    """
    def setUp(self):
        # Prepare a dummy serial port to have proper responses
        dummy_serial.VERBOSE = False
        dummy_serial.RESPONSES = RESPONSES
        dummy_serial.DEFAULT_RESPONSE = 'NotFoundInDictionary'
        # Monkey-patch a dummy serial port for testing purpose
        omegacn7500.minimalmodbus.serial.Serial = dummy_serial.Serial
        # Initialize a (dummy) instrument
        self.instrument = omegacn7500.OmegaCN7500('DUMMYPORTNAME', 1)
        self.instrument._debug = False
    # Each test below sends a request that must match a key in RESPONSES
    # exactly (byte for byte); the canned answer is then parsed and checked.
    def testReadPv1(self):
        self.assertAlmostEqual( self.instrument.get_pv(), 24.6 )
    def testRun(self):
        self.instrument.run()
    def testStop(self):
        self.instrument.stop()
    def testIsRunning(self):
        self.assertFalse( self.instrument.is_running() )
    def testGetSetpoint(self):
        self.assertAlmostEqual( self.instrument.get_setpoint(), 100)
    def testSetSetpoint(self):
        self.instrument.set_setpoint(100)
    def testGetControlMode(self):
        self.assertEqual( self.instrument.get_control_mode(), 'PID')
    def testSetControlMode(self):
        self.instrument.set_control_mode(3)
    def testSetControlModeWithWrongValue(self):
        # Valid control modes are 0-3.
        self.assertRaises(ValueError, self.instrument.set_control_mode, 4)
        self.assertRaises(ValueError, self.instrument.set_control_mode, -1)
    def testSetControlModeWithWrongValueType(self):
        self.assertRaises(TypeError, self.instrument.set_control_mode, 3.0)
        self.assertRaises(TypeError, self.instrument.set_control_mode, [3])
        self.assertRaises(TypeError, self.instrument.set_control_mode, '3')
        self.assertRaises(TypeError, self.instrument.set_control_mode, None)
    def testGetStartPatternNo(self):
        self.assertEqual( self.instrument.get_start_pattern_no(), 2)
    def testSetStartPatternNo(self):
        self.instrument.set_start_pattern_no(2)
    def testGetPatternStepSetpoint(self):
        self.assertAlmostEqual( self.instrument.get_pattern_step_setpoint(0, 3), 333.3)
    def testSetPatternStepSetpoint(self):
        self.instrument.set_pattern_step_setpoint(0, 3, 333.3)
        self.instrument.set_pattern_step_setpoint(0, 3, 40)
    def testGetPatternStepTime(self):
        self.assertAlmostEqual( self.instrument.get_pattern_step_time(0, 3), 45)
    def testSetPatternStepTime(self):
        self.instrument.set_pattern_step_time(0, 3, 45)
        self.instrument.set_pattern_step_time(0, 3, 40)
    def testGetPatternActualStep(self):
        self.assertEqual( self.instrument.get_pattern_actual_step(0), 7 )
    def testSetPatternActualStep(self):
        self.instrument.set_pattern_actual_step(0, 7)
    def testGetPatternAdditionalCycles(self):
        self.assertEqual( self.instrument.get_pattern_additional_cycles(0), 4)
    def testSetPatternAdditionalCycles(self):
        self.instrument.set_pattern_additional_cycles(0, 4)
        self.instrument.set_pattern_additional_cycles(0, 2)
    def testGetPatternLinkToPattern(self):
        self.assertEqual( self.instrument.get_pattern_link_topattern(0), 1)
    def testSetPatternLinkToPattern(self):
        self.instrument.set_pattern_link_topattern(0, 1)
    def testGetAllPatternVariables(self): # TODO: Change this to proper assertEqual
        _print_out( '\nSlave address 1:' )
        _print_out( self.instrument.get_all_pattern_variables(0) )
    def testSetAllPatternVariables(self):
        # Arguments: pattern 0, then (setpoint, time) pairs for steps 0-7,
        # followed by actual step, additional cycles, and linked pattern.
        self.instrument.set_all_pattern_variables(0,
            10, 10,
            20, 20,
            30, 30,
            40, 40,
            50, 50,
            60, 60,
            70, 70,
            80, 80,
            7, 4, 1)
class TestDummyCommunication_Slave10(unittest.TestCase):
    """Testing using dummy communication, with data recorded for slaveaddress = 10
    """
    def setUp(self):
        # Route the instrument's serial traffic to the canned RESPONSES table.
        dummy_serial.RESPONSES = RESPONSES
        dummy_serial.DEFAULT_RESPONSE = 'NotFoundInDictionary'
        omegacn7500.minimalmodbus.serial.Serial = dummy_serial.Serial
        self.instrument = omegacn7500.OmegaCN7500('DUMMYPORTNAME', 10)
    # Same test set as for slave 1, but the request/response frames carry
    # slave address 10 (and thus different CRCs).
    def testReadPv1(self):
        self.assertAlmostEqual( self.instrument.get_pv(), 25.9 )
    def testRun(self):
        self.instrument.run()
    def testStop(self):
        self.instrument.stop()
    def testIsRunning(self):
        self.assertFalse( self.instrument.is_running() )
    def testGetSetpoint(self):
        self.assertAlmostEqual( self.instrument.get_setpoint(), 100)
    def testSetSetpoint(self):
        self.instrument.set_setpoint(100)
    def testGetControlMode(self):
        self.assertEqual( self.instrument.get_control_mode(), 'PID')
    def testSetControlMode(self):
        self.instrument.set_control_mode(3)
    def testGetStartPatternNo(self):
        self.assertEqual( self.instrument.get_start_pattern_no(), 2)
    def testSetStartPatternNo(self):
        self.instrument.set_start_pattern_no(2)
    def testGetPatternStepSetpoint(self):
        self.assertEqual( self.instrument.get_pattern_step_setpoint(0, 3), 333.3)
    def testSetPatternStepSetpoint(self):
        self.instrument.set_pattern_step_setpoint(0, 3, 333.3)
        self.instrument.set_pattern_step_setpoint(0, 3, 40)
    def testGetPatternStepTime(self):
        self.assertAlmostEqual( self.instrument.get_pattern_step_time(0, 3), 45)
    def testSetPatternStepTime(self):
        self.instrument.set_pattern_step_time(0, 3, 45)
        self.instrument.set_pattern_step_time(0, 3, 40)
    def testGetPatternActualStep(self):
        self.assertEqual( self.instrument.get_pattern_actual_step(0), 7)
    def testSetPatternActualStep(self):
        self.instrument.set_pattern_actual_step(0, 7)
    def testGetPatternAdditionalCycles(self):
        self.assertEqual( self.instrument.get_pattern_additional_cycles(0), 4)
    def testSetPatternAdditionalCycles(self):
        self.instrument.set_pattern_additional_cycles(0, 4)
        self.instrument.set_pattern_additional_cycles(0, 2)
    def testGetPatternLinkToPattern(self):
        self.assertEqual( self.instrument.get_pattern_link_topattern(0), 1)
    def testSetPatternLinkToPattern(self):
        self.instrument.set_pattern_link_topattern(0, 1)
    def testGetAllPatternVariables(self): # TODO: Change this to proper assertEqual
        _print_out( '\nSlave address 10:' )
        _print_out( self.instrument.get_all_pattern_variables(0) )
    def testSetAllPatternVariables(self):
        # Arguments: pattern 0, then (setpoint, time) pairs for steps 0-7,
        # followed by actual step, additional cycles, and linked pattern.
        self.instrument.set_all_pattern_variables(0,
            10, 10,
            20, 20,
            30, 30,
            40, 40,
            50, 50,
            60, 60,
            70, 70,
            80, 80,
            7, 4, 1)
RESPONSES = {}
"""A dictionary of respones from a dummy Omega CN7500 instrument.
The key is the message (string) sent to the serial port, and the item is the response (string)
from the dummy serial port.
"""
## Recorded data from OmegaCN7500 ##
####################################
# Slave address 1, get_pv()
RESPONSES['\x01\x03\x10\x00\x00\x01\x80\xca'] = '\x01\x03\x02\x00\xf68\x02'
# Slave address 1, run()
RESPONSES['\x01\x05\x08\x14\xff\x00\xce^'] = '\x01\x05\x08\x14\xff\x00\xce^'
# Slave address 1, stop()
RESPONSES['\x01\x05\x08\x14\x00\x00\x8f\xae'] = '\x01\x05\x08\x14\x00\x00\x8f\xae'
# Slave address 1, is_running()
RESPONSES['\x01\x02\x08\x14\x00\x01\xfb\xae'] = '\x01\x02\x01\x00\xa1\x88'
# Slave address 1, get_setpoint()
RESPONSES['\x01\x03\x10\x01\x00\x01\xd1\n'] = '\x01\x03\x02\x03\xe8\xb8\xfa'
# Slave address 1, set_setpoint()
RESPONSES['\x01\x10\x10\x01\x00\x01\x02\x03\xe8\xb6\xfe'] = '\x01\x10\x10\x01\x00\x01T\xc9'
# Slave address 1, get_control_mode()
RESPONSES['\x01\x03\x10\x05\x00\x01\x90\xcb'] = '\x01\x03\x02\x00\x00\xb8D'
#RESPONSES['\x01\x03\x10\x05\x00\x01\x90\xcb'] = '\x01\x03\x02\x00\x09xB' # Use this for testing wrong controlmode
# Slave address 1, set_control_mode()
RESPONSES['\x01\x10\x10\x05\x00\x01\x02\x00\x03\xf7\xc5'] = '\x01\x10\x10\x05\x00\x01\x15\x08'
# Slave address 1, get_start_pattern_no()
RESPONSES['\x01\x03\x100\x00\x01\x80\xc5'] = '\x01\x03\x02\x00\x029\x85'
# Slave address 1, set_start_pattern_no()
RESPONSES['\x01\x10\x100\x00\x01\x02\x00\x023\xa0'] = '\x01\x10\x100\x00\x01\x05\x06'
# Slave address 1, set_pattern_step_setpoint() Pattern 0, step 3, value 333.3. See also below.
RESPONSES['\x01\x10 \x03\x00\x01\x02\r\x05C2'] = '\x01\x10 \x03\x00\x01\xfa\t'
# Slave address 1, set_pattern_step_time() Pattern 0, step 3, value 45. See also below.
RESPONSES['\x01\x10 \x83\x00\x01\x02\x00-X|'] = '\x01\x10 \x83\x00\x01\xfb\xe1'
# Slave address 1, set_pattern_additional_cycles() Pattern 0, value 4. See also below.
RESPONSES['\x01\x10\x10P\x00\x01\x02\x00\x04\xba\x02'] = '\x01\x10\x10P\x00\x01\x05\x18'
# Slave address 1, get_all_pattern_variables()
# --- Valid for pattern 0 ---
# SP0: 10 Time0: 10
# SP1: 20 Time1: 20
# SP2: 30 Time2: 30
# SP3: 333 Time3: 45
# SP4: 50 Time4: 50
# SP5: 60 Time5: 60
# SP6: 70 Time6: 70
# SP7: 80 Time7: 80
# Actual step: 7
# Add'l cycles: 4
# Linked pattern: 1
RESPONSES['\x01\x03 \x00\x00\x01\x8f\xca'] = '\x01\x03\x02\x00d\xb9\xaf' # SP0
RESPONSES['\x01\x03 \x01\x00\x01\xde\n'] = '\x01\x03\x02\x00\xc8\xb9\xd2'
RESPONSES['\x01\x03 \x02\x00\x01.\n'] = '\x01\x03\x02\x01,\xb8\t'
RESPONSES['\x01\x03 \x03\x00\x01\x7f\xca'] = '\x01\x03\x02\r\x05|\xd7'
RESPONSES['\x01\x03 \x04\x00\x01\xce\x0b'] = '\x01\x03\x02\x01\xf4\xb8S'
RESPONSES['\x01\x03 \x05\x00\x01\x9f\xcb'] = '\x01\x03\x02\x02X\xb8\xde'
RESPONSES['\x01\x03 \x06\x00\x01o\xcb'] = '\x01\x03\x02\x02\xbc\xb8\x95'
RESPONSES['\x01\x03 \x07\x00\x01>\x0b'] = '\x01\x03\x02\x03 \xb9l'
RESPONSES['\x01\x03 \x80\x00\x01\x8e"'] = '\x01\x03\x02\x00\n8C' # Time0
RESPONSES['\x01\x03 \x81\x00\x01\xdf\xe2'] = '\x01\x03\x02\x00\x14\xb8K'
RESPONSES['\x01\x03 \x82\x00\x01/\xe2'] = '\x01\x03\x02\x00\x1e8L'
RESPONSES['\x01\x03 \x83\x00\x01~"'] = '\x01\x03\x02\x00-xY'
RESPONSES['\x01\x03 \x84\x00\x01\xcf\xe3'] = '\x01\x03\x02\x0029\x91'
RESPONSES['\x01\x03 \x85\x00\x01\x9e#'] = '\x01\x03\x02\x00<\xb8U'
RESPONSES['\x01\x03 \x86\x00\x01n#'] = '\x01\x03\x02\x00F9\xb6'
RESPONSES['\x01\x03 \x87\x00\x01?\xe3'] = '\x01\x03\x02\x00P\xb8x'
RESPONSES['\x01\x03\x10@\x00\x01\x81\x1e'] = '\x01\x03\x02\x00\x07\xf9\x86' # Actual step
RESPONSES['\x01\x03\x10P\x00\x01\x80\xdb'] = '\x01\x03\x02\x00\x04\xb9\x87' # Cycles
RESPONSES['\x01\x03\x10`\x00\x01\x80\xd4'] = '\x01\x03\x02\x00\x01y\x84' # Linked pattern
# Slave address 1, set_all_pattern_variables()
# --- Valid for pattern 0 ---
RESPONSES['\x01\x10 \x00\x00\x01\x02\x00d\x86y'] = '\x01\x10 \x00\x00\x01\n\t' # SP0
RESPONSES['\x01\x10 \x01\x00\x01\x02\x00\xc8\x87\xd5'] = '\x01\x10 \x01\x00\x01[\xc9'
RESPONSES['\x01\x10 \x02\x00\x01\x02\x01,\x86='] = '\x01\x10 \x02\x00\x01\xab\xc9'
RESPONSES['\x01\x10 \x03\x00\x01\x02\x01\x90\x86]'] = '\x01\x10 \x03\x00\x01\xfa\t' # SP3, value 40
RESPONSES['\x01\x10 \x04\x00\x01\x02\x01\xf4\x86\x01'] = '\x01\x10 \x04\x00\x01K\xc8'
RESPONSES['\x01\x10 \x05\x00\x01\x02\x02X\x87]'] = '\x01\x10 \x05\x00\x01\x1a\x08'
RESPONSES['\x01\x10 \x06\x00\x01\x02\x02\xbc\x87%'] = '\x01\x10 \x06\x00\x01\xea\x08'
RESPONSES['\x01\x10 \x07\x00\x01\x02\x03 \x87\r'] = '\x01\x10 \x07\x00\x01\xbb\xc8'
RESPONSES['\x01\x10 \x80\x00\x01\x02\x00\n\x18U'] = '\x01\x10 \x80\x00\x01\x0b\xe1' # Time0
RESPONSES['\x01\x10 \x81\x00\x01\x02\x00\x14\x99\x8c'] = '\x01\x10 \x81\x00\x01Z!'
RESPONSES['\x01\x10 \x82\x00\x01\x02\x00\x1e\x19\xb8'] = '\x01\x10 \x82\x00\x01\xaa!'
RESPONSES['\x01\x10 \x83\x00\x01\x02\x00(\x98\x7f'] = '\x01\x10 \x83\x00\x01\xfb\xe1' # Time3, value 40
RESPONSES['\x01\x10 \x84\x00\x01\x02\x002\x18\x03'] = '\x01\x10 \x84\x00\x01J '
RESPONSES['\x01\x10 \x85\x00\x01\x02\x00<\x98\x16'] = '\x01\x10 \x85\x00\x01\x1b\xe0'
RESPONSES['\x01\x10 \x86\x00\x01\x02\x00F\x19\xc6'] = '\x01\x10 \x86\x00\x01\xeb\xe0'
RESPONSES['\x01\x10 \x87\x00\x01\x02\x00P\x99\xd9'] = '\x01\x10 \x87\x00\x01\xba '
RESPONSES['\x01\x10\x10@\x00\x01\x02\x00\x07\xf8\x93'] = '\x01\x10\x10@\x00\x01\x04\xdd' # Actual step
RESPONSES['\x01\x10\x10P\x00\x01\x02\x00\x02:\x00'] = '\x01\x10\x10P\x00\x01\x05\x18' # Cycles, value 2
RESPONSES['\x01\x10\x10`\x00\x01\x02\x00\x01\x7f\xf1'] = '\x01\x10\x10`\x00\x01\x05\x17' # Linked pattern
# Slave address 10, get_pv()
RESPONSES['\n\x03\x10\x00\x00\x01\x81\xb1'] = '\n\x03\x02\x01\x03\\\x14'
# Slave address 10, run()
RESPONSES['\n\x05\x08\x14\xff\x00\xcf%'] = '\n\x05\x08\x14\xff\x00\xcf%'
# Slave address 10, stop()
RESPONSES['\n\x05\x08\x14\x00\x00\x8e\xd5'] = '\n\x05\x08\x14\x00\x00\x8e\xd5'
# Slave address 10, is_running()
RESPONSES['\n\x02\x08\x14\x00\x01\xfa\xd5'] = '\n\x02\x01\x00\xa3\xac'
# Slave address 10, get_setpoint()
RESPONSES['\n\x03\x10\x01\x00\x01\xd0q'] = '\n\x03\x02\x03\xe8\x1d;'
# Slave address 10, set_setpoint()
RESPONSES['\n\x10\x10\x01\x00\x01\x02\x03\xe8\xc5\xce'] = '\n\x10\x10\x01\x00\x01U\xb2'
# Slave address 10, get_control_mode()
RESPONSES['\n\x03\x10\x05\x00\x01\x91\xb0'] = '\n\x03\x02\x00\x00\x1d\x85'
# Slave address 10, set_control_mode()
RESPONSES['\n\x10\x10\x05\x00\x01\x02\x00\x03\x84\xf5'] = '\n\x10\x10\x05\x00\x01\x14s'
# Slave address 10, get_start_pattern_no()
RESPONSES['\n\x03\x100\x00\x01\x81\xbe'] = '\n\x03\x02\x00\x02\x9cD'
# Slave address 10, set_start_pattern_no()
RESPONSES['\n\x10\x100\x00\x01\x02\x00\x02@\x90'] = '\n\x10\x100\x00\x01\x04}'
# Slave address 10, set_pattern_step_setpoint() Pattern 0, step 3, value 333.3. See also below.
RESPONSES['\n\x10 \x03\x00\x01\x02\r\x050\x02'] = '\n\x10 \x03\x00\x01\xfbr'
# Slave address 10, set_pattern_step_time() Pattern 0, step 3, value 45. See also below.
RESPONSES['\n\x10 \x83\x00\x01\x02\x00-+L'] = '\n\x10 \x83\x00\x01\xfa\x9a'
# Slave address 10, set_pattern_additional_cycles() Pattern 0, value 4. See also below.
RESPONSES['\n\x10\x10P\x00\x01\x02\x00\x04\xc92'] = '\n\x10\x10P\x00\x01\x04c'
# Slave address 10, get_all_pattern_variables()
# --- Valid for pattern 0 ---
# SP0: 10 Time0: 10
# SP1: 20 Time1: 20
# SP2: 30 Time2: 30
# SP3: 333 Time3: 45
# SP4: 50 Time4: 50
# SP5: 60 Time5: 60
# SP6: 70 Time6: 70
# SP7: 80 Time7: 80
# Actual step: 7
# Add'l cycles: 4
# Linked pattern: 1
RESPONSES['\n\x03 \x00\x00\x01\x8e\xb1'] = '\n\x03\x02\x00d\x1cn' # SP0
RESPONSES['\n\x03 \x01\x00\x01\xdfq'] = '\n\x03\x02\x00\xc8\x1c\x13'
RESPONSES['\n\x03 \x02\x00\x01/q'] = '\n\x03\x02\x01,\x1d\xc8'
RESPONSES['\n\x03 \x03\x00\x01~\xb1'] = '\n\x03\x02\r\x05\xd9\x16'
RESPONSES['\n\x03 \x04\x00\x01\xcfp'] = '\n\x03\x02\x01\xf4\x1d\x92'
RESPONSES['\n\x03 \x05\x00\x01\x9e\xb0'] = '\n\x03\x02\x02X\x1d\x1f'
RESPONSES['\n\x03 \x06\x00\x01n\xb0'] = '\n\x03\x02\x02\xbc\x1dT'
RESPONSES['\n\x03 \x07\x00\x01?p'] = '\n\x03\x02\x03 \x1c\xad'
RESPONSES['\n\x03 \x80\x00\x01\x8fY'] = '\n\x03\x02\x00\n\x9d\x82' # Time0
RESPONSES['\n\x03 \x81\x00\x01\xde\x99'] = '\n\x03\x02\x00\x14\x1d\x8a'
RESPONSES['\n\x03 \x82\x00\x01.\x99'] = '\n\x03\x02\x00\x1e\x9d\x8d'
RESPONSES['\n\x03 \x83\x00\x01\x7fY'] = '\n\x03\x02\x00-\xdd\x98'
RESPONSES['\n\x03 \x84\x00\x01\xce\x98'] = '\n\x03\x02\x002\x9cP'
RESPONSES['\n\x03 \x85\x00\x01\x9fX'] = '\n\x03\x02\x00<\x1d\x94'
RESPONSES['\n\x03 \x86\x00\x01oX'] = '\n\x03\x02\x00F\x9cw'
RESPONSES['\n\x03 \x87\x00\x01>\x98'] = '\n\x03\x02\x00P\x1d\xb9'
RESPONSES['\n\x03\x10@\x00\x01\x80e'] = '\n\x03\x02\x00\x07\\G' # Actual step
RESPONSES['\n\x03\x10P\x00\x01\x81\xa0'] = '\n\x03\x02\x00\x04\x1cF' # Cycles
RESPONSES['\n\x03\x10`\x00\x01\x81\xaf'] = '\n\x03\x02\x00\x01\xdcE' # Linked pattern
# Slave address 10, set_all_pattern_variables()
# --- Valid for pattern 0 ---
RESPONSES['\n\x10 \x00\x00\x01\x02\x00d\xf5I'] = '\n\x10 \x00\x00\x01\x0br' # SP0
RESPONSES['\n\x10 \x01\x00\x01\x02\x00\xc8\xf4\xe5'] = '\n\x10 \x01\x00\x01Z\xb2'
RESPONSES['\n\x10 \x02\x00\x01\x02\x01,\xf5\r'] = '\n\x10 \x02\x00\x01\xaa\xb2'
RESPONSES['\n\x10 \x03\x00\x01\x02\x01\x90\xf5m'] = '\n\x10 \x03\x00\x01\xfbr' # SP3, value 40
RESPONSES['\n\x10 \x04\x00\x01\x02\x01\xf4\xf51'] = '\n\x10 \x04\x00\x01J\xb3'
RESPONSES['\n\x10 \x05\x00\x01\x02\x02X\xf4m'] = '\n\x10 \x05\x00\x01\x1bs'
RESPONSES['\n\x10 \x06\x00\x01\x02\x02\xbc\xf4\x15'] = '\n\x10 \x06\x00\x01\xebs'
RESPONSES['\n\x10 \x07\x00\x01\x02\x03 \xf4='] = '\n\x10 \x07\x00\x01\xba\xb3'
RESPONSES['\n\x10 \x80\x00\x01\x02\x00\nke'] = '\n\x10 \x80\x00\x01\n\x9a' # Time0
RESPONSES['\n\x10 \x81\x00\x01\x02\x00\x14\xea\xbc'] = '\n\x10 \x81\x00\x01[Z'
RESPONSES['\n\x10 \x82\x00\x01\x02\x00\x1ej\x88'] = '\n\x10 \x82\x00\x01\xabZ'
RESPONSES['\n\x10 \x83\x00\x01\x02\x00(\xebO'] = '\n\x10 \x83\x00\x01\xfa\x9a' # Time3, value 40
RESPONSES['\n\x10 \x84\x00\x01\x02\x002k3'] = '\n\x10 \x84\x00\x01K['
RESPONSES['\n\x10 \x85\x00\x01\x02\x00<\xeb&'] = '\n\x10 \x85\x00\x01\x1a\x9b'
RESPONSES['\n\x10 \x86\x00\x01\x02\x00Fj\xf6'] = '\n\x10 \x86\x00\x01\xea\x9b'
RESPONSES['\n\x10 \x87\x00\x01\x02\x00P\xea\xe9'] = '\n\x10 \x87\x00\x01\xbb['
RESPONSES['\n\x10\x10@\x00\x01\x02\x00\x07\x8b\xa3'] = '\n\x10\x10@\x00\x01\x05\xa6' # Actual step
RESPONSES['\n\x10\x10P\x00\x01\x02\x00\x02I0'] = '\n\x10\x10P\x00\x01\x04c' # Cycles, value 2
RESPONSES['\n\x10\x10`\x00\x01\x02\x00\x01\x0c\xc1'] = '\n\x10\x10`\x00\x01\x04l' # Linked pattern
def _print_out( inputstring ):
"""Print the inputstring. To make it compatible with Python2 and Python3."""
sys.stdout.write(inputstring + '\n')
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module when it is
    # executed as a script (e.g. `python test_omegacn7500.py`).
    unittest.main()
| 44.076795 | 124 | 0.668737 |
__author__ = "Jonas Berg"
__email__ = "pyhys@users.sourceforge.net"
__license__ = "Apache License, Version 2.0"
import sys
import unittest
import omegacn7500
import dummy_serial
class TestCalculateRegisterAddress(unittest.TestCase):
    """Tests for omegacn7500._calculateRegisterAddress().

    Checks the mapping from (registertype, patternnumber, stepnumber)
    to a Modbus register address, plus rejection of bad values/types.
    """

    # (registertype, patternnumber, stepnumber, expected register address)
    knownValues=[
    ('setpoint', 0, 0, 8192),
    ('setpoint', 1, 0, 8200),
    ('time', 0, 0, 8320),
    ('time', 0, 1, 8321),
    ('time', 1, 0, 8328),
    ('actualstep', 0, None, 4160),
    ('actualstep', 0, 0, 4160),
    ('actualstep', 1, None, 4161),
    ('actualstep', 1, 0, 4161),
    ('actualstep', 1, 5, 4161),
    ('cycles', 0, None, 4176),
    ('cycles', 1, None, 4177),
    ('linkpattern', 0, None, 4192),
    ('linkpattern', 1, None, 4193),
    ]

    def testKnownValues(self):
        """Each known input triple must map to the documented address."""
        for registertype, patternnumber, stepnumber, expected in self.knownValues:
            calculated = omegacn7500._calculateRegisterAddress(
                registertype, patternnumber, stepnumber)
            self.assertEqual(calculated, expected)

    def testWrongValues(self):
        """Unknown register types and out-of-range numbers raise ValueError."""
        bad_inputs = [
            ('ABC',      0,  0),
            ('setpoint', -1, 0),
            ('setpoint', 8,  0),
            ('setpoint', 0, -1),
            ('setpoint', 0,  8),
        ]
        for registertype, patternnumber, stepnumber in bad_inputs:
            self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress,
                              registertype, patternnumber, stepnumber)

    def testWrongType(self):
        """Non-string register types and non-int numbers raise errors."""
        # A non-string registertype is reported as a ValueError.
        for wrong_registertype in [0, 1.0, None, ['setpoint']]:
            self.assertRaises(ValueError, omegacn7500._calculateRegisterAddress,
                              wrong_registertype, 0, 0)
        # A non-integer patternnumber is a TypeError.
        for wrong_patternnumber in [0.0, [0], None]:
            self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress,
                              'setpoint', wrong_patternnumber, 0)
        # A non-integer stepnumber is a TypeError.
        for wrong_stepnumber in [0.0, [0]]:
            self.assertRaises(TypeError, omegacn7500._calculateRegisterAddress,
                              'setpoint', 0, wrong_stepnumber)
class TestCheckPatternNumber(unittest.TestCase):
    """Tests for omegacn7500._checkPatternNumber() (valid range 0-7)."""

    def testKnownResults(self):
        """Valid pattern numbers must pass silently."""
        for patternnumber in (0, 3, 7):
            omegacn7500._checkPatternNumber(patternnumber)

    def testWrongValue(self):
        """Out-of-range pattern numbers must raise ValueError."""
        for patternnumber in (-1, 8, 99, 12345):
            self.assertRaises(ValueError, omegacn7500._checkPatternNumber, patternnumber)

    def testWrongType(self):
        """Non-integer pattern numbers must raise TypeError."""
        for patternnumber in ('1', 1.0, [1], None):
            self.assertRaises(TypeError, omegacn7500._checkPatternNumber, patternnumber)
class TestCheckStepNumber(unittest.TestCase):
    """Tests for omegacn7500._checkStepNumber() (valid range 0-7)."""

    def testKnownResults(self):
        """Valid step numbers must pass silently."""
        for stepnumber in (0, 3, 7):
            omegacn7500._checkStepNumber(stepnumber)

    def testWrongValue(self):
        """Out-of-range step numbers must raise ValueError."""
        for stepnumber in (-1, 8, 99, 12345):
            self.assertRaises(ValueError, omegacn7500._checkStepNumber, stepnumber)

    def testWrongType(self):
        """Non-integer step numbers must raise TypeError."""
        for stepnumber in ('1', 1.0, [1], None):
            self.assertRaises(TypeError, omegacn7500._checkStepNumber, stepnumber)
class TestCheckSetpointValue(unittest.TestCase):
    """Tests for omegacn7500._checkSetpointValue(value, maxvalue)."""

    def testKnownResults(self):
        """Setpoints within [0, maxvalue] must pass silently (int and float)."""
        omegacn7500._checkSetpointValue(900, 1000)
        omegacn7500._checkSetpointValue(900.0, 1000.0)

    def testWrongValue(self):
        """Setpoints above maxvalue, negative, or with a negative limit raise ValueError."""
        bad_pairs = [(900, 800), (900.0, 800.0), (-100, 800), (900, -800)]
        for setpointvalue, maxvalue in bad_pairs:
            self.assertRaises(ValueError, omegacn7500._checkSetpointValue,
                              setpointvalue, maxvalue)

    def testWrongType(self):
        """Non-numeric setpoint or limit raises TypeError."""
        bad_pairs = [
            ('900', 1000), ([900], 1000), (None, 1000),
            (900, '1000'), (900, [1000]), (900, None),
        ]
        for setpointvalue, maxvalue in bad_pairs:
            self.assertRaises(TypeError, omegacn7500._checkSetpointValue,
                              setpointvalue, maxvalue)
class TestCheckTimeValue(unittest.TestCase):
    """Tests for omegacn7500._checkTimeValue(value, maxvalue)."""
    def testKnownResults(self):
        """Time values within [0, maxvalue] must pass silently (int and float)."""
        omegacn7500._checkTimeValue(75, 99)
        omegacn7500._checkTimeValue(75.0, 99.0)
    def testWrongValue(self):
        """Values above maxvalue, negative values, or negative limits raise ValueError."""
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 75, 10)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, -5, 10)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, -75, 10)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 75.0, 10.0)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, -5.0, 10.0)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, -75.0, 10.0)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 5, -10)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 75, -10)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 5.0, -10.0)
        self.assertRaises(ValueError, omegacn7500._checkTimeValue, 75.0, -10.0)
    def testWrongType(self):
        """Non-numeric time value or limit raises TypeError."""
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, '75', 99)
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, [75], 99)
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, None, 99)
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, 75, '99')
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, 75, [99])
        self.assertRaises(TypeError, omegacn7500._checkTimeValue, 75, None)
artPatternNo(self):
self.instrument.set_start_pattern_no(2)
    def testGetPatternStepSetpoint(self):
        """Read back the setpoint for pattern 0, step 3 (recorded response: 333.3)."""
        self.assertAlmostEqual( self.instrument.get_pattern_step_setpoint(0, 3), 333.3)
    def testSetPatternStepSetpoint(self):
        """Write two setpoints (333.3 then 40) for pattern 0, step 3."""
        self.instrument.set_pattern_step_setpoint(0, 3, 333.3)
        self.instrument.set_pattern_step_setpoint(0, 3, 40)
    def testGetPatternStepTime(self):
        """Read back the step time for pattern 0, step 3 (recorded response: 45)."""
        self.assertAlmostEqual( self.instrument.get_pattern_step_time(0, 3), 45)
    def testSetPatternStepTime(self):
        """Write two step times (45 then 40) for pattern 0, step 3."""
        self.instrument.set_pattern_step_time(0, 3, 45)
        self.instrument.set_pattern_step_time(0, 3, 40)
    def testGetPatternActualStep(self):
        """Read back the actual step for pattern 0 (recorded response: 7)."""
        self.assertEqual( self.instrument.get_pattern_actual_step(0), 7 )
    def testSetPatternActualStep(self):
        """Write actual step 7 for pattern 0."""
        self.instrument.set_pattern_actual_step(0, 7)
    def testGetPatternAdditionalCycles(self):
        """Read back the additional-cycles count for pattern 0 (recorded response: 4)."""
        self.assertEqual( self.instrument.get_pattern_additional_cycles(0), 4)
    def testSetPatternAdditionalCycles(self):
        """Write additional-cycles counts (4 then 2) for pattern 0."""
        self.instrument.set_pattern_additional_cycles(0, 4)
        self.instrument.set_pattern_additional_cycles(0, 2)
    def testGetPatternLinkToPattern(self):
        """Read back the linked-pattern number for pattern 0 (recorded response: 1)."""
        self.assertEqual( self.instrument.get_pattern_link_topattern(0), 1)
    def testSetPatternLinkToPattern(self):
        """Link pattern 0 to pattern 1."""
        self.instrument.set_pattern_link_topattern(0, 1)
    def testGetAllPatternVariables(self):
        """Fetch and print every variable of pattern 0 (smoke test, no assertions)."""
        _print_out( '\nSlave address 1:' )
        _print_out( self.instrument.get_all_pattern_variables(0) )
    def testSetAllPatternVariables(self):
        """Write all variables of pattern 0 in one call.

        Arguments are (setpoint, time) pairs for steps 0-7, followed by
        actual step, additional cycles and the linked pattern number.
        """
        self.instrument.set_all_pattern_variables(0,
            10, 10,
            20, 20,
            30, 30,
            40, 40,
            50, 50,
            60, 60,
            70, 70,
            80, 80,
            7, 4, 1)
class TestDummyCommunication_Slave10(unittest.TestCase):
    """Exercise the OmegaCN7500 driver against canned responses for slave address 10.

    No hardware is used: setUp() replaces the serial port with a dummy
    implementation, so every Modbus request issued by a test must match a
    key in the RESPONSES dict and receives the recorded answer.
    """
    def setUp(self):
        # Install the recorded request->response table and a sentinel for
        # unknown requests, then monkey-patch the serial class before
        # constructing the instrument (port name is never opened for real).
        dummy_serial.RESPONSES = RESPONSES
        dummy_serial.DEFAULT_RESPONSE = 'NotFoundInDictionary'
        omegacn7500.minimalmodbus.serial.Serial = dummy_serial.Serial
        self.instrument = omegacn7500.OmegaCN7500('DUMMYPORTNAME', 10)
    def testReadPv1(self):
        """Process value from the recorded response is 25.9."""
        self.assertAlmostEqual( self.instrument.get_pv(), 25.9 )
    def testRun(self):
        self.instrument.run()
    def testStop(self):
        self.instrument.stop()
    def testIsRunning(self):
        self.assertFalse( self.instrument.is_running() )
    def testGetSetpoint(self):
        self.assertAlmostEqual( self.instrument.get_setpoint(), 100)
    def testSetSetpoint(self):
        self.instrument.set_setpoint(100)
    def testGetControlMode(self):
        self.assertEqual( self.instrument.get_control_mode(), 'PID')
    def testSetControlMode(self):
        self.instrument.set_control_mode(3)
    def testGetStartPatternNo(self):
        self.assertEqual( self.instrument.get_start_pattern_no(), 2)
    def testSetStartPatternNo(self):
        self.instrument.set_start_pattern_no(2)
    def testGetPatternStepSetpoint(self):
        self.assertEqual( self.instrument.get_pattern_step_setpoint(0, 3), 333.3)
    def testSetPatternStepSetpoint(self):
        self.instrument.set_pattern_step_setpoint(0, 3, 333.3)
        self.instrument.set_pattern_step_setpoint(0, 3, 40)
    def testGetPatternStepTime(self):
        self.assertAlmostEqual( self.instrument.get_pattern_step_time(0, 3), 45)
    def testSetPatternStepTime(self):
        self.instrument.set_pattern_step_time(0, 3, 45)
        self.instrument.set_pattern_step_time(0, 3, 40)
    def testGetPatternActualStep(self):
        self.assertEqual( self.instrument.get_pattern_actual_step(0), 7)
    def testSetPatternActualStep(self):
        self.instrument.set_pattern_actual_step(0, 7)
    def testGetPatternAdditionalCycles(self):
        self.assertEqual( self.instrument.get_pattern_additional_cycles(0), 4)
    def testSetPatternAdditionalCycles(self):
        self.instrument.set_pattern_additional_cycles(0, 4)
        self.instrument.set_pattern_additional_cycles(0, 2)
    def testGetPatternLinkToPattern(self):
        self.assertEqual( self.instrument.get_pattern_link_topattern(0), 1)
    def testSetPatternLinkToPattern(self):
        self.instrument.set_pattern_link_topattern(0, 1)
    def testGetAllPatternVariables(self):
        """Fetch and print every variable of pattern 0 (smoke test, no assertions)."""
        _print_out( '\nSlave address 10:' )
        _print_out( self.instrument.get_all_pattern_variables(0) )
    def testSetAllPatternVariables(self):
        """Write all variables of pattern 0: (setpoint, time) pairs for
        steps 0-7, then actual step, additional cycles, linked pattern."""
        self.instrument.set_all_pattern_variables(0,
            10, 10,
            20, 20,
            30, 30,
            40, 40,
            50, 50,
            60, 60,
            70, 70,
            80, 80,
            7, 4, 1)
RESPONSES = {}
03\x100\x00\x01\x80\xc5'] = '\x01\x03\x02\x00\x029\x85'
RESPONSES['\x01\x10\x100\x00\x01\x02\x00\x023\xa0'] = '\x01\x10\x100\x00\x01\x05\x06'
RESPONSES['\x01\x10 \x03\x00\x01\x02\r\x05C2'] = '\x01\x10 \x03\x00\x01\xfa\t'
RESPONSES['\x01\x10 \x83\x00\x01\x02\x00-X|'] = '\x01\x10 \x83\x00\x01\xfb\xe1'
RESPONSES['\x01\x10\x10P\x00\x01\x02\x00\x04\xba\x02'] = '\x01\x10\x10P\x00\x01\x05\x18'
# Linked pattern: 1
RESPONSES['\x01\x03 \x00\x00\x01\x8f\xca'] = '\x01\x03\x02\x00d\xb9\xaf' # SP0
RESPONSES['\x01\x03 \x01\x00\x01\xde\n'] = '\x01\x03\x02\x00\xc8\xb9\xd2'
RESPONSES['\x01\x03 \x02\x00\x01.\n'] = '\x01\x03\x02\x01,\xb8\t'
RESPONSES['\x01\x03 \x03\x00\x01\x7f\xca'] = '\x01\x03\x02\r\x05|\xd7'
RESPONSES['\x01\x03 \x04\x00\x01\xce\x0b'] = '\x01\x03\x02\x01\xf4\xb8S'
RESPONSES['\x01\x03 \x05\x00\x01\x9f\xcb'] = '\x01\x03\x02\x02X\xb8\xde'
RESPONSES['\x01\x03 \x06\x00\x01o\xcb'] = '\x01\x03\x02\x02\xbc\xb8\x95'
RESPONSES['\x01\x03 \x07\x00\x01>\x0b'] = '\x01\x03\x02\x03 \xb9l'
RESPONSES['\x01\x03 \x80\x00\x01\x8e"'] = '\x01\x03\x02\x00\n8C' # Time0
RESPONSES['\x01\x03 \x81\x00\x01\xdf\xe2'] = '\x01\x03\x02\x00\x14\xb8K'
RESPONSES['\x01\x03 \x82\x00\x01/\xe2'] = '\x01\x03\x02\x00\x1e8L'
RESPONSES['\x01\x03 \x83\x00\x01~"'] = '\x01\x03\x02\x00-xY'
RESPONSES['\x01\x03 \x84\x00\x01\xcf\xe3'] = '\x01\x03\x02\x0029\x91'
RESPONSES['\x01\x03 \x85\x00\x01\x9e
RESPONSES['\x01\x03 \x86\x00\x01n
RESPONSES['\x01\x03 \x87\x00\x01?\xe3'] = '\x01\x03\x02\x00P\xb8x'
RESPONSES['\x01\x03\x10@\x00\x01\x81\x1e'] = '\x01\x03\x02\x00\x07\xf9\x86' # Actual step
RESPONSES['\x01\x03\x10P\x00\x01\x80\xdb'] = '\x01\x03\x02\x00\x04\xb9\x87' # Cycles
RESPONSES['\x01\x03\x10`\x00\x01\x80\xd4'] = '\x01\x03\x02\x00\x01y\x84' # Linked pattern
# Slave address 1, set_all_pattern_variables()
# --- Valid for pattern 0 ---
RESPONSES['\x01\x10 \x00\x00\x01\x02\x00d\x86y'] = '\x01\x10 \x00\x00\x01\n\t' # SP0
RESPONSES['\x01\x10 \x01\x00\x01\x02\x00\xc8\x87\xd5'] = '\x01\x10 \x01\x00\x01[\xc9'
RESPONSES['\x01\x10 \x02\x00\x01\x02\x01,\x86='] = '\x01\x10 \x02\x00\x01\xab\xc9'
RESPONSES['\x01\x10 \x03\x00\x01\x02\x01\x90\x86]'] = '\x01\x10 \x03\x00\x01\xfa\t' # SP3, value 40
RESPONSES['\x01\x10 \x04\x00\x01\x02\x01\xf4\x86\x01'] = '\x01\x10 \x04\x00\x01K\xc8'
RESPONSES['\x01\x10 \x05\x00\x01\x02\x02X\x87]'] = '\x01\x10 \x05\x00\x01\x1a\x08'
RESPONSES['\x01\x10 \x06\x00\x01\x02\x02\xbc\x87%'] = '\x01\x10 \x06\x00\x01\xea\x08'
RESPONSES['\x01\x10 \x07\x00\x01\x02\x03 \x87\r'] = '\x01\x10 \x07\x00\x01\xbb\xc8'
RESPONSES['\x01\x10 \x80\x00\x01\x02\x00\n\x18U'] = '\x01\x10 \x80\x00\x01\x0b\xe1' # Time0
RESPONSES['\x01\x10 \x81\x00\x01\x02\x00\x14\x99\x8c'] = '\x01\x10 \x81\x00\x01Z!'
RESPONSES['\x01\x10 \x82\x00\x01\x02\x00\x1e\x19\xb8'] = '\x01\x10 \x82\x00\x01\xaa!'
RESPONSES['\x01\x10 \x83\x00\x01\x02\x00(\x98\x7f'] = '\x01\x10 \x83\x00\x01\xfb\xe1' # Time3, value 40
RESPONSES['\x01\x10 \x84\x00\x01\x02\x002\x18\x03'] = '\x01\x10 \x84\x00\x01J '
RESPONSES['\x01\x10 \x85\x00\x01\x02\x00<\x98\x16'] = '\x01\x10 \x85\x00\x01\x1b\xe0'
RESPONSES['\x01\x10 \x86\x00\x01\x02\x00F\x19\xc6'] = '\x01\x10 \x86\x00\x01\xeb\xe0'
RESPONSES['\x01\x10 \x87\x00\x01\x02\x00P\x99\xd9'] = '\x01\x10 \x87\x00\x01\xba '
RESPONSES['\x01\x10\x10@\x00\x01\x02\x00\x07\xf8\x93'] = '\x01\x10\x10@\x00\x01\x04\xdd' # Actual step
RESPONSES['\x01\x10\x10P\x00\x01\x02\x00\x02:\x00'] = '\x01\x10\x10P\x00\x01\x05\x18' # Cycles, value 2
RESPONSES['\x01\x10\x10`\x00\x01\x02\x00\x01\x7f\xf1'] = '\x01\x10\x10`\x00\x01\x05\x17' # Linked pattern
# Slave address 10, get_pv()
RESPONSES['\n\x03\x10\x00\x00\x01\x81\xb1'] = '\n\x03\x02\x01\x03\\\x14'
# Slave address 10, run()
RESPONSES['\n\x05\x08\x14\xff\x00\xcf%'] = '\n\x05\x08\x14\xff\x00\xcf%'
# Slave address 10, stop()
RESPONSES['\n\x05\x08\x14\x00\x00\x8e\xd5'] = '\n\x05\x08\x14\x00\x00\x8e\xd5'
# Slave address 10, is_running()
RESPONSES['\n\x02\x08\x14\x00\x01\xfa\xd5'] = '\n\x02\x01\x00\xa3\xac'
# Slave address 10, get_setpoint()
RESPONSES['\n\x03\x10\x01\x00\x01\xd0q'] = '\n\x03\x02\x03\xe8\x1d;'
# Slave address 10, set_setpoint()
RESPONSES['\n\x10\x10\x01\x00\x01\x02\x03\xe8\xc5\xce'] = '\n\x10\x10\x01\x00\x01U\xb2'
# Slave address 10, get_control_mode()
RESPONSES['\n\x03\x10\x05\x00\x01\x91\xb0'] = '\n\x03\x02\x00\x00\x1d\x85'
# Slave address 10, set_control_mode()
RESPONSES['\n\x10\x10\x05\x00\x01\x02\x00\x03\x84\xf5'] = '\n\x10\x10\x05\x00\x01\x14s'
# Slave address 10, get_start_pattern_no()
RESPONSES['\n\x03\x100\x00\x01\x81\xbe'] = '\n\x03\x02\x00\x02\x9cD'
# Slave address 10, set_start_pattern_no()
RESPONSES['\n\x10\x100\x00\x01\x02\x00\x02@\x90'] = '\n\x10\x100\x00\x01\x04}'
# Slave address 10, set_pattern_step_setpoint() Pattern 0, step 3, value 333.3. See also below.
RESPONSES['\n\x10 \x03\x00\x01\x02\r\x050\x02'] = '\n\x10 \x03\x00\x01\xfbr'
# Slave address 10, set_pattern_step_time() Pattern 0, step 3, value 45. See also below.
RESPONSES['\n\x10 \x83\x00\x01\x02\x00-+L'] = '\n\x10 \x83\x00\x01\xfa\x9a'
# Slave address 10, set_pattern_additional_cycles() Pattern 0, value 4. See also below.
RESPONSES['\n\x10\x10P\x00\x01\x02\x00\x04\xc92'] = '\n\x10\x10P\x00\x01\x04c'
# Slave address 10, get_all_pattern_variables()
# --- Valid for pattern 0 ---
# SP0: 10 Time0: 10
# SP1: 20 Time1: 20
# SP2: 30 Time2: 30
# SP3: 333 Time3: 45
# SP4: 50 Time4: 50
# SP5: 60 Time5: 60
# SP6: 70 Time6: 70
# SP7: 80 Time7: 80
# Actual step: 7
# Add'l cycles: 4
RESPONSES['\n\x03 \x00\x00\x01\x8e\xb1'] = '\n\x03\x02\x00d\x1cn'
RESPONSES['\n\x03 \x01\x00\x01\xdfq'] = '\n\x03\x02\x00\xc8\x1c\x13'
RESPONSES['\n\x03 \x02\x00\x01/q'] = '\n\x03\x02\x01,\x1d\xc8'
RESPONSES['\n\x03 \x03\x00\x01~\xb1'] = '\n\x03\x02\r\x05\xd9\x16'
RESPONSES['\n\x03 \x04\x00\x01\xcfp'] = '\n\x03\x02\x01\xf4\x1d\x92'
RESPONSES['\n\x03 \x05\x00\x01\x9e\xb0'] = '\n\x03\x02\x02X\x1d\x1f'
RESPONSES['\n\x03 \x06\x00\x01n\xb0'] = '\n\x03\x02\x02\xbc\x1dT'
RESPONSES['\n\x03 \x07\x00\x01?p'] = '\n\x03\x02\x03 \x1c\xad'
RESPONSES['\n\x03 \x80\x00\x01\x8fY'] = '\n\x03\x02\x00\n\x9d\x82'
RESPONSES['\n\x03 \x81\x00\x01\xde\x99'] = '\n\x03\x02\x00\x14\x1d\x8a'
RESPONSES['\n\x03 \x82\x00\x01.\x99'] = '\n\x03\x02\x00\x1e\x9d\x8d'
RESPONSES['\n\x03 \x83\x00\x01\x7fY'] = '\n\x03\x02\x00-\xdd\x98'
RESPONSES['\n\x03 \x84\x00\x01\xce\x98'] = '\n\x03\x02\x002\x9cP'
RESPONSES['\n\x03 \x85\x00\x01\x9fX'] = '\n\x03\x02\x00<\x1d\x94'
RESPONSES['\n\x03 \x86\x00\x01oX'] = '\n\x03\x02\x00F\x9cw'
RESPONSES['\n\x03 \x87\x00\x01>\x98'] = '\n\x03\x02\x00P\x1d\xb9'
RESPONSES['\n\x03\x10@\x00\x01\x80e'] = '\n\x03\x02\x00\x07\\G'
RESPONSES['\n\x03\x10P\x00\x01\x81\xa0'] = '\n\x03\x02\x00\x04\x1cF'
RESPONSES['\n\x03\x10`\x00\x01\x81\xaf'] = '\n\x03\x02\x00\x01\xdcE'
RESPONSES['\n\x10 \x00\x00\x01\x02\x00d\xf5I'] = '\n\x10 \x00\x00\x01\x0br'
RESPONSES['\n\x10 \x01\x00\x01\x02\x00\xc8\xf4\xe5'] = '\n\x10 \x01\x00\x01Z\xb2'
RESPONSES['\n\x10 \x02\x00\x01\x02\x01,\xf5\r'] = '\n\x10 \x02\x00\x01\xaa\xb2'
RESPONSES['\n\x10 \x03\x00\x01\x02\x01\x90\xf5m'] = '\n\x10 \x03\x00\x01\xfbr'
RESPONSES['\n\x10 \x04\x00\x01\x02\x01\xf4\xf51'] = '\n\x10 \x04\x00\x01J\xb3'
RESPONSES['\n\x10 \x05\x00\x01\x02\x02X\xf4m'] = '\n\x10 \x05\x00\x01\x1bs'
RESPONSES['\n\x10 \x06\x00\x01\x02\x02\xbc\xf4\x15'] = '\n\x10 \x06\x00\x01\xebs'
RESPONSES['\n\x10 \x07\x00\x01\x02\x03 \xf4='] = '\n\x10 \x07\x00\x01\xba\xb3'
RESPONSES['\n\x10 \x80\x00\x01\x02\x00\nke'] = '\n\x10 \x80\x00\x01\n\x9a'
RESPONSES['\n\x10 \x81\x00\x01\x02\x00\x14\xea\xbc'] = '\n\x10 \x81\x00\x01[Z'
RESPONSES['\n\x10 \x82\x00\x01\x02\x00\x1ej\x88'] = '\n\x10 \x82\x00\x01\xabZ'
RESPONSES['\n\x10 \x83\x00\x01\x02\x00(\xebO'] = '\n\x10 \x83\x00\x01\xfa\x9a'
RESPONSES['\n\x10 \x84\x00\x01\x02\x002k3'] = '\n\x10 \x84\x00\x01K['
RESPONSES['\n\x10 \x85\x00\x01\x02\x00<\xeb&'] = '\n\x10 \x85\x00\x01\x1a\x9b'
RESPONSES['\n\x10 \x86\x00\x01\x02\x00Fj\xf6'] = '\n\x10 \x86\x00\x01\xea\x9b'
RESPONSES['\n\x10 \x87\x00\x01\x02\x00P\xea\xe9'] = '\n\x10 \x87\x00\x01\xbb['
RESPONSES['\n\x10\x10@\x00\x01\x02\x00\x07\x8b\xa3'] = '\n\x10\x10@\x00\x01\x05\xa6'
RESPONSES['\n\x10\x10P\x00\x01\x02\x00\x02I0'] = '\n\x10\x10P\x00\x01\x04c'
RESPONSES['\n\x10\x10`\x00\x01\x02\x00\x01\x0c\xc1'] = '\n\x10\x10`\x00\x01\x04l'
def _print_out( inputstring ):
    """Write the inputstring plus a newline to stdout (Python 2/3 compatible)."""
    sys.stdout.write(inputstring + '\n')
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module when it is
    # executed as a script.
    unittest.main()
| true | true |
f7fac057c699f63feb8acfcc093bdf7c8a6fa06a | 59,734 | py | Python | packages/python/plotly/plotly/graph_objs/scatterternary/_marker.py | benlindsay/plotly.py | 51ec6cb50d537a37da704ca74c07f11b62730110 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/graph_objs/scatterternary/_marker.py | benlindsay/plotly.py | 51ec6cb50d537a37da704ca74c07f11b62730110 | [
"MIT"
] | 1 | 2022-01-22T14:39:41.000Z | 2022-01-22T14:39:41.000Z | packages/python/plotly/plotly/graph_objs/scatterternary/_marker.py | RARedeem/plotly.py | 8289eb1c85125e147c4ce14b56272ed50b99ef20 | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary"
_path_str = "scatterternary.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"gradient",
"line",
"maxdisplayed",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
# autocolorscale
# --------------
    @property
    def autocolorscale(self):
        """
        Determines whether the colorscale is a default palette
        (`autocolorscale: true`) or the palette determined by
        `marker.colorscale`. Has an effect only if in `marker.color` is
        set to a numerical array. In case `colorscale` is unspecified
        or `autocolorscale` is true, the default palette will be
        chosen according to whether numbers in the `color` array are
        all positive, all negative or mixed.
        The 'autocolorscale' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["autocolorscale"]
    @autocolorscale.setter
    def autocolorscale(self, val):
        self["autocolorscale"] = val
# cauto
# -----
    @property
    def cauto(self):
        """
        Determines whether or not the color domain is computed with
        respect to the input data (here in `marker.color`) or the
        bounds set in `marker.cmin` and `marker.cmax`. Has an effect
        only if in `marker.color` is set to a numerical array. Defaults
        to `false` when `marker.cmin` and `marker.cmax` are set by the
        user.
        The 'cauto' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["cauto"]
    @cauto.setter
    def cauto(self, val):
        self["cauto"] = val
# cmax
# ----
    @property
    def cmax(self):
        """
        Sets the upper bound of the color domain. Has an effect only if
        in `marker.color` is set to a numerical array. Value should have
        the same units as in `marker.color` and if set, `marker.cmin`
        must be set as well.
        The 'cmax' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmax"]
    @cmax.setter
    def cmax(self, val):
        self["cmax"] = val
# cmid
# ----
    @property
    def cmid(self):
        """
        Sets the mid-point of the color domain by scaling `marker.cmin`
        and/or `marker.cmax` to be equidistant to this point. Has an
        effect only if in `marker.color` is set to a numerical array.
        Value should have the same units as in `marker.color`. Has no
        effect when `marker.cauto` is `false`.
        The 'cmid' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmid"]
    @cmid.setter
    def cmid(self, val):
        self["cmid"] = val
# cmin
# ----
    @property
    def cmin(self):
        """
        Sets the lower bound of the color domain. Has an effect only if
        in `marker.color` is set to a numerical array. Value should have
        the same units as in `marker.color` and if set, `marker.cmax`
        must be set as well.
        The 'cmin' property is a number and may be specified as:
          - An int or float
        Returns
        -------
        int|float
        """
        return self["cmin"]
    @cmin.setter
    def cmin(self, val):
        self["cmin"] = val
# color
# -----
    @property
    def color(self):
        """
        Sets the marker color. It accepts either a specific color or an
        array of numbers that are mapped to the colorscale relative to
        the max and min values of the array or relative to
        `marker.cmin` and `marker.cmax` if set.
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A number that will be interpreted as a color
            according to scatterternary.marker.colorscale
          - A list or array of any of the above
        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter
ternary.marker.colorbar.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.scatterternary.marker.colorbar.tickformatstop
defaults), sets the default property values to
use for elements of
scatterternary.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatterternary.mar
ker.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
scatterternary.marker.colorbar.title.font
instead. Sets this color bar's title font. Note
that the title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatterternary.marker.colorbar.title.side
instead. Determines the location of color bar's
title with respect to the color bar. Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scatterternary.marker.ColorBar
"""
return self["colorbar"]
    @colorbar.setter
    def colorbar(self, val):
        # Item assignment delegates validation/coercion of `val` to the
        # base plotly type's __setitem__ (defined outside this class).
        self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color`is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Greys,YlGnB
u,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland
,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# gradient
# --------
@property
def gradient(self):
"""
The 'gradient' property is an instance of Gradient
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.Gradient`
- A dict of string/value properties that will be passed
to the Gradient constructor
Supported dict properties:
color
Sets the final color of the gradient fill: the
center color for radial, the right for
horizontal, or the bottom for vertical.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
type
Sets the type of gradient used to fill the
markers
typesrc
Sets the source reference on Chart Studio Cloud
for type .
Returns
-------
plotly.graph_objs.scatterternary.marker.Gradient
"""
return self["gradient"]
@gradient.setter
def gradient(self, val):
self["gradient"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if in `marker.line.color`is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has
an effect only if in `marker.line.color`is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if in
`marker.line.color`is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.line.color`is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets themarker.linecolor. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
body,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.line.color`is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on Chart Studio Cloud
for width .
Returns
-------
plotly.graph_objs.scatterternary.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# maxdisplayed
# ------------
@property
def maxdisplayed(self):
"""
Sets a maximum number of points to be drawn on the graph. 0
corresponds to no limit.
The 'maxdisplayed' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["maxdisplayed"]
@maxdisplayed.setter
def maxdisplayed(self, val):
self["maxdisplayed"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for opacity .
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color`is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color`is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for symbol .
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        """Return the plain-text, indented description of every marker
        property.

        NOTE(review): the returned string appears to be consumed
        elsewhere in plotly (help/error text), so its content must stay
        exactly as generated — confirm before editing it.
        """
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if in
            `marker.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`
            Has an effect only if in `marker.color`is set to a
            numerical array. Defaults to `false` when `marker.cmin`
            and `marker.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmin` must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if in `marker.color`is
            set to a numerical array. Value should have the same
            units as in `marker.color`. Has no effect when
            `marker.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmax` must be set as well.
        color
            Sets themarkercolor. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.cmin` and `marker.cmax` if
            set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.scatterternary.marker.Colo
            rBar` instance or dict with compatible properties
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
            ,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
            arth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        gradient
            :class:`plotly.graph_objects.scatterternary.marker.Grad
            ient` instance or dict with compatible properties
        line
            :class:`plotly.graph_objects.scatterternary.marker.Line
            ` instance or dict with compatible properties
        maxdisplayed
            Sets a maximum number of points to be drawn on the
            graph. 0 corresponds to no limit.
        opacity
            Sets the marker opacity.
        opacitysrc
            Sets the source reference on Chart Studio Cloud for
            opacity .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.color`is set to a numerical array. If
            true, `marker.cmin` will correspond to the last color
            in the array and `marker.cmax` will correspond to the
            first color.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if in `marker.color`is
            set to a numerical array.
        size
            Sets the marker size (in px).
        sizemin
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the minimum size (in px) of the
            rendered marker points.
        sizemode
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the rule for which the data in
            `size` is converted to pixels.
        sizeref
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the scale factor used to
            determine the rendered size of marker points. Use with
            `sizemin` and `sizemode`.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        symbol
            Sets the marker symbol type. Adding 100 is equivalent
            to appending "-open" to a symbol name. Adding 200 is
            equivalent to appending "-dot" to a symbol name. Adding
            300 is equivalent to appending "-open-dot" or "dot-
            open" to a symbol name.
        symbolsrc
            Sets the source reference on Chart Studio Cloud for
            symbol .
        """
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
gradient=None,
line=None,
maxdisplayed=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color`is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color`is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color`is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets themarkercolor. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatterternary.marker.Colo
rBar` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
gradient
:class:`plotly.graph_objects.scatterternary.marker.Grad
ient` instance or dict with compatible properties
line
:class:`plotly.graph_objects.scatterternary.marker.Line
` instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on the
graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
opacity .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color`is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color`is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
symbol .
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("gradient", None)
_v = gradient if gradient is not None else _v
if _v is not None:
self["gradient"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("maxdisplayed", None)
_v = maxdisplayed if maxdisplayed is not None else _v
if _v is not None:
self["maxdisplayed"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("opacitysrc", None)
_v = opacitysrc if opacitysrc is not None else _v
if _v is not None:
self["opacitysrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizemin", None)
_v = sizemin if sizemin is not None else _v
if _v is not None:
self["sizemin"] = _v
_v = arg.pop("sizemode", None)
_v = sizemode if sizemode is not None else _v
if _v is not None:
self["sizemode"] = _v
_v = arg.pop("sizeref", None)
_v = sizeref if sizeref is not None else _v
if _v is not None:
self["sizeref"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
_v = arg.pop("symbolsrc", None)
_v = symbolsrc if symbolsrc is not None else _v
if _v is not None:
self["symbolsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 40.60775 | 87 | 0.532461 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Autogenerated plotly hierarchy node for ``scatterternary.marker``.

    Holds the marker styling of a scatterternary trace (color mapping,
    colorbar, size, symbol, opacity, gradient, outline line, ...).  All
    reads and writes go through the validated item protocol
    (``self[...]``) inherited from ``_BaseTraceHierarchyType``; the
    accessors below are thin, schema-generated pass-throughs.
    """
    # Location of this node within the plotly figure hierarchy.
    _parent_path_str = "scatterternary"
    _path_str = "scatterternary.marker"
    # Property names accepted by this node; the base class uses this set
    # to validate constructor arguments and item assignments.
    _valid_props = {
        "autocolorscale",
        "cauto",
        "cmax",
        "cmid",
        "cmin",
        "color",
        "coloraxis",
        "colorbar",
        "colorscale",
        "colorsrc",
        "gradient",
        "line",
        "maxdisplayed",
        "opacity",
        "opacitysrc",
        "reversescale",
        "showscale",
        "size",
        "sizemin",
        "sizemode",
        "sizeref",
        "sizesrc",
        "symbol",
        "symbolsrc",
    }
    # ------------------------------------------------------------------
    # Pass-through accessors (autogenerated): each property proxies the
    # validated item of the same name.  See `_prop_descriptions` for the
    # meaning of every property.
    # ------------------------------------------------------------------
    @property
    def autocolorscale(self):
        return self["autocolorscale"]
    @autocolorscale.setter
    def autocolorscale(self, val):
        self["autocolorscale"] = val
    @property
    def cauto(self):
        return self["cauto"]
    @cauto.setter
    def cauto(self, val):
        self["cauto"] = val
    @property
    def cmax(self):
        return self["cmax"]
    @cmax.setter
    def cmax(self, val):
        self["cmax"] = val
    @property
    def cmid(self):
        return self["cmid"]
    @cmid.setter
    def cmid(self, val):
        self["cmid"] = val
    @property
    def cmin(self):
        return self["cmin"]
    @cmin.setter
    def cmin(self, val):
        self["cmin"] = val
    @property
    def color(self):
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    @property
    def coloraxis(self):
        return self["coloraxis"]
    @coloraxis.setter
    def coloraxis(self, val):
        self["coloraxis"] = val
    @property
    def colorbar(self):
        return self["colorbar"]
    @colorbar.setter
    def colorbar(self, val):
        self["colorbar"] = val
    @property
    def colorscale(self):
        return self["colorscale"]
    @colorscale.setter
    def colorscale(self, val):
        self["colorscale"] = val
    @property
    def colorsrc(self):
        return self["colorsrc"]
    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val
    @property
    def gradient(self):
        return self["gradient"]
    @gradient.setter
    def gradient(self, val):
        self["gradient"] = val
    @property
    def line(self):
        return self["line"]
    @line.setter
    def line(self, val):
        self["line"] = val
    @property
    def maxdisplayed(self):
        return self["maxdisplayed"]
    @maxdisplayed.setter
    def maxdisplayed(self, val):
        self["maxdisplayed"] = val
    @property
    def opacity(self):
        return self["opacity"]
    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val
    @property
    def opacitysrc(self):
        return self["opacitysrc"]
    @opacitysrc.setter
    def opacitysrc(self, val):
        self["opacitysrc"] = val
    @property
    def reversescale(self):
        return self["reversescale"]
    @reversescale.setter
    def reversescale(self, val):
        self["reversescale"] = val
    @property
    def showscale(self):
        return self["showscale"]
    @showscale.setter
    def showscale(self, val):
        self["showscale"] = val
    @property
    def size(self):
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    @property
    def sizemin(self):
        return self["sizemin"]
    @sizemin.setter
    def sizemin(self, val):
        self["sizemin"] = val
    @property
    def sizemode(self):
        return self["sizemode"]
    @sizemode.setter
    def sizemode(self, val):
        self["sizemode"] = val
    @property
    def sizeref(self):
        return self["sizeref"]
    @sizeref.setter
    def sizeref(self, val):
        self["sizeref"] = val
    @property
    def sizesrc(self):
        return self["sizesrc"]
    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val
    @property
    def symbol(self):
        return self["symbol"]
    @symbol.setter
    def symbol(self, val):
        self["symbol"] = val
    @property
    def symbolsrc(self):
        return self["symbolsrc"]
    @symbolsrc.setter
    def symbolsrc(self, val):
        self["symbolsrc"] = val
    # Human-readable reference text for every valid property (runtime
    # string — returned verbatim, do not edit).
    @property
    def _prop_descriptions(self):
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if in
            `marker.color`is set to a numerical array. In case
            `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `marker.color`)
            or the bounds set in `marker.cmin` and `marker.cmax`
            Has an effect only if in `marker.color`is set to a
            numerical array. Defaults to `false` when `marker.cmin`
            and `marker.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmin` must be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if in `marker.color`is
            set to a numerical array. Value should have the same
            units as in `marker.color`. Has no effect when
            `marker.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.color`is set to a numerical array.
            Value should have the same units as in `marker.color`
            and if set, `marker.cmax` must be set as well.
        color
            Sets themarkercolor. It accepts either a specific color
            or an array of numbers that are mapped to the
            colorscale relative to the max and min values of the
            array or relative to `marker.cmin` and `marker.cmax` if
            set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.scatterternary.marker.Colo
            rBar` instance or dict with compatible properties
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.color`is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use`marker.cmin` and `marker.cmax`. Alternatively,
            `colorscale` may be a palette name string of the
            following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu
            ,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
            arth,Electric,Viridis,Cividis.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        gradient
            :class:`plotly.graph_objects.scatterternary.marker.Grad
            ient` instance or dict with compatible properties
        line
            :class:`plotly.graph_objects.scatterternary.marker.Line
            ` instance or dict with compatible properties
        maxdisplayed
            Sets a maximum number of points to be drawn on the
            graph. 0 corresponds to no limit.
        opacity
            Sets the marker opacity.
        opacitysrc
            Sets the source reference on Chart Studio Cloud for
            opacity .
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.color`is set to a numerical array. If
            true, `marker.cmin` will correspond to the last color
            in the array and `marker.cmax` will correspond to the
            first color.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if in `marker.color`is
            set to a numerical array.
        size
            Sets the marker size (in px).
        sizemin
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the minimum size (in px) of the
            rendered marker points.
        sizemode
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the rule for which the data in
            `size` is converted to pixels.
        sizeref
            Has an effect only if `marker.size` is set to a
            numerical array. Sets the scale factor used to
            determine the rendered size of marker points. Use with
            `sizemin` and `sizemode`.
        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        symbol
            Sets the marker symbol type. Adding 100 is equivalent
            to appending "-open" to a symbol name. Adding 200 is
            equivalent to appending "-dot" to a symbol name. Adding
            300 is equivalent to appending "-open-dot" or "dot-
            open" to a symbol name.
        symbolsrc
            Sets the source reference on Chart Studio Cloud for
            symbol .
        """
    def __init__(
        self,
        arg=None,
        autocolorscale=None,
        cauto=None,
        cmax=None,
        cmid=None,
        cmin=None,
        color=None,
        coloraxis=None,
        colorbar=None,
        colorscale=None,
        colorsrc=None,
        gradient=None,
        line=None,
        maxdisplayed=None,
        opacity=None,
        opacitysrc=None,
        reversescale=None,
        showscale=None,
        size=None,
        sizemin=None,
        sizemode=None,
        sizeref=None,
        symbol=None,
        symbolsrc=None,
        **kwargs
    ):
        """Construct a new Marker object.

        ``arg`` may be a dict of properties or an existing Marker (its
        plotly-JSON form is copied); individual keyword arguments take
        precedence over the corresponding entries of ``arg``.  See
        `_prop_descriptions` for the meaning of each property.
        """
        super(Marker, self).__init__("marker")
        # Internal re-parenting shortcut used by plotly: adopt the given
        # parent and skip all normal argument processing.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg: accept None, a compatible instance, or a dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so pops below do not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatterternary.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Marker`"""
            )
        # Handle the skip_invalid / _validate flags consumed by the base
        # class's item-assignment machinery.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate the data dict: for each property, an explicit keyword
        # argument overrides the value (if any) found in `arg`.
        _v = arg.pop("autocolorscale", None)
        _v = autocolorscale if autocolorscale is not None else _v
        if _v is not None:
            self["autocolorscale"] = _v
        _v = arg.pop("cauto", None)
        _v = cauto if cauto is not None else _v
        if _v is not None:
            self["cauto"] = _v
        _v = arg.pop("cmax", None)
        _v = cmax if cmax is not None else _v
        if _v is not None:
            self["cmax"] = _v
        _v = arg.pop("cmid", None)
        _v = cmid if cmid is not None else _v
        if _v is not None:
            self["cmid"] = _v
        _v = arg.pop("cmin", None)
        _v = cmin if cmin is not None else _v
        if _v is not None:
            self["cmin"] = _v
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("coloraxis", None)
        _v = coloraxis if coloraxis is not None else _v
        if _v is not None:
            self["coloraxis"] = _v
        _v = arg.pop("colorbar", None)
        _v = colorbar if colorbar is not None else _v
        if _v is not None:
            self["colorbar"] = _v
        _v = arg.pop("colorscale", None)
        _v = colorscale if colorscale is not None else _v
        if _v is not None:
            self["colorscale"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("gradient", None)
        _v = gradient if gradient is not None else _v
        if _v is not None:
            self["gradient"] = _v
        _v = arg.pop("line", None)
        _v = line if line is not None else _v
        if _v is not None:
            self["line"] = _v
        _v = arg.pop("maxdisplayed", None)
        _v = maxdisplayed if maxdisplayed is not None else _v
        if _v is not None:
            self["maxdisplayed"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("opacitysrc", None)
        _v = opacitysrc if opacitysrc is not None else _v
        if _v is not None:
            self["opacitysrc"] = _v
        _v = arg.pop("reversescale", None)
        _v = reversescale if reversescale is not None else _v
        if _v is not None:
            self["reversescale"] = _v
        _v = arg.pop("showscale", None)
        _v = showscale if showscale is not None else _v
        if _v is not None:
            self["showscale"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("sizemin", None)
        _v = sizemin if sizemin is not None else _v
        if _v is not None:
            self["sizemin"] = _v
        _v = arg.pop("sizemode", None)
        _v = sizemode if sizemode is not None else _v
        if _v is not None:
            self["sizemode"] = _v
        _v = arg.pop("sizeref", None)
        _v = sizeref if sizeref is not None else _v
        if _v is not None:
            self["sizeref"] = _v
        _v = arg.pop("sizesrc", None)
        _v = sizesrc if sizesrc is not None else _v
        if _v is not None:
            self["sizesrc"] = _v
        _v = arg.pop("symbol", None)
        _v = symbol if symbol is not None else _v
        if _v is not None:
            self["symbol"] = _v
        _v = arg.pop("symbolsrc", None)
        _v = symbolsrc if symbolsrc is not None else _v
        if _v is not None:
            self["symbolsrc"] = _v
        # Process any remaining (unknown) entries of `arg` plus kwargs.
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset the skip_invalid flag for subsequent item assignments.
        self._skip_invalid = False
| true | true |
f7fac07fc2e8891e9ac8eebc62220c4699a7ca4c | 17,561 | py | Python | python/aad/aad_base.py | kinect59/ad_examples | bf0bb75faa3f713a2efef04b6b093e6a313825af | [
"MIT"
] | 1 | 2019-02-21T02:28:34.000Z | 2019-02-21T02:28:34.000Z | python/aad/aad_base.py | kinect59/ad_examples | bf0bb75faa3f713a2efef04b6b093e6a313825af | [
"MIT"
] | null | null | null | python/aad/aad_base.py | kinect59/ad_examples | bf0bb75faa3f713a2efef04b6b093e6a313825af | [
"MIT"
] | null | null | null | from common.utils import *
from common.metrics import *
from common.sgd_optimization import *
from aad.aad_globals import *
from aad.query_model import *
from aad.aad_loss import *
class Ensemble(object):
    """Bundle of everything AAD tracks about an ensemble: the raw samples,
    per-member scores, labels, learned weights, aggregate scores and
    score-based anomaly orderings."""
    def __init__(self, samples, labels=None, scores=None, weights=None,
                 agg_scores=None, ordered_anom_idxs=None, original_indexes=None,
                 auc=0.0, model=None):
        # Plain attribute capture: every constructor argument is stored
        # under its own name.
        for attr_name, attr_value in (("samples", samples), ("labels", labels),
                                      ("scores", scores), ("weights", weights),
                                      ("agg_scores", agg_scores),
                                      ("ordered_anom_idxs", ordered_anom_idxs),
                                      ("original_indexes", original_indexes),
                                      ("auc", auc), ("model", model)):
            setattr(self, attr_name, attr_value)
        # Default the original indexes to the identity mapping 0..n-1.
        if self.original_indexes is None:
            self.original_indexes = np.arange(samples.shape[0])
        # Derive the anomaly ordering from aggregate scores when it was
        # not supplied explicitly (highest score first).
        if self.agg_scores is not None and self.ordered_anom_idxs is None:
            self.ordered_anom_idxs = order(self.agg_scores, decreasing=True)
class Budget(object):
    """Pairs the top-K anomaly rank with the total labeling budget."""
    def __init__(self, topK, budget):
        self.topK, self.budget = topK, budget
def get_budget_topK(n, opts):
    """Resolve the effective top-K rank and labeling budget for n instances.

    Non-positive opts.topK / opts.budget fall back to round(opts.tau * n);
    the budget is additionally capped at opts.maxbudget.
    """
    tau_count = int(np.round(opts.tau * n))
    topK = opts.topK if opts.topK > 0 else tau_count
    budget = opts.budget if opts.budget > 0 else tau_count
    return Budget(topK=topK, budget=min(opts.maxbudget, budget))
def estimate_qtau(samples, model, opts, lo=-1.0, hi=1.0, n_trials=50):
    """Estimate the score value at the tau-th quantile under random weights.

    Scores every instance in `samples` against `n_trials` random weight
    vectors drawn via `model.get_random_weights(lo=lo, hi=hi)` and pools
    the results.

    :param samples: np.ndarray score matrix (instances x ensemble members)
    :param model: object exposing get_random_weights(lo=..., hi=...)
    :param opts: Opts used by get_budget_topK() to resolve topK
    :param lo: float lower bound for random weight components
    :param hi: float upper bound for random weight components
    :param n_trials: int number of random weight draws (generalizes the
        previously hard-coded 50; the default preserves old behavior)
    :return: (qval, qmin, qmax) — the (1 - topK/n) percentile of the pooled
        scores, and the pooled min/max
    """
    n = samples.shape[0]
    bt = get_budget_topK(n, opts)
    # Accumulate per-trial score vectors in a list and concatenate once:
    # the original np.append inside the loop re-copied the growing array
    # on every iteration, which is quadratic in the number of trials.
    trial_scores = [np.ravel(samples.dot(model.get_random_weights(lo=lo, hi=hi)))
                    for _ in range(n_trials)]
    scores = np.concatenate(trial_scores)
    qval = quantile(scores, (1.0 - (bt.topK * 1.0 / float(n))) * 100.0)
    qmin = np.min(scores)
    qmax = np.max(scores)
    return qval, qmin, qmax
class MetricsStructure(object):
    """Accumulator for per-feedback-iteration train/test metrics of an
    AAD run (AUCs, precision@k, average precisions, weights, queries)."""
    def __init__(self, train_aucs=None, test_aucs=None, train_precs=None, test_precs=None,
                 train_aprs=None, test_aprs=None, train_n_at_top=None, test_n_at_top=None,
                 all_weights=None, queried=None):
        # Store every constructor argument under its own name.
        captured = (("train_aucs", train_aucs), ("test_aucs", test_aucs),
                    ("train_precs", train_precs), ("test_precs", test_precs),
                    ("train_aprs", train_aprs), ("test_aprs", test_aprs),
                    ("train_n_at_top", train_n_at_top),
                    ("test_n_at_top", test_n_at_top),
                    ("all_weights", all_weights), ("queried", queried))
        for metric_name, metric_value in captured:
            setattr(self, metric_name, metric_value)
        # Always starts empty; populated by evaluation code elsewhere.
        self.test_indexes = []
def get_aad_metrics_structure(budget, opts):
    """Allocate an empty MetricsStructure sized for `budget` feedback rounds.

    Each scalar metric gets a (1, budget) zero array; the precision@k
    metrics get one such array per k in opts.precision_k.
    """
    num_k = len(opts.precision_k)
    return MetricsStructure(
        train_aucs=np.zeros(shape=(1, budget)),
        # for precision@k first two columns are fid,k
        train_precs=[np.zeros(shape=(1, budget)) for _ in range(num_k)],
        train_aprs=np.zeros(shape=(1, budget)),
        train_n_at_top=[np.zeros(shape=(1, budget)) for _ in range(num_k)],
        all_weights=[],
        queried=[])
# Event types passed to an AadEventListener around each feedback iteration.
# Only EVT_AFTER_FEEDBACK is emitted in this file (after the weight update
# in Aad.aad_learn_ensemble_weights_with_budget); EVT_BEFORE_FEEDBACK is
# presumably defined for symmetry/other callers — confirm before removing.
EVT_BEFORE_FEEDBACK = 0
EVT_AFTER_FEEDBACK = 1
class AadEventListener(object):
    """No-op callback interface invoked by Aad during the feedback loop.

    Subclass and override __call__ to observe learning progress.
    """
    def __init__(self):
        pass
    def __call__(self, event_type, x, y, iter, queried, model, opts):
        """Called with an EVT_* event type plus the current data (x, y),
        iteration number, queried indexes, the Aad model and options.
        The default implementation does nothing."""
        pass
class Aad(object):
    """Base class for Active Anomaly Discovery (AAD).

    Maintains a weight vector `w` over ensemble members, scores instances
    as a linear combination of member scores, and learns `w` from analyst
    feedback via hinge-loss gradient descent
    (aad_learn_ensemble_weights_with_budget / aad_weight_update).
    """
    def __init__(self, detector_type,
                 ensemble_score=ENSEMBLE_SCORE_LINEAR,
                 random_state=None, event_listener=None):
        self.detector_type = detector_type
        self.ensemble_score = ensemble_score
        self.event_listener = event_listener
        if random_state is None:
            self.random_state = np.random.RandomState(42)
        else:
            self.random_state = random_state
        # ensemble weights learned through weak-supervision
        self.w = None
        # tau-quantile score threshold, set during aad_weight_update()
        self.qval = None
        # quick lookup of the uniform weight vector.
        # IMPORTANT: Treat this as readonly once set in fit()
        self.w_unif_prior = None
    def get_num_members(self):
        """Returns the number of ensemble members"""
        if self.w is not None:
            return len(self.w)
        return None
    def get_uniform_weights(self):
        """Return a normalized uniform weight vector over all members."""
        m = self.get_num_members()
        if m is None:
            raise ValueError("weights not initialized")
        w = np.ones(m, dtype=float)
        return normalize(w)
    def get_zero_weights(self, m=None):
        """Return an all-zero weight vector of length m (defaults to the
        current number of ensemble members)."""
        if m is None:
            m = self.get_num_members()
        if m is None:
            raise ValueError("weights not initialized")
        return np.zeros(m, dtype=float)
    def get_random_weights(self, m=None, samples=None, lo=-1.0, hi=1.0):
        """Return a normalized random weight vector.

        If `samples` is given, a random (sparse) row of it is used as the
        seed vector; otherwise m uniform values in [lo, hi) are drawn.
        """
        if samples is not None:
            w_rnd = np.ravel(get_random_item(samples, self.random_state).todense())
        else:
            if m is None:
                m = self.get_num_members()
            if m is None:
                raise ValueError("weights not initialized")
            w_rnd = self.random_state.uniform(lo, hi, m)
        w_rnd = normalize(w_rnd)
        return w_rnd
    def init_weights(self, init_type=INIT_UNIF, samples=None):
        """Initialize self.w per init_type (uniform / zero / random)."""
        logger.debug("Initializing weights to %s" % initialization_types[init_type])
        if init_type == INIT_UNIF:
            self.w = self.get_uniform_weights()
        elif init_type == INIT_ZERO:
            self.w = self.get_zero_weights()
        else:
            self.w = self.get_random_weights(samples=samples)
    def get_score(self, x, w=None):
        """Return the anomaly scores x.dot(w) (defaults to self.w)."""
        if w is None:
            w = self.w
        if w is None:
            raise ValueError("weights not initialized")
        score = x.dot(w)
        return score
    def get_auc(self, scores, labels):
        """Compute AUC of `scores` against binary `labels` via fn_auc.

        Scores are negated because fn_auc ranks by ascending value —
        TODO confirm against fn_auc's contract.
        """
        n = len(scores)
        tmp = np.empty(shape=(n, 2), dtype=float)
        tmp[:, 0] = labels
        tmp[:, 1] = -scores
        auc = fn_auc(tmp)
        return auc
    def supports_streaming(self):
        # Subclasses that handle streaming data override this.
        return False
    def get_tau_ranked_instance(self, x, w, tau_rank):
        """Return (as a 1-row matrix) the instance ranked `tau_rank` by
        descending score under weights w."""
        s = self.get_score(x, w)
        ps = order(s, decreasing=True)[tau_rank]
        return matrix(x[ps, :], nrow=1)
    def get_top_quantile(self, x, w, topK):
        """Return the score value separating the topK highest scores,
        i.e. the (1 - topK/n)-th percentile of x.dot(w)."""
        # IMPORTANT: qval will be computed using the linear dot product
        # s = self.get_score(x, w)
        s = x.dot(w)
        return quantile(s, (1.0 - (topK * 1.0 / float(nrow(x)))) * 100.0)
    def order_by_score(self, x, w=None):
        """Return (indexes ordered by decreasing score, the scores)."""
        anom_score = self.get_score(x, w)
        return order(anom_score, decreasing=True), anom_score
    def transform_to_ensemble_features(self, x, dense=False, norm_unit=False):
        """Should compute the scores from each ensemble member for each instance in x"""
        raise NotImplementedError("Need to implement this method in subclass")
    def get_truncated_constraint_set(self, w, x, y, hf,
                                     max_anomalies_in_constraint_set=1000,
                                     max_nominals_in_constraint_set=1000):
        """Cap the labeled feedback set used for optimization.

        Splits the labeled indexes `hf` into anomalies (y==1) and nominals
        (y==0); if either side exceeds its cap, a random subset of that
        side is marked active via the returned 0/1 mask.

        :return: (hf, in_set) — possibly reordered labeled indexes
            (anomalies first) and the matching inclusion mask
        """
        hf_tmp = np.array(hf)
        yf = y[hf_tmp]
        ha_pos = np.where(yf == 1)[0]
        hn_pos = np.where(yf == 0)[0]
        if len(ha_pos) > 0:
            ha = hf_tmp[ha_pos]
        else:
            ha = np.array([], dtype=int)
        if len(hn_pos) > 0:
            hn = hf_tmp[hn_pos]
        else:
            hn = np.array([], dtype=int)
        if len(ha) > max_anomalies_in_constraint_set or \
                len(hn) > max_nominals_in_constraint_set:
            # logger.debug("len(ha) %d, len(hn) %d; random selection subset" % (len(ha), len(hn)))
            in_set_ha = np.zeros(len(ha), dtype=int)
            in_set_hn = np.zeros(len(hn), dtype=int)
            if len(ha) > max_anomalies_in_constraint_set:
                tmp = sample(range(len(ha)), max_anomalies_in_constraint_set)
                in_set_ha[tmp] = 1
            else:
                in_set_ha[:] = 1
            if len(hn) > max_nominals_in_constraint_set:
                tmp = sample(range(len(hn)), max_nominals_in_constraint_set)
                in_set_hn[tmp] = 1
            else:
                in_set_hn[:] = 1
            hf = append(ha, hn)
            in_set = append(in_set_ha, in_set_hn)
            # logger.debug(in_set)
        else:
            in_set = np.ones(len(hf), dtype=int)
        return hf, in_set
    def aad_weight_update(self, w, x, y, hf, w_prior, opts,
                          tau_score=None, tau_rel=True, linear=True):
        """Perform one AAD weight update from the labeled set `hf`.

        Chooses the tau threshold (fixed / none / data quantile per
        opts.tau_score_type), truncates the constraint set, then minimizes
        the AAD linear hinge loss with SGD (RMSProp variant) and returns
        the unit-normalized new weight vector.

        :raises ArithmeticError: if the optimized weights contain NaN
        """
        n = x.shape[0]
        bt = get_budget_topK(n, opts)
        if opts.tau_score_type == TAU_SCORE_FIXED:
            self.qval = tau_score
        elif opts.tau_score_type == TAU_SCORE_NONE:
            self.qval = None
        else:
            self.qval = self.get_top_quantile(x, w, bt.topK)
        hf, in_constr_set = self.get_truncated_constraint_set(w, x, y, hf,
                                                              max_anomalies_in_constraint_set=opts.max_anomalies_in_constraint_set,
                                                              max_nominals_in_constraint_set=opts.max_nominals_in_constraint_set)
        # logger.debug("Linear: %s, sigma2: %f, with_prior: %s" %
        #              (str(linear), opts.priorsigma2, str(opts.withprior)))
        x_tau = None
        if tau_rel:
            # Anchor the loss to the tau-ranked instance's score.
            x_tau = self.get_tau_ranked_instance(x, w, bt.topK)
            # logger.debug("x_tau:")
            # logger.debug(to_dense_mat(x_tau))
        if opts.prior_influence == PRIOR_INFLUENCE_ADAPTIVE:
            # Prior influence decays as more feedback accumulates.
            prior_influence = 1. / max(1., 0. if hf is None else len(hf))
        elif opts.prior_influence == PRIOR_INFLUENCE_FIXED:
            prior_influence = 1.
        else:
            raise ValueError("Invalid prior_influence specified: %d" % opts.prior_influence)
        # Loss and gradient closures passed to the SGD optimizer.
        def if_f(w, x, y):
            if linear:
                return aad_loss_linear(w, x, y, self.qval, in_constr_set=in_constr_set, x_tau=x_tau,
                                       Ca=opts.Ca, Cn=opts.Cn, Cx=opts.Cx,
                                       withprior=opts.withprior, w_prior=w_prior,
                                       sigma2=opts.priorsigma2, prior_influence=prior_influence)
            else:
                raise ValueError("Only linear loss supported")
        def if_g(w, x, y):
            if linear:
                return aad_loss_gradient_linear(w, x, y, self.qval, in_constr_set=in_constr_set, x_tau=x_tau,
                                                Ca=opts.Ca, Cn=opts.Cn, Cx=opts.Cx,
                                                withprior=opts.withprior, w_prior=w_prior,
                                                sigma2=opts.priorsigma2, prior_influence=prior_influence)
            else:
                raise ValueError("Only linear loss supported")
        # NOTE: only the sgdRMSProp branch is active; the other optimizer
        # variants are kept disabled (constant-condition branches) for
        # experimentation.
        if False:
            w_new = sgd(w, x[hf, :], y[hf], if_f, if_g,
                        learning_rate=0.001, max_epochs=1000, eps=1e-5,
                        shuffle=True, rng=self.random_state)
        elif False:
            w_new = sgdMomentum(w, x[hf, :], y[hf], if_f, if_g,
                                learning_rate=0.001, max_epochs=1000,
                                shuffle=True, rng=self.random_state)
        elif True:
            # sgdRMSProp seems to run fastest and achieve performance close to best
            # NOTE: this was an observation on ANNThyroid_1v3 and toy2 datasets
            w_new = sgdRMSProp(w, x[hf, :], y[hf], if_f, if_g,
                               learning_rate=0.001, max_epochs=1000,
                               shuffle=True, rng=self.random_state)
        elif False:
            # sgdAdam seems to get best performance while a little slower than sgdRMSProp
            # NOTE: this was an observation on ANNThyroid_1v3 and toy2 datasets
            w_new = sgdAdam(w, x[hf, :], y[hf], if_f, if_g,
                            learning_rate=0.001, max_epochs=1000,
                            shuffle=True, rng=self.random_state)
        else:
            w_new = sgdRMSPropNestorov(w, x[hf, :], y[hf], if_f, if_g,
                                       learning_rate=0.001, max_epochs=1000,
                                       shuffle=True, rng=self.random_state)
        # Normalize the learned weights to unit length.
        w_len = w_new.dot(w_new)
        # logger.debug("w_len: %f" % w_len)
        if np.isnan(w_len):
            # logger.debug("w_new:\n%s" % str(list(w_new)))
            raise ArithmeticError("weight vector contains nan")
        w_new = w_new / np.sqrt(w_len)
        return w_new
    def update_weights(self, x, y, ha, hn, opts, w=None, tau_score=None):
        """Learns new weights for one feedback iteration
        Args:
            x: np.ndarray
                input data
            y: np.array(dtype=int)
                labels. Only the values at indexes in ha and hn are relevant. Rest may be np.nan.
            ha: np.array(dtype=int)
                indexes of labeled anomalies in x
            hn: indexes of labeled nominals in x
            opts: Opts
            w: np.array(dtype=float)
                current parameter values
            tau_score: float
                fixed tau score threshold (used when opts.tau_score_type
                is TAU_SCORE_FIXED)
        """
        if w is None:
            w = self.w
        w_prior = None
        if opts.withprior:
            if opts.unifprior:
                w_prior = self.w_unif_prior
            else:
                w_prior = w
        tau_rel = opts.constrainttype == AAD_CONSTRAINT_TAU_INSTANCE
        if (opts.detector_type == AAD_IFOREST or
                opts.detector_type == AAD_HSTREES or
                opts.detector_type == AAD_RSFOREST or
                opts.detector_type == AAD_MULTIVIEW_FOREST or
                opts.detector_type == LODA or
                opts.detector_type == PRECOMPUTED_SCORES):
            w_new = self.aad_weight_update(w, x, y, hf=append(ha, hn),
                                           w_prior=w_prior, opts=opts, tau_score=tau_score, tau_rel=tau_rel,
                                           linear=(self.ensemble_score == ENSEMBLE_SCORE_LINEAR))
        else:
            raise ValueError("Invalid weight update for ensemble detectors: %d" % opts.detector_type)
        # logger.debug("w_new:")
        # logger.debug(w_new)
        self.w = w_new
    def aad_learn_ensemble_weights_with_budget(self, ensemble, opts):
        """Run the interactive AAD feedback loop until the budget is spent.

        Repeatedly: rank instances by current score, query the next
        instance(s) via the query model, record the analyst label
        (simulated from ensemble.labels), and update self.w.

        :return: MetricsStructure with queried indexes (and per-iteration
            weights for small 2-d demo datasets), or None if budget == 0
        """
        if opts.budget == 0:
            return None
        x = ensemble.scores
        y = ensemble.labels
        n, m = x.shape
        bt = get_budget_topK(n, opts)
        metrics = get_aad_metrics_structure(opts.budget, opts)
        ha = []
        hn = []
        xis = []
        qstate = Query.get_initial_query_state(opts.qtype, opts=opts, qrank=bt.topK,
                                               a=1., b=1., budget=bt.budget)
        # Only retain per-iteration weights for small 2-d (demo) datasets.
        save_weights = (ensemble.samples is not None and ensemble.samples.shape[1] == 2) and bt.budget < 100
        if save_weights:
            metrics.all_weights = np.zeros(shape=(opts.budget, m))
        else:
            metrics.all_weights = None
        if self.w is None:
            self.init_weights(init_type=opts.init, samples=None)
        est_tau_val = None
        if opts.tau_score_type == TAU_SCORE_FIXED:
            est_tau_val, _, _ = estimate_qtau(x, self, opts, lo=0.0, hi=1.0)
            logger.debug("Using fixed estimated tau val: %f" % est_tau_val)
        i = 0
        feedback_iter = 0
        while len(xis) < bt.budget:
            starttime_iter = timer()
            metrics.queried = xis  # xis keeps growing with each feedback iteration
            order_anom_idxs, anom_score = self.order_by_score(x, self.w)
            xi_ = qstate.get_next_query(maxpos=n, ordered_indexes=order_anom_idxs,
                                        queried_items=xis,
                                        x=x, lbls=y, y=anom_score,
                                        w=self.w, hf=append(ha, hn),
                                        ensemble=ensemble,
                                        model=self,  # some custom query models might need this access
                                        remaining_budget=bt.budget - len(xis))
            if False and len(xi_) > 1:
                logger.debug("#feedback: %d" % len(xi_))
            xis.extend(xi_)
            if opts.single_inst_feedback:
                # Forget the previous feedback instances and
                # use only the current feedback for weight updates
                ha = []
                hn = []
            # Simulated analyst feedback: the true label of each queried
            # instance is revealed.
            for xi in xi_:
                if y[xi] == 1:
                    ha.append(xi)
                else:
                    hn.append(xi)
            if save_weights:
                # save the weights in each iteration for later analysis
                metrics.all_weights[i, :] = self.w
            i += 1
            qstate.update_query_state()
            if not opts.do_not_update_weights:
                self.update_weights(x, y, ha=ha, hn=hn, opts=opts, tau_score=est_tau_val)
            if self.event_listener is not None:
                self.event_listener(event_type=EVT_AFTER_FEEDBACK, x=x, y=y,
                                    iter=feedback_iter, queried=xis, model=self, opts=opts)
            feedback_iter += 1
            if np.mod(i, 1) == 0:
                endtime_iter = timer()
                tdiff = difftime(endtime_iter, starttime_iter, units="secs")
                logger.debug("Completed [%s] fid %d rerun %d feedback %d in %f sec(s)" %
                             (opts.dataset, opts.fid, opts.runidx, i, tdiff))
        return metrics
| 37.603854 | 131 | 0.557827 | from common.utils import *
from common.metrics import *
from common.sgd_optimization import *
from aad.aad_globals import *
from aad.query_model import *
from aad.aad_loss import *
class Ensemble(object):
    """Stores all ensemble scores plus labels, weights and orderings."""
    def __init__(self, samples, labels=None, scores=None, weights=None,
                 agg_scores=None, ordered_anom_idxs=None, original_indexes=None,
                 auc=0.0, model=None):
        self.samples = samples
        self.labels = labels
        self.scores = scores
        self.weights = weights
        self.agg_scores = agg_scores
        self.ordered_anom_idxs = ordered_anom_idxs
        self.original_indexes = original_indexes
        self.auc = auc
        self.model = model
        # Default original indexes to the identity mapping 0..n-1.
        if original_indexes is None:
            self.original_indexes = np.arange(samples.shape[0])
        # Derive the anomaly ordering (highest score first) when absent.
        if agg_scores is not None and ordered_anom_idxs is None:
            self.ordered_anom_idxs = order(agg_scores, decreasing=True)
class Budget(object):
    """Pairs the top-K anomaly rank with the total labeling budget."""
    def __init__(self, topK, budget):
        self.topK = topK
        self.budget = budget
def get_budget_topK(n, opts):
    """Resolve the effective top-K rank and labeling budget for n instances.

    Non-positive opts.topK / opts.budget default to round(opts.tau * n);
    the budget is additionally capped at opts.maxbudget.
    """
    topK = opts.topK
    if topK <= 0:
        topK = int(np.round(opts.tau * n))
    budget = opts.budget
    if budget <= 0:
        budget = int(np.round(opts.tau * n))
    budget = min(opts.maxbudget, budget)
    return Budget(topK=topK, budget=budget)
def estimate_qtau(samples, model, opts, lo=-1.0, hi=1.0):
    """Estimate the tau-quantile score under random ensemble weights.

    Pools scores of all instances under 50 random weight vectors and
    returns (qval, qmin, qmax): the (1 - topK/n) percentile of the pooled
    scores plus the pooled extremes.
    NOTE(review): np.append inside the loop re-copies the growing array
    (quadratic in trials) — consider list-collect + np.concatenate.
    """
    n = samples.shape[0]
    bt = get_budget_topK(n, opts)
    scores = np.zeros(0, dtype=float)
    for i in range(50):
        w = model.get_random_weights(lo=lo, hi=hi)
        s = samples.dot(w)
        scores = np.append(scores, s)
    qval = quantile(scores, (1.0 - (bt.topK * 1.0 / float(n))) * 100.0)
    qmin = np.min(scores)
    qmax = np.max(scores)
    return qval, qmin, qmax
class MetricsStructure(object):
    """Accumulator for per-feedback-iteration train/test metrics."""
    def __init__(self, train_aucs=None, test_aucs=None, train_precs=None, test_precs=None,
                 train_aprs=None, test_aprs=None, train_n_at_top=None, test_n_at_top=None,
                 all_weights=None, queried=None):
        self.train_aucs = train_aucs
        self.test_aucs = test_aucs
        self.train_precs = train_precs
        self.test_precs = test_precs
        self.train_aprs = train_aprs
        self.test_aprs = test_aprs
        self.train_n_at_top = train_n_at_top
        self.test_n_at_top = test_n_at_top
        self.all_weights = all_weights
        self.queried = queried
        # Always starts empty; populated by evaluation code elsewhere.
        self.test_indexes = []
def get_aad_metrics_structure(budget, opts):
    """Allocate an empty MetricsStructure sized for `budget` feedback rounds.

    One (1, budget) zero array per metric; the precision@k metrics get one
    such array for every k in opts.precision_k.
    """
    metrics = MetricsStructure(
        train_aucs=np.zeros(shape=(1, budget)),
        train_precs=[],
        train_aprs=np.zeros(shape=(1, budget)),
        train_n_at_top=[],
        all_weights=[],
        queried=[]
    )
    for k in range(len(opts.precision_k)):
        metrics.train_precs.append(np.zeros(shape=(1, budget)))
        metrics.train_n_at_top.append(np.zeros(shape=(1, budget)))
    return metrics
# Event types passed to an AadEventListener around each feedback iteration.
EVT_BEFORE_FEEDBACK = 0
EVT_AFTER_FEEDBACK = 1
class AadEventListener(object):
    """No-op callback interface invoked by Aad during the feedback loop.
    Subclass and override __call__ to observe learning progress."""
    def __init__(self):
        pass
    def __call__(self, event_type, x, y, iter, queried, model, opts):
        """Called with an EVT_* event type plus current data, iteration
        number, queried indexes, the model and options; does nothing."""
        pass
class Aad(object):
def __init__(self, detector_type,
ensemble_score=ENSEMBLE_SCORE_LINEAR,
random_state=None, event_listener=None):
self.detector_type = detector_type
self.ensemble_score = ensemble_score
self.event_listener = event_listener
if random_state is None:
self.random_state = np.random.RandomState(42)
else:
self.random_state = random_state
self.w = None
self.qval = None
self.w_unif_prior = None
def get_num_members(self):
if self.w is not None:
return len(self.w)
return None
def get_uniform_weights(self):
m = self.get_num_members()
if m is None:
raise ValueError("weights not initialized")
w = np.ones(m, dtype=float)
return normalize(w)
def get_zero_weights(self, m=None):
if m is None:
m = self.get_num_members()
if m is None:
raise ValueError("weights not initialized")
return np.zeros(m, dtype=float)
def get_random_weights(self, m=None, samples=None, lo=-1.0, hi=1.0):
if samples is not None:
w_rnd = np.ravel(get_random_item(samples, self.random_state).todense())
else:
if m is None:
m = self.get_num_members()
if m is None:
raise ValueError("weights not initialized")
w_rnd = self.random_state.uniform(lo, hi, m)
w_rnd = normalize(w_rnd)
return w_rnd
def init_weights(self, init_type=INIT_UNIF, samples=None):
logger.debug("Initializing weights to %s" % initialization_types[init_type])
if init_type == INIT_UNIF:
self.w = self.get_uniform_weights()
elif init_type == INIT_ZERO:
self.w = self.get_zero_weights()
else:
self.w = self.get_random_weights(samples=samples)
def get_score(self, x, w=None):
if w is None:
w = self.w
if w is None:
raise ValueError("weights not initialized")
score = x.dot(w)
return score
def get_auc(self, scores, labels):
n = len(scores)
tmp = np.empty(shape=(n, 2), dtype=float)
tmp[:, 0] = labels
tmp[:, 1] = -scores
auc = fn_auc(tmp)
return auc
def supports_streaming(self):
return False
def get_tau_ranked_instance(self, x, w, tau_rank):
s = self.get_score(x, w)
ps = order(s, decreasing=True)[tau_rank]
return matrix(x[ps, :], nrow=1)
def get_top_quantile(self, x, w, topK):
s = x.dot(w)
return quantile(s, (1.0 - (topK * 1.0 / float(nrow(x)))) * 100.0)
def order_by_score(self, x, w=None):
anom_score = self.get_score(x, w)
return order(anom_score, decreasing=True), anom_score
def transform_to_ensemble_features(self, x, dense=False, norm_unit=False):
raise NotImplementedError("Need to implement this method in subclass")
def get_truncated_constraint_set(self, w, x, y, hf,
max_anomalies_in_constraint_set=1000,
max_nominals_in_constraint_set=1000):
hf_tmp = np.array(hf)
yf = y[hf_tmp]
ha_pos = np.where(yf == 1)[0]
hn_pos = np.where(yf == 0)[0]
if len(ha_pos) > 0:
ha = hf_tmp[ha_pos]
else:
ha = np.array([], dtype=int)
if len(hn_pos) > 0:
hn = hf_tmp[hn_pos]
else:
hn = np.array([], dtype=int)
if len(ha) > max_anomalies_in_constraint_set or \
len(hn) > max_nominals_in_constraint_set:
in_set_ha = np.zeros(len(ha), dtype=int)
in_set_hn = np.zeros(len(hn), dtype=int)
if len(ha) > max_anomalies_in_constraint_set:
tmp = sample(range(len(ha)), max_anomalies_in_constraint_set)
in_set_ha[tmp] = 1
else:
in_set_ha[:] = 1
if len(hn) > max_nominals_in_constraint_set:
tmp = sample(range(len(hn)), max_nominals_in_constraint_set)
in_set_hn[tmp] = 1
else:
in_set_hn[:] = 1
hf = append(ha, hn)
in_set = append(in_set_ha, in_set_hn)
else:
in_set = np.ones(len(hf), dtype=int)
return hf, in_set
def aad_weight_update(self, w, x, y, hf, w_prior, opts,
tau_score=None, tau_rel=True, linear=True):
n = x.shape[0]
bt = get_budget_topK(n, opts)
if opts.tau_score_type == TAU_SCORE_FIXED:
self.qval = tau_score
elif opts.tau_score_type == TAU_SCORE_NONE:
self.qval = None
else:
self.qval = self.get_top_quantile(x, w, bt.topK)
hf, in_constr_set = self.get_truncated_constraint_set(w, x, y, hf,
max_anomalies_in_constraint_set=opts.max_anomalies_in_constraint_set,
max_nominals_in_constraint_set=opts.max_nominals_in_constraint_set)
x_tau = None
if tau_rel:
x_tau = self.get_tau_ranked_instance(x, w, bt.topK)
if opts.prior_influence == PRIOR_INFLUENCE_ADAPTIVE:
prior_influence = 1. / max(1., 0. if hf is None else len(hf))
elif opts.prior_influence == PRIOR_INFLUENCE_FIXED:
prior_influence = 1.
else:
raise ValueError("Invalid prior_influence specified: %d" % opts.prior_influence)
def if_f(w, x, y):
if linear:
return aad_loss_linear(w, x, y, self.qval, in_constr_set=in_constr_set, x_tau=x_tau,
Ca=opts.Ca, Cn=opts.Cn, Cx=opts.Cx,
withprior=opts.withprior, w_prior=w_prior,
sigma2=opts.priorsigma2, prior_influence=prior_influence)
else:
raise ValueError("Only linear loss supported")
def if_g(w, x, y):
if linear:
return aad_loss_gradient_linear(w, x, y, self.qval, in_constr_set=in_constr_set, x_tau=x_tau,
Ca=opts.Ca, Cn=opts.Cn, Cx=opts.Cx,
withprior=opts.withprior, w_prior=w_prior,
sigma2=opts.priorsigma2, prior_influence=prior_influence)
else:
raise ValueError("Only linear loss supported")
if False:
w_new = sgd(w, x[hf, :], y[hf], if_f, if_g,
learning_rate=0.001, max_epochs=1000, eps=1e-5,
shuffle=True, rng=self.random_state)
elif False:
w_new = sgdMomentum(w, x[hf, :], y[hf], if_f, if_g,
learning_rate=0.001, max_epochs=1000,
shuffle=True, rng=self.random_state)
elif True:
w_new = sgdRMSProp(w, x[hf, :], y[hf], if_f, if_g,
learning_rate=0.001, max_epochs=1000,
shuffle=True, rng=self.random_state)
elif False:
w_new = sgdAdam(w, x[hf, :], y[hf], if_f, if_g,
learning_rate=0.001, max_epochs=1000,
shuffle=True, rng=self.random_state)
else:
w_new = sgdRMSPropNestorov(w, x[hf, :], y[hf], if_f, if_g,
learning_rate=0.001, max_epochs=1000,
shuffle=True, rng=self.random_state)
w_len = w_new.dot(w_new)
if np.isnan(w_len):
raise ArithmeticError("weight vector contains nan")
w_new = w_new / np.sqrt(w_len)
return w_new
def update_weights(self, x, y, ha, hn, opts, w=None, tau_score=None):
if w is None:
w = self.w
w_prior = None
if opts.withprior:
if opts.unifprior:
w_prior = self.w_unif_prior
else:
w_prior = w
tau_rel = opts.constrainttype == AAD_CONSTRAINT_TAU_INSTANCE
if (opts.detector_type == AAD_IFOREST or
opts.detector_type == AAD_HSTREES or
opts.detector_type == AAD_RSFOREST or
opts.detector_type == AAD_MULTIVIEW_FOREST or
opts.detector_type == LODA or
opts.detector_type == PRECOMPUTED_SCORES):
w_new = self.aad_weight_update(w, x, y, hf=append(ha, hn),
w_prior=w_prior, opts=opts, tau_score=tau_score, tau_rel=tau_rel,
linear=(self.ensemble_score == ENSEMBLE_SCORE_LINEAR))
else:
raise ValueError("Invalid weight update for ensemble detectors: %d" % opts.detector_type)
self.w = w_new
    def aad_learn_ensemble_weights_with_budget(self, ensemble, opts):
        """Run the interactive AAD feedback loop for up to 'opts.budget' queries.

        Each round ranks instances by the current weighted ensemble score,
        asks the query strategy for the next instance(s), records the label
        from 'ensemble.labels' as simulated analyst feedback, and
        re-optimizes the weight vector.

        :param ensemble: provides 'scores' (n x m), 'labels' (n) and
            'samples' -- assumed to be the raw data points when 2-d; TODO confirm
        :param opts: AAD options bundle (budget, query type, priors, ...)
        :return: populated metrics structure, or None when the budget is zero
        """
        if opts.budget == 0:
            return None
        x = ensemble.scores
        y = ensemble.labels
        n, m = x.shape
        bt = get_budget_topK(n, opts)
        metrics = get_aad_metrics_structure(opts.budget, opts)
        # ha/hn: indexes labeled anomaly/nominal so far; xis: everything queried.
        ha = []
        hn = []
        xis = []
        qstate = Query.get_initial_query_state(opts.qtype, opts=opts, qrank=bt.topK,
                                               a=1., b=1., budget=bt.budget)
        # Keep a per-iteration weight history only for small 2-d sample sets
        # with a small budget (cheap to store; presumably for diagnostics).
        save_weights = (ensemble.samples is not None and ensemble.samples.shape[1] == 2) and bt.budget < 100
        if save_weights:
            metrics.all_weights = np.zeros(shape=(opts.budget, m))
        else:
            metrics.all_weights = None
        if self.w is None:
            self.init_weights(init_type=opts.init, samples=None)
        est_tau_val = None
        if opts.tau_score_type == TAU_SCORE_FIXED:
            # Estimate tau once up front and reuse it for every weight update.
            est_tau_val, _, _ = estimate_qtau(x, self, opts, lo=0.0, hi=1.0)
            logger.debug("Using fixed estimated tau val: %f" % est_tau_val)
        i = 0
        feedback_iter = 0
        while len(xis) < bt.budget:
            starttime_iter = timer()
            metrics.queried = xis
            order_anom_idxs, anom_score = self.order_by_score(x, self.w)
            xi_ = qstate.get_next_query(maxpos=n, ordered_indexes=order_anom_idxs,
                                        queried_items=xis,
                                        x=x, lbls=y, y=anom_score,
                                        w=self.w, hf=append(ha, hn),
                                        ensemble=ensemble,
                                        model=self,
                                        remaining_budget=bt.budget - len(xis))
            # NOTE(review): dead debug branch ('if False'); preserved as-is.
            if False and len(xi_) > 1:
                logger.debug("#feedback: %d" % len(xi_))
            xis.extend(xi_)
            if opts.single_inst_feedback:
                # Reset accumulated feedback so only the current batch is used.
                ha = []
                hn = []
            for xi in xi_:
                if y[xi] == 1:
                    ha.append(xi)
                else:
                    hn.append(xi)
            if save_weights:
                metrics.all_weights[i, :] = self.w
            i += 1
            qstate.update_query_state()
            if not opts.do_not_update_weights:
                self.update_weights(x, y, ha=ha, hn=hn, opts=opts, tau_score=est_tau_val)
            if self.event_listener is not None:
                self.event_listener(event_type=EVT_AFTER_FEEDBACK, x=x, y=y,
                                    iter=feedback_iter, queried=xis, model=self, opts=opts)
            feedback_iter += 1
            # np.mod(i, 1) == 0 is always true, so timing is logged every round.
            if np.mod(i, 1) == 0:
                endtime_iter = timer()
                tdiff = difftime(endtime_iter, starttime_iter, units="secs")
                logger.debug("Completed [%s] fid %d rerun %d feedback %d in %f sec(s)" %
                             (opts.dataset, opts.fid, opts.runidx, i, tdiff))
        return metrics
| true | true |
f7fac08918b6d8b252fb59487f4eac664dca1a6c | 46,878 | py | Python | data/bin/Lib/distutils/dist.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 309 | 2015-05-08T18:22:55.000Z | 2022-01-11T12:27:41.000Z | data/bin/Lib/distutils/dist.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 30 | 2015-05-13T02:15:15.000Z | 2019-12-28T14:01:19.000Z | data/bin/Lib/distutils/dist.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 35 | 2015-06-11T05:35:55.000Z | 2022-01-11T19:32:00.000Z | """distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
import sys, os, re
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command
# (e.g. command "build_ext" -> module distutils.command.build_ext).
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
    """The core of the Distutils. Most of the work hiding behind 'setup'
    is really done within a Distribution instance, which farms the work out
    to the Distutils commands specified on the command line.

    Setup scripts will almost never instantiate Distribution directly,
    unless the 'setup()' function is totally inadequate to their needs.
    However, it is conceivable that a setup script might wish to subclass
    Distribution for some specialized purpose, and then pass the subclass
    to 'setup()' as the 'distclass' keyword argument. If so, it is
    necessary to respect the expectations that 'setup' has of Distribution.
    See the code for 'setup()', in core.py, for details.
    """

    # 'global_options' describes the command-line options that may be
    # supplied to the setup script prior to any actual commands.
    # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
    # these global options. This list should be kept to a bare minimum,
    # since every global option is also valid as a command option -- and we
    # don't want to pollute the commands with too many options that they
    # have minimal control over.
    # The fourth entry for verbose means that it can be repeated.
    global_options = [('verbose', 'v', "run verbosely (default)", 1),
                      ('quiet', 'q', "run quietly (turns verbosity off)"),
                      ('dry-run', 'n', "don't actually do anything"),
                      ('help', 'h', "show detailed help message"),
                     ]

    # 'common_usage' is a short (2-3 line) string describing the common
    # usage of the setup script.
    common_usage = """\
Common commands: (see '--help-commands' for more)
  setup.py build      will build the package underneath 'build/'
  setup.py install    will install the package
"""

    # Display-only options: these are not propagated to the commands --
    # each one just prints a piece of metadata (or the command list).
    display_options = [
        ('help-commands', None,
         "list all available commands"),
        ('name', None,
         "print package name"),
        ('version', 'V',
         "print package version"),
        ('fullname', None,
         "print <package name>-<version>"),
        ('author', None,
         "print the author's name"),
        ('author-email', None,
         "print the author's email address"),
        ('maintainer', None,
         "print the maintainer's name"),
        ('maintainer-email', None,
         "print the maintainer's email address"),
        ('contact', None,
         "print the maintainer's name if known, else the author's"),
        ('contact-email', None,
         "print the maintainer's email address if known, else the author's"),
        ('url', None,
         "print the URL for this package"),
        ('license', None,
         "print the license of the package"),
        ('licence', None,
         "alias for --license"),
        ('description', None,
         "print the package description"),
        ('long-description', None,
         "print the long package description"),
        ('platforms', None,
         "print the list of platforms"),
        ('classifiers', None,
         "print the list of classifiers"),
        ('keywords', None,
         "print the list of keywords"),
        ('provides', None,
         "print the list of packages/modules provided"),
        ('requires', None,
         "print the list of packages/modules required"),
        ('obsoletes', None,
         "print the list of packages/modules made obsolete")
        ]
    # Attribute-style names for the display options ('-' mapped to '_').
    display_option_names = [translate_longopt(x[0]) for x in display_options]

    # negative options are options that exclude other options
    negative_opt = {'quiet': 'verbose'}

    # -- Creation/initialization methods -------------------------------
    def __init__ (self, attrs=None):
        """Construct a new Distribution instance: initialize all the
        attributes of a Distribution, and then use 'attrs' (a dictionary
        mapping attribute names to values) to assign some of those
        attributes their "real" values. (Any attributes not mentioned in
        'attrs' will be assigned to some null value: 0, None, an empty list
        or dictionary, etc.) Most importantly, initialize the
        'command_obj' attribute to the empty dictionary; this will be
        filled in with real command objects by 'parse_command_line()'.

        :param attrs: optional dict of distribution options, typically the
            keyword arguments passed to 'setup()'.
        """
        # Default values for our command-line options
        self.verbose = 1
        self.dry_run = 0
        self.help = 0
        for attr in self.display_option_names:
            setattr(self, attr, 0)
        # Store the distribution meta-data (name, version, author, and so
        # forth) in a separate object -- we're getting to have enough
        # information here (and enough command-line options) that it's
        # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
        # object in a sneaky and underhanded (but efficient!) way.
        self.metadata = DistributionMetadata()
        for basename in self.metadata._METHOD_BASENAMES:
            method_name = "get_" + basename
            setattr(self, method_name, getattr(self.metadata, method_name))
        # 'cmdclass' maps command names to class objects, so we
        # can 1) quickly figure out which class to instantiate when
        # we need to create a new command object, and 2) have a way
        # for the setup script to override command classes
        self.cmdclass = {}
        # 'command_packages' is a list of packages in which commands
        # are searched for. The factory for command 'foo' is expected
        # to be named 'foo' in the module 'foo' in one of the packages
        # named here. This list is searched from the left; an error
        # is raised if no named package provides the command being
        # searched for. (Always access using get_command_packages().)
        self.command_packages = None
        # 'script_name' and 'script_args' are usually set to sys.argv[0]
        # and sys.argv[1:], but they can be overridden when the caller is
        # not necessarily a setup script run from the command-line.
        self.script_name = None
        self.script_args = None
        # 'command_options' is where we store command options between
        # parsing them (from config files, the command-line, etc.) and when
        # they are actually needed -- ie. when the command in question is
        # instantiated. It is a dictionary of dictionaries of 2-tuples:
        #   command_options = { command_name : { option : (source, value) } }
        self.command_options = {}
        # 'dist_files' is the list of (command, pyversion, file) that
        # have been created by any dist commands run so far. This is
        # filled regardless of whether the run is dry or not. pyversion
        # gives sysconfig.get_python_version() if the dist file is
        # specific to a Python version, 'any' if it is good for all
        # Python versions on the target platform, and '' for a source
        # file. pyversion should not be used to specify minimum or
        # maximum required Python versions; use the metainfo for that
        # instead.
        self.dist_files = []
        # These options are really the business of various commands, rather
        # than of the Distribution itself. We provide aliases for them in
        # Distribution as a convenience to the developer.
        self.packages = None
        self.package_data = {}
        self.package_dir = None
        self.py_modules = None
        self.libraries = None
        self.headers = None
        self.ext_modules = None
        self.ext_package = None
        self.include_dirs = None
        self.extra_path = None
        self.scripts = None
        self.data_files = None
        self.password = ''
        # And now initialize bookkeeping stuff that can't be supplied by
        # the caller at all. 'command_obj' maps command names to
        # Command instances -- that's how we enforce that every command
        # class is a singleton.
        self.command_obj = {}
        # 'have_run' maps command names to boolean values; it keeps track
        # of whether we have actually run a particular command, to make it
        # cheap to "run" a command whenever we think we might need to -- if
        # it's already been done, no need for expensive filesystem
        # operations, we just check the 'have_run' dictionary and carry on.
        # It's only safe to query 'have_run' for a command class that has
        # been instantiated -- a false value will be inserted when the
        # command object is created, and replaced with a true value when
        # the command is successfully run. Thus it's probably best to use
        # '.get()' rather than a straight lookup.
        self.have_run = {}
        # Now we'll use the attrs dictionary (ultimately, keyword args from
        # the setup script) to possibly override any or all of these
        # distribution options.
        if attrs:
            # Pull out the set of command options and work on them
            # specifically. Note that this order guarantees that aliased
            # command options will override any supplied redundantly
            # through the general options dictionary.
            options = attrs.get('options')
            if options is not None:
                del attrs['options']
                for (command, cmd_options) in options.items():
                    opt_dict = self.get_option_dict(command)
                    for (opt, val) in cmd_options.items():
                        opt_dict[opt] = ("setup script", val)
            # 'licence' is accepted as a deprecated alias for 'license'.
            if 'licence' in attrs:
                attrs['license'] = attrs['licence']
                del attrs['licence']
                msg = "'licence' distribution option is deprecated; use 'license'"
                if warnings is not None:
                    warnings.warn(msg)
                else:
                    sys.stderr.write(msg + "\n")
            # Now work on the rest of the attributes. Any attribute that's
            # not already defined is invalid!
            for (key, val) in attrs.items():
                if hasattr(self.metadata, "set_" + key):
                    getattr(self.metadata, "set_" + key)(val)
                elif hasattr(self.metadata, key):
                    setattr(self.metadata, key, val)
                elif hasattr(self, key):
                    setattr(self, key, val)
                else:
                    msg = "Unknown distribution option: %s" % repr(key)
                    if warnings is not None:
                        warnings.warn(msg)
                    else:
                        sys.stderr.write(msg + "\n")
        self.finalize_options()
def get_option_dict(self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = sorted(self.command_options.keys())
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac, and setup.cfg in the current directory.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
return files
    def parse_config_files(self, filenames=None):
        """Parse each of 'filenames' (default: the result of
        'find_config_files()') with ConfigParser and record every option
        in 'self.command_options', keyed by section name.

        Inside a virtual environment, installation-directory options are
        ignored so the venv layout cannot be overridden from config files.
        Options from a '[global]' section additionally set attributes on
        the Distribution itself.
        """
        from configparser import ConfigParser
        # Ignore install directory options if we have a venv
        if sys.prefix != sys.base_prefix:
            ignore_options = [
                'install-base', 'install-platbase', 'install-lib',
                'install-platlib', 'install-purelib', 'install-headers',
                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
                'home', 'user', 'root']
        else:
            ignore_options = []
        ignore_options = frozenset(ignore_options)
        if filenames is None:
            filenames = self.find_config_files()
        if DEBUG:
            self.announce("Distribution.parse_config_files():")
        parser = ConfigParser()
        for filename in filenames:
            if DEBUG:
                self.announce("  reading %s" % filename)
            parser.read(filename)
            for section in parser.sections():
                options = parser.options(section)
                opt_dict = self.get_option_dict(section)
                for opt in options:
                    if opt != '__name__' and opt not in ignore_options:
                        val = parser.get(section,opt)
                        # Option names use '_' internally, '-' in config files.
                        opt = opt.replace('-', '_')
                        opt_dict[opt] = (filename, val)
            # Make the ConfigParser forget everything (so we retain
            # the original filenames that options come from)
            parser.__init__()
        # If there was a "global" section in the config file, use it
        # to set Distribution options.
        if 'global' in self.command_options:
            for (opt, (src, val)) in self.command_options['global'].items():
                alias = self.negative_opt.get(opt)
                try:
                    if alias:
                        setattr(self, alias, not strtobool(val))
                    elif opt in ('verbose', 'dry_run'): # ugh!
                        setattr(self, opt, strtobool(val))
                    else:
                        setattr(self, opt, val)
                except ValueError as msg:
                    raise DistutilsOptionError(msg)
# -- Command-line parsing methods ----------------------------------
    def parse_command_line(self):
        """Parse the setup script's command line, taken from the
        'script_args' instance attribute (which defaults to 'sys.argv[1:]'
        -- see 'setup()' in core.py).  This list is first processed for
        "global options" -- options that set attributes of the Distribution
        instance.  Then, it is alternately scanned for Distutils commands
        and options for that command.  Each new command terminates the
        options for the previous command.  The allowed options for a
        command are determined by the 'user_options' attribute of the
        command class -- thus, we have to be able to load command classes
        in order to parse the command line.  Any error in that 'options'
        attribute raises DistutilsGetoptError; any error on the
        command-line raises DistutilsArgError.  If no Distutils commands
        were found on the command line, raises DistutilsArgError.  Return
        true if command-line was successfully parsed and we should carry
        on with executing commands; false if no errors but we shouldn't
        execute commands (currently, this only happens if user asks for
        help).
        """
        #
        # We now have enough information to show the Macintosh dialog
        # that allows the user to interactively specify the "command line".
        #
        toplevel_options = self._get_toplevel_options()
        # We have to parse the command line a bit at a time -- global
        # options, then the first command, then its options, and so on --
        # because each command will be handled by a different class, and
        # the options that are valid for a particular class aren't known
        # until we have loaded the command class, which doesn't happen
        # until we know what the command is.
        self.commands = []
        parser = FancyGetopt(toplevel_options + self.display_options)
        parser.set_negative_aliases(self.negative_opt)
        parser.set_aliases({'licence': 'license'})
        # Global options are consumed here; 'args' retains the remainder
        # (first command plus everything after it).
        args = parser.getopt(args=self.script_args, object=self)
        option_order = parser.get_option_order()
        log.set_verbosity(self.verbose)
        # for display options we return immediately
        if self.handle_display_options(option_order):
            return
        while args:
            args = self._parse_command_opts(parser, args)
            if args is None:            # user asked for help (and got it)
                return
        # Handle the cases of --help as a "global" option, ie.
        # "setup.py --help" and "setup.py --help command ...".  For the
        # former, we show global options (--verbose, --dry-run, etc.)
        # and display-only options (--name, --version, etc.); for the
        # latter, we omit the display-only options and show help for
        # each command listed on the command line.
        if self.help:
            self._show_help(parser,
                            display_options=len(self.commands) == 0,
                            commands=self.commands)
            return
        # Oops, no commands found -- an end-user error
        if not self.commands:
            raise DistutilsArgError("no commands supplied")
        # All is well: return true
        return True
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
    def _parse_command_opts(self, parser, args):
        """Parse the command-line options for a single command.
        'parser' must be a FancyGetopt instance; 'args' must be the list
        of arguments, starting with the current command (whose options
        we are about to parse).  Returns a new version of 'args' with
        the next command at the front of the list; will be the empty
        list if there are no more commands on the command line.  Returns
        None if the user asked for help on this command.
        """
        # late import because of mutual dependence between these modules
        from distutils.cmd import Command
        # Pull the current command from the head of the command line
        command = args[0]
        if not command_re.match(command):
            raise SystemExit("invalid command name '%s'" % command)
        self.commands.append(command)
        # Dig up the command class that implements this command, so we
        # 1) know that it's a valid command, and 2) know which options
        # it takes.
        try:
            cmd_class = self.get_command_class(command)
        except DistutilsModuleError as msg:
            raise DistutilsArgError(msg)
        # Require that the command class be derived from Command -- want
        # to be sure that the basic "command" interface is implemented.
        if not issubclass(cmd_class, Command):
            raise DistutilsClassError(
                "command class %s must subclass Command" % cmd_class)
        # Also make sure that the command object provides a list of its
        # known options.
        if not (hasattr(cmd_class, 'user_options') and
                isinstance(cmd_class.user_options, list)):
            raise DistutilsClassError(("command class %s must provide " +
                   "'user_options' attribute (a list of tuples)") % \
                  cmd_class)
        # If the command class has a list of negative alias options,
        # merge it in with the global negative aliases.
        negative_opt = self.negative_opt
        if hasattr(cmd_class, 'negative_opt'):
            # Copy first so the class-level global table is never mutated.
            negative_opt = negative_opt.copy()
            negative_opt.update(cmd_class.negative_opt)
        # Check for help_options in command class.  They have a different
        # format (tuple of four) so we need to preprocess them here.
        if (hasattr(cmd_class, 'help_options') and
                isinstance(cmd_class.help_options, list)):
            help_options = fix_help_options(cmd_class.help_options)
        else:
            help_options = []
        # All commands support the global options too, just by adding
        # in 'global_options'.
        parser.set_option_table(self.global_options +
                                cmd_class.user_options +
                                help_options)
        parser.set_negative_aliases(negative_opt)
        (args, opts) = parser.getopt(args[1:])
        if hasattr(opts, 'help') and opts.help:
            self._show_help(parser, display_options=0, commands=[cmd_class])
            return
        # Run any command-specific help functions (e.g. --help-formats)
        # and stop processing if one was requested.
        if (hasattr(cmd_class, 'help_options') and
                isinstance(cmd_class.help_options, list)):
            help_option_found=0
            for (help_option, short, desc, func) in cmd_class.help_options:
                if hasattr(opts, parser.get_attr_name(help_option)):
                    help_option_found=1
                    if callable(func):
                        func()
                    else:
                        raise DistutilsClassError(
                            "invalid help function %r for help option '%s': "
                            "must be a callable object (function, etc.)"
                            % (func, help_option))
            if help_option_found:
                return
        # Put the options from the command-line into their official
        # holding pen, the 'command_options' dictionary.
        opt_dict = self.get_option_dict(command)
        for (name, value) in vars(opts).items():
            opt_dict[name] = ("command line", value)
        return args
def finalize_options(self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
commands=[]):
"""Show help for the setup script command-line in the form of
several lists of command-line options. 'parser' should be a
FancyGetopt instance; do not expect it to be returned in the
same state, as its option table will be reset to make it
generate the correct help text.
If 'global_options' is true, lists the global options:
--verbose, --dry-run, etc. If 'display_options' is true, lists
the "display-only" options: --name, --version, etc. Finally,
lists per-command help for every command name or command class
in 'commands'.
"""
# late import because of mutual dependence between these modules
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print('')
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print('')
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print('')
print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
from distutils.core import gen_usage
# User just wants a list of commands -- we'll print it out and stop
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print('')
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list(self, commands, header, max_length):
"""Print a subset of the list of all commands -- used by
'print_commands()'.
"""
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print()
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list(self):
"""Get a list of (command, description) tuples.
The list is divided into "standard commands" (listed in
distutils.command.__all__) and "extra commands" (mentioned in
self.cmdclass, but not a standard command). The descriptions come
from the command class attribute 'description'.
"""
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class(self, command):
"""Return the class that implements the Distutils command named by
'command'. First we check the 'cmdclass' dictionary; if the
command is mentioned there, we fetch the class object from the
dictionary and return it. Otherwise we load the command module
("distutils.command." + command) and fetch the command class from
the module. The loaded class is also stored in 'cmdclass'
to speed future calls to 'get_command_class()'.
Raises DistutilsModuleError if the expected module could not be
found, or if that module does not define the expected class.
"""
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__ (module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')"
% (command, klass_name, module_name))
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
    def get_command_obj(self, command, create=1):
        """Return the command object for 'command'.  Normally this object
        is cached on a previous call to 'get_command_obj()'; if no command
        object for 'command' is in the cache, then we either create and
        return it (if 'create' is true) or return None.

        Creating a command object also marks it as not-yet-run in
        'have_run' and applies any pending options from 'command_options'.
        """
        cmd_obj = self.command_obj.get(command)
        if not cmd_obj and create:
            if DEBUG:
                self.announce("Distribution.get_command_obj(): " \
                              "creating '%s' command object" % command)
            klass = self.get_command_class(command)
            # Cache the instance so every command class is a singleton.
            cmd_obj = self.command_obj[command] = klass(self)
            self.have_run[command] = 0
            # Set any options that were supplied in config files
            # or on the command line.  (NB. support for error
            # reporting is lame here: any errors aren't reported
            # until 'finalize_options()' is called, which means
            # we won't report the source of the error.)
            options = self.command_options.get(command)
            if options:
                self._set_command_options(cmd_obj, options)
        return cmd_obj
    def _set_command_options(self, command_obj, option_dict=None):
        """Set the options for 'command_obj' from 'option_dict'.  Basically
        this means copying elements of a dictionary ('option_dict') to
        attributes of an instance ('command').

        'command_obj' must be a Command instance.  If 'option_dict' is not
        supplied, uses the standard option dictionary for this command
        (from 'self.command_options').

        String values are coerced: negative-alias options invert and set
        their target, boolean options go through strtobool(), everything
        else is assigned verbatim.
        """
        command_name = command_obj.get_command_name()
        if option_dict is None:
            option_dict = self.get_option_dict(command_name)
        if DEBUG:
            self.announce("  setting options for '%s' command:" % command_name)
        for (option, (source, value)) in option_dict.items():
            if DEBUG:
                self.announce("    %s = %s (from %s)" % (option, value,
                                                         source))
            # Commands may declare boolean and negative-alias options;
            # missing attributes simply mean "none declared".
            try:
                bool_opts = [translate_longopt(o)
                             for o in command_obj.boolean_options]
            except AttributeError:
                bool_opts = []
            try:
                neg_opt = command_obj.negative_opt
            except AttributeError:
                neg_opt = {}
            try:
                is_string = isinstance(value, str)
                if option in neg_opt and is_string:
                    setattr(command_obj, neg_opt[option], not strtobool(value))
                elif option in bool_opts and is_string:
                    setattr(command_obj, option, strtobool(value))
                elif hasattr(command_obj, option):
                    setattr(command_obj, option, value)
                else:
                    raise DistutilsOptionError(
                        "error in %s: command '%s' has no such option '%s'"
                        % (source, command_name, option))
            except ValueError as msg:
                raise DistutilsOptionError(msg)
def reinitialize_command(self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
# -- Methods that operate on the Distribution ----------------------
    def announce(self, msg, level=log.INFO):
        """Log 'msg' at 'level' via the distutils logging facility."""
        log.log(level, msg)
    def run_commands(self):
        """Run each command that was seen on the setup script command line.

        Uses the list of commands found and cache of command objects
        created by 'get_command_obj()'.
        """
        # 'run_command' itself skips commands that have already run.
        for cmd in self.commands:
            self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
    def run_command(self, command):
        """Do whatever it takes to run a command (including nothing at all,
        if the command has already been run).  Specifically: if we have
        already created and run the command named by 'command', return
        silently without doing anything.  If the command named by 'command'
        doesn't even have a command object yet, create one.  Then invoke
        'run()' on that command object (or an existing one).
        """
        # Already been here, done that? then return silently.
        if self.have_run.get(command):
            return

        log.info("running %s", command)
        # Creates and caches the command object on first use.
        cmd_obj = self.get_command_obj(command)
        cmd_obj.ensure_finalized()
        cmd_obj.run()
        # Mark as run so a second invocation becomes a no-op.
        self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
return len(self.packages or self.py_modules or []) > 0
def has_ext_modules(self):
return self.ext_modules and len(self.ext_modules) > 0
def has_c_libraries(self):
return self.libraries and len(self.libraries) > 0
def has_modules(self):
return self.has_pure_modules() or self.has_ext_modules()
def has_headers(self):
return self.headers and len(self.headers) > 0
def has_scripts(self):
return self.scripts and len(self.scripts) > 0
def has_data_files(self):
return self.data_files and len(self.data_files) > 0
def is_pure(self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
class DistributionMetadata:
    """Dummy class to hold the distribution meta-data: name, version,
    author, and so forth.

    Every 'get_XXX' accessor returns a safe placeholder ("UNKNOWN",
    "0.0.0", or an empty list) when the underlying attribute is unset,
    so callers never need to None-check.
    """

    # Basenames for which Distribution binds 'get_<basename>' methods
    # onto itself in its constructor.  (The duplicate "license" entry
    # that used to appear here was redundant and has been removed.)
    _METHOD_BASENAMES = ("name", "version", "author", "author_email",
                         "maintainer", "maintainer_email", "url",
                         "license", "description", "long_description",
                         "keywords", "platforms", "fullname", "contact",
                         "contact_email", "classifiers",
                         "download_url",
                         # PEP 314
                         "provides", "requires", "obsoletes",
                         )

    def __init__(self):
        self.name = None
        self.version = None
        self.author = None
        self.author_email = None
        self.maintainer = None
        self.maintainer_email = None
        self.url = None
        self.license = None
        self.description = None
        self.long_description = None
        self.keywords = None
        self.platforms = None
        self.classifiers = None
        self.download_url = None
        # PEP 314
        self.provides = None
        self.requires = None
        self.obsoletes = None

    def write_pkg_info(self, base_dir):
        """Write the PKG-INFO file into the release tree.
        """
        with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
                  encoding='UTF-8') as pkg_info:
            self.write_pkg_file(pkg_info)

    def write_pkg_file(self, file):
        """Write the PKG-INFO format data to a file object.
        """
        # Metadata-Version 1.1 is only needed for the PEP 314 fields.
        version = '1.0'
        if (self.provides or self.requires or self.obsoletes or
                self.classifiers or self.download_url):
            version = '1.1'

        file.write('Metadata-Version: %s\n' % version)
        file.write('Name: %s\n' % self.get_name())
        file.write('Version: %s\n' % self.get_version())
        file.write('Summary: %s\n' % self.get_description())
        file.write('Home-page: %s\n' % self.get_url())
        file.write('Author: %s\n' % self.get_contact())
        file.write('Author-email: %s\n' % self.get_contact_email())
        file.write('License: %s\n' % self.get_license())
        if self.download_url:
            file.write('Download-URL: %s\n' % self.download_url)

        # Multi-line descriptions must be escaped per RFC 822 continuation
        # rules so the header stays parseable.
        long_desc = rfc822_escape(self.get_long_description())
        file.write('Description: %s\n' % long_desc)

        keywords = ','.join(self.get_keywords())
        if keywords:
            file.write('Keywords: %s\n' % keywords)

        self._write_list(file, 'Platform', self.get_platforms())
        self._write_list(file, 'Classifier', self.get_classifiers())

        # PEP 314
        self._write_list(file, 'Requires', self.get_requires())
        self._write_list(file, 'Provides', self.get_provides())
        self._write_list(file, 'Obsoletes', self.get_obsoletes())

    def _write_list(self, file, name, values):
        # Emit one '<name>: <value>' header line per value.
        for value in values:
            file.write('%s: %s\n' % (name, value))

    # -- Metadata query methods ----------------------------------------

    def get_name(self):
        return self.name or "UNKNOWN"

    def get_version(self):
        return self.version or "0.0.0"

    def get_fullname(self):
        return "%s-%s" % (self.get_name(), self.get_version())

    def get_author(self):
        return self.author or "UNKNOWN"

    def get_author_email(self):
        return self.author_email or "UNKNOWN"

    def get_maintainer(self):
        return self.maintainer or "UNKNOWN"

    def get_maintainer_email(self):
        return self.maintainer_email or "UNKNOWN"

    def get_contact(self):
        # Maintainer takes precedence over the original author.
        return self.maintainer or self.author or "UNKNOWN"

    def get_contact_email(self):
        return self.maintainer_email or self.author_email or "UNKNOWN"

    def get_url(self):
        return self.url or "UNKNOWN"

    def get_license(self):
        return self.license or "UNKNOWN"
    # Backward-compatible alias for the British spelling.
    get_licence = get_license

    def get_description(self):
        return self.description or "UNKNOWN"

    def get_long_description(self):
        return self.long_description or "UNKNOWN"

    def get_keywords(self):
        return self.keywords or []

    def get_platforms(self):
        return self.platforms or ["UNKNOWN"]

    def get_classifiers(self):
        return self.classifiers or []

    def get_download_url(self):
        return self.download_url or "UNKNOWN"

    # PEP 314

    def get_requires(self):
        return self.requires or []

    def set_requires(self, value):
        """Set 'requires', validating each entry as a version predicate."""
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.requires = value

    def get_provides(self):
        return self.provides or []

    def set_provides(self, value):
        """Set 'provides', validating each (stripped) entry."""
        # Import once, not on every loop iteration.
        import distutils.versionpredicate
        value = [v.strip() for v in value]
        for v in value:
            distutils.versionpredicate.split_provision(v)
        self.provides = value

    def get_obsoletes(self):
        return self.obsoletes or []

    def set_obsoletes(self, value):
        """Set 'obsoletes', validating each entry as a version predicate."""
        import distutils.versionpredicate
        for v in value:
            distutils.versionpredicate.VersionPredicate(v)
        self.obsoletes = value
def fix_help_options(options):
    """Convert a 4-tuple 'help_options' list as found in various command
    classes to the 3-tuple form required by FancyGetopt.
    """
    # Drop the trailing callable from each (long, short, help, func) entry.
    return [help_tuple[0:3] for help_tuple in options]
| 40.481865 | 82 | 0.59753 |
import sys, os, re
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# that they're very similar is no coincidence; the default naming scheme is
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
]
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = [translate_longopt(x[0]) for x in display_options]
negative_opt = {'quiet': 'verbose'}
def __init__ (self, attrs=None):
self.verbose = 1
self.dry_run = 0
self.help = 0
for attr in self.display_option_names:
setattr(self, attr, 0)
# information here (and enough command-line options) that it's
self.metadata = DistributionMetadata()
for basename in self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
self.cmdclass = {}
self.command_packages = None
self.script_name = None
self.script_args = None
self.command_options = {}
self.dist_files = []
self.packages = None
self.package_data = {}
self.package_dir = None
self.py_modules = None
self.libraries = None
self.headers = None
self.ext_modules = None
self.ext_package = None
self.include_dirs = None
self.extra_path = None
self.scripts = None
self.data_files = None
self.password = ''
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
self.command_obj = {}
# operations, we just check the 'have_run' dictionary and carry on.
# It's only safe to query 'have_run' for a command class that has
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
if attrs:
options = attrs.get('options')
if options is not None:
del attrs['options']
for (command, cmd_options) in options.items():
opt_dict = self.get_option_dict(command)
for (opt, val) in cmd_options.items():
opt_dict[opt] = ("setup script", val)
if 'licence' in attrs:
attrs['license'] = attrs['licence']
del attrs['licence']
msg = "'licence' distribution option is deprecated; use 'license'"
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# not already defined is invalid!
for (key, val) in attrs.items():
if hasattr(self.metadata, "set_" + key):
getattr(self.metadata, "set_" + key)(val)
elif hasattr(self.metadata, key):
setattr(self.metadata, key, val)
elif hasattr(self, key):
setattr(self, key, val)
else:
msg = "Unknown distribution option: %s" % repr(key)
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
self.finalize_options()
def get_option_dict(self, command):
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = sorted(self.command_options.keys())
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
return files
def parse_config_files(self, filenames=None):
from configparser import ConfigParser
# Ignore install directory options if we have a venv
if sys.prefix != sys.base_prefix:
ignore_options = [
'install-base', 'install-platbase', 'install-lib',
'install-platlib', 'install-purelib', 'install-headers',
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
'home', 'user', 'root']
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
if DEBUG:
self.announce(" reading %s" % filename)
parser.read(filename)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__' and opt not in ignore_options:
val = parser.get(section,opt)
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
# -- Command-line parsing methods ----------------------------------
def parse_command_line(self):
#
# We now have enough information to show the Macintosh dialog
# that allows the user to interactively specify the "command line".
#
toplevel_options = self._get_toplevel_options()
# We have to parse the command line a bit at a time -- global
# options, then the first command, then its options, and so on --
# because each command will be handled by a different class, and
# the options that are valid for a particular class aren't known
# until we know what the command is.
self.commands = []
parser = FancyGetopt(toplevel_options + self.display_options)
parser.set_negative_aliases(self.negative_opt)
parser.set_aliases({'licence': 'license'})
args = parser.getopt(args=self.script_args, object=self)
option_order = parser.get_option_order()
log.set_verbosity(self.verbose)
# for display options we return immediately
if self.handle_display_options(option_order):
return
while args:
args = self._parse_command_opts(parser, args)
if args is None: # user asked for help (and got it)
return
# Handle the cases of --help as a "global" option, ie.
# "setup.py --help" and "setup.py --help command ...". For the
# former, we show global options (--verbose, --dry-run, etc.)
# and display-only options (--name, --version, etc.); for the
# latter, we omit the display-only options and show help for
# each command listed on the command line.
if self.help:
self._show_help(parser,
display_options=len(self.commands) == 0,
commands=self.commands)
return
# Oops, no commands found -- an end-user error
if not self.commands:
raise DistutilsArgError("no commands supplied")
# All is well: return true
return True
def _get_toplevel_options(self):
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
def _parse_command_opts(self, parser, args):
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
raise SystemExit("invalid command name '%s'" % command)
self.commands.append(command)
# Dig up the command class that implements this command, so we
# 1) know that it's a valid command, and 2) know which options
try:
cmd_class = self.get_command_class(command)
except DistutilsModuleError as msg:
raise DistutilsArgError(msg)
if not issubclass(cmd_class, Command):
raise DistutilsClassError(
"command class %s must subclass Command" % cmd_class)
if not (hasattr(cmd_class, 'user_options') and
isinstance(cmd_class.user_options, list)):
raise DistutilsClassError(("command class %s must provide " +
"'user_options' attribute (a list of tuples)") % \
cmd_class)
negative_opt = self.negative_opt
if hasattr(cmd_class, 'negative_opt'):
negative_opt = negative_opt.copy()
negative_opt.update(cmd_class.negative_opt)
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_options = fix_help_options(cmd_class.help_options)
else:
help_options = []
parser.set_option_table(self.global_options +
cmd_class.user_options +
help_options)
parser.set_negative_aliases(negative_opt)
(args, opts) = parser.getopt(args[1:])
if hasattr(opts, 'help') and opts.help:
self._show_help(parser, display_options=0, commands=[cmd_class])
return
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_option_found=0
for (help_option, short, desc, func) in cmd_class.help_options:
if hasattr(opts, parser.get_attr_name(help_option)):
help_option_found=1
if callable(func):
func()
else:
raise DistutilsClassError(
"invalid help function %r for help option '%s': "
"must be a callable object (function, etc.)"
% (func, help_option))
if help_option_found:
return
opt_dict = self.get_option_dict(command)
for (name, value) in vars(opts).items():
opt_dict[name] = ("command line", value)
return args
def finalize_options(self):
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
commands=[]):
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print('')
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print('')
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print('')
print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
from distutils.core import gen_usage
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print('')
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list(self, commands, header, max_length):
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print()
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list(self):
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class(self, command):
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__ (module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')"
% (command, klass_name, module_name))
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
cmd_obj = self.command_obj.get(command)
if not cmd_obj and create:
if DEBUG:
self.announce("Distribution.get_command_obj(): " \
"creating '%s' command object" % command)
klass = self.get_command_class(command)
cmd_obj = self.command_obj[command] = klass(self)
self.have_run[command] = 0
# Set any options that were supplied in config files
# or on the command line. (NB. support for error
# reporting is lame here: any errors aren't reported
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
def _set_command_options(self, command_obj, option_dict=None):
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as msg:
raise DistutilsOptionError(msg)
def reinitialize_command(self, command, reinit_subcommands=0):
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
# -- Methods that operate on the Distribution ----------------------
def announce(self, msg, level=log.INFO):
log.log(level, msg)
def run_commands(self):
for cmd in self.commands:
self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
def run_command(self, command):
# Already been here, done that? then return silently.
if self.have_run.get(command):
return
log.info("running %s", command)
cmd_obj = self.get_command_obj(command)
cmd_obj.ensure_finalized()
cmd_obj.run()
self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
return len(self.packages or self.py_modules or []) > 0
def has_ext_modules(self):
return self.ext_modules and len(self.ext_modules) > 0
def has_c_libraries(self):
return self.libraries and len(self.libraries) > 0
def has_modules(self):
return self.has_pure_modules() or self.has_ext_modules()
def has_headers(self):
return self.headers and len(self.headers) > 0
def has_scripts(self):
return self.scripts and len(self.scripts) > 0
def has_data_files(self):
return self.data_files and len(self.data_files) > 0
def is_pure(self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
class DistributionMetadata:
_METHOD_BASENAMES = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email", "license", "classifiers",
"download_url",
"provides", "requires", "obsoletes",
)
def __init__ (self):
self.name = None
self.version = None
self.author = None
self.author_email = None
self.maintainer = None
self.maintainer_email = None
self.url = None
self.license = None
self.description = None
self.long_description = None
self.keywords = None
self.platforms = None
self.classifiers = None
self.download_url = None
self.provides = None
self.requires = None
self.obsoletes = None
def write_pkg_info(self, base_dir):
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
def write_pkg_file(self, file):
version = '1.0'
if (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
version = '1.1'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name() )
file.write('Version: %s\n' % self.get_version() )
file.write('Summary: %s\n' % self.get_description() )
file.write('Home-page: %s\n' % self.get_url() )
file.write('Author: %s\n' % self.get_contact() )
file.write('Author-email: %s\n' % self.get_contact_email() )
file.write('License: %s\n' % self.get_license() )
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords )
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
def _write_list(self, file, name, values):
for value in values:
file.write('%s: %s\n' % (name, value))
def get_name(self):
return self.name or "UNKNOWN"
def get_version(self):
return self.version or "0.0.0"
def get_fullname(self):
return "%s-%s" % (self.get_name(), self.get_version())
def get_author(self):
return self.author or "UNKNOWN"
def get_author_email(self):
return self.author_email or "UNKNOWN"
def get_maintainer(self):
return self.maintainer or "UNKNOWN"
def get_maintainer_email(self):
return self.maintainer_email or "UNKNOWN"
def get_contact(self):
return self.maintainer or self.author or "UNKNOWN"
def get_contact_email(self):
return self.maintainer_email or self.author_email or "UNKNOWN"
def get_url(self):
return self.url or "UNKNOWN"
def get_license(self):
return self.license or "UNKNOWN"
get_licence = get_license
def get_description(self):
return self.description or "UNKNOWN"
def get_long_description(self):
return self.long_description or "UNKNOWN"
def get_keywords(self):
return self.keywords or []
def get_platforms(self):
return self.platforms or ["UNKNOWN"]
def get_classifiers(self):
return self.classifiers or []
def get_download_url(self):
return self.download_url or "UNKNOWN"
def get_requires(self):
return self.requires or []
def set_requires(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.requires = value
def get_provides(self):
return self.provides or []
def set_provides(self, value):
value = [v.strip() for v in value]
for v in value:
import distutils.versionpredicate
distutils.versionpredicate.split_provision(v)
self.provides = value
def get_obsoletes(self):
return self.obsoletes or []
def set_obsoletes(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.obsoletes = value
def fix_help_options(options):
new_options = []
for help_tuple in options:
new_options.append(help_tuple[0:3])
return new_options
| true | true |
f7fac25652388c3b6a29b76286b03e7f78469ceb | 862 | py | Python | plant/database/models/compo_model.py | gerkx/big-bang-pipe | 5528d1257e18fc093d9785094732076dc46700d5 | [
"MIT"
] | null | null | null | plant/database/models/compo_model.py | gerkx/big-bang-pipe | 5528d1257e18fc093d9785094732076dc46700d5 | [
"MIT"
] | null | null | null | plant/database/models/compo_model.py | gerkx/big-bang-pipe | 5528d1257e18fc093d9785094732076dc46700d5 | [
"MIT"
] | null | null | null | from typing import Type
from nanoid import generate
from peewee import ForeignKeyField, CharField
from retrying import retry
from .base_models import VisModel
from .shot_model import Shot
class Compo(VisModel):
    """Peewee model for a compositing record that belongs to a Shot."""

    # One-to-many link back to the owning shot; reverse accessor is shot.compo.
    shot = ForeignKeyField(Shot, backref='compo')

    @retry(wait_random_min=250, wait_random_max=2000, stop_max_attempt_number=10)
    def new_or_get(
        self,
        shot:Type[Shot],
        name:str,
        location:str,
        inbound_name:str,
        **kwargs
    ):
        """Fetch the (shot, name) row, creating it (with a fresh guid) when
        missing. Retried with random back-off to ride out transient DB errors."""
        print(f'location: {location}')
        defaults = {
            'guid': generate(),
            'inbound_name': inbound_name,
            'location': location,
            **kwargs
        }
        row, _created = self.get_or_create(shot=shot, name=name, defaults=defaults)
        return row
from nanoid import generate
from peewee import ForeignKeyField, CharField
from retrying import retry
from .base_models import VisModel
from .shot_model import Shot
class Compo(VisModel):
    """Peewee model for a compositing record that belongs to a Shot."""
    # One-to-many link back to the owning shot; reverse accessor is shot.compo.
    shot = ForeignKeyField(Shot, backref='compo')
    # Retried with random 0.25-2 s back-off, up to 10 attempts, to ride out
    # transient database errors.
    @retry(wait_random_min=250, wait_random_max=2000, stop_max_attempt_number=10)
    def new_or_get(
        self,
        shot:Type[Shot],
        name:str,
        location:str,
        inbound_name:str,
        **kwargs
    ):
        """Fetch the (shot, name) row, creating it first if missing.

        A new row gets a freshly generated guid plus the supplied
        inbound_name/location and any extra keyword fields.
        """
        print(f'location: {location}')
        new_compo_shot, _ = self.get_or_create(
            shot = shot,
            name = name,
            defaults = {
                'guid': generate(),
                'inbound_name': inbound_name,
                'location': location,
                **kwargs
            }
        )
        return new_compo_shot
f7fac2bcd8dba5838de3a067e7d3ae2cfc6d745d | 4,304 | py | Python | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_response.py | githubmilesma/huaweicloud-sdk-python-v3 | 9d9449ed68a609ca65f0aa50b5b2a1c28445bf03 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-dns/huaweicloudsdkdns/v2/model/show_record_set_by_zone_response.py | Lencof/huaweicloud-sdk-python-v3 | d13dc4e2830a83e295be6e4de021999b3376e34e | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowRecordSetByZoneResponse(SdkResponse):
    """Generated response model for the DNS "show record set by zone" call.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'links': 'PageLink',
        'recordsets': 'list[ShowRecordSetByZoneResp]',
        'metadata': 'Metedata'
    }

    # attribute name -> JSON field name in the wire format
    attribute_map = {
        'links': 'links',
        'recordsets': 'recordsets',
        'metadata': 'metadata'
    }

    def __init__(self, links=None, recordsets=None, metadata=None):
        """ShowRecordSetByZoneResponse - a model defined in huaweicloud sdk

        :param links: pagination links (PageLink)
        :param recordsets: record sets of the zone (list[ShowRecordSetByZoneResp])
        :param metadata: response metadata such as totals (Metedata)
        """
        super().__init__()

        self._links = None
        self._recordsets = None
        self._metadata = None
        # No polymorphic subtypes for this model.
        self.discriminator = None

        if links is not None:
            self.links = links
        if recordsets is not None:
            self.recordsets = recordsets
        if metadata is not None:
            self.metadata = metadata

    @property
    def links(self):
        """Gets the links of this ShowRecordSetByZoneResponse.

        :return: The links of this ShowRecordSetByZoneResponse.
        :rtype: PageLink
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this ShowRecordSetByZoneResponse.

        :param links: The links of this ShowRecordSetByZoneResponse.
        :type: PageLink
        """
        self._links = links

    @property
    def recordsets(self):
        """Gets the recordsets of this ShowRecordSetByZoneResponse.

        :return: The recordsets of this ShowRecordSetByZoneResponse.
        :rtype: list[ShowRecordSetByZoneResp]
        """
        return self._recordsets

    @recordsets.setter
    def recordsets(self, recordsets):
        """Sets the recordsets of this ShowRecordSetByZoneResponse.

        :param recordsets: The recordsets of this ShowRecordSetByZoneResponse.
        :type: list[ShowRecordSetByZoneResp]
        """
        self._recordsets = recordsets

    @property
    def metadata(self):
        """Gets the metadata of this ShowRecordSetByZoneResponse.

        :return: The metadata of this ShowRecordSetByZoneResponse.
        :rtype: Metedata
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this ShowRecordSetByZoneResponse.

        :param metadata: The metadata of this ShowRecordSetByZoneResponse.
        :type: Metedata
        """
        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared attributes, recursing into nested models
        # (anything exposing to_dict) and masking sensitive fields.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowRecordSetByZoneResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowRecordSetByZoneResponse(SdkResponse):
    """Generated response model for the DNS "show record set by zone" call."""
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    # attribute name -> declared OpenAPI type
    openapi_types = {
        'links': 'PageLink',
        'recordsets': 'list[ShowRecordSetByZoneResp]',
        'metadata': 'Metedata'
    }
    # attribute name -> JSON field name in the wire format
    attribute_map = {
        'links': 'links',
        'recordsets': 'recordsets',
        'metadata': 'metadata'
    }
    def __init__(self, links=None, recordsets=None, metadata=None):
        """Build the response model; all fields are optional."""
        super().__init__()
        self._links = None
        self._recordsets = None
        self._metadata = None
        # No polymorphic subtypes for this model.
        self.discriminator = None
        if links is not None:
            self.links = links
        if recordsets is not None:
            self.recordsets = recordsets
        if metadata is not None:
            self.metadata = metadata
    @property
    def links(self):
        """Pagination links (PageLink)."""
        return self._links
    @links.setter
    def links(self, links):
        """Set the pagination links (PageLink)."""
        self._links = links
    @property
    def recordsets(self):
        """Record sets of the zone (list[ShowRecordSetByZoneResp])."""
        return self._recordsets
    @recordsets.setter
    def recordsets(self, recordsets):
        """Set the record sets of the zone."""
        self._recordsets = recordsets
    @property
    def metadata(self):
        """Response metadata such as totals (Metedata)."""
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Set the response metadata."""
        self._metadata = metadata
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        models and masking attributes listed in sensitive_list."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Two responses are equal when all attributes match."""
        if not isinstance(other, ShowRecordSetByZoneResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
f7fac36f1597d337dddc25e032b2678433c053e0 | 7,907 | py | Python | padim/utils/utils.py | Pangoraw/PaDiM | 76f757fd51c46abda1ced5a26c2865c6d91a8cca | [
"MIT"
] | 15 | 2021-05-27T09:06:24.000Z | 2022-03-08T06:54:09.000Z | padim/utils/utils.py | Pangoraw/PaDiM | 76f757fd51c46abda1ced5a26c2865c6d91a8cca | [
"MIT"
] | 6 | 2021-06-01T09:52:57.000Z | 2021-12-21T13:24:15.000Z | padim/utils/utils.py | Pangoraw/PaDiM | 76f757fd51c46abda1ced5a26c2865c6d91a8cca | [
"MIT"
] | 3 | 2021-05-27T13:35:29.000Z | 2021-12-14T05:06:06.000Z | """
Utils module
The code from this file comes from:
* https://github.com/taikiinoue45/PaDiM
"""
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import ImageGrid
from numpy import ndarray as NDArray
from skimage import measure
from sklearn.metrics import auc, roc_auc_score, roc_curve
from tqdm import tqdm
import torch
from torch import Tensor
import torch.nn.functional as F
def embeddings_concat(x0: Tensor, x1: Tensor) -> Tensor:
    """Concatenate a high-resolution feature map with a lower-resolution one
    along the channel axis (PaDiM embedding concatenation).

    x0 (b, c0, h0, w0) is unfolded into its s*s kernel positions
    (s = h0 // h1), x1 (b, c1, h1, w1) is appended to the channels at every
    position, and the result is folded back to x0's spatial resolution.
    Returns a tensor of shape (b, c0 + c1, h0, w0).
    """
    b0, c0, h0, w0 = x0.size()
    _, c1, h1, w1 = x1.size()
    s = h0 // h1
    x0 = F.unfold(x0, kernel_size=(s, s), dilation=(1, 1), stride=(s, s))
    x0 = x0.view(b0, c0, -1, h1, w1)
    # Broadcast x1 across every kernel position and concatenate on the channel
    # axis in one vectorized op. Unlike the original per-position Python loop
    # into a torch.zeros buffer, this also preserves the input dtype instead
    # of silently casting everything to float32.
    x1 = x1.unsqueeze(2).expand(-1, -1, x0.size(2), -1, -1)
    z = torch.cat((x0, x1), dim=1)
    z = z.view(b0, -1, h1 * w1)
    z = F.fold(z, kernel_size=(s, s), output_size=(h0, w0), stride=(s, s))
    return z
def mean_smoothing(amaps: Tensor, kernel_size: int = 21) -> Tensor:
    """Smooth anomaly maps with a kernel_size x kernel_size box (mean) filter.

    amaps is expected as (N, 1, H, W); "same" padding keeps the spatial size.
    """
    # Build the averaging kernel directly with the input's dtype AND device:
    # the original always created a float32 CPU kernel and only moved the
    # device, so conv2d failed on e.g. half-precision inputs.
    mean_kernel = torch.full(
        (1, 1, kernel_size, kernel_size),
        1.0 / (kernel_size ** 2),
        dtype=amaps.dtype,
        device=amaps.device,
    )
    return F.conv2d(amaps, mean_kernel, padding=kernel_size // 2, groups=1)
def compute_roc_score(amaps: NDArray, y_trues: NDArray, stems: List[str]) -> float:
    """Compute the image-level ROC-AUC for the anomaly maps.

    Each image's score is the maximum pixel anomaly value of its map.
    Side effect: writes the full curve to roc_curve.csv (consumed later by
    draw_roc_and_pro_curve).
    """
    num_data = len(stems)
    # Image-level anomaly score = max pixel score of each map.
    y_scores = amaps.reshape(num_data, -1).max(axis=1)
    fprs, tprs, thresholds = roc_curve(y_trues, y_scores, pos_label=1, drop_intermediate=False)

    # Save roc_curve.csv for later plotting.
    keys = [f"threshold_{i}" for i in range(len(thresholds))]
    roc_df = pd.DataFrame({"key": keys, "fpr": fprs, "tpr": tprs, "threshold": thresholds})
    roc_df.to_csv("roc_curve.csv", index=False)

    # (Removed: dead commented-out test_dataset.csv merge and a leftover
    # debug print of np.unique(y_trues).)
    return roc_auc_score(y_trues, y_scores)
def compute_pro_score(amaps: NDArray, masks: NDArray) -> float:
    """Compute the PRO (Per-Region Overlap) AUC over binarization thresholds.

    For 200 thresholds between the min and max anomaly score, the maps are
    binarized and the mean per-region true-positive overlap is recorded
    against the false-positive rate. Side effect: writes pro_curve.csv.
    """
    rows = []
    # dtype=bool: the np.bool alias was removed in NumPy >= 1.24.
    binary_amaps = np.zeros_like(amaps, dtype=bool)

    max_step = 200
    min_th = amaps.min()
    max_th = amaps.max()
    delta = (max_th - min_th) / max_step

    for th in tqdm(np.arange(min_th, max_th, delta), desc="compute pro"):
        binary_amaps[amaps <= th] = 0
        binary_amaps[amaps > th] = 1

        pros = []
        for binary_amap, mask in zip(binary_amaps, masks):
            for region in measure.regionprops(measure.label(mask)):
                axes0_ids = region.coords[:, 0]
                axes1_ids = region.coords[:, 1]
                tp_pixels = binary_amap[axes0_ids, axes1_ids].sum()
                pros.append(tp_pixels / region.area)

        inverse_masks = 1 - masks
        fp_pixels = np.logical_and(inverse_masks, binary_amaps).sum()
        fpr = fp_pixels / inverse_masks.sum()

        # np.mean replaces the bare `mean` of the original (a NameError: no
        # such name was imported); rows are collected in a list because
        # DataFrame.append was removed in pandas 2.0.
        rows.append({"pro": np.mean(pros), "fpr": fpr, "threshold": th})

    df = pd.DataFrame(rows, columns=["pro", "fpr", "threshold"])
    df.to_csv("pro_curve.csv", index=False)
    return auc(df["fpr"], df["pro"])
def draw_roc_and_pro_curve(roc_score: float, pro_score: float) -> None:
    """Plot the ROC and PRO curves saved by compute_roc_score /
    compute_pro_score (roc_curve.csv / pro_curve.csv) and write them to
    roc_curve.png and pro_curve.png. Points are colored by threshold."""
    # Single-axes ImageGrid so the threshold colorbar shares the figure.
    grid = ImageGrid(
        fig=plt.figure(figsize=(8, 8)),
        rect=111,
        nrows_ncols=(1, 1),
        axes_pad=0.15,
        share_all=True,
        cbar_location="right",
        cbar_mode="single",
        cbar_size="5%",
        cbar_pad=0.15,
    )

    roc_df = pd.read_csv("roc_curve.csv")
    fpr = roc_df["fpr"]
    tpr = roc_df["tpr"]
    th = roc_df["threshold"]
    v_min = th.min()
    grid[0].plot(fpr, tpr, color="k", label=f"ROC Score: {round(roc_score, 3):.3f}", zorder=1)
    im = grid[0].scatter(fpr, tpr, s=8, c=th, cmap="jet", vmin=v_min, vmax=1, zorder=2)
    grid[0].set_xlim(-0.05, 1.05)
    grid[0].set_ylim(-0.05, 1.05)
    grid[0].set_xticks(np.arange(0, 1.1, 0.1))
    grid[0].set_yticks(np.arange(0, 1.1, 0.1))
    grid[0].tick_params(axis="both", labelsize=14)
    grid[0].set_xlabel("FPR: FP / (TN + FP)", fontsize=24)
    grid[0].set_ylabel("TPR: TP / (TP + FN)", fontsize=24)
    grid[0].xaxis.set_label_coords(0.5, -0.1)
    grid[0].yaxis.set_label_coords(-0.1, 0.5)
    grid[0].legend(fontsize=24)
    grid[0].grid(which="both", linestyle="dotted", linewidth=1)
    cb = plt.colorbar(im, cax=grid.cbar_axes[0])
    cb.ax.tick_params(labelsize="large")
    plt.savefig("roc_curve.png")
    plt.close()

    grid = ImageGrid(
        fig=plt.figure(figsize=(8, 8)),
        rect=111,
        nrows_ncols=(1, 1),
        axes_pad=0.15,
        share_all=True,
        cbar_location="right",
        cbar_mode="single",
        cbar_size="5%",
        cbar_pad=0.15,
    )

    pro_df = pd.read_csv("pro_curve.csv")
    fpr = pro_df["fpr"]
    pro = pro_df["pro"]
    th = pro_df["threshold"]
    grid[0].plot(fpr, pro, color="k", label=f"PRO Score: {round(pro_score, 3):.3f}", zorder=1)
    # NOTE(review): v_min here is still the minimum ROC threshold, not the
    # PRO one -- looks unintentional; confirm before changing the colormap.
    im = grid[0].scatter(fpr, pro, s=8, c=th, cmap="jet", vmin=v_min, vmax=1, zorder=2)
    grid[0].set_xlim(-0.05, 1.05)
    grid[0].set_ylim(-0.05, 1.05)
    grid[0].set_xticks(np.arange(0, 1.1, 0.1))
    grid[0].set_yticks(np.arange(0, 1.1, 0.1))
    grid[0].tick_params(axis="both", labelsize=14)
    grid[0].set_xlabel("FPR: FP / (TN + FP)", fontsize=24)
    grid[0].set_ylabel("PRO: Per-Region Overlap", fontsize=24)
    grid[0].xaxis.set_label_coords(0.5, -0.1)
    grid[0].yaxis.set_label_coords(-0.1, 0.5)
    grid[0].legend(fontsize=24)
    grid[0].grid(which="both", linestyle="dotted", linewidth=1)
    cb = plt.colorbar(im, cax=grid.cbar_axes[0])
    cb.ax.tick_params(labelsize="large")
    plt.savefig("pro_curve.png")
    plt.close()
def savegif(imgs: NDArray, amaps: NDArray, masks: NDArray, stems: List[str]) -> None:
    """Save a (input | ground truth | anomaly map) panel PNG per image under
    ./results and combine them into result.gif with ImageMagick."""
    # BUGFIX: neither os nor subprocess is imported at module level, so the
    # original body raised NameError on first use; import them locally.
    import os
    import subprocess

    os.mkdir("results")
    pbar = tqdm(enumerate(zip(stems, imgs, masks, amaps)), desc="savefig")
    for i, (stem, img, mask, amap) in pbar:

        # How to get two subplots to share the same y-axis with a single colorbar
        # https://stackoverflow.com/a/38940369
        grid = ImageGrid(
            fig=plt.figure(figsize=(12, 4)),
            rect=111,
            nrows_ncols=(1, 3),
            axes_pad=0.15,
            share_all=True,
            cbar_location="right",
            cbar_mode="single",
            cbar_size="5%",
            cbar_pad=0.15,
        )

        img = denormalize(img)

        grid[0].imshow(img)
        grid[0].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[0].set_title("Input Image", fontsize=24)

        grid[1].imshow(img)
        grid[1].imshow(mask, alpha=0.3, cmap="Reds")
        grid[1].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[1].set_title("Ground Truth", fontsize=24)

        grid[2].imshow(img)
        im = grid[2].imshow(amap, alpha=0.3, cmap="jet", vmin=0, vmax=1)
        grid[2].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[2].cax.toggle_label(True)
        grid[2].set_title("Anomaly Map", fontsize=24)

        plt.colorbar(im, cax=grid.cbar_axes[0])
        plt.savefig(f"results/{stem}.png", bbox_inches="tight")
        plt.close()

    # NOTE(inoue): The gif files converted by PIL or imageio were low-quality.
    # So, I used the conversion command (ImageMagick) instead.
    # shell=True is acceptable here: the command string is a fixed literal
    # with no untrusted input.
    subprocess.run("convert -delay 100 -loop 0 results/*.png result.gif", shell=True)
def denormalize(img: NDArray) -> NDArray:
    """Undo ImageNet mean/std normalization and rescale to uint8 [0, 255]."""
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    rescaled = (img * imagenet_std + imagenet_mean) * 255.0
    return rescaled.astype(np.uint8)
| 35.142222 | 95 | 0.614772 | from typing import List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import ImageGrid
from numpy import ndarray as NDArray
from skimage import measure
from sklearn.metrics import auc, roc_auc_score, roc_curve
from tqdm import tqdm
import torch
from torch import Tensor
import torch.nn.functional as F
def embeddings_concat(x0: Tensor, x1: Tensor) -> Tensor:
    """Concatenate a high-resolution feature map with a lower-resolution one
    along the channel axis; returns shape (b, c0 + c1, h0, w0)."""
    b0, c0, h0, w0 = x0.size()
    _, c1, h1, w1 = x1.size()
    s = h0 // h1
    x0 = F.unfold(x0, kernel_size=(s, s), dilation=(1, 1), stride=(s, s))
    x0 = x0.view(b0, c0, -1, h1, w1)
    # Vectorized channel concat at every kernel position; also preserves the
    # input dtype (the per-position loop into torch.zeros forced float32).
    x1 = x1.unsqueeze(2).expand(-1, -1, x0.size(2), -1, -1)
    z = torch.cat((x0, x1), dim=1)
    z = z.view(b0, -1, h1 * w1)
    z = F.fold(z, kernel_size=(s, s), output_size=(h0, w0), stride=(s, s))
    return z
def mean_smoothing(amaps: Tensor, kernel_size: int = 21) -> Tensor:
    """Smooth (N, 1, H, W) anomaly maps with a box filter of the given size."""
    # Kernel created with the input's dtype and device so conv2d does not
    # fail on non-float32 inputs (old code built a float32 kernel).
    mean_kernel = torch.full(
        (1, 1, kernel_size, kernel_size),
        1.0 / (kernel_size ** 2),
        dtype=amaps.dtype,
        device=amaps.device,
    )
    return F.conv2d(amaps, mean_kernel, padding=kernel_size // 2, groups=1)
def compute_roc_score(amaps: NDArray, y_trues: NDArray, stems: List[str]) -> float:
    """Compute the image-level ROC-AUC (max-pixel score per map).

    Side effect: writes the full curve to roc_curve.csv.
    """
    num_data = len(stems)
    y_scores = amaps.reshape(num_data, -1).max(axis=1)
    fprs, tprs, thresholds = roc_curve(y_trues, y_scores, pos_label=1, drop_intermediate=False)
    keys = [f"threshold_{i}" for i in range(len(thresholds))]
    roc_df = pd.DataFrame({"key": keys, "fpr": fprs, "tpr": tprs, "threshold": thresholds})
    roc_df.to_csv("roc_curve.csv", index=False)
    # (Removed leftover debug print of np.unique(y_trues).)
    return roc_auc_score(y_trues, y_scores)
def compute_pro_score(amaps: NDArray, masks: NDArray) -> float:
    """Compute the PRO (Per-Region Overlap) AUC over 200 binarization
    thresholds. Side effect: writes pro_curve.csv."""
    rows = []
    # dtype=bool: the np.bool alias was removed in NumPy >= 1.24.
    binary_amaps = np.zeros_like(amaps, dtype=bool)
    max_step = 200
    min_th = amaps.min()
    max_th = amaps.max()
    delta = (max_th - min_th) / max_step
    for th in tqdm(np.arange(min_th, max_th, delta), desc="compute pro"):
        binary_amaps[amaps <= th] = 0
        binary_amaps[amaps > th] = 1
        pros = []
        for binary_amap, mask in zip(binary_amaps, masks):
            for region in measure.regionprops(measure.label(mask)):
                axes0_ids = region.coords[:, 0]
                axes1_ids = region.coords[:, 1]
                tp_pixels = binary_amap[axes0_ids, axes1_ids].sum()
                pros.append(tp_pixels / region.area)
        inverse_masks = 1 - masks
        fp_pixels = np.logical_and(inverse_masks, binary_amaps).sum()
        fpr = fp_pixels / inverse_masks.sum()
        # np.mean replaces the undefined bare `mean` (NameError in original);
        # rows gathered in a list since DataFrame.append was removed in pandas 2.
        rows.append({"pro": np.mean(pros), "fpr": fpr, "threshold": th})
    df = pd.DataFrame(rows, columns=["pro", "fpr", "threshold"])
    df.to_csv("pro_curve.csv", index=False)
    return auc(df["fpr"], df["pro"])
def draw_roc_and_pro_curve(roc_score: float, pro_score: float) -> None:
    """Plot roc_curve.csv and pro_curve.csv (written by compute_roc_score /
    compute_pro_score) to roc_curve.png and pro_curve.png; points are colored
    by threshold."""
    grid = ImageGrid(
        fig=plt.figure(figsize=(8, 8)),
        rect=111,
        nrows_ncols=(1, 1),
        axes_pad=0.15,
        share_all=True,
        cbar_location="right",
        cbar_mode="single",
        cbar_size="5%",
        cbar_pad=0.15,
    )
    roc_df = pd.read_csv("roc_curve.csv")
    fpr = roc_df["fpr"]
    tpr = roc_df["tpr"]
    th = roc_df["threshold"]
    v_min = th.min()
    grid[0].plot(fpr, tpr, color="k", label=f"ROC Score: {round(roc_score, 3):.3f}", zorder=1)
    im = grid[0].scatter(fpr, tpr, s=8, c=th, cmap="jet", vmin=v_min, vmax=1, zorder=2)
    grid[0].set_xlim(-0.05, 1.05)
    grid[0].set_ylim(-0.05, 1.05)
    grid[0].set_xticks(np.arange(0, 1.1, 0.1))
    grid[0].set_yticks(np.arange(0, 1.1, 0.1))
    grid[0].tick_params(axis="both", labelsize=14)
    grid[0].set_xlabel("FPR: FP / (TN + FP)", fontsize=24)
    grid[0].set_ylabel("TPR: TP / (TP + FN)", fontsize=24)
    grid[0].xaxis.set_label_coords(0.5, -0.1)
    grid[0].yaxis.set_label_coords(-0.1, 0.5)
    grid[0].legend(fontsize=24)
    grid[0].grid(which="both", linestyle="dotted", linewidth=1)
    cb = plt.colorbar(im, cax=grid.cbar_axes[0])
    cb.ax.tick_params(labelsize="large")
    plt.savefig("roc_curve.png")
    plt.close()
    grid = ImageGrid(
        fig=plt.figure(figsize=(8, 8)),
        rect=111,
        nrows_ncols=(1, 1),
        axes_pad=0.15,
        share_all=True,
        cbar_location="right",
        cbar_mode="single",
        cbar_size="5%",
        cbar_pad=0.15,
    )
    pro_df = pd.read_csv("pro_curve.csv")
    fpr = pro_df["fpr"]
    pro = pro_df["pro"]
    th = pro_df["threshold"]
    grid[0].plot(fpr, pro, color="k", label=f"PRO Score: {round(pro_score, 3):.3f}", zorder=1)
    # NOTE(review): v_min is still the ROC threshold minimum, not the PRO
    # one -- looks unintentional; confirm before changing.
    im = grid[0].scatter(fpr, pro, s=8, c=th, cmap="jet", vmin=v_min, vmax=1, zorder=2)
    grid[0].set_xlim(-0.05, 1.05)
    grid[0].set_ylim(-0.05, 1.05)
    grid[0].set_xticks(np.arange(0, 1.1, 0.1))
    grid[0].set_yticks(np.arange(0, 1.1, 0.1))
    grid[0].tick_params(axis="both", labelsize=14)
    grid[0].set_xlabel("FPR: FP / (TN + FP)", fontsize=24)
    grid[0].set_ylabel("PRO: Per-Region Overlap", fontsize=24)
    grid[0].xaxis.set_label_coords(0.5, -0.1)
    grid[0].yaxis.set_label_coords(-0.1, 0.5)
    grid[0].legend(fontsize=24)
    grid[0].grid(which="both", linestyle="dotted", linewidth=1)
    cb = plt.colorbar(im, cax=grid.cbar_axes[0])
    cb.ax.tick_params(labelsize="large")
    plt.savefig("pro_curve.png")
    plt.close()
def savegif(imgs: NDArray, amaps: NDArray, masks: NDArray, stems: List[str]) -> None:
    """Save a (input | ground truth | anomaly map) panel PNG per image under
    ./results and combine them into result.gif with ImageMagick."""
    # BUGFIX: os and subprocess are not imported at module level, so the
    # original raised NameError; import them locally.
    import os
    import subprocess
    os.mkdir("results")
    pbar = tqdm(enumerate(zip(stems, imgs, masks, amaps)), desc="savefig")
    for i, (stem, img, mask, amap) in pbar:
        grid = ImageGrid(
            fig=plt.figure(figsize=(12, 4)),
            rect=111,
            nrows_ncols=(1, 3),
            axes_pad=0.15,
            share_all=True,
            cbar_location="right",
            cbar_mode="single",
            cbar_size="5%",
            cbar_pad=0.15,
        )
        img = denormalize(img)
        grid[0].imshow(img)
        grid[0].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[0].set_title("Input Image", fontsize=24)
        grid[1].imshow(img)
        grid[1].imshow(mask, alpha=0.3, cmap="Reds")
        grid[1].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[1].set_title("Ground Truth", fontsize=24)
        grid[2].imshow(img)
        im = grid[2].imshow(amap, alpha=0.3, cmap="jet", vmin=0, vmax=1)
        grid[2].tick_params(labelbottom=False, labelleft=False, bottom=False, left=False)
        grid[2].cax.toggle_label(True)
        grid[2].set_title("Anomaly Map", fontsize=24)
        plt.colorbar(im, cax=grid.cbar_axes[0])
        plt.savefig(f"results/{stem}.png", bbox_inches="tight")
        plt.close()
    # Fixed literal command, so shell=True carries no injection risk here.
    subprocess.run("convert -delay 100 -loop 0 results/*.png result.gif", shell=True)
def denormalize(img: NDArray) -> NDArray:
    """Undo ImageNet mean/std normalization and rescale to uint8 [0, 255]."""
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    rescaled = (img * imagenet_std + imagenet_mean) * 255.0
    return rescaled.astype(np.uint8)
| true | true |
f7fac379b2652b938dad0dd8689fdf9e95e06da8 | 5,192 | py | Python | Scripts/read_HadCRUT.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | 2 | 2022-01-20T20:20:04.000Z | 2022-02-21T12:33:37.000Z | Scripts/read_HadCRUT.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | null | null | null | Scripts/read_HadCRUT.py | zmlabe/predictGMSTrate | 2bde4a106de1988d772f15a52d283d23bb7128f4 | [
"MIT"
] | 3 | 2022-01-19T16:25:37.000Z | 2022-03-22T13:25:00.000Z | """
Function reads in monthly data from HadCRUTv4
Notes
-----
Author : Zachary Labe
Date : 10 January 2022
Usage
-----
[1] read_HadCRUT(directory,sliceperiod,sliceyear,
sliceshape,addclimo,slicenan)
"""
def read_HadCRUT(directory,sliceperiod,sliceyear,sliceshape,addclimo,slicenan):
    """
    Function reads monthly data from HadCRUT

    Parameters
    ----------
    directory : string
        path for data
    sliceperiod : string
        how to average time component of data ('annual', 'DJF', 'JJA', 'none')
    sliceyear : numpy array
        years of data (used only for the log message)
    sliceshape : int
        shape of output array (1 = flattened, 3 = [time,lat,lon], 4 = [yr,mn,lat,lon])
    addclimo : binary
        True or false to add climatology
    slicenan : string or float
        Set missing values

    Returns
    -------
    lat : 1d numpy array
        latitudes
    lon : 1d numpy array
        longitudes
    var : numpy array
        shape according to sliceperiod/sliceshape

    Usage
    -----
    lat,lon,var = read_HadCRUT(directory,sliceperiod,sliceyear,
                               sliceshape,addclimo,slicenan)
    """
    print('\n>>>>>>>>>> STARTING read_HadCRUT function!')

    ### Import modules
    import numpy as np
    from netCDF4 import Dataset
    import warnings
    import calc_Utilities as UT
    warnings.simplefilter(action='ignore', category=FutureWarning)
    warnings.simplefilter(action='ignore', category=RuntimeWarning)

    ###########################################################################
    ### Parameters (unused locals `time` and `monthslice` removed)
    mon = 12

    ###########################################################################
    ### Read in data
    filename = 'T2M_HadCRUT_1850-2020.nc'
    data = Dataset(directory + filename,'r')
    lat1 = data.variables['latitude'][:]
    lon1 = data.variables['longitude'][:]
    anom = data.variables['T2M'][:,:,:]
    data.close()

    print('Years of output =',sliceyear.min(),'to',sliceyear.max())

    ###########################################################################
    ### Reshape data into [year,month,lat,lon]
    datamon = np.reshape(anom,(anom.shape[0]//mon,mon,
                               lat1.shape[0],lon1.shape[0]))

    ###########################################################################
    ### Return absolute temperature (1961-1990 baseline)
    if addclimo == True:
        # NOTE(review): extension looks truncated ('.n'); presumably should
        # be 'CLIM_HadCRUT_1880-2020.nc' -- confirm against the data folder.
        filename = 'CLIM_HadCRUT_1880-2020.n'
        datac = Dataset(directory + filename,'r')
        clim = datac['CLIM'][:,:,:]
        datac.close()

        ### Add [anomaly+climatology]
        tempmon = datamon + clim
        print('Completed: calculated absolute temperature!')
    else:
        tempmon = datamon
        print('Completed: calculated anomalies!')

    ###########################################################################
    ### Slice over months (currently = [yr,mn,lat,lon])
    ### Shape of output array
    if sliceperiod == 'annual':
        temptime = np.nanmean(tempmon,axis=1)
        if sliceshape == 1:
            tempshape = temptime.ravel()
        elif sliceshape == 3:
            tempshape = temptime
        print('Shape of output = ', tempshape.shape,[[tempshape.ndim]])
        print('Completed: ANNUAL MEAN!')
    elif sliceperiod == 'DJF':
        tempshape = UT.calcDecJanFeb(tempmon,lat1,lon1,'surface',1)
        print('Shape of output = ', tempshape.shape,[[tempshape.ndim]])
        print('Completed: DJF MEAN!')
    elif sliceperiod == 'JJA':
        temptime = np.nanmean(tempmon[:,5:8,:,:],axis=1)
        if sliceshape == 1:
            tempshape = temptime.ravel()
        elif sliceshape == 3:
            tempshape = temptime
        print('Shape of output = ', tempshape.shape,[[tempshape.ndim]])
        print('Completed: JJA MEAN!')
    elif sliceperiod == 'none':
        temptime = tempmon
        if sliceshape == 1:
            # BUGFIX: was `tempshape.ravel()` -- NameError, tempshape unbound here
            tempshape = temptime.ravel()
        elif sliceshape == 3:
            tempshape = np.reshape(temptime,(temptime.shape[0]*temptime.shape[1],
                                             temptime.shape[2],temptime.shape[3]))
        elif sliceshape == 4:
            tempshape = tempmon
        print('Shape of output =', tempshape.shape, [[tempshape.ndim]])
        print('Completed: ALL MONTHS!')

    ###########################################################################
    ### Change missing values
    if slicenan == 'nan':
        # Keep NaNs as NaNs (no-op assignment kept for symmetry with else).
        tempshape[np.where(np.isnan(tempshape))] = np.nan
        print('Completed: missing values are =',slicenan)
    else:
        tempshape[np.where(np.isnan(tempshape))] = slicenan

    print('>>>>>>>>>> ENDING read_HadCRUT function!')
    return lat1,lon1,tempshape
### Test functions - do not use!
# import numpy as np
# import matplotlib.pyplot as plt
# directory = '/Users/zlabe/Data/HadCRUT/'
# sliceperiod = 'DJF'
# sliceyear = np.arange(1850,2020+1,1)
# sliceshape = 3
# slicenan = 'nan'
# addclimo = True
# lat,lon,var = read_HadCRUT(directory,sliceperiod,sliceyear,sliceshape,addclimo,slicenan) | 34.613333 | 90 | 0.543721 |
def read_HadCRUT(directory,sliceperiod,sliceyear,sliceshape,addclimo,slicenan):
print('\n>>>>>>>>>> STARTING read_HadCRUT function!')
netCDF4 import Dataset
import warnings
import calc_Utilities as UT
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
| true | true |
f7fac37d4c22be12ddd99033a7ea27c603cf062d | 21,688 | py | Python | stl/signals/signal.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | stl/signals/signal.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | stl/signals/signal.py | pieter-hendriks/STL-monitoring | 114b73b1f4b0687b11b8842b3c4a1c8af7b0d9df | [
"MIT"
] | null | null | null | """ Implementation of a list of signals; contains some QoL methods for reading from files """
from typing import List, Tuple, Iterable
import warnings
import math
from sortedcontainers import SortedList
from .signalvalue import SignalValue
from ..utility import Interval, LineSegment, Point
def sortedListKeyFunction(x: SignalValue) -> float:
	""" Sort key for SortedList: orders SignalValues by their timestamp.
	A named function (not a lambda) so Signal instances stay picklable."""
	return x.getTime()
# pylint: disable=too-many-public-methods
class Signal:
""" Implementation of the Signal class for STL
Contains a set of timeseries data values with associated timestamp and derivative.
Between two such data points, Signals are assumed to be continuous. """
	def __init__(
	    self, name: str = None, times: List[float] = None, values: List[float] = None, derivatives: List[float] = None
	):
		""" Initializes a Signal. Name is autogenerated if not given.
		Set of times must be sorted in ascending order and must not contain duplicates.
		Timestamps are rounded to 5 decimals; derivatives are estimated via
		recomputeDerivatives() when not supplied. """
		# Ensure that name is valid (a string or None)
		if name is not None and not isinstance(name, str):
			raise RuntimeError(f"Name argument is {type(name)} (value = {name}) instead of str")
		if name is None:
			name: str = "defaultname"
		self.name: str = name
		# Check that all times are unequal -- if times is not given in ascending order, bugs may appear.
		if times is not None:
			assert values is not None, "We can't autocompute values."
			assert all(times[i] != times[i + 1] for i in range(len(times) - 1)), "debug assert: times mustn't be equal"
			if len(times) != len(values):
				assert len(times) == len(values)
		elif values is not None:
			# This shouldn't be reached in our usual cases - autogenerating timestamps seems weird.
			assert False, "DEBUG STATEMENT: May need to autogenerate timestamps here."
		# Set the variables to avoid errors in initialization
		# If no values, the result should be empty signal
		safeValue = values if values is not None else []
		# Times are autogenerated to index of value if none given
		safeTime = [round(x, 5) for x in times] if times is not None else list(range(len(safeValue)))
		# Derivatives are zero if none given
		safeDeriv = derivatives if derivatives is not None else [0] * len(safeValue)
		# Prevent errors
		assert len(safeValue) == len(safeTime) == len(safeDeriv)
		# Initialize
		# For efficient item access, we need the sorted list to not be a tree
		# This allows O(1) index access instead of O(log n) index access.
		# HACK: poking the private SortedList._load keeps the structure flat;
		# this relies on a sortedcontainers implementation detail.
		self.checkpoints: Iterable[SignalValue] = SortedList(key=sortedListKeyFunction)
		self.checkpoints._load = 2**62 - 1
		self.times: Iterable[float] = SortedList()
		self.times._load = 2**62 - 1
		# Fill the lists after setting them to be 1-level max
		if safeTime:
			self.checkpoints.update(SignalValue(x, y, d) for x, y, d in zip(safeTime, safeValue, safeDeriv))
			self.times.update(safeTime)
		# No explicit derivatives given: estimate them from the samples.
		if derivatives is None:
			self.recomputeDerivatives()
@classmethod
def createConstant(cls, name: str, value: float, timestamps: List[float] = (0, float('inf'))) -> 'Signal':
""" Create a constant Signal. Timestamps = [0, float('inf')] if unspecified. """
s = cls(name)
s.checkpoints.update(SignalValue(time, value, 0) for time in timestamps)
s.times.update(x for x in timestamps)
return s
# We type-hint s as 'Signal' to avoid circular import
# It should be BooleanSignal, in principle. But in effect, this method would work equally well from another Signal.
@classmethod
def fromBooleanSignal(cls, s: 'Signal') -> 'Signal':
""" Conversion from a Boolean Signal. """
if not s.checkpoints:
return cls(s.getName())
times, values, derivatives = zip(*[(cp.getTime(), cp.getValue(), cp.getDerivative()) for cp in s.checkpoints])
newSignal = cls(s.getName(), times, values, derivatives)
# Since we convert from Boolean, compute the derivatives.
newSignal.recomputeDerivatives()
return newSignal
@classmethod
def fromCheckpoints(cls, name: str, checkpoints: List[SignalValue]) -> 'Signal':
""" Constructs a Signal instance from a list of checkpoints. Useful for copying. """
s = cls(name)
s.checkpoints.update(checkpoints)
s.times.update(x.getTime() for x in checkpoints)
return s
	@classmethod
	def computeCheckpointsForComparableSignal(cls, lhsSignal: 'Signal', rhsSignal: 'Signal') -> Tuple['Signal', 'Signal']:
		""" Gets the checkpoints (sample points) with timestamps from either Signal
		(computing interpolated value for the other Signal when necessary)
		where the timestamps fall within the Interval in which both signals are defined (i.e. intersect(lhsInterval,)). """
		# These may be Signal or BooleanSignal; annotated Signal because BooleanSignal is a subclass
		lhsResult: Signal = cls("lhs")
		rhsResult: Signal = cls('rhs')
		cp: SignalValue
		if not lhsSignal.getTimes() or not rhsSignal.getTimes():
			# If either signal is empty, the intersection is empty
			return lhsResult, rhsResult
		bothDefinedInterval: Interval = Interval.computeIntersection(
		    lhsSignal.getDefinedTimeInterval(), rhsSignal.getDefinedTimeInterval()
		)
		# Take every lhs sample inside the shared domain and mirror it onto
		# the rhs result via interpolation at the same timestamp.
		for cp in lhsSignal.getCheckpoints():
			if bothDefinedInterval.contains(cp.getTime()):
				lhsResult.addCheckpoint(cp)
				rhsResult.emplaceCheckpoint(
				    cp.getTime(), rhsSignal.computeInterpolatedValue(cp.getTime()),
				    rhsSignal.computeInterpolatedDerivative(cp.getTime())
				)
		# Then the rhs samples, mirroring onto lhs.
		for cp in rhsSignal.getCheckpoints():
			# Avoid double entries by checking if the given time is already in the result.
			if bothDefinedInterval.contains(cp.getTime()) and cp.getTime() not in rhsResult.getTimes():
				rhsResult.addCheckpoint(cp)
				lhsResult.emplaceCheckpoint(
				    cp.getTime(), lhsSignal.computeInterpolatedValue(cp.getTime()),
				    lhsSignal.computeInterpolatedDerivative(cp.getTime())
				)
		return lhsResult, rhsResult
    @classmethod
    def computeComparableSignals(cls, lhsSignal: 'Signal', rhsSignal: 'Signal') -> 'SignalList':  # type: ignore
        """ Create Signals that are comparable - this requires both Signals having the same sample point timings.\n
        First, we take all sample points from either Signal within the Interval in which both are defined.\n
        Second, we compute all points where, based on the derivative, the Signals intersect. \n
        Returns two Signals with an equal amount of sample points, where the time part of each sample point pair is equal.

        Returned as a 2-element list [lhsResult, rhsResult]; both empty if either input is empty.
        """
        # Get the sampling points where self and other are a) both defined or b) intersect
        # So, any time x where x in self.times() and x in other.times()
        # + any time y where, through the derivatives, we know that self.value(x) == other.value(x), assuming interpolation.
        assert isinstance(lhsSignal, type(rhsSignal)), "Operation is unsupported between signals of different semantics."
        lhsResult: Signal = cls('empty')
        rhsResult: Signal = cls('empty')
        if lhsSignal.isEmpty() or rhsSignal.isEmpty():
            return [lhsResult, rhsResult]
        # We build the sequence (ri)i≤nz containing the sampling points of y and y' when they are both defined,
        # and the points where y and y' punctually intersect.
        # First, we get the sampling points from the signals where they are both defined
        # (i.e. any t in s1 or s2 s.t. it is in domain of both)
        lhsResult, rhsResult = cls.computeCheckpointsForComparableSignal(lhsSignal, rhsSignal)
        # Second, we get the intersection points
        if not lhsResult.isEmpty() and not rhsResult.isEmpty():
            lhsLines: List[LineSegment] = lhsResult.computeLines()
            rhsLines: List[LineSegment] = rhsResult.computeLines()
            intersectPoints: List[Point] = LineSegment.computeIntersectionPoints(lhsLines, rhsLines)
            for point in intersectPoints:
                # Normalize before insertion so timestamps satisfy the rounding
                # invariant enforced by emplaceCheckpoint.
                point.normalize()
                lhsResult.emplaceCheckpoint(point.x, point.y, 0)
                rhsResult.emplaceCheckpoint(point.x, point.y, 0)
            # Derivatives are stale after inserting new points; refresh them.
            lhsResult.recomputeDerivatives()
            rhsResult.recomputeDerivatives()
        return [lhsResult, rhsResult]
def computeInterpolatedCheckpoint(self, t: float) -> SignalValue:
"""Compute an interpolated checkpoint for the specified time"""
return SignalValue(t, self.computeInterpolatedValue(t), self.computeInterpolatedDerivative(t))
# Get the value of a signal at time step t
def computeInterpolatedValue(self, t: float) -> float:
"""Compute an interpolated value for the specified time"""
# Mirror Efficient Robustness paper implementation:
# A signal value outside of the defined range of the Signal is undefined -- we crash here to avoid undefined behaviour
if not self.getTimes() or not self.getDefinedTimeInterval().contains(t, closed=True):
raise RuntimeError("Value outside of defined interval")
i = self.computeIndexForSmallestTimeAfter(t)
# Handle exact match
if self.getTime(i) == t:
return self.getValue(i)
# If it's somewhere between two data points, interpolate
value = self.getValue(i - 1)
derivative = self.getDerivative(i - 1)
# fraction of the way the interpolated point is between the point at i-1 and i
fraction = (t - self.getTime(i - 1)) / (self.getTime(i) - self.getTime(i - 1))
value += derivative * fraction
return value
# Get a derivative of the signal at time step t
def computeInterpolatedDerivative(self, t: float) -> float:
"""Compute an interpolated derivative for the specified time.\n
Following the finite, piecewise, linear, continuous hypothesis,
this returns the derivative between the values (in self.getTimes()) that t is located between.
(i.e. self.getDerivative(i) where i is the largest index such that t < self.getTime(i))"""
return self.getDerivative(self.computeIndexForLargestTimeBefore(t))
    def computeInterval(self, interval: Interval, half_open: bool = False) -> 'Signal':
        """ Find the part of the signal that fits within the specified interval
        (endpoint inclusion based on value of 'half_open')

        When half_open is True the upper endpoint is excluded. Interval
        endpoints that fall strictly inside the signal's domain but between
        sample points are materialized via interpolation, so the result is
        still well-defined at the interval boundaries. Returns a new Signal of
        the same (sub)class as self.
        """
        constructedSignalName = f"{self.getName()}_interval"
        # Preserve the dynamic type so BooleanSignal subclasses stay Boolean.
        signalType = type(self)
        output: 'Signal' = signalType(constructedSignalName)
        # Handle cases where lower bound is larger or equal to biggest values in the Signal.
        if interval.getLower() > self.getLargestTime():
            return output
        if interval.getLower() == self.getLargestTime():
            output.addCheckpoint(self.checkpoints[-1])
            return output
        # Consider trivial interval case:
        # a half-open interval with equal bounds is empty; a closed one is a single point.
        if interval.getUpper() == interval.getLower():
            if not half_open:
                output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getLower()))
            return output
        # A valid index in the Signal, where timestamp is as close as possible to
        # (but never smaller than) the lower bound of the interval
        lowerBoundIndex = self.computeIndexForSmallestTimeAfter(interval.getLower(), inclusive=True)
        # A valid index in the Signal, where timestamp is as close as possible to
        # (but never larger than or equal to) the upper bound of the interval
        upperBoundIndex = self.computeIndexForLargestTimeBefore(interval.getUpper(), not half_open)
        # Get the output Signal. It might be missing up to two values still:
        # one at interval.getLower() and one at interval.getUpper()
        output = self.fromCheckpoints(constructedSignalName, self.checkpoints[lowerBoundIndex:upperBoundIndex + 1])
        if interval.getLower() not in self.getTimes() and interval.getLower() > self.getTime(0):
            # If lower bound of the interval isn't included, and does fall within our defined range, compute it
            output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getLower()))
        if not half_open and interval.getUpper() not in self.getTimes() and interval.getUpper() < self.getTime(-1):
            # If upper bound of the interval isn't included, should be, and falls within our defined range, compute it
            output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getUpper()))
        return output
def computeIndexForTime(self, time: float) -> int:
""" Find the index where 'time' is located. Errors if time not in the current checkpoint list. """
index = self.times.bisect_left(time)
assert self.getTime(index) == time, "Can't find an index for a time that isn't in our list."
return index
def computeLargestTimeBefore(self, time: float, inclusive: bool = True) -> float:
""" Return the largest timestamp (specified in a checkpoint),
smaller than (or equal to, if inclusive is True) the value in the parameter"""
return self.getTime(self.computeIndexForLargestTimeBefore(time, inclusive))
def computeIndexForSmallestTimeAfter(self, time: float, inclusive: bool = True) -> int:
""" Return the index at which the checkpoint with the timestamp closest to
(but always larger than (or eq iff inclusive)) the given time is """
assert self.getDefinedTimeInterval().contains(time)
index = self.times.bisect_left(time)
if not inclusive and self.getTime(index) == time:
return index + 1
return index
def computeIndexForLargestTimeBefore(self, time: float, inclusive: bool = True) -> int:
""" Return the index at which the checkpoint with the timestamp closest to
(but always smaller than (or eq iff inclusive)) the given time is """
if time > self.getTime(-1):
return self.getCheckpointCount() - 1
if time == self.getTime(-1):
if inclusive:
return self.getCheckpointCount() - 1
return self.getCheckpointCount() - 2
index = self.times.bisect_left(time)
if inclusive and self.getTime(index) == time:
return index
return index - 1
def computeSmallestTimeAfter(self, time: float, inclusive: bool = True) -> float:
"""Get the smallest time (that is specified in a checkpoint) that is larger than
(or equal to, if inclusive is True) the value in parameter"""
# This method can only work if the time is in the interval the Signal is defined over
return self.getTime(self.computeIndexForSmallestTimeAfter(time, inclusive))
    def oldFormat(self) -> List[List[float]]:
        """Grab representation of this signal in the format used in old version of the code.
        May be useful to compare outputs between the versions.

        Returns [times, values, derivatives]; the times entry is the raw
        internal sublist of the SortedList (not a copy), so do not mutate it.
        """
        # pylint: disable=protected-access
        return [self.getTimes()._lists[0], self.getValues(), self.getDerivatives()]
        # pylint: enable=protected-access
def computeLines(self) -> List[LineSegment]:
""" Convert the Signal into a set of LineSegments; used to compute intersections. """
ret: List[LineSegment] = []
for i in range(self.getCheckpointCount() - 1):
cpA: SignalValue = self.getCheckpoint(i)
cpB: SignalValue = self.getCheckpoint(i + 1)
ret.append(LineSegment(Point(cpA.getTime(), cpA.getValue()), Point(cpB.getTime(), cpB.getValue())))
return ret
def getValues(self) -> List[float]:
""" Get the values for the signal. """
return [x.getValue() for x in self.checkpoints]
    def getTimes(self) -> List[float]:
        """ Get the times for the signal.

        NOTE(review): despite the List[float] annotation this returns the
        internal SortedList itself, not a copy (oldFormat relies on its
        ._lists attribute); mutating the result mutates the signal.
        """
        return self.times
def getDerivatives(self) -> List[float]:
""" Get the derivatives for the signal. """
return [x.getDerivative() for x in self.checkpoints]
    def getCheckpointCount(self) -> int:
        """ Get the size of the checkpoint list for the signal,
        i.e. the number of sample points. """
        return len(self.checkpoints)
    def getCheckpoints(self) -> List[SignalValue]:
        """ Get the list of checkpoints for the signal.

        Returns the internal sorted container (not a copy); mutations affect
        the signal directly.
        """
        return self.checkpoints
    def getName(self) -> str:
        """ Get the name for the signal (used when labeling derived signals). """
        return self.name
    def setName(self, name: str) -> None:
        """ Set the Signal's name attribute. Does not validate the argument. """
        self.name = name
def getSmallestTime(self) -> float:
""" Return the value of the smallest timestamp (usually 0) """
# pylint: disable=protected-access
assert len(self.times._lists) == 1
return self.times._lists[0][0]
# pylint: enable=protected-access
def getLargestTime(self) -> float:
""" Return the value of the largest timestamp """
# Returns the checkpoint in self.checkpoints with c.getTime() largest
# pylint: disable=protected-access
assert len(self.times._lists) == 1
return self.times._lists[0][-1]
# pylint: enable=protected-access
    def getTime(self, index: int) -> float:
        """Return the timestamp of the signal checkpoint at the specified index.

        Goes through the first SortedList sublist directly for O(1) access;
        assumes the container is kept flat (see the asserts in
        getSmallestTime/getLargestTime). Negative indices work as usual.
        """
        # pylint: disable=protected-access
        return self.times._lists[0][index]
        # pylint: enable=protected-access
    def getValue(self, index: int) -> float:
        """Return the value of the signal checkpoint at the specified index.

        Direct sublist access for O(1) lookup; assumes a flat SortedList.
        """
        # pylint: disable=protected-access
        return self.checkpoints._lists[0][index].getValue()
        # pylint: enable=protected-access
    def getDerivative(self, index: int) -> float:
        """Return the derivative of the signal checkpoint at the specified index.

        Direct sublist access for O(1) lookup; assumes a flat SortedList.
        """
        # pylint: disable=protected-access
        return self.checkpoints._lists[0][index].getDerivative()
        # pylint: enable=protected-access
    def getCheckpoint(self, index: int) -> SignalValue:
        """Return the signal checkpoint at the specified index.

        Returns the stored SignalValue itself (no copy); mutating it mutates
        the signal. Direct sublist access assumes a flat SortedList.
        """
        # pylint: disable=protected-access
        return self.checkpoints._lists[0][index]
        # pylint: enable=protected-access
    def setValue(self, index: int, value: float) -> None:
        """ Set the value for the checkpoint at index.

        Mutates the stored SignalValue in place; timestamps (and therefore
        ordering) are unaffected.
        """
        # pylint: disable=protected-access
        self.checkpoints._lists[0][index].setValue(value)
        # pylint: enable=protected-access
    def setDerivative(self, index: int, derivative: float) -> None:
        """ Set the derivative for the checkpoint at index.

        Mutates the stored SignalValue in place; negative indices work
        (recomputeDerivatives uses -1 for the final checkpoint).
        """
        # pylint: disable=protected-access
        self.checkpoints._lists[0][index].setDerivative(derivative)
        # pylint: enable=protected-access
def getDefinedTimeInterval(self) -> Interval:
""" Returns the Interval of time over which this Signal is defined
Starts at the first sample point, ends at the last. """
# Checkpoints are sorted by time, so we can just get this by index.
if len(self.times) == 0:
return Interval(0, 0)
if len(self.times) == 1:
return Interval(self.getTime(0), self.getTime(0))
return Interval(self.getTime(0), self.getTime(-1))
    def popCheckpoint(self) -> SignalValue:
        """Pop the last element from the checkpoint list and return it.

        Also drops the matching entry from the parallel times container so the
        two stay in sync.
        """
        self.times.pop()
        return self.checkpoints.pop()
def addCheckpoint(self, sv: SignalValue) -> None:
"""Add a checkpoint to the signal. Insertion location is determined by the SignalValue's timestamp"""
# Use the emplace method to make a copy
# If we simply .add(sv) we reference the same object, this could cause issues in e.g. the derivative computations
# where different lists may require different derivatives of the same SignalValue
self.emplaceCheckpoint(sv.getTime(), sv.getValue(), sv.getDerivative())
def emplaceCheckpoint(self, time: float, value: float, derivative: float = None) -> None:
"""Add a (constructed) checkpoint to the signal. Insertion location is determined by the timestamp"""
if derivative is None:
derivative = 0
assert time == round(time, 5)
# Ensure similar time insert has similar value
if time in self.times:
if not math.isclose(self.getValue(self.computeIndexForTime(time)), value, rel_tol=1e-4):
warnings.warn("Skipped insertion of a duplicate point with differing value.")
return
self.checkpoints.add(SignalValue(time, value, derivative if derivative is not None else 0))
self.times.add(time)
    def removeCheckpoint(self, index: int) -> None:
        """ Removes a checkpoint at the specified index from the signal.

        Pops from both parallel containers so they stay in sync.
        """
        self.checkpoints.pop(index)
        self.times.pop(index)
def isEmpty(self) -> bool:
""" Checks if the Signal is empty (i.e. contains no sample points)."""
return self.getCheckpointCount() == 0
def isSingular(self) -> bool:
""" Returns if the signal is defined by a single sample point. """
return self.getCheckpointCount() == 1
    def shift(self, offset: float) -> 'Signal':
        """Return a copy named '<name>_shift' with 'offset' added to every timestamp.

        NOTE(review): the old docstring said the offset was subtracted, but the
        code adds it; docstring corrected to match the implementation.
        """
        cp: SignalValue
        newCheckpoints: List[SignalValue] = []
        for cp in self.checkpoints:
            newCheckpoints.append(SignalValue(cp.getTime() + offset, cp.getValue(), cp.getDerivative()))
        return self.fromCheckpoints(f"{self.name}_shift", newCheckpoints)
def recomputeDerivatives(self):
"""Re-compute the derivatives part of each SignalValue, to make sure it matches the current values."""
if self.checkpoints: # no-op if empty list
for i in range(len(self.checkpoints) - 1):
valueDiff = self.getValue(i + 1) - self.getValue(i)
timeDiff = self.getTime(i + 1) - self.getTime(i)
if timeDiff == 0:
assert False, "This shouldn't be possible - means we have a double time entry."
self.setDerivative(i, valueDiff / timeDiff)
self.setDerivative(-1, 0)
def __str__(self) -> str:
ret = ["Signal with the following checkpoint entries: "]
for cp in self.checkpoints:
ret.append(f'\t{cp.getTime()} -> <{cp.getValue()}, {cp.getDerivative()}>')
return '\n'.join(ret)
def __repr__(self) -> str:
times, values, derivatives = [], [], []
for x in self.checkpoints:
times.append(x.getTime())
values.append(x.getValue())
derivatives.append(x.getDerivative())
return f"Signal('{self.name}', {times.__repr__()}, {values.__repr__()}, {derivatives.__repr__()})"
def __eq__(self, other: 'Signal') -> bool:
if not isinstance(self, type(other)) or not isinstance(other, type(self)):
return False
if self.name != other.name:
return False
if len(self.checkpoints) != len(other.checkpoints):
return False
for scp, ocp in zip(self.checkpoints, other.checkpoints):
if scp != ocp:
return False
return True
    def filterTimes(self, times: List[float]) -> None:
        """ Filters the times in the current signal (no copy), so that all checkpoints
        that do not have cp.t in times are removed from the Signal.
        Used to filter the output from functions to the expected output times.

        NOTE(review): return annotation corrected from 'Signal' to None — the
        method mutates in place and never returns a value.
        NOTE(review): the scan assumes 'times' is sorted and aligns with the
        signal's timestamps from the left; if 'times' contains a timestamp the
        signal lacks (times[index] < self.getTime(index)) the loop can
        misalign, and if 'times' is shorter than the remaining checkpoints it
        raises IndexError — confirm against callers.
        """
        index = 0
        while index < self.getCheckpointCount():
            # The signal has a checkpoint whose time is not in 'times': drop it.
            if times[index] > self.getTime(index):
                self.removeCheckpoint(index)
            else:
                index += 1
# pylint: enable=too-many-public-methods
| 46.144681 | 120 | 0.729897 | from typing import List, Tuple, Iterable
import warnings
import math
from sortedcontainers import SortedList
from .signalvalue import SignalValue
from ..utility import Interval, LineSegment, Point
def sortedListKeyFunction(x: SignalValue):
return x.getTime()
class Signal:
def __init__(
self, name: str = None, times: List[float] = None, values: List[float] = None, derivatives: List[float] = None
):
if name is not None and not isinstance(name, str):
raise RuntimeError(f"Name argument is {type(name)} (value = {name}) instead of str")
if name is None:
name: str = "defaultname"
self.name: str = name
if times is not None:
assert values is not None, "We can't autocompute values."
assert all(times[i] != times[i + 1] for i in range(len(times) - 1)), "debug assert: times mustn't be equal"
if len(times) != len(values):
assert len(times) == len(values)
elif values is not None:
assert False, "DEBUG STATEMENT: May need to autogenerate timestamps here."
# Set the variables to avoid errors in initialization
# If no values, the result should be empty signal
safeValue = values if values is not None else []
# Times are autogenerated to index of value if none given
safeTime = [round(x, 5) for x in times] if times is not None else list(range(len(safeValue)))
# Derivatives are zero if none given
safeDeriv = derivatives if derivatives is not None else [0] * len(safeValue)
# Prevent errors
assert len(safeValue) == len(safeTime) == len(safeDeriv)
# Initialize
# For efficient item access, we need the sorted list to not be a tree
# This allows O(1) index access instead of O(log n) index access.
self.checkpoints: Iterable[SignalValue] = SortedList(key=sortedListKeyFunction)
self.checkpoints._load = 2**62 - 1
self.times: Iterable[float] = SortedList()
self.times._load = 2**62 - 1
# Fill the lists after setting them to be 1-level max
if safeTime:
self.checkpoints.update(SignalValue(x, y, d) for x, y, d in zip(safeTime, safeValue, safeDeriv))
self.times.update(safeTime)
if derivatives is None:
self.recomputeDerivatives()
@classmethod
def createConstant(cls, name: str, value: float, timestamps: List[float] = (0, float('inf'))) -> 'Signal':
s = cls(name)
s.checkpoints.update(SignalValue(time, value, 0) for time in timestamps)
s.times.update(x for x in timestamps)
return s
# We type-hint s as 'Signal' to avoid circular import
# It should be BooleanSignal, in principle. But in effect, this method would work equally well from another Signal.
@classmethod
def fromBooleanSignal(cls, s: 'Signal') -> 'Signal':
if not s.checkpoints:
return cls(s.getName())
times, values, derivatives = zip(*[(cp.getTime(), cp.getValue(), cp.getDerivative()) for cp in s.checkpoints])
newSignal = cls(s.getName(), times, values, derivatives)
# Since we convert from Boolean, compute the derivatives.
newSignal.recomputeDerivatives()
return newSignal
@classmethod
def fromCheckpoints(cls, name: str, checkpoints: List[SignalValue]) -> 'Signal':
s = cls(name)
s.checkpoints.update(checkpoints)
s.times.update(x.getTime() for x in checkpoints)
return s
@classmethod
def computeCheckpointsForComparableSignal(cls, lhsSignal: 'Signal', rhsSignal: 'Signal') -> Tuple['Signal', 'Signal']:
# These may be Signal or BooleanSignal; annotated Signal because BooleanSignal is a subclass
lhsResult: Signal = cls("lhs")
rhsResult: Signal = cls('rhs')
cp: SignalValue
if not lhsSignal.getTimes() or not rhsSignal.getTimes():
# If either signal is empty, the intersection is empty
return lhsResult, rhsResult
bothDefinedInterval: Interval = Interval.computeIntersection(
lhsSignal.getDefinedTimeInterval(), rhsSignal.getDefinedTimeInterval()
)
for cp in lhsSignal.getCheckpoints():
if bothDefinedInterval.contains(cp.getTime()):
lhsResult.addCheckpoint(cp)
rhsResult.emplaceCheckpoint(
cp.getTime(), rhsSignal.computeInterpolatedValue(cp.getTime()),
rhsSignal.computeInterpolatedDerivative(cp.getTime())
)
for cp in rhsSignal.getCheckpoints():
# Avoid double entries by checking if the given time is already in the result.
if bothDefinedInterval.contains(cp.getTime()) and cp.getTime() not in rhsResult.getTimes():
rhsResult.addCheckpoint(cp)
lhsResult.emplaceCheckpoint(
cp.getTime(), lhsSignal.computeInterpolatedValue(cp.getTime()),
lhsSignal.computeInterpolatedDerivative(cp.getTime())
)
return lhsResult, rhsResult
@classmethod
def computeComparableSignals(cls, lhsSignal: 'Signal', rhsSignal: 'Signal') -> 'SignalList': # type: ignore
# Get the sampling points where self and other are a) both defined or b) intersect
# So, any time x where x in self.times() and x in other.times()
# + any time y where, through the derivatives, we know that self.value(x) == other.value(x), assuming interpolation.
assert isinstance(lhsSignal, type(rhsSignal)), "Operation is unsupported between signals of different semantics."
lhsResult: Signal = cls('empty')
rhsResult: Signal = cls('empty')
if lhsSignal.isEmpty() or rhsSignal.isEmpty():
return [lhsResult, rhsResult]
# We build the sequence (ri)i≤nz containing the sampling points of y and y' when they are both defined,
# First, we get the sampling points from the signals where they are both defined
# (i.e. any t in s1 or s2 s.t. it is in domain of both)
lhsResult, rhsResult = cls.computeCheckpointsForComparableSignal(lhsSignal, rhsSignal)
# Second, we get the intersection points
if not lhsResult.isEmpty() and not rhsResult.isEmpty():
lhsLines: List[LineSegment] = lhsResult.computeLines()
rhsLines: List[LineSegment] = rhsResult.computeLines()
intersectPoints: List[Point] = LineSegment.computeIntersectionPoints(lhsLines, rhsLines)
for point in intersectPoints:
point.normalize()
lhsResult.emplaceCheckpoint(point.x, point.y, 0)
rhsResult.emplaceCheckpoint(point.x, point.y, 0)
lhsResult.recomputeDerivatives()
rhsResult.recomputeDerivatives()
return [lhsResult, rhsResult]
def computeInterpolatedCheckpoint(self, t: float) -> SignalValue:
return SignalValue(t, self.computeInterpolatedValue(t), self.computeInterpolatedDerivative(t))
# Get the value of a signal at time step t
def computeInterpolatedValue(self, t: float) -> float:
# Mirror Efficient Robustness paper implementation:
# A signal value outside of the defined range of the Signal is undefined -- we crash here to avoid undefined behaviour
if not self.getTimes() or not self.getDefinedTimeInterval().contains(t, closed=True):
raise RuntimeError("Value outside of defined interval")
i = self.computeIndexForSmallestTimeAfter(t)
# Handle exact match
if self.getTime(i) == t:
return self.getValue(i)
# If it's somewhere between two data points, interpolate
value = self.getValue(i - 1)
derivative = self.getDerivative(i - 1)
fraction = (t - self.getTime(i - 1)) / (self.getTime(i) - self.getTime(i - 1))
value += derivative * fraction
return value
def computeInterpolatedDerivative(self, t: float) -> float:
return self.getDerivative(self.computeIndexForLargestTimeBefore(t))
def computeInterval(self, interval: Interval, half_open: bool = False) -> 'Signal':
constructedSignalName = f"{self.getName()}_interval"
signalType = type(self)
output: 'Signal' = signalType(constructedSignalName)
if interval.getLower() > self.getLargestTime():
return output
if interval.getLower() == self.getLargestTime():
output.addCheckpoint(self.checkpoints[-1])
return output
if interval.getUpper() == interval.getLower():
if not half_open:
output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getLower()))
return output
lowerBoundIndex = self.computeIndexForSmallestTimeAfter(interval.getLower(), inclusive=True)
upperBoundIndex = self.computeIndexForLargestTimeBefore(interval.getUpper(), not half_open)
output = self.fromCheckpoints(constructedSignalName, self.checkpoints[lowerBoundIndex:upperBoundIndex + 1])
if interval.getLower() not in self.getTimes() and interval.getLower() > self.getTime(0):
output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getLower()))
if not half_open and interval.getUpper() not in self.getTimes() and interval.getUpper() < self.getTime(-1):
# If upper bound of the interval isn't included, should be, and falls within our defined range, compute it
output.addCheckpoint(self.computeInterpolatedCheckpoint(interval.getUpper()))
return output
def computeIndexForTime(self, time: float) -> int:
index = self.times.bisect_left(time)
assert self.getTime(index) == time, "Can't find an index for a time that isn't in our list."
return index
def computeLargestTimeBefore(self, time: float, inclusive: bool = True) -> float:
return self.getTime(self.computeIndexForLargestTimeBefore(time, inclusive))
def computeIndexForSmallestTimeAfter(self, time: float, inclusive: bool = True) -> int:
assert self.getDefinedTimeInterval().contains(time)
index = self.times.bisect_left(time)
if not inclusive and self.getTime(index) == time:
return index + 1
return index
def computeIndexForLargestTimeBefore(self, time: float, inclusive: bool = True) -> int:
if time > self.getTime(-1):
return self.getCheckpointCount() - 1
if time == self.getTime(-1):
if inclusive:
return self.getCheckpointCount() - 1
return self.getCheckpointCount() - 2
index = self.times.bisect_left(time)
if inclusive and self.getTime(index) == time:
return index
return index - 1
def computeSmallestTimeAfter(self, time: float, inclusive: bool = True) -> float:
return self.getTime(self.computeIndexForSmallestTimeAfter(time, inclusive))
def oldFormat(self) -> List[List[float]]:
return [self.getTimes()._lists[0], self.getValues(), self.getDerivatives()]
def computeLines(self) -> List[LineSegment]:
ret: List[LineSegment] = []
for i in range(self.getCheckpointCount() - 1):
cpA: SignalValue = self.getCheckpoint(i)
cpB: SignalValue = self.getCheckpoint(i + 1)
ret.append(LineSegment(Point(cpA.getTime(), cpA.getValue()), Point(cpB.getTime(), cpB.getValue())))
return ret
def getValues(self) -> List[float]:
return [x.getValue() for x in self.checkpoints]
def getTimes(self) -> List[float]:
return self.times
def getDerivatives(self) -> List[float]:
return [x.getDerivative() for x in self.checkpoints]
def getCheckpointCount(self) -> int:
return len(self.checkpoints)
def getCheckpoints(self) -> List[SignalValue]:
return self.checkpoints
def getName(self) -> str:
return self.name
def setName(self, name: str) -> None:
self.name = name
def getSmallestTime(self) -> float:
assert len(self.times._lists) == 1
return self.times._lists[0][0]
def getLargestTime(self) -> float:
assert len(self.times._lists) == 1
return self.times._lists[0][-1]
def getTime(self, index: int) -> float:
return self.times._lists[0][index]
def getValue(self, index: int) -> float:
return self.checkpoints._lists[0][index].getValue()
def getDerivative(self, index: int) -> float:
return self.checkpoints._lists[0][index].getDerivative()
def getCheckpoint(self, index: int) -> SignalValue:
return self.checkpoints._lists[0][index]
def setValue(self, index: int, value: float) -> None:
self.checkpoints._lists[0][index].setValue(value)
def setDerivative(self, index: int, derivative: float) -> None:
self.checkpoints._lists[0][index].setDerivative(derivative)
def getDefinedTimeInterval(self) -> Interval:
if len(self.times) == 0:
return Interval(0, 0)
if len(self.times) == 1:
return Interval(self.getTime(0), self.getTime(0))
return Interval(self.getTime(0), self.getTime(-1))
def popCheckpoint(self) -> SignalValue:
self.times.pop()
return self.checkpoints.pop()
def addCheckpoint(self, sv: SignalValue) -> None:
self.emplaceCheckpoint(sv.getTime(), sv.getValue(), sv.getDerivative())
def emplaceCheckpoint(self, time: float, value: float, derivative: float = None) -> None:
if derivative is None:
derivative = 0
assert time == round(time, 5)
if time in self.times:
if not math.isclose(self.getValue(self.computeIndexForTime(time)), value, rel_tol=1e-4):
warnings.warn("Skipped insertion of a duplicate point with differing value.")
return
self.checkpoints.add(SignalValue(time, value, derivative if derivative is not None else 0))
self.times.add(time)
def removeCheckpoint(self, index):
self.checkpoints.pop(index)
self.times.pop(index)
def isEmpty(self) -> bool:
return self.getCheckpointCount() == 0
def isSingular(self) -> bool:
return self.getCheckpointCount() == 1
def shift(self, offset: float) -> 'Signal':
cp: SignalValue
newCheckpoints: List[SignalValue] = []
for cp in self.checkpoints:
newCheckpoints.append(SignalValue(cp.getTime() + offset, cp.getValue(), cp.getDerivative()))
return self.fromCheckpoints(f"{self.name}_shift", newCheckpoints)
def recomputeDerivatives(self):
if self.checkpoints:
for i in range(len(self.checkpoints) - 1):
valueDiff = self.getValue(i + 1) - self.getValue(i)
timeDiff = self.getTime(i + 1) - self.getTime(i)
if timeDiff == 0:
assert False, "This shouldn't be possible - means we have a double time entry."
self.setDerivative(i, valueDiff / timeDiff)
self.setDerivative(-1, 0)
def __str__(self) -> str:
ret = ["Signal with the following checkpoint entries: "]
for cp in self.checkpoints:
ret.append(f'\t{cp.getTime()} -> <{cp.getValue()}, {cp.getDerivative()}>')
return '\n'.join(ret)
def __repr__(self) -> str:
times, values, derivatives = [], [], []
for x in self.checkpoints:
times.append(x.getTime())
values.append(x.getValue())
derivatives.append(x.getDerivative())
return f"Signal('{self.name}', {times.__repr__()}, {values.__repr__()}, {derivatives.__repr__()})"
def __eq__(self, other: 'Signal') -> bool:
if not isinstance(self, type(other)) or not isinstance(other, type(self)):
return False
if self.name != other.name:
return False
if len(self.checkpoints) != len(other.checkpoints):
return False
for scp, ocp in zip(self.checkpoints, other.checkpoints):
if scp != ocp:
return False
return True
def filterTimes(self, times: List[float]) -> 'Signal':
index = 0
while index < self.getCheckpointCount():
# if times has an extra checkpoint, just skip it
if times[index] > self.getTime(index):
self.removeCheckpoint(index)
else:
index += 1
# pylint: enable=too-many-public-methods
| true | true |
f7fac57e485bfb4af3770b8aeac5249a26e089ce | 8,089 | py | Python | test/functional/wallet_createwallet.py | shamimiceewu025/glee | aa0dc8240f2552e4c64a0b722d4e5f25dd981e66 | [
"MIT"
] | null | null | null | test/functional/wallet_createwallet.py | shamimiceewu025/glee | aa0dc8240f2552e4c64a0b722d4e5f25dd981e66 | [
"MIT"
] | null | null | null | test/functional/wallet_createwallet.py | shamimiceewu025/glee | aa0dc8240f2552e4c64a0b722d4e5f25dd981e66 | [
"MIT"
] | 1 | 2020-11-04T07:04:44.000Z | 2020-11-04T07:04:44.000Z | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The GleecBTC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.test_framework import GleecBTCTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class CreateWalletTest(GleecBTCTestFramework):
    def set_test_params(self):
        """Configure the test: one node, reusing the cached chain, with gleecbtc-cli support."""
        self.setup_clean_chain = False
        self.num_nodes = 1
        self.supports_cli = True
    def skip_test_if_missing_module(self):
        """Skip the whole test when the build has no wallet support."""
        self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
node.generate(1) # Leave IBD for sethdseed
self.nodes[0].createwallet(wallet_name='w0')
w0 = node.get_wallet_rpc('w0')
address1 = w0.getnewaddress()
self.log.info("Test disableprivatekeys creation.")
self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
w1 = node.get_wallet_rpc('w1')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info('Test that private keys cannot be imported')
addr = w0.getnewaddress('', 'legacy')
privkey = w0.dumpprivkey(addr)
assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
result = w1.importmulti([{'scriptPubKey': {'address': addr}, 'timestamp': 'now', 'keys': [privkey]}])
assert not result[0]['success']
assert 'warning' not in result[0]
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')
self.log.info("Test blank creation with private keys disabled.")
self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
w2 = node.get_wallet_rpc('w2')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info("Test blank creation with private keys enabled.")
self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
w3 = node.get_wallet_rpc('w3')
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
# Import private key
w3.importprivkey(w0.dumpprivkey(address1))
# Imported private keys are currently ignored by the keypool
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
# Set the seed
w3.sethdseed()
assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
w3.getnewaddress()
w3.getrawchangeaddress()
self.log.info("Test blank creation with privkeys enabled and then encryption")
self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
w4 = node.get_wallet_rpc('w4')
assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Encrypt the wallet. Nothing should change about the keypool
w4.encryptwallet('pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
# Now set a seed and it should work. Wallet should also be encrypted
w4.walletpassphrase('pass', 2)
w4.sethdseed()
w4.getnewaddress()
w4.getrawchangeaddress()
self.log.info("Test blank creation with privkeys disabled and then encryption")
self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
w5 = node.get_wallet_rpc('w5')
assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
# Encrypt the wallet
assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
self.log.info('New blank and encrypted wallets can be created')
self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase')
wblank = node.get_wallet_rpc('wblank')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
wblank.walletpassphrase('thisisapassphrase', 10)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)
self.log.info('Test creating a new encrypted wallet.')
# Born encrypted wallet is created (has keys)
self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase')
w6 = node.get_wallet_rpc('w6')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
w6.walletpassphrase('thisisapassphrase', 10)
w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
w6.keypoolrefill(1)
# There should only be 1 key
walletinfo = w6.getwalletinfo()
assert_equal(walletinfo['keypoolsize'], 1)
assert_equal(walletinfo['keypoolsize_hd_internal'], 1)
# Allow empty passphrase, but there should be a warning
resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
assert_equal(resp['warning'], 'Empty string given as passphrase, wallet will not be encrypted.')
w7 = node.get_wallet_rpc('w7')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
self.log.info('Test making a wallet with avoid reuse flag')
self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
w8 = node.get_wallet_rpc('w8')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
assert_equal(w8.getwalletinfo()["avoid_reuse"], True)
self.log.info('Using a passphrase with private keys disabled returns error')
assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='thisisapassphrase')
if __name__ == '__main__':
CreateWalletTest().main()
| 59.477941 | 303 | 0.705279 |
from test_framework.test_framework import GleecBTCTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class CreateWalletTest(GleecBTCTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node = self.nodes[0]
node.generate(1)
self.nodes[0].createwallet(wallet_name='w0')
w0 = node.get_wallet_rpc('w0')
address1 = w0.getnewaddress()
self.log.info("Test disableprivatekeys creation.")
self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
w1 = node.get_wallet_rpc('w1')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info('Test that private keys cannot be imported')
addr = w0.getnewaddress('', 'legacy')
privkey = w0.dumpprivkey(addr)
assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
result = w1.importmulti([{'scriptPubKey': {'address': addr}, 'timestamp': 'now', 'keys': [privkey]}])
assert not result[0]['success']
assert 'warning' not in result[0]
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')
self.log.info("Test blank creation with private keys disabled.")
self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
w2 = node.get_wallet_rpc('w2')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])
self.log.info("Test blank creation with private keys enabled.")
self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
w3 = node.get_wallet_rpc('w3')
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
w3.importprivkey(w0.dumpprivkey(address1))
assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
w3.sethdseed()
assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
w3.getnewaddress()
w3.getrawchangeaddress()
self.log.info("Test blank creation with privkeys enabled and then encryption")
self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
w4 = node.get_wallet_rpc('w4')
assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
w4.encryptwallet('pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
w4.walletpassphrase('pass', 2)
w4.sethdseed()
w4.getnewaddress()
w4.getrawchangeaddress()
self.log.info("Test blank creation with privkeys disabled and then encryption")
self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
w5 = node.get_wallet_rpc('w5')
assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
self.log.info('New blank and encrypted wallets can be created')
self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase')
wblank = node.get_wallet_rpc('wblank')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
wblank.walletpassphrase('thisisapassphrase', 10)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)
self.log.info('Test creating a new encrypted wallet.')
self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase')
w6 = node.get_wallet_rpc('w6')
assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
w6.walletpassphrase('thisisapassphrase', 10)
w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
w6.keypoolrefill(1)
walletinfo = w6.getwalletinfo()
assert_equal(walletinfo['keypoolsize'], 1)
assert_equal(walletinfo['keypoolsize_hd_internal'], 1)
resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
assert_equal(resp['warning'], 'Empty string given as passphrase, wallet will not be encrypted.')
w7 = node.get_wallet_rpc('w7')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
self.log.info('Test making a wallet with avoid reuse flag')
self.nodes[0].createwallet('w8', False, False, '', True)
w8 = node.get_wallet_rpc('w8')
assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 10)
assert_equal(w8.getwalletinfo()["avoid_reuse"], True)
self.log.info('Using a passphrase with private keys disabled returns error')
assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='thisisapassphrase')
if __name__ == '__main__':
CreateWalletTest().main()
| true | true |
f7fac6b23303f4215f37a02078c4db29fd5ce8ad | 576 | py | Python | setup.py | nathanolszowski/tmtt_project | 40792e84d2fbfc7ef3d1e5013ed41f0b77089219 | [
"MIT"
] | null | null | null | setup.py | nathanolszowski/tmtt_project | 40792e84d2fbfc7ef3d1e5013ed41f0b77089219 | [
"MIT"
] | null | null | null | setup.py | nathanolszowski/tmtt_project | 40792e84d2fbfc7ef3d1e5013ed41f0b77089219 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Setup file for ttmt_project.
Use setup.cfg to configure your project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
# Abort early with a clear message when setuptools is too old for PyScaffold.
try:
    require('setuptools>=38.3')
except VersionConflict:
    print("Error: version of setuptools is too old (<38.3)!")
    sys.exit(1)
if __name__ == "__main__":
    # Delegate all packaging configuration to setup.cfg via PyScaffold.
    setup(use_pyscaffold=True)
| 24 | 75 | 0.704861 |
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup
try:
require('setuptools>=38.3')
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(use_pyscaffold=True)
| true | true |
f7fac728ab33fb4a4a27b61750e57b08d33aeb77 | 270 | py | Python | sentipy/__init__.py | sentimentinvestor/sentipy | 26611d1473f4dd7c7003767ddbed343799c6e0b7 | [
"MIT"
] | 8 | 2021-05-31T08:39:29.000Z | 2021-08-01T00:32:51.000Z | sentipy/__init__.py | sentimentinvestor/sentipy | 26611d1473f4dd7c7003767ddbed343799c6e0b7 | [
"MIT"
] | 8 | 2021-06-03T11:39:16.000Z | 2021-11-24T18:31:44.000Z | sentipy/__init__.py | sentimentinvestor/sentipy | 26611d1473f4dd7c7003767ddbed343799c6e0b7 | [
"MIT"
] | 2 | 2021-07-19T22:32:30.000Z | 2021-07-21T14:42:35.000Z | """The Sentipy module provides a simple and lightweight way to interact with the SentimentInvestor API and data.
For more information, please visit https://docs.sentimentinvestor.com/python/
"""
# Re-export the core client module at package level.
from . import sentipy
# Public API of the package.
# NOTE(review): "ws" is declared here but not imported in this module --
# confirm it is provided elsewhere or intended for lazy import.
__all__ = ["sentipy", "ws"]
__version__ = "1.1.0"
| 27 | 112 | 0.751852 |
from . import sentipy
__all__ = ["sentipy", "ws"]
__version__ = "1.1.0"
| true | true |
f7fac7af6ebd7e3f44e8212adff1f4c0476eeeec | 11,042 | py | Python | importers/issue_tracker/github/querier_github.py | SOM-Research/Gitana | 95babc437d0a418ba8cbf89fe516cc599bc4e880 | [
"MIT"
] | 63 | 2015-05-12T09:13:34.000Z | 2021-09-29T07:24:51.000Z | importers/issue_tracker/github/querier_github.py | atlanmod/Gitana | 95babc437d0a418ba8cbf89fe516cc599bc4e880 | [
"MIT"
] | 29 | 2015-11-26T09:55:34.000Z | 2021-10-21T10:32:52.000Z | importers/issue_tracker/github/querier_github.py | atlanmod/Gitana | 95babc437d0a418ba8cbf89fe516cc599bc4e880 | [
"MIT"
] | 20 | 2016-09-12T15:22:28.000Z | 2021-08-07T23:06:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'valerio cosentino'
from github import Github
from util.date_util import DateUtil
from util.token_util import TokenUtil
import re
class GitHubQuerier():
"""
This class collects the data available on the GitHub issue tracker via its API
"""
    def __init__(self, url, token, logger):
        """
        :type url: str
        :param url: full name of the GitHub repository
        :type token: str
        :param token: a GitHub token
        :type logger: Object
        :param logger: logger
        """
        try:
            self._logger = logger
            self._url = url
            self._token = token
            # Authenticated PyGithub client; _load_repo validates connectivity.
            self._github = Github(token)
            self._repo = self._load_repo(self._url)
            self._token_util = TokenUtil(self._logger, "github")
            self._date_util = DateUtil()
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
        # 'except Exception' would be safer (the error is re-raised either way).
        except:
            self._logger.error("GitHubQuerier init failed")
            raise
def _load_repo(self, url):
# connect to the GitHub API
try:
repo = self._github.get_repo(url)
return repo
except Exception:
self._logger.error("GitHubQuerier error loading repository " + url + "- ", exc_info=True)
raise
    def get_issue_ids(self, before_date):
        """
        gets data source issue ids
        :type before_date: str
        :param before_date: selects issues with creation date before a given date (YYYY-mm-dd)
        """
        issue_ids = []
        page_count = 0
        self._token_util.wait_is_usable(self._github)
        # NOTE(review): _getLastPageUrl is a private PyGithub helper and may
        # break across library versions -- confirm on upgrade.
        last_page = int(self._repo.get_issues(state="all", direction="asc")._getLastPageUrl().split("page=")[-1])
        # Walk every result page, waiting for rate-limit headroom before each request.
        while page_count != last_page + 1:
            self._token_util.wait_is_usable(self._github)
            issues = self._repo.get_issues(state="all").get_page(page_count)
            for i in issues:
                if before_date:
                    # Keep only issues created on or before the cutoff date.
                    if i.created_at <= self._date_util.get_timestamp(before_date, "%Y-%m-%d"):
                        issue_ids.append(i.number)
                else:
                    issue_ids.append(i.number)
            page_count += 1
        # Return ids in ascending order.
        if issue_ids:
            issue_ids.sort()
        return issue_ids
    def get_issue(self, issue_id):
        """
        gets issue
        :type issue_id: int
        :param issue_id: data source issue id
        """
        # Wait until the token has remaining API quota (presumably rate limiting).
        self._token_util.wait_is_usable(self._github)
        return self._repo.get_issue(issue_id)
def get_issue_summary(self, issue):
"""
gets summary of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
return issue.title
def get_issue_body(self, issue):
"""
gets body of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
return issue.body
def get_issue_version(self, issue):
"""
gets version of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
version = None
if issue.milestone is not None:
version = issue.milestone.number
return version
def get_issue_creation_time(self, issue):
"""
gets creation time of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
return issue.created_at
def get_issue_last_change_time(self, issue):
"""
gets last change date of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
return issue.updated_at
def get_issue_creator(self, issue):
"""
gets creator of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
try:
found = issue.user
except:
found = None
return found
def get_user_email(self, user):
"""
gets the email of the issue creator
:type user: Object
:param user: the Object representing the user
"""
try:
found = user.email
except:
found = None
return found
def get_user_name(self, user):
"""
gets the user name of the issue creator
:type user: Object
:param user: the Object representing the user
"""
try:
found = user.login
except:
found = None
return found
def get_issue_tags(self, issue):
"""
gets labels of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
labels = []
self._token_util.wait_is_usable(self._github)
for label in issue.get_labels():
labels.append(label.name)
return labels
def get_issue_comments(self, issue):
"""
gets the comments of the issue
:type issue: Object
:param issue: the Object representing the issue
"""
comments = []
self._token_util.wait_is_usable(self._github)
for comment in issue.get_comments():
comments.append(comment)
return comments
def get_issue_comment_id(self, issue_comment):
"""
gets the id of the issue comment
:type issue_comment: Object
:param issue_comment: the Object representing the issue comment
"""
return issue_comment.id
def get_issue_comment_body(self, issue_comment):
"""
gets the body of the issue comment
:type issue_comment: Object
:param issue_comment: the Object representing the issue comment
"""
return issue_comment.body
def get_issue_comment_author(self, issue_comment):
"""
gets the author of the issue comment
:type issue_comment: Object
:param issue_comment: the Object representing the issue comment
"""
return issue_comment.user
def get_issue_comment_creation_time(self, issue_comment):
"""
gets the creation time of the issue comment
:type issue_comment: Object
:param issue_comment: the Object representing the issue comment
"""
return issue_comment.created_at
def generate_attachment_id(self, message_id, pos):
"""
creates the attachment id
:type message_id: int
:param message_id: the data source message id
:type pos: int
:param pos: position of the message
"""
return str(message_id) + str(pos)
def get_attachments(self, comment):
"""
gets the attachements within a comment
:type comment: str
:param comment: content of the comment
"""
p = re.compile("\[.*\]\(http.*\)", re.MULTILINE)
matches = p.findall(comment)
attachments = []
for m in matches:
attachments.append(m)
return attachments
def get_attachment_name(self, text):
"""
gets the name of the attachement
:type text: str
:param text: content of the comment
"""
parts = text.split('](')
name = parts[0].lstrip('[')
found = name
if not found:
found = parts[1].split('/')[-1]
return found
def get_attachment_url(self, text):
"""
gets the URL of the attachement
:type text: str
:param text: content of the comment
"""
parts = text.split('](')
return parts[1].rstrip(')')
def get_referenced_issues(self, comment):
"""
gets the referenced issues within a comment
:type comment: str
:param comment: content of the comment
"""
p = re.compile('#\d+', re.MULTILINE)
matches = p.findall(comment)
referenced_issues = []
for m in matches:
referenced_issues.append(m.strip('#'))
return referenced_issues
def get_event_creation_time(self, event):
"""
gets the creation time of an event
:type event: Object
:param event: the Object representing the event
"""
return event.created_at
def get_event_actor(self, event):
"""
gets the actor of an event
:type event: Object
:param event: the Object representing the event
"""
return event.actor
def get_issue_history(self, issue):
"""
gets the event history of an issue
:type issue: Object
:param issue: the Object representing the issue
"""
events = []
self._token_util.wait_is_usable(self._github)
for event in issue.get_events():
events.append(event)
return events
    def regenerate_token(self):
        """
        regenerate GitHub token
        """
        # Recreate the client with the stored token (e.g. after the previous
        # client's session becomes unusable).
        self._github = Github(self._token)
def find_user(self, login):
"""
finds GitHub user
:type login: str
:param login: GitHub username
"""
found = None
self._token_util.wait_is_usable(self._github)
users = self._github.search_users(login, **{"type": "user", "in": "login"})
for user in users:
found = user
break
return found
def get_issue_subscribers(self, history):
"""
gets subscribers of an issue
:type history: Object
:param history: the Object representing the events of an issue
"""
subscribers = []
for event in history:
if event.event == "subscribed":
subscribers.append(event.actor)
return subscribers
def get_issue_assignees(self, history):
"""
gets assignees of an issue
:type history: Object
:param history: the Object representing the events of an issue
"""
assignees = []
for event in history:
if event.event in ["assigned", "unassigned"]:
if event.event == "assigned":
assignees.append(event._rawData.get('assignee'))
elif event.event == "unassigned":
assignees.remove(event._rawData.get('assignee'))
return assignees
def get_commit_dependencies(self, history):
"""
gets dependencies between an issue and commits
:type history: Object
:param history: the Object representing the events of an issue
"""
commit_dependencies = []
for event in history:
if event.event == "referenced":
commit_dependencies.append(event.commit_id)
return commit_dependencies
    def get_author_by_commit(self, sha):
        """Return the GitHub author of the commit identified by *sha*."""
        self._token_util.wait_is_usable(self._github)
        commit = self._repo.get_commit(sha)
        return commit.author
| 26.86618 | 113 | 0.573537 |
__author__ = 'valerio cosentino'
from github import Github
from util.date_util import DateUtil
from util.token_util import TokenUtil
import re
class GitHubQuerier():
def __init__(self, url, token, logger):
try:
self._logger = logger
self._url = url
self._token = token
self._github = Github(token)
self._repo = self._load_repo(self._url)
self._token_util = TokenUtil(self._logger, "github")
self._date_util = DateUtil()
except:
self._logger.error("GitHubQuerier init failed")
raise
def _load_repo(self, url):
try:
repo = self._github.get_repo(url)
return repo
except Exception:
self._logger.error("GitHubQuerier error loading repository " + url + "- ", exc_info=True)
raise
def get_issue_ids(self, before_date):
issue_ids = []
page_count = 0
self._token_util.wait_is_usable(self._github)
last_page = int(self._repo.get_issues(state="all", direction="asc")._getLastPageUrl().split("page=")[-1])
while page_count != last_page + 1:
self._token_util.wait_is_usable(self._github)
issues = self._repo.get_issues(state="all").get_page(page_count)
for i in issues:
if before_date:
if i.created_at <= self._date_util.get_timestamp(before_date, "%Y-%m-%d"):
issue_ids.append(i.number)
else:
issue_ids.append(i.number)
page_count += 1
if issue_ids:
issue_ids.sort()
return issue_ids
def get_issue(self, issue_id):
self._token_util.wait_is_usable(self._github)
return self._repo.get_issue(issue_id)
def get_issue_summary(self, issue):
return issue.title
def get_issue_body(self, issue):
return issue.body
def get_issue_version(self, issue):
version = None
if issue.milestone is not None:
version = issue.milestone.number
return version
def get_issue_creation_time(self, issue):
return issue.created_at
def get_issue_last_change_time(self, issue):
return issue.updated_at
def get_issue_creator(self, issue):
try:
found = issue.user
except:
found = None
return found
def get_user_email(self, user):
try:
found = user.email
except:
found = None
return found
def get_user_name(self, user):
try:
found = user.login
except:
found = None
return found
def get_issue_tags(self, issue):
labels = []
self._token_util.wait_is_usable(self._github)
for label in issue.get_labels():
labels.append(label.name)
return labels
def get_issue_comments(self, issue):
comments = []
self._token_util.wait_is_usable(self._github)
for comment in issue.get_comments():
comments.append(comment)
return comments
def get_issue_comment_id(self, issue_comment):
return issue_comment.id
def get_issue_comment_body(self, issue_comment):
return issue_comment.body
def get_issue_comment_author(self, issue_comment):
return issue_comment.user
def get_issue_comment_creation_time(self, issue_comment):
return issue_comment.created_at
def generate_attachment_id(self, message_id, pos):
return str(message_id) + str(pos)
def get_attachments(self, comment):
p = re.compile("\[.*\]\(http.*\)", re.MULTILINE)
matches = p.findall(comment)
attachments = []
for m in matches:
attachments.append(m)
return attachments
def get_attachment_name(self, text):
parts = text.split('](')
name = parts[0].lstrip('[')
found = name
if not found:
found = parts[1].split('/')[-1]
return found
def get_attachment_url(self, text):
parts = text.split('](')
return parts[1].rstrip(')')
def get_referenced_issues(self, comment):
p = re.compile('#\d+', re.MULTILINE)
matches = p.findall(comment)
referenced_issues = []
for m in matches:
referenced_issues.append(m.strip('#'))
return referenced_issues
def get_event_creation_time(self, event):
return event.created_at
def get_event_actor(self, event):
return event.actor
def get_issue_history(self, issue):
events = []
self._token_util.wait_is_usable(self._github)
for event in issue.get_events():
events.append(event)
return events
def regenerate_token(self):
self._github = Github(self._token)
def find_user(self, login):
found = None
self._token_util.wait_is_usable(self._github)
users = self._github.search_users(login, **{"type": "user", "in": "login"})
for user in users:
found = user
break
return found
def get_issue_subscribers(self, history):
subscribers = []
for event in history:
if event.event == "subscribed":
subscribers.append(event.actor)
return subscribers
def get_issue_assignees(self, history):
assignees = []
for event in history:
if event.event in ["assigned", "unassigned"]:
if event.event == "assigned":
assignees.append(event._rawData.get('assignee'))
elif event.event == "unassigned":
assignees.remove(event._rawData.get('assignee'))
return assignees
def get_commit_dependencies(self, history):
commit_dependencies = []
for event in history:
if event.event == "referenced":
commit_dependencies.append(event.commit_id)
return commit_dependencies
def get_author_by_commit(self, sha):
self._token_util.wait_is_usable(self._github)
commit = self._repo.get_commit(sha)
return commit.author
| true | true |
f7fac7dd5bc34ca32f679bf0e5658333b563103a | 980 | py | Python | src/conduit/scripts/drop_tables.py | Infinisil/pyramid-realworld-example-app | edd3ed1f89fb9d38c3d524ed1978ded61d56d7dd | [
"MIT"
] | null | null | null | src/conduit/scripts/drop_tables.py | Infinisil/pyramid-realworld-example-app | edd3ed1f89fb9d38c3d524ed1978ded61d56d7dd | [
"MIT"
] | null | null | null | src/conduit/scripts/drop_tables.py | Infinisil/pyramid-realworld-example-app | edd3ed1f89fb9d38c3d524ed1978ded61d56d7dd | [
"MIT"
] | null | null | null | """Drop database content."""
from pyramid.paster import bootstrap
from pyramid.paster import setup_logging
import argparse
import structlog
import sys
import typing as t
logger = structlog.getLogger("db")
def main(argv: t.List[str] = sys.argv) -> None:
    """Run the script: bootstrap the Pyramid app and drop everything the
    current database user owns."""
    parser = argparse.ArgumentParser(
        usage="pipenv run python -m conduit.scripts.drop_tables"
    )
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default="etc/development.ini",
        metavar="<config>",
        help="Pyramid application configuration file.",
    )
    # Skip the migration check so the script works on an out-of-date schema.
    env = bootstrap(
        parser.parse_args().config, options={"SKIP_CHECK_DB_MIGRATED": "true"}
    )
    # NOTE(review): parse_args() is invoked a second time here; parsing once
    # into a variable would be cleaner.
    setup_logging(parser.parse_args().config)
    engine = env["registry"].settings["sqlalchemy.engine"]
    # PostgreSQL-specific: drops all objects owned by the connected role.
    engine.execute("DROP OWNED BY current_user")
    # NOTE(review): logger.warn is a deprecated alias of logger.warning.
    logger.warn("db reset done for", url=str(engine.url))
    env["closer"]()
if __name__ == "__main__":
main()
| 23.333333 | 78 | 0.652041 |
from pyramid.paster import bootstrap
from pyramid.paster import setup_logging
import argparse
import structlog
import sys
import typing as t
logger = structlog.getLogger("db")
def main(argv: t.List[str] = sys.argv) -> None:
parser = argparse.ArgumentParser(
usage="pipenv run python -m conduit.scripts.drop_tables"
)
parser.add_argument(
"-c",
"--config",
type=str,
default="etc/development.ini",
metavar="<config>",
help="Pyramid application configuration file.",
)
env = bootstrap(
parser.parse_args().config, options={"SKIP_CHECK_DB_MIGRATED": "true"}
)
setup_logging(parser.parse_args().config)
engine = env["registry"].settings["sqlalchemy.engine"]
engine.execute("DROP OWNED BY current_user")
logger.warn("db reset done for", url=str(engine.url))
env["closer"]()
if __name__ == "__main__":
main()
| true | true |
f7fac89ab018d9a3a63c67bd13cca445f5cfab79 | 2,172 | py | Python | test/lazy/test_chol_lazy_tensor.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 2 | 2021-01-30T18:24:18.000Z | 2021-02-16T21:54:11.000Z | test/lazy/test_chol_lazy_tensor.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | null | null | null | test/lazy/test_chol_lazy_tensor.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 1 | 2021-03-15T12:32:24.000Z | 2021-03-15T12:32:24.000Z | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.lazy import CholLazyTensor, TriangularLazyTensor
from gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCholLazyTensor(LazyTensorTestCase, unittest.TestCase):
    """Exercises a CholLazyTensor wrapping a fixed 5x5 lower-triangular factor."""

    # Harness knobs consumed by LazyTensorTestCase.
    seed = 0
    should_test_sample = True
    should_call_cg = False
    should_call_lanczos = False

    def create_lazy_tensor(self):
        # Lower-triangular Cholesky factor L; the lazy tensor represents L @ L^T.
        factor_rows = [
            [3, 0, 0, 0, 0],
            [-1, 2, 0, 0, 0],
            [1, 4, 1, 0, 0],
            [0, 2, 3, 2, 0],
            [-4, -2, 1, 3, 4],
        ]
        factor = torch.tensor(factor_rows, dtype=torch.float, requires_grad=True)
        return CholLazyTensor(TriangularLazyTensor(factor))

    def evaluate_lazy_tensor(self, lazy_tensor):
        # Dense reference value: L @ L^T from the explicit factor.
        factor = lazy_tensor.root.evaluate()
        return factor @ factor.transpose(-1, -2)
class TestCholLazyTensorBatch(TestCholLazyTensor):
    """Same checks as TestCholLazyTensor, over a batch of two factors."""

    seed = 0

    def create_lazy_tensor(self):
        batch_rows = [
            [[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
            [[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
        ]
        factor = torch.tensor(batch_rows, dtype=torch.float)
        # Shift each factor's diagonal by I before enabling gradients.
        factor.add_(torch.eye(5).unsqueeze(0))
        factor.requires_grad_(True)
        return CholLazyTensor(TriangularLazyTensor(factor))
class TestCholLazyTensorMultiBatch(TestCholLazyTensor):
    """Same checks over a 3 x 2 multi-batch of factors."""

    seed = 0
    # Because these LTs are large, we'll skip the big tests.
    should_test_sample = False
    skip_slq_tests = True

    def create_lazy_tensor(self):
        base = torch.tensor(
            [
                [[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
                [[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
            ],
            dtype=torch.float,
        )
        factor = base.repeat(3, 1, 1, 1)
        # Rescale two of the outer batches so the three copies differ.
        factor[1].mul_(2)
        factor[2].mul_(0.5)
        factor.add_(torch.eye(5).unsqueeze_(0).unsqueeze_(0))
        factor.requires_grad_(True)
        return CholLazyTensor(TriangularLazyTensor(factor))
# Allow running this module directly: discover and run the tests above.
if __name__ == "__main__":
    unittest.main()
| 31.028571 | 107 | 0.547422 |
import unittest
import torch
from gpytorch.lazy import CholLazyTensor, TriangularLazyTensor
from gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCholLazyTensor(LazyTensorTestCase, unittest.TestCase):
    """Checks a CholLazyTensor wrapping a fixed 5x5 lower-triangular factor."""
    # Harness configuration consumed by LazyTensorTestCase.
    seed = 0
    should_test_sample = True
    should_call_cg = False
    should_call_lanczos = False
    def create_lazy_tensor(self):
        """Build the lazy tensor under test from a hand-written Cholesky factor."""
        chol = torch.tensor(
            [[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
            dtype=torch.float,
            requires_grad=True,
        )
        return CholLazyTensor(TriangularLazyTensor(chol))
    def evaluate_lazy_tensor(self, lazy_tensor):
        """Dense reference value: L @ L^T computed from the explicit factor."""
        chol = lazy_tensor.root.evaluate()
        return chol.matmul(chol.transpose(-1, -2))
class TestCholLazyTensorBatch(TestCholLazyTensor):
    """Same checks as TestCholLazyTensor, over a batch of two factors."""
    seed = 0
    def create_lazy_tensor(self):
        """Build a batched (2 x 5 x 5) lazy tensor; I is added to each diagonal."""
        chol = torch.tensor(
            [
                [[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
                [[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
            ],
            dtype=torch.float,
        )
        chol.add_(torch.eye(5).unsqueeze(0))
        chol.requires_grad_(True)
        return CholLazyTensor(TriangularLazyTensor(chol))
class TestCholLazyTensorMultiBatch(TestCholLazyTensor):
    """Same checks over a 3 x 2 multi-batch of factors."""
    seed = 0
    # These lazy tensors are large, so the expensive tests are skipped.
    should_test_sample = False
    skip_slq_tests = True
    def create_lazy_tensor(self):
        """Build a (3 x 2 x 5 x 5) lazy tensor; outer batches are rescaled so they differ."""
        chol = torch.tensor(
            [
                [[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
                [[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
            ],
            dtype=torch.float,
        )
        chol = chol.repeat(3, 1, 1, 1)
        chol[1].mul_(2)
        chol[2].mul_(0.5)
        chol.add_(torch.eye(5).unsqueeze_(0).unsqueeze_(0))
        chol.requires_grad_(True)
        return CholLazyTensor(TriangularLazyTensor(chol))
# Allow running this module directly: discover and run the tests above.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7fac9958697852b1d8e9d0b2458b6c59e50eb16 | 859 | py | Python | setup.py | rwnx/bunnyplot | 31f4824d683f6fb835fc3caafa58884a2f7e4730 | [
"MIT"
] | 1 | 2021-05-03T00:35:19.000Z | 2021-05-03T00:35:19.000Z | setup.py | rwnx/bunnyplot | 31f4824d683f6fb835fc3caafa58884a2f7e4730 | [
"MIT"
] | null | null | null | setup.py | rwnx/bunnyplot | 31f4824d683f6fb835fc3caafa58884a2f7e4730 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="bunnyplot",
version="0.1.0",
author="Jerome Twell",
author_email="jtwell1@gmail.com",
description="A utility for producting GraphML from RabbitMQ",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jerometwell/bunnyplot",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"click",
"networkx",
"aiohttp",
"async_timeout"
],
entry_points = {
'console_scripts': ['bunnyplot=bunnyplot.cli:cli'],
},
python_requires='>=3.6',
) | 27.709677 | 65 | 0.63213 | import setuptools
# Use the README as the package's long description on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
# NOTE(review): "producting" in `description` looks like a typo for
# "producing" -- confirm before the next release.
setuptools.setup(
    name="bunnyplot",
    version="0.1.0",
    author="Jerome Twell",
    author_email="jtwell1@gmail.com",
    description="A utility for producting GraphML from RabbitMQ",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jerometwell/bunnyplot",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        "click",
        "networkx",
        "aiohttp",
        "async_timeout"
    ],
    # Console entry point: `bunnyplot` invokes bunnyplot.cli:cli.
    entry_points = {
        'console_scripts': ['bunnyplot=bunnyplot.cli:cli'],
    },
    python_requires='>=3.6',
) | true | true |
f7fac9b1d7d6d8bce4f93173226df89c63b5fe81 | 808 | py | Python | PYwithD2L/CH3/3.3.py | JunoCheon/D2L | 9464709862e55151aec28fc637c5942738bdd72b | [
"MIT"
] | null | null | null | PYwithD2L/CH3/3.3.py | JunoCheon/D2L | 9464709862e55151aec28fc637c5942738bdd72b | [
"MIT"
] | null | null | null | PYwithD2L/CH3/3.3.py | JunoCheon/D2L | 9464709862e55151aec28fc637c5942738bdd72b | [
"MIT"
] | null | null | null | #%%
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
npx.np_set()
# %%
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w,true_b,1000)
#%%
def load_array(data_array, batch_size, is_train=True):
    """Wrap in-memory tensors in a PyTorch minibatch iterator.

    Args:
        data_array: Tuple/list of tensors sharing the same first dimension.
        batch_size: Number of examples per minibatch.
        is_train: When True, reshuffle the examples every epoch.

    Returns:
        A DataLoader yielding minibatches drawn from the tensors.
    """
    wrapped = data.TensorDataset(*data_array)
    return data.DataLoader(wrapped, batch_size, shuffle=is_train)
batch_size = 10
data_iter = load_array((features,labels),batch_size)
# %%
next(iter(data_iter))
# %%
# NOTE(review): the cells below mix MXNet/Gluon and PyTorch APIs and do not
# run as written: `nn`, `net`, and `gluon` are undefined in this file, and
# "Squential" is a typo for "Sequential".
nn.Squential()
net.add(nn.Dense(1))
#%%
from mxnet import init
net.initialize(init.Normal(sigma=0.01))
#%%
loss = gluon.loss.L2Loss()
#%%
torch.manual_seed(0)
a=torch.ones([6])/6
# NOTE(review): the bare `torch.` below is a syntax error, and
# `torch.multinomial(1,a)` has its arguments reversed -- the next cell shows
# the working call `torch.multinomial(a,1)`.
torch.
torch.multinomial(1,a).sample()
# %%
torch.manual_seed(0)
torch.multinomial(a,1)
# %%
torch.manual_seed(0)
torch.randn ((5,))
# %%
| 18.790698 | 63 | 0.711634 |
import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
# NOTE(review): `npx` is never imported (MXNet name); this raises NameError.
npx.np_set()
# Ground-truth linear-regression parameters used to synthesize the dataset.
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w,true_b,1000)
def load_array(data_array,batch_size,is_train = True):
    """Wrap in-memory tensors in a DataLoader; shuffle when is_train is True."""
    dataset = data.TensorDataset(*data_array)
    return data.DataLoader(dataset,batch_size,shuffle=is_train)
batch_size = 10
data_iter = load_array((features,labels),batch_size)
next(iter(data_iter))
# NOTE(review): the lines below mix MXNet/Gluon and PyTorch APIs and do not
# run as written: `nn`, `net`, and `gluon` are undefined, "Squential" is a
# typo, the bare `torch.` is a syntax error, and `torch.multinomial(1,a)`
# has its arguments reversed (cf. the working `torch.multinomial(a,1)`).
nn.Squential()
net.add(nn.Dense(1))
from mxnet import init
net.initialize(init.Normal(sigma=0.01))
loss = gluon.loss.L2Loss()
torch.manual_seed(0)
a=torch.ones([6])/6
torch.
torch.multinomial(1,a).sample()
torch.manual_seed(0)
torch.multinomial(a,1)
torch.manual_seed(0)
torch.randn ((5,))
| false | true |
f7facb048440fd2b4a675f9781262771dd21f789 | 2,058 | py | Python | neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 1 | 2017-09-10T09:57:35.000Z | 2017-09-10T09:57:35.000Z | neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/db/migration/alembic_migrations/versions/mitaka/expand/15e43b934f81_rbac_qos_policy.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""rbac_qos_policy
Revision ID: 15e43b934f81
Revises: 1df244e556f5
Create Date: 2015-11-25 18:45:03.819115
"""
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attrs
# revision identifiers, used by Alembic.
revision = '15e43b934f81'
down_revision = 'b4caf27aae4'
def upgrade():
    """Create the ``qospolicyrbacs`` table and its ``tenant_id`` index."""
    op.create_table(
        'qospolicyrbacs',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('tenant_id',
                  sa.String(length=attrs.TENANT_ID_MAX_LEN),
                  nullable=True),
        sa.Column('target_tenant',
                  sa.String(length=attrs.TENANT_ID_MAX_LEN),
                  nullable=False),
        sa.Column('action', sa.String(length=255), nullable=False),
        sa.Column('object_id', sa.String(length=36), nullable=False),
        # RBAC rows are removed together with the QoS policy they reference.
        sa.ForeignKeyConstraint(['object_id'], ['qos_policies.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        # At most one grant per (target tenant, policy, action) triple.
        sa.UniqueConstraint('target_tenant', 'object_id', 'action'))
    op.create_index(op.f('ix_qospolicyrbacs_tenant_id'), 'qospolicyrbacs',
                    ['tenant_id'], unique=False)
| 38.111111 | 79 | 0.582119 |
from alembic import op
import sqlalchemy as sa
from neutron.api.v2 import attributes as attrs
revision = '15e43b934f81'
down_revision = 'b4caf27aae4'
def upgrade():
    """Create the ``qospolicyrbacs`` table plus an index on ``tenant_id``."""
    op.create_table('qospolicyrbacs',
                    sa.Column('id', sa.String(length=36), nullable=False),
                    sa.Column('tenant_id',
                              sa.String(length=attrs.TENANT_ID_MAX_LEN),
                              nullable=True),
                    sa.Column('target_tenant',
                              sa.String(length=attrs.TENANT_ID_MAX_LEN),
                              nullable=False),
                    sa.Column('action', sa.String(length=255), nullable=False),
                    sa.Column('object_id', sa.String(length=36),
                              nullable=False),
                    # RBAC rows are deleted together with their QoS policy.
                    sa.ForeignKeyConstraint(['object_id'],
                                            ['qos_policies.id'],
                                            ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id'),
                    # One grant per (target tenant, policy, action).
                    sa.UniqueConstraint('target_tenant',
                                        'object_id', 'action'))
    op.create_index(op.f('ix_qospolicyrbacs_tenant_id'), 'qospolicyrbacs',
                    ['tenant_id'], unique=False)
| true | true |
f7facb17edfef659254f8e603b3ce318866043e1 | 665 | py | Python | structures/tests/bar_test.py | EladSharony/Mechanics | 078f97bea84114fc1db6fe9700b92b96b18a0d5e | [
"MIT"
] | 24 | 2021-02-23T13:53:14.000Z | 2022-03-29T16:40:56.000Z | structures/tests/bar_test.py | EladSharony/Mechanics | 078f97bea84114fc1db6fe9700b92b96b18a0d5e | [
"MIT"
] | 2 | 2021-04-23T12:30:32.000Z | 2022-03-31T10:51:12.000Z | structures/tests/bar_test.py | EladSharony/Mechanics | 078f97bea84114fc1db6fe9700b92b96b18a0d5e | [
"MIT"
] | 12 | 2021-04-11T20:44:03.000Z | 2022-03-30T19:23:58.000Z | import unittest
from math import sqrt
from eqs import Matrix
from geom2d import Point
from structures.model.node import StrNode
from structures.model.bar import StrBar
class BarTest(unittest.TestCase):
    """Checks StrBar's global stiffness matrix against hand-computed values."""

    # Fixed bar between (0, 0) and (2, 1) shared by the tests below.
    section = sqrt(5)
    young = 5
    node_a = StrNode(1, Point(0, 0))
    node_b = StrNode(2, Point(2, 1))
    bar = StrBar(1, node_a, node_b, section, young)

    def test_global_stiffness_matrix(self):
        stiffness_terms = [
            4, 2, -4, -2,
            2, 1, -2, -1,
            -4, -2, 4, 2,
            -2, -1, 2, 1,
        ]
        expected = Matrix(4, 4).set_data(stiffness_terms)
        actual = self.bar.global_stiffness_matrix()
        self.assertEqual(expected, actual)
| 24.62963 | 51 | 0.607519 | import unittest
from math import sqrt
from eqs import Matrix
from geom2d import Point
from structures.model.node import StrNode
from structures.model.bar import StrBar
class BarTest(unittest.TestCase):
    """Unit test for StrBar.global_stiffness_matrix with a fixed bar."""
    # Bar geometry and material shared by the tests below.
    section = sqrt(5)
    young = 5
    node_a = StrNode(1, Point(0, 0))
    node_b = StrNode(2, Point(2, 1))
    bar = StrBar(1, node_a, node_b, section, young)
    def test_global_stiffness_matrix(self):
        """The 4x4 global stiffness matrix matches the hand-computed values."""
        expected = Matrix(4, 4).set_data([
            4, 2, -4, -2,
            2, 1, -2, -1,
            -4, -2, 4, 2,
            -2, -1, 2, 1
        ])
        actual = self.bar.global_stiffness_matrix()
        self.assertEqual(expected, actual)
| true | true |
f7facb63ac47f2e72439c2d03848e271ece3fd30 | 4,072 | py | Python | product_search/settings.py | yanglinz/product-search | ad2c5d372944526dd2c6fe4888eb8920e39e2d26 | [
"MIT"
] | 1 | 2018-08-23T19:58:03.000Z | 2018-08-23T19:58:03.000Z | product_search/settings.py | yanglinz/product-search | ad2c5d372944526dd2c6fe4888eb8920e39e2d26 | [
"MIT"
] | 10 | 2020-09-06T01:28:36.000Z | 2022-03-03T22:41:59.000Z | product_search/settings.py | yanglinz/product-search | ad2c5d372944526dd2c6fe4888eb8920e39e2d26 | [
"MIT"
] | null | null | null | import os
import dj_database_url
import django_heroku
import dotenv
# Load variables from a local .env file before any os.environ reads below.
dotenv.read_dotenv()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
SECRET_KEY = os.environ["SECRET_KEY"]
# DEBUG is on only when the env var is exactly the string "true".
DEBUG = os.environ["DEBUG"] == "true"
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    # Disable Django's own staticfiles handling in favour of WhiteNoise, for
    # greater consistency between gunicorn and `./manage.py runserver`. See:
    # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
    "whitenoise.runserver_nostatic",
    "django.contrib.staticfiles",
    "graphene_django",
    "corsheaders",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "product_search.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
            "debug": DEBUG,
        },
    }
]
WSGI_APPLICATION = "product_search.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite default; the 'default' entry is overridden below from $DATABASE_URL.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES["default"].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Allow all host headers
ALLOWED_HOSTS = ["*"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, "staticfiles")
STATIC_URL = "/static/"
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, "static")]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Django graphene
# http://docs.graphene-python.org/projects/django/en/latest/
GRAPHENE = {"SCHEMA": "server.graphql.schema"}
# Cors
# https://github.com/ottoyiu/django-cors-headers
CORS_ORIGIN_ALLOW_ALL = True
# Application variables
WALMART_API_URL = os.environ["WALMART_API_URL"]
WALMART_API_KEY = os.environ["WALMART_API_KEY"]
# Activate Django-Heroku.
django_heroku.settings(locals())
| 30.162963 | 90 | 0.726424 | import os
import dj_database_url
import django_heroku
import dotenv
# Load variables from a local .env file before any os.environ reads below.
dotenv.read_dotenv()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
SECRET_KEY = os.environ["SECRET_KEY"]
# DEBUG is on only when the env var is exactly the string "true".
DEBUG = os.environ["DEBUG"] == "true"
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    # Disable Django's own staticfiles handling in favour of WhiteNoise, for
    # greater consistency between gunicorn and `./manage.py runserver`. See:
    # http://whitenoise.evans.io/en/stable/django.html#using-whitenoise-in-development
    "whitenoise.runserver_nostatic",
    "django.contrib.staticfiles",
    "graphene_django",
    "corsheaders",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "product_search.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
            "debug": DEBUG,
        },
    }
]
WSGI_APPLICATION = "product_search.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# SQLite default; the 'default' entry is overridden below from $DATABASE_URL.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Change 'default' database configuration with $DATABASE_URL.
DATABASES["default"].update(dj_database_url.config(conn_max_age=500, ssl_require=True))
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Allow all host headers
ALLOWED_HOSTS = ["*"]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, "staticfiles")
STATIC_URL = "/static/"
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [os.path.join(PROJECT_ROOT, "static")]
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Django graphene
# http://docs.graphene-python.org/projects/django/en/latest/
GRAPHENE = {"SCHEMA": "server.graphql.schema"}
# Cors
# https://github.com/ottoyiu/django-cors-headers
CORS_ORIGIN_ALLOW_ALL = True
# Application variables
WALMART_API_URL = os.environ["WALMART_API_URL"]
WALMART_API_KEY = os.environ["WALMART_API_KEY"]
# Activate Django-Heroku.
django_heroku.settings(locals())
| true | true |
f7facb852a3db388a7c69659114114ea83276164 | 12,295 | py | Python | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/experimental/mcmc/sample_fold.py | rupei/probability | 4aa1ee652853a19c4e80d39216c3fa535ed3e589 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Drivers for streaming reductions framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import tracing_reducer
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow_probability.python.mcmc import sample
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'sample_chain',
'sample_fold',
]
def sample_fold(
    num_steps,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    reducer=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    parallel_iterations=10,
    seed=None,
    name=None,
):
  """Computes the requested reductions over the `kernel`'s samples.
  To wit, runs the given `kernel` for `num_steps` steps, and consumes
  the stream of samples with the given `Reducer`s' `one_step` method(s).
  This runs in constant memory (unless a given `Reducer` builds a
  large structure).
  The driver internally composes the correct onion of `WithReductions`
  and `SampleDiscardingKernel` to implement the requested optionally
  thinned reduction; however, the kernel results of those applied
  Transition Kernels will not be returned. Hence, if warm-restarting
  reductions is desired, one should manually build the Transition Kernel
  onion and use `tfp.experimental.mcmc.step_kernel`.
  An arbitrary collection of `reducer` can be provided, and the resulting
  finalized statistic(s) will be returned in an identical structure.
  Args:
    num_steps: Integer or scalar `Tensor` representing the number of `Reducer`
      steps.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s).
    previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s.
      Warm-start for the auxiliary state needed by the given `kernel`.
      If not supplied, `sample_fold` will cold-start with
      `kernel.bootstrap_results`.
    kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
      of the Markov chain.
    reducer: A (possibly nested) structure of `Reducer`s to be evaluated
      on the `kernel`'s samples. If no reducers are given (`reducer=None`),
      then `None` will be returned in place of streaming calculations.
    num_burnin_steps: Integer or scalar `Tensor` representing the number
      of chain steps to take before starting to collect results.
      Defaults to 0 (i.e., no burn-in).
    num_steps_between_results: Integer or scalar `Tensor` representing
      the number of chain steps between collecting a result. Only one out
      of every `num_steps_between_samples + 1` steps is included in the
      returned results. Defaults to 0 (i.e., no thinning).
    parallel_iterations: The number of iterations allowed to run in parallel. It
      must be a positive integer. See `tf.while_loop` for more details.
    seed: Optional seed for reproducible sampling.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'mcmc_sample_fold').
  Returns:
    reduction_results: A (possibly nested) structure of finalized reducer
      statistics. The structure identically mimics that of `reducer`.
    end_state: The final state of the Markov chain(s).
    final_kernel_results: `collections.namedtuple` of internal calculations
      used to advance the supplied `kernel`. These results do not include
      the kernel results of `WithReductions` or `SampleDiscardingKernel`.
  """
  with tf.name_scope(name or 'mcmc_sample_fold'):
    num_steps = tf.convert_to_tensor(
        num_steps, dtype=tf.int32, name='num_steps')
    current_state = tf.nest.map_structure(
        lambda x: tf.convert_to_tensor(x, name='current_state'),
        current_state)
    # `reducer=None` is represented internally by an empty reducer list;
    # remember this so `None` (not `[]`) can be returned to the caller.
    reducer_was_none = False
    if reducer is None:
      reducer = []
      reducer_was_none = True
    # Kernel onion: burn-in/thinning (SampleDiscardingKernel) innermost,
    # streaming reductions (WithReductions) wrapped around it.
    reduction_kernel = with_reductions.WithReductions(
        inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
            inner_kernel=kernel,
            num_burnin_steps=num_burnin_steps,
            num_steps_between_results=num_steps_between_results),
        reducer=reducer,
    )
    end_state, final_kernel_results = exp_sample_lib.step_kernel(
        num_steps=num_steps,
        current_state=current_state,
        previous_kernel_results=previous_kernel_results,
        kernel=reduction_kernel,
        return_final_kernel_results=True,
        parallel_iterations=parallel_iterations,
        seed=seed,
        name=name,
    )
    # Finalize each reducer's running state into its statistic, preserving
    # the user-provided reducer structure.
    reduction_results = nest.map_structure_up_to(
        reducer,
        lambda r, s: r.finalize(s),
        reducer,
        final_kernel_results.streaming_calculations,
        check_types=False)
    if reducer_was_none:
      reduction_results = None
    # Strip the two wrapper kernels' results before returning.
    return (reduction_results,
            end_state,
            final_kernel_results.inner_results.inner_results)
def _trace_kernel_results(current_state, kernel_results):
del current_state
return kernel_results
def sample_chain(
    num_results,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    trace_fn=_trace_kernel_results,
    return_final_kernel_results=False,
    parallel_iterations=10,
    seed=None,
    name=None,
):
  """Implements Markov chain Monte Carlo via repeated `TransitionKernel` steps.
  This function samples from a Markov chain at `current_state` whose
  stationary distribution is governed by the supplied `TransitionKernel`
  instance (`kernel`).
  This function can sample from multiple chains, in parallel. (Whether or not
  there are multiple chains is dictated by the `kernel`.)
  The `current_state` can be represented as a single `Tensor` or a `list` of
  `Tensors` which collectively represent the current state.
  Since MCMC states are correlated, it is sometimes desirable to produce
  additional intermediate states, and then discard them, ending up with a set of
  states with decreased autocorrelation. See [Owen (2017)][1]. Such 'thinning'
  is made possible by setting `num_steps_between_results > 0`. The chain then
  takes `num_steps_between_results` extra steps between the steps that make it
  into the results. The extra steps are never materialized, and thus do not
  increase memory requirements.
  In addition to returning the chain state, this function supports tracing of
  auxiliary variables used by the kernel. The traced values are selected by
  specifying `trace_fn`. By default, all kernel results are traced but in the
  future the default will be changed to no results being traced, so plan
  accordingly. See below for some examples of this feature.
  Args:
    num_results: Integer number of Markov chain draws.
    current_state: `Tensor` or Python `list` of `Tensor`s representing the
      current state(s) of the Markov chain(s).
    previous_kernel_results: A `Tensor` or a nested collection of `Tensor`s
      representing internal calculations made within the previous call to this
      function (or as returned by `bootstrap_results`).
    kernel: An instance of `tfp.mcmc.TransitionKernel` which implements one step
      of the Markov chain.
    num_burnin_steps: Integer number of chain steps to take before starting to
      collect results.
      Default value: 0 (i.e., no burn-in).
    num_steps_between_results: Integer number of chain steps between collecting
      a result. Only one out of every `num_steps_between_samples + 1` steps is
      included in the returned results. The number of returned chain states is
      still equal to `num_results`. Default value: 0 (i.e., no thinning).
    trace_fn: A callable that takes in the current chain state and the previous
      kernel results and return a `Tensor` or a nested collection of `Tensor`s
      that is then traced along with the chain state.
    return_final_kernel_results: If `True`, then the final kernel results are
      returned alongside the chain state and the trace specified by the
      `trace_fn`.
    parallel_iterations: The number of iterations allowed to run in parallel. It
      must be a positive integer. See `tf.while_loop` for more details.
    seed: Optional, a seed for reproducible sampling.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., 'experimental_mcmc_sample_chain').
  Returns:
    checkpointable_states_and_trace: if `return_final_kernel_results` is
      `True`. The return value is an instance of
      `CheckpointableStatesAndTrace`.
    all_states: if `return_final_kernel_results` is `False` and `trace_fn` is
      `None`. The return value is a `Tensor` or Python list of `Tensor`s
      representing the state(s) of the Markov chain(s) at each result step. Has
      same shape as input `current_state` but with a prepended
      `num_results`-size dimension.
    states_and_trace: if `return_final_kernel_results` is `False` and
      `trace_fn` is not `None`. The return value is an instance of
      `StatesAndTrace`.
  #### References
  [1]: Art B. Owen. Statistically efficient thinning of a Markov chain sampler.
       _Technical Report_, 2017.
       http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
  """
  with tf.name_scope(name or 'experimental_mcmc_sample_chain'):
    if not kernel.is_calibrated:
      warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
                    'chain may not converge to intended target distribution.')
    # `trace_fn=None` means "trace nothing"; an empty-tuple trace_fn keeps
    # the plumbing below uniform.
    if trace_fn is None:
      trace_fn = lambda *args: ()
      no_trace = True
    else:
      no_trace = False
    # Detect "caller left trace_fn at its deprecated default" by identity
    # against the function's default-argument tuple.
    if trace_fn is sample_chain.__defaults__[4]:
      warnings.warn('Tracing all kernel results by default is deprecated. Set '
                    'the `trace_fn` argument to None (the future default '
                    'value) or an explicit callback that traces the values '
                    'you are interested in.')
    # `WithReductions` assumes all its reducers want to reduce over the
    # immediate inner results of its kernel results. However,
    # We don't care about the kernel results of `SampleDiscardingKernel`; hence,
    # we evaluate the `trace_fn` on a deeper level of inner results.
    def real_trace_fn(curr_state, kr):
      return curr_state, trace_fn(curr_state, kr.inner_results)
    trace_reducer = tracing_reducer.TracingReducer(
        trace_fn=real_trace_fn,
        size=num_results
    )
    # Delegate stepping/burn-in/thinning to sample_fold with a tracing reducer.
    trace_results, _, final_kernel_results = sample_fold(
        num_steps=num_results,
        current_state=current_state,
        previous_kernel_results=previous_kernel_results,
        kernel=kernel,
        reducer=trace_reducer,
        num_burnin_steps=num_burnin_steps,
        num_steps_between_results=num_steps_between_results,
        parallel_iterations=parallel_iterations,
        seed=seed,
        name=name,
    )
    all_states, trace = trace_results
    if return_final_kernel_results:
      return sample.CheckpointableStatesAndTrace(
          all_states=all_states,
          trace=trace,
          final_kernel_results=final_kernel_results)
    else:
      if no_trace:
        return all_states
      else:
        return sample.StatesAndTrace(all_states=all_states, trace=trace)
| 43.140351 | 85 | 0.727938 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.mcmc import sample as exp_sample_lib
from tensorflow_probability.python.experimental.mcmc import sample_discarding_kernel
from tensorflow_probability.python.experimental.mcmc import tracing_reducer
from tensorflow_probability.python.experimental.mcmc import with_reductions
from tensorflow_probability.python.mcmc import sample
from tensorflow.python.util import nest
__all__ = [
'sample_chain',
'sample_fold',
]
def sample_fold(
    num_steps,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    reducer=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    parallel_iterations=10,
    seed=None,
    name=None,
):
  """Runs a Markov chain and folds collected samples through `Reducer`s.

  Drives `kernel` for `num_steps` collected steps -- after discarding
  `num_burnin_steps` burn-in steps and `num_steps_between_results` thinning
  steps between results -- streaming each retained sample into `reducer`
  rather than materializing the whole chain.

  Args:
    num_steps: Integer number of Markov chain results to collect.
    current_state: `Tensor` or (possibly nested) structure of `Tensor`s
      holding the initial chain state.
    previous_kernel_results: Optional warm-start kernel results.
    kernel: `TransitionKernel` that advances the chain.
    reducer: A (possibly nested) structure of `Reducer`s, or `None` to run
      the chain purely for its end state.
    num_burnin_steps: Number of initial steps to discard.
    num_steps_between_results: Number of steps discarded between results.
    parallel_iterations: Forwarded to the underlying while-loop machinery.
    seed: Optional PRNG seed.
    name: Optional name scope (defaults to 'mcmc_sample_fold').

  Returns:
    Tuple of:
      reduction_results: Finalized reduction values with the structure of
        `reducer`, or `None` when `reducer` was `None`.
      end_state: Final chain state.
      final_kernel_results: Kernel results of the *user-supplied* `kernel`
        (the wrapper kernels' bookkeeping stripped off).
  """
  with tf.name_scope(name or 'mcmc_sample_fold'):
    num_steps = tf.convert_to_tensor(
        num_steps, dtype=tf.int32, name='num_steps')
    current_state = tf.nest.map_structure(
        lambda x: tf.convert_to_tensor(x, name='current_state'),
        current_state)
    # Remember whether the caller asked for no reductions so a matching
    # `None` can be returned at the end.
    reducer_was_none = False
    if reducer is None:
      reducer = []
      reducer_was_none = True
    # Burn-in and thinning are delegated to SampleDiscardingKernel; the
    # WithReductions wrapper feeds the retained samples into `reducer`.
    reduction_kernel = with_reductions.WithReductions(
        inner_kernel=sample_discarding_kernel.SampleDiscardingKernel(
            inner_kernel=kernel,
            num_burnin_steps=num_burnin_steps,
            num_steps_between_results=num_steps_between_results),
        reducer=reducer,
    )
    end_state, final_kernel_results = exp_sample_lib.step_kernel(
        num_steps=num_steps,
        current_state=current_state,
        previous_kernel_results=previous_kernel_results,
        kernel=reduction_kernel,
        return_final_kernel_results=True,
        parallel_iterations=parallel_iterations,
        seed=seed,
        name=name,
    )
    # Finalize each reducer against its accumulated streaming state.
    # check_types=False: reducers and their state objects are unlike types.
    reduction_results = nest.map_structure_up_to(
        reducer,
        lambda r, s: r.finalize(s),
        reducer,
        final_kernel_results.streaming_calculations,
        check_types=False)
    if reducer_was_none:
      reduction_results = None
    # Unwrap WithReductions -> SampleDiscardingKernel to expose the
    # user-supplied kernel's own results.
    return (reduction_results,
            end_state,
            final_kernel_results.inner_results.inner_results)
def _trace_kernel_results(current_state, kernel_results):
del current_state
return kernel_results
def sample_chain(
    num_results,
    current_state,
    previous_kernel_results=None,
    kernel=None,
    num_burnin_steps=0,
    num_steps_between_results=0,
    trace_fn=_trace_kernel_results,
    return_final_kernel_results=False,
    parallel_iterations=10,
    seed=None,
    name=None,
):
  """Samples a Markov chain, tracing auxiliary values via `trace_fn`.

  Thin wrapper over `sample_fold` that uses a `TracingReducer` to record
  the chain states along with whatever `trace_fn` extracts from the
  kernel results.

  Args:
    num_results: Integer number of chain results to collect.
    current_state: `Tensor` or nested structure of `Tensor`s; initial state.
    previous_kernel_results: Optional warm-start results for `kernel`.
    kernel: `TransitionKernel` driving the chain.
    num_burnin_steps: Number of initial steps to discard.
    num_steps_between_results: Thinning between collected results.
    trace_fn: Callable `(state, kernel_results) -> traced values`, or `None`
      to trace nothing. Leaving it at the default (trace everything) is
      deprecated and warns.
    return_final_kernel_results: If True, also return the final kernel
      results, enabling checkpoint/restart of the chain.
    parallel_iterations: Forwarded to the underlying while-loop machinery.
    seed: Optional PRNG seed.
    name: Optional name scope.

  Returns:
    A `CheckpointableStatesAndTrace` when `return_final_kernel_results` is
    True; otherwise the bare states when `trace_fn` is None, else a
    `StatesAndTrace`.
  """
  with tf.name_scope(name or 'experimental_mcmc_sample_chain'):
    if not kernel.is_calibrated:
      warnings.warn('supplied `TransitionKernel` is not calibrated. Markov '
                    'chain may not converge to intended target distribution.')
    if trace_fn is None:
      # Trace nothing, but remember that so the plain states can be
      # returned (instead of a StatesAndTrace with an empty trace).
      trace_fn = lambda *args: ()
      no_trace = True
    else:
      no_trace = False
    # NOTE(review): identity check against __defaults__[4] detects "caller
    # left trace_fn at its default"; index 4 is trace_fn's slot in the
    # defaults tuple and is fragile if the signature ever changes.
    if trace_fn is sample_chain.__defaults__[4]:
      warnings.warn('Tracing all kernel results by default is deprecated. Set '
                    'the `trace_fn` argument to None (the future default '
                    'value) or an explicit callback that traces the values '
                    'you are interested in.')
    # The tracing reducer receives the discarding wrapper's kernel results;
    # we evaluate the `trace_fn` on a deeper level of inner results.
    def real_trace_fn(curr_state, kr):
      return curr_state, trace_fn(curr_state, kr.inner_results)
    trace_reducer = tracing_reducer.TracingReducer(
        trace_fn=real_trace_fn,
        size=num_results
    )
    trace_results, _, final_kernel_results = sample_fold(
        num_steps=num_results,
        current_state=current_state,
        previous_kernel_results=previous_kernel_results,
        kernel=kernel,
        reducer=trace_reducer,
        num_burnin_steps=num_burnin_steps,
        num_steps_between_results=num_steps_between_results,
        parallel_iterations=parallel_iterations,
        seed=seed,
        name=name,
    )
    all_states, trace = trace_results
    if return_final_kernel_results:
      return sample.CheckpointableStatesAndTrace(
          all_states=all_states,
          trace=trace,
          final_kernel_results=final_kernel_results)
    else:
      if no_trace:
        return all_states
      else:
        return sample.StatesAndTrace(all_states=all_states, trace=trace)
f7facbc5938a85e2fb02aa28d1a3a30d130325b1 | 428 | py | Python | web crawler functions/crawl_web_dict.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | 1 | 2022-03-06T21:00:45.000Z | 2022-03-06T21:00:45.000Z | web crawler functions/crawl_web_dict.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
] | null | null | null | web crawler functions/crawl_web_dict.py | akshaynagpal/python_web_crawler | a74af25db4c9f819105621868a6a9a7337a2a770 | [
"MIT"
def union(p, q):
    """Append to list *p* every element of *q* not already present.

    Mutates *p* in place and preserves first-seen order.  Uses a set for
    O(1) membership tests instead of the original O(len(p)) list scan per
    element; elements must therefore be hashable (they are URL strings in
    this crawler).
    """
    seen = set(p)
    for e in q:
        if e not in seen:
            p.append(e)
            seen.add(e)
def crawl_web(seed):
    """Crawl the web starting from the *seed* URL.

    Returns ``(index, graph)``: ``index`` maps keywords to pages (filled by
    the external ``add_page_to_index`` helper) and ``graph`` maps each
    crawled page to its list of outgoing links.  Relies on the external
    helpers ``get_page``, ``add_page_to_index`` and ``get_all_links``.
    """
    tocrawl = [seed]
    crawled = set()  # set instead of list: O(1) "already crawled?" test
    index = {}
    graph = {}
    while tocrawl:
        page = tocrawl.pop()
        if page not in crawled:
            content = get_page(page)
            add_page_to_index(index, page, content)
            outlinks = get_all_links(content)
            graph[page] = outlinks
            union(tocrawl, outlinks)
            crawled.add(page)
    return index, graph


def union(p, q):
    """Append to list *p* every element of *q* not already present (in-place).

    Set-based membership keeps this O(len(q)) instead of O(len(p)*len(q));
    elements must be hashable (URL strings here).
    """
    seen = set(p)
    for e in q:
        if e not in seen:
            p.append(e)
            seen.add(e)
def crawl_web(seed):
    """Crawl the web starting from the *seed* URL.

    Returns ``(index, graph)`` where ``index`` maps keywords to pages and
    ``graph`` maps each crawled page to its outgoing links.  Depends on the
    external helpers ``get_page``, ``add_page_to_index``, ``get_all_links``
    and the module-level ``union``.
    """
    tocrawl = [seed]
    crawled = set()  # set instead of list: O(1) "already crawled?" test
    index = {}
    graph = {}
    while tocrawl:
        page = tocrawl.pop()
        if page not in crawled:
            content = get_page(page)
            add_page_to_index(index, page, content)
            outlinks = get_all_links(content)
            graph[page] = outlinks
            union(tocrawl, outlinks)
            crawled.add(page)
    return index, graph
f7facc8714f2358ff5e4f5bf725d3516243bec69 | 10,025 | py | Python | algos/custom_ppo2.py | Ottawa-Autonomous-Vehicle-Group/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | 1 | 2020-08-02T20:47:44.000Z | 2020-08-02T20:47:44.000Z | algos/custom_ppo2.py | vijpandaturtle/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | null | null | null | algos/custom_ppo2.py | vijpandaturtle/learning-to-drive-in-5-minutes | fb82bc77593605711289e03f95dcfb6d3ea9e6c3 | [
"MIT"
] | null | null | null | import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
class PPO2WithVAE(PPO2):
    """
    Custom PPO2 version.

    Notable changes:
    - optimization is done after each episode and not after n steps
    - BUGFIX: the recurrent-policy minibatch loop used the misspelled loop
      variable ``stan_timestepsrt`` while its body indexed with ``start``,
      which is never assigned in that branch (NameError at runtime).
    """

    def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"):
        """Train the model for (roughly) `total_timesteps` environment steps.

        :param total_timesteps: (int) budget of environment steps
        :param callback: (callable) invoked each iteration with
            (locals(), globals()); returning False stops training
        :param log_interval: (int) iterations between log dumps
        :param tb_log_name: (str) tensorboard run name
        :return: (PPO2WithVAE) self
        """
        # Transform to callable if needed
        self.learning_rate = get_schedule_fn(self.learning_rate)
        self.cliprange = get_schedule_fn(self.cliprange)

        with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn()

            runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
            self.episode_reward = np.zeros((self.n_envs,))

            ep_info_buf = deque(maxlen=100)
            t_first_start = time.time()

            n_timesteps = 0
            for timestep in range(1, total_timesteps + 1):
                assert self.n_batch % self.nminibatches == 0
                batch_size = self.n_batch // self.nminibatches
                t_start = time.time()
                # Linearly anneal learning rate and clip range over the budget.
                frac = 1.0 - timestep / total_timesteps
                lr_now = self.learning_rate(frac)
                cliprangenow = self.cliprange(frac)
                # true_reward is the reward without discount
                obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
                n_timesteps += len(obs)
                ep_info_buf.extend(ep_infos)
                mb_loss_vals = []
                if states is None:  # nonrecurrent version
                    inds = np.arange(self.n_batch)
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(inds)
                        for start in range(0, self.n_batch, batch_size):
                            end = start + batch_size
                            mbinds = inds[start:end]
                            slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
                                                                 update=n_timesteps))
                else:  # recurrent version: minibatch over whole environments
                    assert self.n_envs % self.nminibatches == 0
                    env_indices = np.arange(self.n_envs)
                    flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
                    envs_per_batch = batch_size // self.n_steps
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(env_indices)
                        # BUGFIX: loop variable was misspelled `stan_timestepsrt`,
                        # leaving `start` below unassigned.
                        for start in range(0, self.n_envs, envs_per_batch):
                            end = start + envs_per_batch
                            mb_env_inds = env_indices[start:end]
                            mb_flat_inds = flat_indices[mb_env_inds].ravel()
                            slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_states = states[mb_env_inds]
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps,
                                                                 writer=writer, states=mb_states))

                loss_vals = np.mean(mb_loss_vals, axis=0)
                t_now = time.time()
                fps = int(self.n_batch / (t_now - t_start))

                if writer is not None:
                    self.episode_reward = total_episode_reward_logger(self.episode_reward,
                                                                      true_reward.reshape((self.n_envs, self.n_steps)),
                                                                      masks.reshape((self.n_envs, self.n_steps)),
                                                                      writer, n_timesteps)

                if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1):
                    explained_var = explained_variance(values, returns)
                    logger.logkv("total_timesteps", n_timesteps)
                    logger.logkv("fps", fps)
                    logger.logkv("explained_variance", float(explained_var))
                    logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
                    logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
                    logger.logkv('time_elapsed', t_start - t_first_start)
                    for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
                        logger.logkv(loss_name, loss_val)
                    logger.dumpkvs()

                if callback is not None:
                    # Only stop training if return value is False, not when it is None. This is for backwards
                    # compatibility with callbacks that have no return statement.
                    if callback(locals(), globals()) is False:
                        break

                if n_timesteps > total_timesteps:
                    break

            return self
class Runner(AbstractEnvRunner):
    def __init__(self, *, env, model, n_steps, gamma, lam):
        """
        A runner that collects rollouts from an environment for a model.

        :param env: (Gym environment) The environment to learn from
        :param model: (Model) The model to learn
        :param n_steps: (int) The number of steps to run for each environment
        :param gamma: (float) Discount factor
        :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
        """
        super().__init__(env=env, model=model, n_steps=n_steps)
        self.lam = lam
        self.gamma = gamma

    def run(self):
        """
        Collect one rollout of (at least) `n_steps` transitions and compute
        GAE advantages/returns.

        :return: tuple of
            - observations: (np.ndarray) the observations
            - returns: (np.ndarray) GAE returns (advantages + values)
            - masks: (numpy bool) whether an episode is over or not
            - actions: (np.ndarray) the actions
            - values: (np.ndarray) the value function output
            - negative log probabilities: (np.ndarray)
            - states: (np.ndarray) the internal states of the recurrent policies
            - infos: (dict) the extra information of the model
            - true_reward: (np.ndarray) undiscounted rewards
        """
        # mb stands for minibatch
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
        mb_states = self.states
        ep_infos = []
        while True:
            actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            clipped_actions = actions
            # Clip the actions to avoid out of bound error
            if isinstance(self.env.action_space, gym.spaces.Box):
                clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
            self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
            # Collect per-episode stats emitted by Monitor-style wrappers.
            for info in infos:
                maybe_ep_info = info.get('episode')
                if maybe_ep_info is not None:
                    ep_infos.append(maybe_ep_info)
            mb_rewards.append(rewards)
            # NOTE(review): truthiness of `self.dones` (an array) is only valid
            # for a single environment -- confirm n_envs == 1 for this runner.
            if self.dones:
                print("Episode finished. Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards)))
            if len(mb_rewards) >= self.n_steps:
                break
        # batch of steps to batch of rollouts
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # NOTE(review): `np.bool` is removed in NumPy >= 1.24; newer NumPy
        # would need `np.bool_` here.
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        last_values = self.model.value(self.obs, self.states, self.dones)
        # discount/bootstrap off value fn
        # Generalized Advantage Estimation: backward recursion over steps.
        mb_advs = np.zeros_like(mb_rewards)
        true_reward = np.copy(mb_rewards)
        last_gae_lam = 0
        for step in reversed(range(self.n_steps)):
            if step == self.n_steps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[step + 1]
                nextvalues = mb_values[step + 1]
            delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
            mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
        mb_returns = mb_advs + mb_values

        # Reshape (n_steps, n_envs, ...) rollouts into flat batches.
        mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
            map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))

        return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
| 52.213542 | 121 | 0.572569 | import time
from collections import deque
import gym
import numpy as np
from stable_baselines import logger, PPO2
from stable_baselines.a2c.utils import total_episode_reward_logger
from stable_baselines.common import explained_variance, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.ppo2.ppo2 import get_schedule_fn, safe_mean, swap_and_flatten
class PPO2WithVAE(PPO2):
    """
    Custom PPO2 version; optimization is done after each episode rather
    than after n steps.

    BUGFIX: the recurrent-policy minibatch loop used the misspelled loop
    variable ``stan_timestepsrt`` while its body indexed with ``start``,
    which is never assigned in that branch (NameError at runtime).
    """

    def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="PPO2"):
        """Train the model for (roughly) `total_timesteps` environment steps.

        :param total_timesteps: (int) budget of environment steps
        :param callback: (callable) invoked each iteration with
            (locals(), globals()); returning False stops training
        :param log_interval: (int) iterations between log dumps
        :param tb_log_name: (str) tensorboard run name
        :return: (PPO2WithVAE) self
        """
        # Turn constant hyperparameters into schedules if needed.
        self.learning_rate = get_schedule_fn(self.learning_rate)
        self.cliprange = get_schedule_fn(self.cliprange)

        with TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name) as writer:
            self._setup_learn()

            runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
            self.episode_reward = np.zeros((self.n_envs,))

            ep_info_buf = deque(maxlen=100)
            t_first_start = time.time()

            n_timesteps = 0
            for timestep in range(1, total_timesteps + 1):
                assert self.n_batch % self.nminibatches == 0
                batch_size = self.n_batch // self.nminibatches
                t_start = time.time()
                # Linearly anneal learning rate and clip range over the budget.
                frac = 1.0 - timestep / total_timesteps
                lr_now = self.learning_rate(frac)
                cliprangenow = self.cliprange(frac)
                obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
                n_timesteps += len(obs)
                ep_info_buf.extend(ep_infos)
                mb_loss_vals = []
                if states is None:  # nonrecurrent policy: shuffle flat samples
                    inds = np.arange(self.n_batch)
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(inds)
                        for start in range(0, self.n_batch, batch_size):
                            end = start + batch_size
                            mbinds = inds[start:end]
                            slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
                                                                 update=n_timesteps))
                else:  # recurrent policy: minibatch over whole environments
                    assert self.n_envs % self.nminibatches == 0
                    env_indices = np.arange(self.n_envs)
                    flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
                    envs_per_batch = batch_size // self.n_steps
                    for epoch_num in range(self.noptepochs):
                        np.random.shuffle(env_indices)
                        # BUGFIX: loop variable was misspelled `stan_timestepsrt`,
                        # leaving `start` below unassigned.
                        for start in range(0, self.n_envs, envs_per_batch):
                            end = start + envs_per_batch
                            mb_env_inds = env_indices[start:end]
                            mb_flat_inds = flat_indices[mb_env_inds].ravel()
                            slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
                            mb_states = states[mb_env_inds]
                            mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=n_timesteps,
                                                                 writer=writer, states=mb_states))

                loss_vals = np.mean(mb_loss_vals, axis=0)
                t_now = time.time()
                fps = int(self.n_batch / (t_now - t_start))

                if writer is not None:
                    self.episode_reward = total_episode_reward_logger(self.episode_reward,
                                                                      true_reward.reshape((self.n_envs, self.n_steps)),
                                                                      masks.reshape((self.n_envs, self.n_steps)),
                                                                      writer, n_timesteps)

                if self.verbose >= 1 and (timestep % log_interval == 0 or timestep == 1):
                    explained_var = explained_variance(values, returns)
                    logger.logkv("total_timesteps", n_timesteps)
                    logger.logkv("fps", fps)
                    logger.logkv("explained_variance", float(explained_var))
                    logger.logkv('ep_rewmean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
                    logger.logkv('eplenmean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
                    logger.logkv('time_elapsed', t_start - t_first_start)
                    for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
                        logger.logkv(loss_name, loss_val)
                    logger.dumpkvs()

                if callback is not None:
                    # Only stop when the callback explicitly returns False
                    # (callbacks without a return statement yield None).
                    if callback(locals(), globals()) is False:
                        break

                if n_timesteps > total_timesteps:
                    break

            return self
class Runner(AbstractEnvRunner):
    def __init__(self, *, env, model, n_steps, gamma, lam):
        """
        A runner that collects rollouts from an environment for a model.

        :param env: (Gym environment) environment to learn from
        :param model: (Model) model being trained
        :param n_steps: (int) steps to run for each environment per rollout
        :param gamma: (float) discount factor
        :param lam: (float) bias/variance trade-off factor for GAE
        """
        super().__init__(env=env, model=model, n_steps=n_steps)
        self.lam = lam
        self.gamma = gamma

    def run(self):
        """
        Collect one rollout of (at least) `n_steps` transitions and compute
        GAE advantages/returns.

        :return: (obs, returns, masks, actions, values, neglogpacs, states,
            ep_infos, true_reward) with time/env axes flattened by
            `swap_and_flatten`.
        """
        # mb stands for minibatch
        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
        mb_states = self.states
        ep_infos = []
        while True:
            actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogpacs.append(neglogpacs)
            mb_dones.append(self.dones)
            clipped_actions = actions
            # Clip continuous actions to the action-space bounds.
            if isinstance(self.env.action_space, gym.spaces.Box):
                clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
            self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)
            # Collect per-episode stats emitted by Monitor-style wrappers.
            for info in infos:
                maybe_ep_info = info.get('episode')
                if maybe_ep_info is not None:
                    ep_infos.append(maybe_ep_info)
            mb_rewards.append(rewards)
            # NOTE(review): truthiness of `self.dones` (an array) is only valid
            # for a single environment -- confirm n_envs == 1 for this runner.
            if self.dones:
                print("Episode finished. Reward: {:.2f} {} Steps".format(np.sum(mb_rewards), len(mb_rewards)))
            if len(mb_rewards) >= self.n_steps:
                break
        # Convert batch of steps to batch of rollouts (arrays).
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # NOTE(review): `np.bool` is removed in NumPy >= 1.24; newer NumPy
        # would need `np.bool_` here.
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        last_values = self.model.value(self.obs, self.states, self.dones)
        # Generalized Advantage Estimation: backward recursion over steps,
        # bootstrapping off the value function at the rollout boundary.
        mb_advs = np.zeros_like(mb_rewards)
        true_reward = np.copy(mb_rewards)
        last_gae_lam = 0
        for step in reversed(range(self.n_steps)):
            if step == self.n_steps - 1:
                nextnonterminal = 1.0 - self.dones
                nextvalues = last_values
            else:
                nextnonterminal = 1.0 - mb_dones[step + 1]
                nextvalues = mb_values[step + 1]
            delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
            mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
        mb_returns = mb_advs + mb_values
        # Reshape (n_steps, n_envs, ...) rollouts into flat batches.
        mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
            map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
        return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
| true | true |
f7face466b52b16e22558c4da96f32c9df411036 | 579 | py | Python | webware/MiddleKit/Properties.py | PeaceWorksTechnologySolutions/w4py3-middlekit | a9554e20c47010e7b0c0deee63e1786482c59a1c | [
"MIT"
] | 2 | 2020-10-31T09:12:58.000Z | 2021-02-20T13:52:14.000Z | webware/MiddleKit/Properties.py | WebwareForPython/w4py3-middlekit | f740e2d2d3a5c225d6b8f9eb27ac08f8deed47e6 | [
"MIT"
] | 2 | 2020-01-07T15:24:09.000Z | 2020-01-08T15:39:57.000Z | webware/MiddleKit/Properties.py | PeaceWorksTechnologySolutions/w4py3-middlekit | a9554e20c47010e7b0c0deee63e1786482c59a1c | [
"MIT"
] | 1 | 2021-09-27T21:04:18.000Z | 2021-09-27T21:04:18.000Z | name = 'MiddleKit'
# Webware plug-in metadata for the MiddleKit component.
version = (3, 0, 0, 'a1')
# Documentation pages bundled with the plug-in (shown by the docs browser).
docs = [
    {'name': 'Introduction', 'file': 'Intro.html'},
    {'name': 'Quick Start', 'file': 'QuickStart.html'},
    {'name': "User's Guide", 'file': 'UsersGuide.html'},
    {'name': 'To Do', 'file': 'TODO.text'},
]
# Release maturity of this plug-in.
status = 'pre-release'
# Minimum Python interpreter version required.
requiredPyVersion = (3, 6, 0)
synopsis = """For building the "middle tier" of an application, that is, the domain-specific objects in between the front end and the database/datastore. MiddleKit is roughly analogous to NeXT/Apple's Enterprise Objects and Sun's Enterprise Java Beans."""
| 34.058824 | 255 | 0.656304 | name = 'MiddleKit'
version = (3, 0, 0, 'a1')
docs = [
{'name': 'Introduction', 'file': 'Intro.html'},
{'name': 'Quick Start', 'file': 'QuickStart.html'},
{'name': "User's Guide", 'file': 'UsersGuide.html'},
{'name': 'To Do', 'file': 'TODO.text'},
]
status = 'pre-release'
requiredPyVersion = (3, 6, 0)
synopsis = """For building the "middle tier" of an application, that is, the domain-specific objects in between the front end and the database/datastore. MiddleKit is roughly analogous to NeXT/Apple's Enterprise Objects and Sun's Enterprise Java Beans."""
| true | true |
f7facf6279f52b345e048fabfe93b7628ef0dd0f | 27,027 | py | Python | steinerpy/library/search/generic_algorithms.py | Kchour/steinerpy | be6206533b7b28cfb67800ee847f0de367dab834 | [
"MIT"
] | 3 | 2021-06-10T16:46:20.000Z | 2022-02-11T14:24:15.000Z | steinerpy/library/search/generic_algorithms.py | Kchour/steinerpy | be6206533b7b28cfb67800ee847f0de367dab834 | [
"MIT"
] | 12 | 2021-03-31T03:31:24.000Z | 2021-11-18T21:51:18.000Z | steinerpy/library/search/generic_algorithms.py | Kchour/steinerpy | be6206533b7b28cfb67800ee847f0de367dab834 | [
"MIT"
] | 1 | 2021-06-13T15:01:24.000Z | 2021-06-13T15:01:24.000Z | """This module provides a generic incremental search class, that breaks up nomination and update phase"""
import matplotlib.pyplot as plt
import numpy as np
from timeit import default_timer as timer
import steinerpy.config as cfg
from steinerpy.library.animation import AnimateV2
from steinerpy.library.logger import MyLogger
from steinerpy.library.misc.utils import MyTimer
from steinerpy.library.search.search_utils import PriorityQueue, PriorityQueueHeap
from steinerpy.library.search.search_utils import DoublyLinkedList
class Search:
    """Base class for iterative (incremental) search algorithms.

    Holds the open list (frontier), cost-so-far table, and parent links for
    a generic best-first search over a graph. Subclasses drive the search
    loop; the caller supplies the priority (f-cost) function.

    Parameters:
        graph (SquareGrid, MyGraph): graph object from the grid_utils/graph
            module; vertices are tuples.
        start (tuple): start vertex, a member of `graph`.
        goal (tuple): goal vertices (may be None; see subclasses).
        frontierType: open-list instance (e.g. PriorityQueue,
            PriorityQueueHeap); must support `put(item, priority)`.
        fCostsFunc: callable `(search, g, vertex) -> scalar` priority.
        id: identifier for this search object (consumed by subclasses,
            e.g. `GenericSearch`; optional without `Framework`).

    Attributes:
        current (tuple): most recently expanded node.
        frontier: the open list, seeded with `start` at priority 0.
        g (dict): cost-so-far, {vertex: g-value}; covers open and closed.
        parent (dict): linked list {vertex: parent vertex}.
        fCosts: the supplied priority function.

    Class variables:
        total_expanded_nodes (int): expansion count across all instances;
            cleared with `reset()`.
    """

    total_expanded_nodes = 0

    def __init__(self, graph, start, goal, frontierType, fCostsFunc, id):
        self.graph = graph
        self.start = start
        self.goal = goal
        self.current = None

        # Open list, seeded with the start vertex at priority 0.
        self.frontier = frontierType
        self.frontier.put(start, 0)

        # Cost so far (includes both frontier and closed vertices).
        self.g = {start: 0}

        # Linked list of parent pointers; the root has parent None.
        self.parent = {start: None}

        # Priority function object used for frontier updates.
        self.fCosts = fCostsFunc

    @classmethod
    def update_expanded_nodes(cls):
        """Increment the class-wide expansion counter."""
        cls.total_expanded_nodes += 1

    @classmethod
    def reset(cls):
        """Reset all class-wide counters."""
        cls.total_expanded_nodes = 0

    def reconstruct_path(self, parents, goal, start=None, order='forward'):
        """Rebuild a path from `goal` back toward `start` via parent links.

        Parameters:
            parents (dict): singly-linked list {vertex: parent vertex}.
            goal (tuple): vertex at which to begin the backtrack. Mandatory.
            start (tuple, optional): vertex at which to stop; when None the
                walk ends at the root (a vertex with no parent).
            order (str): 'forward' (start -> goal) or 'reverse'.

        Returns:
            list: the reconstructed path. Cycles in `parents` are detected
            and the walk is truncated instead of looping forever.
        """
        current = goal
        path = []
        visited = set()  # O(1) cycle detection (was an O(n) list scan)
        while current != start and current is not None:
            if current in visited:
                # Cycle in the parent links: stop rather than loop forever.
                break
            path.append(current)
            visited.add(current)
            current = parents.get(current, None)
        if start is not None:
            path.append(start)
        if order == 'forward':
            path.reverse()
        return path
class GenericSearch(Search):
"""This class extends `Search` and breaks up the search into `nominate` and `update` phases
This gives the user finer control over the search space, i.e. when to stop, update destinations midway, etc.
`GenericSearch` also inherits all the attributes of `Search`
Attributes:
visualize (bool): A flag for visualizing the algorithm. Mainly for debug purposes
animateCurrent (Animate): Animate the current nominated node
animateClosed (Animate): Animate the history of the closed set
animateNeighbors (Animate): Animate the open list in the `update`
Todo:
* Consider putting animateClosed in the `update` function, because closing does not occur until `update`
"""
def __init__(self, graph, fCostsFunc, start, frontierType, goal=None, visualize=False, id=None):
Search.__init__(self, graph, start, goal, frontierType, fCostsFunc, id)
# Visualize algorithm flag
self.visualize = visualize
# Keep track of nomination status
self.nominated = False # Make sure we don't nominate twice in a row
# Each search object needs an id
self.id = (id,)
# keep track of F
self.f = {}
self.f[start] = 0 #May need to figure out how to initialize this besides 0
# min values
# self._fmin, self._gmin, self._pmin, self._rmin = np.inf, np.inf, np.inf, np.inf
# ================ Misc Information ===================== #
self.closedList = {}
self.currentF = 0
self.currentP = 0
self.currentNeighs = [] # Required for testing overlap using open list
self._lmin = 0
self.lnode = None
#### Keep a sorted array for gmin, rmin, and fmin
self.gmin_heap = PriorityQueueHeap()
self.rmin_heap = PriorityQueueHeap()
self.fmin_heap = PriorityQueueHeap()
self.gmin_heap.put(start, 0)
self.rmin_heap.put(start, 0)
self.fmin_heap.put(start, 0)
# Visulization?
# if visualize:
# # initialize plot (graph has the dimensions built it)
# # xlim = (graph.grid_dim[0], graph.grid_dim[1]) #(minX, maxX)
# # ylim = (graph.grid_dim[2], graph.grid_dim[3]) #(minY, maxY)
# # no sleep atm
# # self.animateCurrent = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='bo', markerSize=10, sleep=0, order=2)
# # self.animateClosed = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=10, sleep=0, order=-1)
# # self.animateNeighbors = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=5, sleep=0, order=-1)
# # self.animatePath = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=5, sleep=0.000, order=-1)
# pass
    @property
    def goal(self):
        """Returns goals, keyed by id; the id and goal are fixed to each other.

        For use with the `Framework` class this works best when a dict is
        passed to the setter; otherwise the setter expects an iterable
        (list, tuple, set) and enumerates it into a dict.
        """
        return self._goal
@goal.setter
def goal(self, goal):
if isinstance(goal, dict):
self._goal = goal
else:
self._goal = {}
try:
for ndx, k in enumerate(goal):
if not set((ndx,)).issubset(set(self.id)):
self._goal[ndx] = k
except Exception as err:
print(err)
def nominate(self):
"""In this function, a node is nominated from the open set, which essentially updates the open set.
`nominate` is done using a priority queue. A flag is used in the conditional to
ensure the function is not called more than once prior to an update.
Returns:
True: if a node was nominated
"""
frontier = self.frontier
parent = self.parent
g = self.g
# NOTE Probably dont need this ""'nominated' ensures this function doesn't get called multiple times before update"
if not frontier.empty():
# current node is immediately in the closed list
currentP, current = frontier.get_test() # update current to be the item with best priority
self.current = current
self.currentF = self.f[current]
self.currentP = currentP
# LOG nomination
MyLogger.add_message("{} nominated {} with priority {}".format(self.id, self.current, self.currentP), __name__, "DEBUG")
#print("Terminal, current: ",self.start, current)
if self.visualize:
# Update plot with visuals
# self.animateCurrent.update_clean(current)
AnimateV2.add_line("nominated_{}".format(self.id), current[0], current[1], 'ko', zorder=15, draw_clean=True, markersize=10)
# AnimateV2.update()
# #Early exit if we reached our goal
# if current == self.goal:
# return parent, g, current
# return true if nominated
return True
# if no nomination
return False
def reprioritize(self):
"""Reprioritize the open set / frontier when heuristics change.
For now, re-calculate each node's priority and put it into the queue.
This is easier than searching and updating every key
"""
# Modify frontier structure
for o in self.frontier.entry_table.copy():
# make sure goal is not empty
if self.goal:
# priority changes as a result of destination change.
# Hence both fmin and pmin need to be updated
priority = self.fCosts(self, self.g, o)
self.frontier.put(o, priority)
self.fmin_heap.put(o, self.f[o])
    def update(self):
        """Close the nominated node and expand the open list with its neighbors.

        Pops the best node (`current`) off the frontier, removes it from the
        gmin/rmin/fmin bookkeeping heaps, then relaxes every neighbor: a
        neighbor is (re-)opened when it is unseen or reachable more cheaply.
        For each opened neighbor the g-cost, parent and priority are stored
        in their respective dictionaries/heaps.  Also updates the expansion
        counter, the optional visualization, and clears the nomination flag.
        """
        frontier = self.frontier
        parent = self.parent
        g = self.g
        # current = self.current
        # frontier.delete(current)
        # popping the frontier both returns and removes the best entry
        priority, current = frontier.get()
        # Update gmin,rmin,fmin heaps
        self.gmin_heap.delete(current)
        self.rmin_heap.delete(current)
        self.fmin_heap.delete(current)
        # self.closedList[current] = currentP
        # Delete current node from frontier
        #expand current node and check neighbors
        # Update stats logging
        GenericSearch.update_expanded_nodes()
        # visualize the recently closed node
        if self.visualize:
            # self.animateClosed.update(current)
            # Delete nominated node drawing, add it as closed
            AnimateV2.add_line("closed_{}".format(self.id), current[0], current[1], 'mo', markersize=10)
            # AnimateV2.update()
            # hide the nominate node temporarily
            AnimateV2.add_line("nominated_{}".format(self.id), current[0], current[1], 'ko', alpha=0, zorder=15, draw_clean=True, markersize=10)
            # Show recently closed node with a white x (the best nominated node over all)
            # AnimateV2.add_line("recent_closed_{}".format(self.id), current[0], current[1], 'wx', alpha=1, zorder=16, draw_clean=True, markersize=10)
            # AnimateV2.update()
        # refresh neighbors
        self.currentNeighs = []
        # Add new nodes to frontier
        # NOTE(review): loop variable `next` shadows the builtin; left as-is here
        for next in self.graph.neighbors(current):
            g_next = g[current] + self.graph.cost(current, next)
            # if next location not in CLOSED LIST or its cost is less than before
            if next not in g or g_next < g[next]:
                # Store neighbor's gcost
                g[next] = g_next
                # Calculate priority and time it
                # Call priority function to get next node's priority (TODO: rename fcosts -> priority!)
                start = timer()
                priority = self.fCosts(self, g, next)
                end = timer()
                MyTimer.add_time("fcosts_time", end - start )
                # Update frontier and parent list
                frontier.put(next, priority)
                parent[next] = current
                # update gmin,rmin, fmin heaps
                # rmin tracks the g-value of the parent (boundary radius)
                self.gmin_heap.put(next,g_next)
                self.rmin_heap.put(next, g[current])
                # NOTE(review): reads self.f[next] right after calling
                # self.fCosts — assumes fCosts stores f[next] as a side
                # effect; confirm against the fCosts implementation
                self.fmin_heap.put(next, self.f[next])
                # track current neighbors
                self.currentNeighs.append(next)
        if self.visualize:
            # self.animateNeighbors.update(next)
            # Add neighbors
            x = []
            y = []
            for n in self.frontier.elements:
                x.append(n[0])
                y.append(n[1])
            AnimateV2.add_line("neighbors_{}".format(self.id), x,y, 'cD', markersize=7, draw_clean=True)
            # Hide the best nominated node now
            # AnimateV2.add_line("recent_closed_{}".format(self.id), current[0], current[1], 'wx', alpha=0, draw_clean=True, markersize=10)
        # if self.visualize:
        #     AnimateV2.update()
        # consider deleting fvalues to save memory, since it's only relevant to openset
        del self.f[current]
        self.nominated = False
        MyLogger.add_message("{} updated!".format(self.id), __name__, "DEBUG")
def boundary_nodes(self):
r = []
for f in self.frontier.elements:
if self.parent[f] is not None:
r.append(self.parent[f])
return r
@property
def rmin(self):
"""Additional function to estimate min radius.
Returns:
minR (float): the minimum radius of the 'boundary nodes', i.e. closed set of nodes
with a child in the open set
"""
# minR = None
# for f in self.frontier.elements:
# # when starting off the parent is none
# if self.parent[f] is None:
# minR = 0
# else:
# # check the gcost of boundary nodes
# r = self.g[self.parent[f]]
# if minR is None or r < minR:
# minR = r
# if minR is None:
# minR = 0
# return minR
try:
value, _ = self.rmin_heap.get_test()
return value
except Exception as e_:
return np.inf
@property
def fmin(self):
"""Returns the minimum f-value from the open list
"""
# try:
# return min((self.f[k[2]] for k in self.frontier.elements))
# except Exception as e_:
# # FIX: Figure out whether this should be 0 or np.inf
# # if open set is empty
# # if self.frontier.elements:
# # return 0
# # else:
# # return np.inf
# return self.lmin
# # return 0
try:
value, _ = self.fmin_heap.get_test()
return value
except Exception as e_:
# when frontier is empty, there is nothing else to explore!
return np.inf
@property
def gmin(self):
"""Returns the minimum g-value from the open list
"""
# return min((self.g[k] for k in self.frontier.elements))
# return min(self.g[k[2]] for k in self.frontier.elements)
try:
value, _ = self.gmin_heap.get_test()
return value
except Exception as e_:
return np.inf
@property
def pmin(self):
"""Returns the minimum p-value from the open list
"""
# return min(self.frontier.elements.values())
try:
priority, _ = self.frontier.get_test()
return priority
except:
return np.inf
    @property
    def lmin(self):
        """float: the currently declared shortest-path distance for this component."""
        return self._lmin
    @lmin.setter
    def lmin(self, val):
        """Declare a new shortest-path distance for this component.

        Stored verbatim; no validation is performed.
        """
        self._lmin = val
    def __add__(self, other):
        """Merges two `GenericSearch` objects into a single object using the '+' operator

        Merges the individual id, g list, parent list, and goals.  Overlapping
        nodes keep whichever side reached them more cheaply; frontier nodes
        that fall inside the other side's closed set are dropped or kept
        depending on which g-cost is smaller.

        Parameters:
            self (GenericSearch): Class object, left side of the '+' sign
            other (GenericSearch): Class object, right side of the '+' sign
        Example:
            mergedGS = gs1 + gs2
        Returns:
            mergedGS (GenericSearch): class object
        Todo:
            - Refactor this method
        """
        ## Initialize some merged structures
        mergedF = PriorityQueueHeap() # merged frontier #tricky, priorityQueue or priorityQueueHeap?
        mergedG = {} # merged closed list/ cost_so_far
        mergedP = {} # merged parent list
        mergedID = []
        mergedGoal = {}
        ## Merge the terminal indices
        # TODO. PROB DONT NEED list
        # mergedID.extend(list(self.id))
        # mergedID.extend(list(other.id))
        mergedID.extend(self.id)
        mergedID.extend(other.id)
        mergedID = tuple(mergedID)
        ## Update destinations based on indices
        # mergedGoal = {ndx: term for ndx, term in self.goal.items() if not set((ndx,)).issubset(set(mergedID))}
        ## Make sure all components have been updated. See issue #10
        # self.update()
        # other.update()
        # goals of either side are kept unless the goal index now belongs
        # to the merged component itself
        joint_goal_set = set(self.goal).union(set(other.goal))-set(mergedID)
        for k in joint_goal_set:
            if k in self.goal:
                mergedGoal.update({k: self.goal[k]})
            elif k in other.goal:
                mergedGoal.update({k: other.goal[k]})
        ## Create a GenericSearch Object to return
        mergedGS = GenericSearch(self.graph, self.fCosts, 'Temp', mergedF, goal=mergedGoal, visualize=cfg.Animation.visualize)
        ## new variables for ease: Linked lists, frontier, and g costs
        p1 = self.parent
        p2 = other.parent
        f1 = self.frontier.elements
        f2 = other.frontier.elements
        g1 = self.g
        g2 = other.g
        # c1/c2 are the closed sets: everything with a g-cost minus the frontier
        c1 = set(g1) - set(f1)
        c2 = set(g2) - set(f2)
        ## Get Merged g and p structures, need to handle overlapping of lists
        setG = set(g1).union(set(g2)) # works; handle c/o overlapping
        # closedSet = (set(g1) - set(f1)).union(set(g2) - set(f2))  # original case, working
        closedSet = c1.union(c2)
        for next in setG:
            # for overlapping nodes, retain the one with least g.
            # else, just keep them according tot he component
            if next in g1 and next in g2:
                if g1[next] < g2[next]:
                    g_next = g1[next]
                    current = p1[next]
                else:
                    g_next = g2[next]
                    current = p2[next]
            elif next in g1:
                g_next = g1[next]
                current = p1[next]
            elif next in g2:
                g_next = g2[next]
                current = p2[next]
            mergedG[next] = g_next
            mergedP[next] = current
        # get merged f and update merged p structures
        # setF = set(f1).union(set(f2)) - closedSet # original case, working
        setF = set(f1).union(set(f2)) # works; handle c/o overlapping
        # DO I NEED TO SET THE G COSTS HERE TOO?.
        # NO need to set current?
        for next in setF:
            if next in f1 and next in f2:
                if g1[next] < g2[next]:
                    priority = f1[next][0]
                    current = p1[next]
                    # g_next = g1[next]
                    # Add fcosts, since they are not guaranteed to be the same as priorities
                    mergedGS.f[next] = self.f[next]
                else:
                    priority = f2[next][0]
                    current = p2[next]
                    mergedGS.f[next] = other.f[next]
                    # g_next = g2[next]
            elif next in f1:
                if next in c2 and g2[next] < g1[next]:
                    # If node is closer to terminal 2, DONT retain node in frontier of 1
                    continue
                elif next in c2 and g2[next] >= g1[next]:
                    # If node is closer to terminal 1, DO retain node in frontier and remove
                    # from the closed list
                    priority = f1[next][0]
                    current = p1[next]
                    # # DID I FORGET THIS?????????????????????
                    # g_next = g1[next]
                    mergedGS.f[next] = self.f[next]
                else:
                    # node doesn't overlap with c2, so retain in frontier
                    priority = f1[next][0]
                    current = p1[next]
                    # g_next = g1[next]
                    mergedGS.f[next] = self.f[next]
            elif next in f2:
                if next in c1 and g1[next] < g2[next]:
                    continue
                elif next in c1 and g1[next] >= g2[next]:
                    priority = f2[next][0]
                    current = p2[next]
                    mergedGS.f[next] = other.f[next]
                else:
                    priority = f2[next][0]
                    current = p2[next]
                    mergedGS.f[next] = other.f[next]
            # Try updating the F costs here explicitly if mergedGoal is not empty
            # if mergedGoal:
            ################ COMMENT AS NEEDED ##############
            # NOTE(review): this recomputation makes the branch-assigned
            # ``priority`` values above dead stores; only the
            # ``mergedGS.f[next]`` assignments in the branches survive
            priority = self.fCosts(mergedGS, mergedG, next)
            mergedF.put(next, priority)
            # Also update the gmin, rmin, fmin heaps
            mergedGS.gmin_heap.put(next, mergedG[next])
            if current is None:
                mergedGS.rmin_heap.put(next, 0)
            else:
                mergedGS.rmin_heap.put(next, mergedG[current])
            mergedGS.fmin_heap.put(next, mergedGS.f[next])
            mergedP[next] = current
            # mergedG[next] = g_next
        # removed start="Temp" from frontier and related heaps
        mergedGS.frontier.delete('Temp')
        mergedGS.fmin_heap.delete("Temp")
        mergedGS.gmin_heap.delete("Temp")
        mergedGS.rmin_heap.delete("Temp")
        # set closed list, valued by currentF
        # Set current node and currentF
        # if self.currentF < other.currentF:
        #     mergedGS.currentF = self.currentF
        #     mergedGS.current = self.current
        # else:
        #     mergedGS.currentF = other.currentF
        #     mergedGS.current = other.current
        ## modify generic search object values
        mergedGS.g = mergedG
        mergedGS.parent = mergedP
        mergedGS.id = mergedID
        mergedGS.frontier = mergedF
        # if g1[self.current] < g2[other.current]
        # if self.currentF < other.currentF:
        #     mergedGS.current = self.current
        #     mergedGS.currentF = self.currentF
        # else:
        #     mergedGS.current = other.current
        #     mergedGS.currentF = other.currentF
        # mergedGS.nominated = True
        # TODO also initialize closed List..but you really dont need to
        # mergedGS.closedList =
        # Set lmin? NOTE don't!!!!
        # mergedGS.lmin = min(self.lmin, other.lmin)
        # mergedGS.lmin = max(self.lmin, other.lmin)
        ## Update plot colors
        if cfg.Animation.visualize:
            # mergedGS.animateClosed.order=10
            # mergedGS.animateClosed.update(np.array(list(closedSet)).T.tolist())    # remember to pass a structure of size 2
            # mergedGS.animateNeighbors.update(np.array(list(setF)).T.tolist())      # remember to pass a structure of size 2
            # Delete previous drawings
            AnimateV2.delete("nominated_{}".format(self.id))
            AnimateV2.delete("closed_{}".format(self.id))
            AnimateV2.delete("neighbors_{}".format(self.id))
            AnimateV2.delete("nominated_{}".format(other.id))
            AnimateV2.delete("closed_{}".format(other.id))
            AnimateV2.delete("neighbors_{}".format(other.id))
            # Draw new merged components
            dataClosedSet = np.array(list(closedSet)).T.tolist()
            dataSetF = np.array(list(setF)).T.tolist()
            AnimateV2.add_line("closed_{}".format(mergedGS.id), dataClosedSet[0], dataClosedSet[1], 'mo', markersize=10)
            AnimateV2.add_line("neighbors_{}".format(mergedGS.id), dataSetF[0], dataSetF[1], 'cD', markersize=7, draw_clean=True)
        return mergedGS
| 38.72063 | 152 | 0.564917 |
import matplotlib.pyplot as plt
import numpy as np
from timeit import default_timer as timer
import steinerpy.config as cfg
from steinerpy.library.animation import AnimateV2
from steinerpy.library.logger import MyLogger
from steinerpy.library.misc.utils import MyTimer
from steinerpy.library.search.search_utils import PriorityQueue, PriorityQueueHeap
from steinerpy.library.search.search_utils import DoublyLinkedList
class Search:
total_expanded_nodes = 0
def __init__(self, graph, start, goal, frontierType, fCostsFunc, id):
self.graph = graph
self.start = start
self.goal = goal
self.current = None
self.frontier = frontierType
self.frontier.put(start, 0)
self.g = {}
self.g[start] = 0
self.parent = {}
self.parent[start] = None
self.fCosts = fCostsFunc
kerSize=10, sleep=0, order=2)
# # self.animateClosed = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=10, sleep=0, order=-1)
# # self.animateNeighbors = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=5, sleep=0, order=-1)
# # self.animatePath = Animate(number=1, xlim=xlim, ylim=ylim, gridSize=1,linewidth=5, markerType='o', markerSize=5, sleep=0.000, order=-1)
# pass
    @property
    def goal(self):
        """dict: destination terminals for this component, keyed by terminal index.

        Populated by the setter below, which normalizes non-dict inputs.
        """
        return self._goal
@goal.setter
def goal(self, goal):
if isinstance(goal, dict):
self._goal = goal
else:
self._goal = {}
try:
for ndx, k in enumerate(goal):
if not set((ndx,)).issubset(set(self.id)):
self._goal[ndx] = k
except Exception as err:
print(err)
    def nominate(self):
        """Peek the best open node and record it as this component's nominee.

        Does not pop the frontier: ``get_test`` only inspects the best entry,
        so the node is actually closed later by ``update``.  Stores the
        nominee and its f-value/priority on the instance, logs the choice,
        and optionally draws it.

        Returns:
            bool: True when a node was nominated, False when the frontier
            is empty.
        """
        frontier = self.frontier
        # NOTE(review): ``parent`` and ``g`` are bound but never used below
        parent = self.parent
        g = self.g
        # NOTE Probably dont need this ""'nominated' ensures this function doesn't get called multiple times before update"
        if not frontier.empty():
            # current node is immediately in the closed list
            currentP, current = frontier.get_test()     # update current to be the item with best priority
            self.current = current
            self.currentF = self.f[current]
            self.currentP = currentP
            # LOG nomination
            MyLogger.add_message("{} nominated {} with priority {}".format(self.id, self.current, self.currentP), __name__, "DEBUG")
            #print("Terminal, current: ",self.start, current)
            if self.visualize:
                # Update plot with visuals
                # self.animateCurrent.update_clean(current)
                AnimateV2.add_line("nominated_{}".format(self.id), current[0], current[1], 'ko', zorder=15, draw_clean=True, markersize=10)
                # AnimateV2.update()
            # #Early exit if we reached our goal
            # if current == self.goal:
            #     return parent, g, current
            # return true if nominated
            return True
        # if no nomination
        return False
def reprioritize(self):
# Modify frontier structure
for o in self.frontier.entry_table.copy():
# make sure goal is not empty
if self.goal:
# priority changes as a result of destination change.
# Hence both fmin and pmin need to be updated
priority = self.fCosts(self, self.g, o)
self.frontier.put(o, priority)
self.fmin_heap.put(o, self.f[o])
def update(self):
frontier = self.frontier
parent = self.parent
g = self.g
# current = self.current
# frontier.delete(current)
priority, current = frontier.get()
# Update gmin,rmin,fmin heaps
self.gmin_heap.delete(current)
self.rmin_heap.delete(current)
self.fmin_heap.delete(current)
# self.closedList[current] = currentP
# Delete current node from frontier
#expand current node and check neighbors
# Update stats logging
GenericSearch.update_expanded_nodes()
# visualize the recently closed node
if self.visualize:
# self.animateClosed.update(current)
# Delete nominated node drawing, add it as closed
AnimateV2.add_line("closed_{}".format(self.id), current[0], current[1], 'mo', markersize=10)
# AnimateV2.update()
# hide the nominate node temporarily
AnimateV2.add_line("nominated_{}".format(self.id), current[0], current[1], 'ko', alpha=0, zorder=15, draw_clean=True, markersize=10)
# Show recently closed node with a white x (the best nominated node over all)
# AnimateV2.add_line("recent_closed_{}".format(self.id), current[0], current[1], 'wx', alpha=1, zorder=16, draw_clean=True, markersize=10)
# AnimateV2.update()
# refresh neighbors
self.currentNeighs = []
# Add new nodes to frontier
for next in self.graph.neighbors(current):
g_next = g[current] + self.graph.cost(current, next)
# if next location not in CLOSED LIST or its cost is less than before
if next not in g or g_next < g[next]:
# Store neighbor's gcost
g[next] = g_next
# Calculate priority and time it
# Call priority function to get next node's priority (TODO: rename fcosts -> priority!)
start = timer()
priority = self.fCosts(self, g, next)
end = timer()
MyTimer.add_time("fcosts_time", end - start )
# Update frontier and parent list
frontier.put(next, priority)
parent[next] = current
# update gmin,rmin, fmin heaps
self.gmin_heap.put(next,g_next)
self.rmin_heap.put(next, g[current])
self.fmin_heap.put(next, self.f[next])
# track current neighbors
self.currentNeighs.append(next)
if self.visualize:
# self.animateNeighbors.update(next)
# Add neighbors
x = []
y = []
for n in self.frontier.elements:
x.append(n[0])
y.append(n[1])
AnimateV2.add_line("neighbors_{}".format(self.id), x,y, 'cD', markersize=7, draw_clean=True)
# Hide the best nominated node now
# AnimateV2.add_line("recent_closed_{}".format(self.id), current[0], current[1], 'wx', alpha=0, draw_clean=True, markersize=10)
# if self.visualize:
# AnimateV2.update()
# consider deleting fvalues to save memory, since it's only relevant to openset
del self.f[current]
self.nominated = False
MyLogger.add_message("{} updated!".format(self.id), __name__, "DEBUG")
def boundary_nodes(self):
r = []
for f in self.frontier.elements:
if self.parent[f] is not None:
r.append(self.parent[f])
return r
@property
def rmin(self):
# minR = None
# for f in self.frontier.elements:
# # when starting off the parent is none
# if self.parent[f] is None:
# minR = 0
# else:
# # check the gcost of boundary nodes
# r = self.g[self.parent[f]]
# if minR is None or r < minR:
# minR = r
# if minR is None:
# minR = 0
# return minR
try:
value, _ = self.rmin_heap.get_test()
return value
except Exception as e_:
return np.inf
@property
def fmin(self):
# try:
# return min((self.f[k[2]] for k in self.frontier.elements))
# except Exception as e_:
# # FIX: Figure out whether this should be 0 or np.inf
# # if open set is empty
# # if self.frontier.elements:
# # return 0
# # else:
# # return np.inf
# return self.lmin
# # return 0
try:
value, _ = self.fmin_heap.get_test()
return value
except Exception as e_:
# when frontier is empty, there is nothing else to explore!
return np.inf
@property
def gmin(self):
# return min((self.g[k] for k in self.frontier.elements))
# return min(self.g[k[2]] for k in self.frontier.elements)
try:
value, _ = self.gmin_heap.get_test()
return value
except Exception as e_:
return np.inf
@property
def pmin(self):
# return min(self.frontier.elements.values())
try:
priority, _ = self.frontier.get_test()
return priority
except:
return np.inf
@property
def lmin(self):
return self._lmin
@lmin.setter
def lmin(self, val):
self._lmin = val
def __add__(self, other):
## Initialize some merged structures
mergedF = PriorityQueueHeap() # merged frontier #tricky, priorityQueue or priorityQueueHeap?
mergedG = {} # merged closed list/ cost_so_far
mergedP = {} # merged parent list
mergedID = []
mergedGoal = {}
## Merge the terminal indices
# TODO. PROB DONT NEED list
# mergedID.extend(list(self.id))
# mergedID.extend(list(other.id))
mergedID.extend(self.id)
mergedID.extend(other.id)
mergedID = tuple(mergedID)
## Update destinations based on indices
# mergedGoal = {ndx: term for ndx, term in self.goal.items() if not set((ndx,)).issubset(set(mergedID))}
## Make sure all components have been updated. See issue #10
# self.update()
# other.update()
joint_goal_set = set(self.goal).union(set(other.goal))-set(mergedID)
for k in joint_goal_set:
if k in self.goal:
mergedGoal.update({k: self.goal[k]})
elif k in other.goal:
mergedGoal.update({k: other.goal[k]})
## Create a GenericSearch Object to return
mergedGS = GenericSearch(self.graph, self.fCosts, 'Temp', mergedF, goal=mergedGoal, visualize=cfg.Animation.visualize)
## new variables for ease: Linked lists, frontier, and g costs
p1 = self.parent
p2 = other.parent
f1 = self.frontier.elements
f2 = other.frontier.elements
g1 = self.g
g2 = other.g
c1 = set(g1) - set(f1)
c2 = set(g2) - set(f2)
## Get Merged g and p structures, need to handle overlapping of lists
setG = set(g1).union(set(g2)) # works; handle c/o overlapping
# closedSet = (set(g1) - set(f1)).union(set(g2) - set(f2)) # original case, working
closedSet = c1.union(c2)
for next in setG:
# for overlapping nodes, retain the one with least g.
# else, just keep them according tot he component
if next in g1 and next in g2:
if g1[next] < g2[next]:
g_next = g1[next]
current = p1[next]
else:
g_next = g2[next]
current = p2[next]
elif next in g1:
g_next = g1[next]
current = p1[next]
elif next in g2:
g_next = g2[next]
current = p2[next]
mergedG[next] = g_next
mergedP[next] = current
# get merged f and update merged p structures
# setF = set(f1).union(set(f2)) - closedSet # original case, working
setF = set(f1).union(set(f2)) # works; handle c/o overlapping
# DO I NEED TO SET THE G COSTS HERE TOO?.
# NO need to set current?
for next in setF:
if next in f1 and next in f2:
if g1[next] < g2[next]:
priority = f1[next][0]
current = p1[next]
# g_next = g1[next]
# Add fcosts, since they are not guaranteed to be the same as priorities
mergedGS.f[next] = self.f[next]
else:
priority = f2[next][0]
current = p2[next]
mergedGS.f[next] = other.f[next]
# g_next = g2[next]
elif next in f1:
if next in c2 and g2[next] < g1[next]:
# If node is closer to terminal 2, DONT retain node in frontier of 1
continue
elif next in c2 and g2[next] >= g1[next]:
# If node is closer to terminal 1, DO retain node in frontier and remove
# from the closed list
priority = f1[next][0]
current = p1[next]
# # DID I FORGET THIS?????????????????????
# g_next = g1[next]
mergedGS.f[next] = self.f[next]
else:
# node doesn't overlap with c2, so retain in frontier
priority = f1[next][0]
current = p1[next]
# g_next = g1[next]
mergedGS.f[next] = self.f[next]
elif next in f2:
if next in c1 and g1[next] < g2[next]:
continue
elif next in c1 and g1[next] >= g2[next]:
priority = f2[next][0]
current = p2[next]
mergedGS.f[next] = other.f[next]
else:
priority = f2[next][0]
current = p2[next]
mergedGS.f[next] = other.f[next]
# Try updating the F costs here explicitly if mergedGoal is not empty
# if mergedGoal:
################ COMMENT AS NEEDED ##############
priority = self.fCosts(mergedGS, mergedG, next)
mergedF.put(next, priority)
# Also update the gmin, rmin, fmin heaps
mergedGS.gmin_heap.put(next, mergedG[next])
if current is None:
mergedGS.rmin_heap.put(next, 0)
else:
mergedGS.rmin_heap.put(next, mergedG[current])
mergedGS.fmin_heap.put(next, mergedGS.f[next])
mergedP[next] = current
# mergedG[next] = g_next
# removed start="Temp" from frontier and related heaps
mergedGS.frontier.delete('Temp')
mergedGS.fmin_heap.delete("Temp")
mergedGS.gmin_heap.delete("Temp")
mergedGS.rmin_heap.delete("Temp")
# set closed list, valued by currentF
# Set current node and currentF
# if self.currentF < other.currentF:
# mergedGS.currentF = self.currentF
# mergedGS.current = self.current
# else:
# mergedGS.currentF = other.currentF
# mergedGS.current = other.current
## modify generic search object values
mergedGS.g = mergedG
mergedGS.parent = mergedP
mergedGS.id = mergedID
mergedGS.frontier = mergedF
# if g1[self.current] < g2[other.current]
# if self.currentF < other.currentF:
# mergedGS.current = self.current
# mergedGS.currentF = self.currentF
# else:
# mergedGS.current = other.current
# mergedGS.currentF = other.currentF
# mergedGS.nominated = True
# TODO also initialize closed List..but you really dont need to
# mergedGS.closedList =
# Set lmin? NOTE don't!!!!
# mergedGS.lmin = min(self.lmin, other.lmin)
# mergedGS.lmin = max(self.lmin, other.lmin)
## Update plot colors
if cfg.Animation.visualize:
# mergedGS.animateClosed.order=10
# mergedGS.animateClosed.update(np.array(list(closedSet)).T.tolist()) # remember to pass a structure of size 2
# mergedGS.animateNeighbors.update(np.array(list(setF)).T.tolist()) # remember to pass a structure of size 2
# Delete previous drawings
AnimateV2.delete("nominated_{}".format(self.id))
AnimateV2.delete("closed_{}".format(self.id))
AnimateV2.delete("neighbors_{}".format(self.id))
AnimateV2.delete("nominated_{}".format(other.id))
AnimateV2.delete("closed_{}".format(other.id))
AnimateV2.delete("neighbors_{}".format(other.id))
# Draw new merged components
dataClosedSet = np.array(list(closedSet)).T.tolist()
dataSetF = np.array(list(setF)).T.tolist()
AnimateV2.add_line("closed_{}".format(mergedGS.id), dataClosedSet[0], dataClosedSet[1], 'mo', markersize=10)
AnimateV2.add_line("neighbors_{}".format(mergedGS.id), dataSetF[0], dataSetF[1], 'cD', markersize=7, draw_clean=True)
return mergedGS
| true | true |
f7facf8ff238e891071ee8086fe2a9503f5fa45b | 407 | py | Python | attr_and_methods/topics/topic.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
] | null | null | null | attr_and_methods/topics/topic.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
] | null | null | null | attr_and_methods/topics/topic.py | borko81/SU_OOP_2021 | 8c38682bd4a2b032ca09f85b0a579be152223a59 | [
"MIT"
class Topic:
    """A named topic bound to the folder where its material is stored."""

    def __init__(self, id: int, topic: str, storage_folder: str):
        self.id = id
        self.topic = topic
        self.storage_folder = storage_folder

    def edit(self, new_topic, new_storage_folder):
        """Replace both the topic name and its storage folder."""
        self.topic, self.storage_folder = new_topic, new_storage_folder

    def __repr__(self) -> str:
        return f"Topic {self.id}: {self.topic} is {self.storage_folder}"
    def __init__(self, id: int, topic: str, storage_folder: str):
        # identifier, display name, and backing folder for this topic
        self.id = id
        self.topic = topic
        self.storage_folder = storage_folder
    def edit(self, new_topic: str, new_storage_folder: str) -> None:
        """Rename the topic and repoint its storage folder."""
        self.topic = new_topic
        self.storage_folder = new_storage_folder
def __repr__(self) ->str:
return f"Topic {self.id}: {self.topic} is {self.storage_folder}" | true | true |
f7fad01f0aa06487ca6bf418a103cee17ac8b369 | 751 | py | Python | subsampler.py | Puraneshi/pocket2vec | bb6f00f8e218ba032d8a802ac0a2900720227202 | [
"MIT"
] | 1 | 2019-04-24T16:32:53.000Z | 2019-04-24T16:32:53.000Z | subsampler.py | Puraneshi/pocket2vec | bb6f00f8e218ba032d8a802ac0a2900720227202 | [
"MIT"
] | null | null | null | subsampler.py | Puraneshi/pocket2vec | bb6f00f8e218ba032d8a802ac0a2900720227202 | [
"MIT"
def multiIter(lista, n):
    """Yield a sliding context window around every element of ``lista``.

    For each position ``i`` the window spans up to ``n`` elements on each
    side, truncated at the list boundaries.

    :param lista: a sequence of items (originally a list of strings)
    :param n: how many neighbors to include before and after each element
    :return: yields ``[context, index]`` where ``context`` is a tuple of the
        element plus its (up to) n neighbors on each side, and ``index`` is
        the position of the element inside ``context``
    """
    for i in range(len(lista)):
        lo = max(0, i - n)                      # left edge, clipped at 0
        context = tuple(lista[lo:i + n + 1])    # slicing clips the right edge
        # i - lo == number of elements before the center, i.e. its index
        yield [context, i - lo]
def multiIter(lista, n):
    """Generate ``[context, index]`` pairs for every element of ``lista``.

    ``context`` is a tuple holding the element together with up to ``n``
    neighbors on each side (truncated at the ends of the list); ``index``
    is the element's position within that tuple.
    """
    length = len(lista)
    for center in range(length):
        window = []
        count_before = 0
        for offset in range(n, 0, -1):          # left neighbors, nearest last
            pos = center - offset
            if pos >= 0:
                window.append(lista[pos])
                count_before += 1
        window.append(lista[center])
        for offset in range(1, n + 1):          # right neighbors, in order
            pos = center + offset
            if pos < length:
                window.append(lista[pos])
        yield [tuple(window), count_before]
| true | true |
f7fad116fc0a57afb735c4ff40fbccc6103fac17 | 12,931 | py | Python | RobustGaussianFittingLibrary/cWrapper.py | ARSadri/RobustGaussianFittingLibrary | e8f273f0fb363f3092628ff295758d45595b1f19 | [
"MIT"
] | 1 | 2021-05-31T09:35:59.000Z | 2021-05-31T09:35:59.000Z | RobustGaussianFittingLibrary/cWrapper.py | ARSadri/RobustGaussianFittingLibrary | e8f273f0fb363f3092628ff295758d45595b1f19 | [
"MIT"
] | 33 | 2020-09-22T13:05:17.000Z | 2022-01-07T09:44:18.000Z | RobustGaussianFittingLibrary/cWrapper.py | ARSadri/RobustGaussianFittingLibrary | e8f273f0fb363f3092628ff295758d45595b1f19 | [
"MIT"
] | null | null | null | """
------------------------------------------------------
This file is part of RobustGaussianFittingLibrary,
a free library WITHOUT ANY WARRANTY
Copyright: 2017-2020 LaTrobe University Melbourne,
2019-2020 Deutsches Elektronen-Synchrotron
------------------------------------------------------
"""
""" A ctypes wrapper for the Robust Gaussian Fitting Library C file
Nothing to look for in this file, its just a wrapper
"""
import numpy as np
import ctypes
import os
import fnmatch
# Locate the compiled shared library: first look one level above the package
# (installed layout), then fall back to the package directory itself
# (Makefile builds place RGFLib*.so next to this file).
dir_path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + '..' + os.path.sep
fileNameTemplate = 'RGFLib*.so'
flist = fnmatch.filter(os.listdir(dir_path + os.path.sep), fileNameTemplate)
if not flist:    # for those who use make
    dir_path = os.path.dirname(os.path.realpath(__file__))
    flist = fnmatch.filter(os.listdir(dir_path + os.path.sep), fileNameTemplate)
if not flist:
    # fail with a clear message instead of an opaque IndexError on flist[0]
    raise ImportError('could not find ' + fileNameTemplate + ' near '
                      + dir_path + '; build the C library first.')
RGFCLib = ctypes.cdll.LoadLibrary(dir_path + os.path.sep + flist[0])
'''
void islandRemoval(unsigned char* inMask, unsigned char* labelMap,
unsigned int X, unsigned int Y,
unsigned int islandSizeThreshold)
'''
RGFCLib.islandRemoval.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32]
'''
void indexCheck(float* inTensor, float* targetLoc, unsigned int X, unsigned int Y, unsigned int Z)
'''
RGFCLib.indexCheck.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_int, ctypes.c_float]
'''
float MSSE(float *error, unsigned int vecLen, float MSSE_LAMBDA, unsigned int k, float minimumResidual)
'''
RGFCLib.MSSE.restype = ctypes.c_float
RGFCLib.MSSE.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_float, ctypes.c_uint, ctypes.c_float ]
'''
float MSSEWeighted(float* error, float* weights, unsigned int vecLen,
float MSSE_LAMBDA, unsigned int k, float minimumResidual)
'''
RGFCLib.MSSEWeighted.restype = ctypes.c_float
RGFCLib.MSSEWeighted.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_float, ctypes.c_uint, ctypes.c_float ]
'''
void fitValue(float* inVec,
float* inWeights,
float* modelParams,
float theta,
unsigned int inN,
float topkPerc,
float botkPerc,
float MSSE_LAMBDA,
unsigned char optIters,
float minimumResidual,
unsigned int downSampledSize);
'''
RGFCLib.fitValue.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_uint8,
ctypes.c_float,
ctypes.c_int]
'''
void fitValue2Skewed(float* inVec,
float* inWeights,
float* modelParams,
float theta,
unsigned int inN,
float topkPerc,
float botkPerc,
float MSSE_LAMBDA,
unsigned char optIters,
float minimumResidual,
unsigned int downSampledSize);
'''
RGFCLib.fitValue2Skewed.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_uint8,
ctypes.c_float,
ctypes.c_int]
'''
void medianOfFits(float *vec, float *weights,
float *modelParams, float theta, unsigned int N,
float topkMin, float topkMax, unsigned int numSamples, float samplePerc,
float MSSE_LAMBDA, unsigned char optIters, float minimumResidual)
'''
RGFCLib.medianOfFits.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float, ctypes.c_uint32, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
'''
void RobustAlgebraicLineFitting(float* x, float* y, float* mP, unsigned int N,
float topKthPerc, float bottomKthPerc, float MSSE_LAMBDA)
'''
RGFCLib.RobustAlgebraicLineFitting.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_float, ctypes.c_float, ctypes.c_float]
'''
void RobustAlgebraicLineFittingTensor(float *inTensorX, float *inTensorY,
float *modelParamsMap, unsigned int N,
unsigned int X, unsigned int Y,
float topKthPerc, float bottomKthPerc, float MSSE_LAMBDA)
'''
RGFCLib.RobustAlgebraicLineFittingTensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
ctypes.c_float, ctypes.c_float, ctypes.c_float]
'''
void fitValueTensor(float* inTensor, float* inWeights, float* modelParamsMap,
unsigned int N, unsigned int X, unsigned int Y,
float topkPerc, float botkPerc, float MSSE_LAMBDA,
unsigned char optIters, float minimumResidual,
unsigned int downSampledSize);
'''
RGFCLib.fitValueTensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float, ctypes.c_float,
ctypes.c_uint8, ctypes.c_float, ctypes.c_uint32]
'''
void RobustAlgebraicPlaneFitting(float* x, float* y, float* z, float* mP, float* mP_Init,
unsigned int N, float topkPerc, float botkPerc,
float MSSE_LAMBDA, unsigned char stretch2CornersOpt,
float minimumResidual, unsigned char optIters)
'''
RGFCLib.RobustAlgebraicPlaneFitting.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float, ctypes.c_uint8]
'''
void RSGImage(float* inImage, unsigned char* inMask, float *modelParamsMap,
unsigned int winX, unsigned int winY,
unsigned int X, unsigned int Y,
float topkPerc, float botkPerc,
float MSSE_LAMBDA, unsigned char stretch2CornersOpt,
unsigned char numModelParams, unsigned char optIters,
float minimumResidual)
'''
RGFCLib.RSGImage.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8,
ctypes.c_uint8, ctypes.c_uint8, ctypes.c_float]
'''
void RSGImage_by_Image_Tensor(float* inImage_Tensor, unsigned char* inMask_Tensor,
float *model_mean, float *model_std,
unsigned int winX, unsigned int winY,
unsigned int N, unsigned int X, unsigned int Y,
float topkPerc, float botkPerc,
float MSSE_LAMBDA, unsigned char stretch2CornersOpt,
unsigned char numModelParams, unsigned char optIters,
float minimumResidual)
'''
RGFCLib.RSGImage_by_Image_Tensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8,
ctypes.c_uint8, ctypes.c_uint8, ctypes.c_float]
'''
void fitBackgroundRadially(float* inImage, unsigned char* inMask,
float* modelParamsMap, float* vecMP,
unsigned int minRes,
unsigned int maxRes,
unsigned int shellWidth,
unsigned int stride,
unsigned int X_Cent,
unsigned int Y_Cent,
unsigned char includeCenter,
unsigned int finiteSampleBias,
unsigned int X, unsigned int Y,
float topkPerc, float botkPerc,
float MSSE_LAMBDA,
unsigned char optIters,
float minimumResidual);
'''
RGFCLib.fitBackgroundRadially.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint8, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
'''
void fitBackgroundCylindrically(float* inTensor,
unsigned char* inMask,
float* modelParamsMap,
float* vecMP,
unsigned int minRes,
unsigned int maxRes,
unsigned int shellWidth,
unsigned char includeCenter,
unsigned int finiteSampleBias,
unsigned int N,
unsigned int X,
unsigned int Y,
float topkPerc,
float botkPerc,
float MSSE_LAMBDA,
unsigned char optIters,
float minimumResidual)
'''
RGFCLib.fitBackgroundCylindrically.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint8, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
| 45.53169 | 103 | 0.603434 |
import numpy as np
import ctypes
import os
import fnmatch
# Locate and load the compiled RGFLib shared library.  It is searched for
# first in the parent directory of this file, then (as a fallback) in the
# directory of this file itself.
dir_path = os.path.dirname(os.path.realpath(__file__)) + os.path.sep + '..' + os.path.sep
fileNameTemplate = 'RGFLib*.so'
flist = fnmatch.filter(os.listdir(dir_path + os.path.sep), fileNameTemplate)
if not flist:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    flist = fnmatch.filter(os.listdir(dir_path + os.path.sep), fileNameTemplate)
if not flist:
    # Fail with a clear diagnostic instead of an opaque IndexError on flist[0].
    raise OSError('Could not find the RGFLib shared library (pattern %r) in %r '
                  'or its parent directory.' % (fileNameTemplate, dir_path))
RGFCLib = ctypes.cdll.LoadLibrary(dir_path + os.path.sep + flist[0])
# ctypes argument/return type declarations for the RGFLib shared library,
# so ctypes can validate and convert Python arguments on each call.
# ndpointer entries require C-contiguous numpy arrays of the given dtype.
# islandRemoval: two uint8 C-contiguous arrays plus three uint32 scalars.
RGFCLib.islandRemoval.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32]
# indexCheck: two float32 arrays, two ints and a float scalar.
RGFCLib.indexCheck.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_int, ctypes.c_float]
# MSSE returns a C float; without restype ctypes would assume int.
RGFCLib.MSSE.restype = ctypes.c_float
RGFCLib.MSSE.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_float, ctypes.c_uint, ctypes.c_float ]
# MSSEWeighted: like MSSE with an extra float32 weights array; returns float.
RGFCLib.MSSEWeighted.restype = ctypes.c_float
RGFCLib.MSSEWeighted.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_float, ctypes.c_uint, ctypes.c_float ]
# fitValue: three float32 arrays followed by scalar fit parameters.
RGFCLib.fitValue.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_uint8,
ctypes.c_float,
ctypes.c_int]
# fitValue2Skewed: same signature layout as fitValue.
RGFCLib.fitValue2Skewed.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_uint8,
ctypes.c_float,
ctypes.c_int]
# medianOfFits: vec, weights and modelParams arrays plus scalar parameters.
RGFCLib.medianOfFits.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_float, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float, ctypes.c_uint32, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
# RobustAlgebraicLineFitting: x, y and model-parameter arrays plus scalars.
RGFCLib.RobustAlgebraicLineFitting.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_float, ctypes.c_float, ctypes.c_float]
# RobustAlgebraicLineFittingTensor: tensor variant with N, X, Y extents.
RGFCLib.RobustAlgebraicLineFittingTensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
ctypes.c_float, ctypes.c_float, ctypes.c_float]
# fitValueTensor: input tensor, weights and model-parameter map plus scalars.
RGFCLib.fitValueTensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float, ctypes.c_float,
ctypes.c_uint8, ctypes.c_float, ctypes.c_uint32]
# RobustAlgebraicPlaneFitting: x, y, z samples, output and initial parameters.
RGFCLib.RobustAlgebraicPlaneFitting.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_int, ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float, ctypes.c_uint8]
# RSGImage: float32 image, uint8 mask, float32 parameter map, sizes and options.
RGFCLib.RSGImage.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8,
ctypes.c_uint8, ctypes.c_uint8, ctypes.c_float]
# RSGImage_by_Image_Tensor: batched variant with two float32 output arrays.
RGFCLib.RSGImage_by_Image_Tensor.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8,
ctypes.c_uint8, ctypes.c_uint8, ctypes.c_float]
# fitBackgroundRadially: image, mask, parameter map and shell parameter vector.
RGFCLib.fitBackgroundRadially.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint8, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
# fitBackgroundCylindrically: tensor variant of the radial background fit.
RGFCLib.fitBackgroundCylindrically.argtypes = [
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_uint8, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint8, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_float, ctypes.c_float,
ctypes.c_float, ctypes.c_uint8, ctypes.c_float]
| true | true |
f7fad11a710e7aa83dd989480644e9233166a080 | 32,251 | py | Python | plexapi/myplex.py | adamredfern92/PlexDownload | 003086b8e12c47636ea9ec7785b25123812d1c7f | [
"MIT"
] | 3 | 2018-01-26T04:53:13.000Z | 2019-10-16T03:48:08.000Z | plexapi/myplex.py | adamredfern92/PlexDownload | 003086b8e12c47636ea9ec7785b25123812d1c7f | [
"MIT"
] | null | null | null | plexapi/myplex.py | adamredfern92/PlexDownload | 003086b8e12c47636ea9ec7785b25123812d1c7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import copy
import requests
import time
from requests.status_codes import _codes as codes
from plexapi import BASE_HEADERS, CONFIG, TIMEOUT
from plexapi import log, logfilter, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound
from plexapi.client import PlexClient
from plexapi.compat import ElementTree, quote
from plexapi.library import LibrarySection
from plexapi.server import PlexServer
class MyPlexAccount(PlexObject):
    """ MyPlex account and profile information. This object represents the data found Account on
        the myplex.tv servers at the url https://plex.tv/users/account. You may create this object
        directly by passing in your username & password (or token). There is also a convenience
        method provided at :class:`~plexapi.server.PlexServer.myPlexAccount()` which will create
        and return this object.

        Parameters:
            username (str): Your MyPlex username.
            password (str): Your MyPlex password.
            session (requests.Session, optional): Use your own session object if you want to
                cache the http responses from PMS
            timeout (int): timeout in seconds on initial connect to myplex (default config.TIMEOUT).

        Attributes:
            SIGNIN (str): 'https://my.plexapp.com/users/sign_in.xml'
            key (str): 'https://plex.tv/users/account'
            authenticationToken (str): Unknown.
            certificateVersion (str): Unknown.
            cloudSyncDevice (str): Unknown.
            email (str): Your current Plex email address.
            entitlements (List<str>): List of devices you're allowed to use with this account.
            guest (bool): Unknown.
            home (bool): Unknown.
            homeSize (int): Unknown.
            id (str): Your Plex account ID.
            locale (str): Your Plex locale
            mailing_list_status (str): Your current mailing list status.
            maxHomeSize (int): Unknown.
            queueEmail (str): Email address to add items to your `Watch Later` queue.
            queueUid (str): Unknown.
            restricted (bool): Unknown.
            roles: (List<str>) List of account roles. Plexpass membership listed here.
            scrobbleTypes (str): Description
            secure (bool): Description
            subscriptionActive (bool): True if your subscription is active.
            subscriptionFeatures: (List<str>) List of features allowed on your subscription.
            subscriptionPlan (str): Name of subscription plan.
            subscriptionStatus (str): String representation of `subscriptionActive`.
            thumb (str): URL of your account thumbnail.
            title (str): Unknown. - Looks like an alias for `username`.
            username (str): Your account username.
            uuid (str): Unknown.
            _token (str): Token used to access this client.
            _session (obj): Requests session object used to access this client.
    """
    FRIENDINVITE = 'https://plex.tv/api/servers/{machineId}/shared_servers'                     # post with data
    FRIENDSERVERS = 'https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}'         # put with data
    PLEXSERVERS = 'https://plex.tv/api/servers/{machineId}'                                     # get
    FRIENDUPDATE = 'https://plex.tv/api/friends/{userId}'                                       # put with args, delete
    REMOVEINVITE = 'https://plex.tv/api/invites/requested/{userId}?friend=0&server=1&home=0'    # delete
    REQUESTED = 'https://plex.tv/api/invites/requested'                                         # get
    REQUESTS = 'https://plex.tv/api/invites/requests'                                           # get
    SIGNIN = 'https://my.plexapp.com/users/sign_in.xml'                                         # get with auth
    WEBHOOKS = 'https://plex.tv/api/v2/user/webhooks'                                           # get, post with data
    # Key may someday switch to the following url. For now the current value works.
    # https://plex.tv/api/v2/user?X-Plex-Token={token}&X-Plex-Client-Identifier={clientId}
    key = 'https://plex.tv/users/account'

    def __init__(self, username=None, password=None, token=None, session=None, timeout=None):
        self._token = token
        self._session = session or requests.Session()
        data, initpath = self._signin(username, password, timeout)
        super(MyPlexAccount, self).__init__(self, data, initpath)

    def _signin(self, username, password, timeout):
        """ Authenticate with plex.tv; a token takes priority over credentials. """
        if self._token:
            return self.query(self.key), self.key
        username = username or CONFIG.get('auth.myplex_username')
        password = password or CONFIG.get('auth.myplex_password')
        data = self.query(self.SIGNIN, method=self._session.post, auth=(username, password), timeout=timeout)
        return data, self.SIGNIN

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self._token = logfilter.add_secret(data.attrib.get('authenticationToken'))
        self._webhooks = []
        self.authenticationToken = self._token
        self.certificateVersion = data.attrib.get('certificateVersion')
        self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
        self.email = data.attrib.get('email')
        self.guest = utils.cast(bool, data.attrib.get('guest'))
        self.home = utils.cast(bool, data.attrib.get('home'))
        self.homeSize = utils.cast(int, data.attrib.get('homeSize'))
        self.id = data.attrib.get('id')
        self.locale = data.attrib.get('locale')
        self.mailing_list_status = data.attrib.get('mailing_list_status')
        self.maxHomeSize = utils.cast(int, data.attrib.get('maxHomeSize'))
        self.queueEmail = data.attrib.get('queueEmail')
        self.queueUid = data.attrib.get('queueUid')
        self.restricted = utils.cast(bool, data.attrib.get('restricted'))
        self.scrobbleTypes = data.attrib.get('scrobbleTypes')
        self.secure = utils.cast(bool, data.attrib.get('secure'))
        self.thumb = data.attrib.get('thumb')
        self.title = data.attrib.get('title')
        self.username = data.attrib.get('username')
        self.uuid = data.attrib.get('uuid')
        # TODO: Fetch missing MyPlexAccount attributes
        self.subscriptionActive = None      # renamed on server
        self.subscriptionStatus = None      # renamed on server
        self.subscriptionPlan = None        # renmaed on server
        self.subscriptionFeatures = None    # renamed on server
        self.roles = None
        self.entitlements = None

    def device(self, name):
        """ Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.

            Parameters:
                name (str): Name to match against.
        """
        for device in self.devices():
            if device.name.lower() == name.lower():
                return device
        raise NotFound('Unable to find device %s' % name)

    def devices(self):
        """ Returns a list of all :class:`~plexapi.myplex.MyPlexDevice` objects connected to the server. """
        data = self.query(MyPlexDevice.key)
        return [MyPlexDevice(self, elem) for elem in data]

    def query(self, url, method=None, headers=None, timeout=None, **kwargs):
        """ Perform an authenticated request against plex.tv and return the
            parsed XML root (or None on an empty body). Raises BadRequest on
            any non-200/201 response.
        """
        method = method or self._session.get
        delim = '&' if '?' in url else '?'
        url = '%s%sX-Plex-Token=%s' % (url, delim, self._token)
        timeout = timeout or TIMEOUT
        log.debug('%s %s %s', method.__name__.upper(), url, kwargs.get('json', ''))
        allheaders = BASE_HEADERS.copy()
        allheaders.update(headers or {})
        response = method(url, headers=allheaders, timeout=timeout, **kwargs)
        if response.status_code not in (200, 201):
            # codes.get() returns None for status codes it does not know about;
            # without a default the error path itself raised a TypeError.
            codename = codes.get(response.status_code, ['unknown'])[0]
            errtext = response.text.replace('\n', ' ')
            # logging's warn() is deprecated in favor of warning().
            log.warning('BadRequest (%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
            raise BadRequest('(%s) %s; %s' % (response.status_code, codename, errtext))
        data = response.text.encode('utf8')
        return ElementTree.fromstring(data) if data.strip() else None

    def resource(self, name):
        """ Returns the :class:`~plexapi.myplex.MyPlexResource` that matches the name specified.

            Parameters:
                name (str): Name to match against.
        """
        for resource in self.resources():
            if resource.name.lower() == name.lower():
                return resource
        raise NotFound('Unable to find resource %s' % name)

    def resources(self):
        """ Returns a list of all :class:`~plexapi.myplex.MyPlexResource` objects connected to the server. """
        data = self.query(MyPlexResource.key)
        return [MyPlexResource(self, elem) for elem in data]

    def inviteFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
            allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
        """ Share library content with the specified user.

            Parameters:
                user (str): MyPlexUser, username, email of the user to be added.
                server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
                sections ([Section]): Library sections, names or ids to be shared (default None shares all sections).
                allowSync (Bool): Set True to allow user to sync content.
                allowCameraUpload (Bool): Set True to allow user to upload photos.
                allowChannels (Bool): Set True to allow user to utilize installed channels.
                filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
                    values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
                filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
                    values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
                filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
                    ex: {'label':['foo']}
        """
        username = user.username if isinstance(user, MyPlexUser) else user
        machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
        sectionIds = self._getSectionIds(machineId, sections)
        params = {
            'server_id': machineId,
            'shared_server': {'library_section_ids': sectionIds, 'invited_email': username},
            'sharing_settings': {
                'allowSync': ('1' if allowSync else '0'),
                'allowCameraUpload': ('1' if allowCameraUpload else '0'),
                'allowChannels': ('1' if allowChannels else '0'),
                'filterMovies': self._filterDictToStr(filterMovies or {}),
                'filterTelevision': self._filterDictToStr(filterTelevision or {}),
                'filterMusic': self._filterDictToStr(filterMusic or {}),
            },
        }
        headers = {'Content-Type': 'application/json'}
        url = self.FRIENDINVITE.format(machineId=machineId)
        return self.query(url, self._session.post, json=params, headers=headers)

    def removeFriend(self, user):
        """ Remove the specified user from all sharing.

            Parameters:
                user (str): MyPlexUser, username, email of the user to be added.
        """
        user = self.user(user)
        # Confirmed friends are removed through the friends endpoint; pending
        # invites are revoked through the invites endpoint.
        url = self.FRIENDUPDATE if user.friend else self.REMOVEINVITE
        url = url.format(userId=user.id)
        return self.query(url, self._session.delete)

    def updateFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
            allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
        """ Update the specified user's share settings.

            Parameters:
                user (str): MyPlexUser, username, email of the user to be added.
                server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
                sections: ([Section]): Library sections, names or ids to be shared (default None shares all sections).
                allowSync (Bool): Set True to allow user to sync content.
                allowCameraUpload (Bool): Set True to allow user to upload photos.
                allowChannels (Bool): Set True to allow user to utilize installed channels.
                filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
                    values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
                filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
                    values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
                filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
                    ex: {'label':['foo']}
        """
        # Update friend servers
        user = self.user(user)
        machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
        serverId = [s for s in user.servers if s.machineIdentifier == machineId][0].id
        sectionIds = self._getSectionIds(machineId, sections)
        params = {'server_id': machineId, 'shared_server': {'library_section_ids': sectionIds}}
        headers = {'Content-Type': 'application/json'}
        url = self.FRIENDSERVERS.format(machineId=machineId, serverId=serverId)
        response_servers = self.query(url, self._session.put, json=params, headers=headers)
        # Update friend filters
        url = self.FRIENDUPDATE.format(userId=user.id)
        url += '?allowSync=%s' % ('1' if allowSync else '0')
        url += '&allowCameraUpload=%s' % ('1' if allowCameraUpload else '0')
        url += '&allowChannels=%s' % ('1' if allowChannels else '0')
        url += '&filterMovies=%s' % quote(self._filterDictToStr(filterMovies or {}))
        url += '&filterTelevision=%s' % quote(self._filterDictToStr(filterTelevision or {}))
        url += '&filterMusic=%s' % quote(self._filterDictToStr(filterMusic or {}))
        response_filters = self.query(url, self._session.put)
        return response_servers, response_filters

    def user(self, username):
        """ Returns the :class:`~myplex.MyPlexUser` that matches the email or username specified.

            Parameters:
                username (str): Username, email or id of the user to return.
        """
        for user in self.users():
            # Home/managed users may be missing a username or email; guard
            # against None so lookup by the other identifiers still works.
            candidates = ((user.username or '').lower(), (user.email or '').lower(), str(user.id))
            if username.lower() in candidates:
                return user
        raise NotFound('Unable to find user %s' % username)

    def users(self):
        """ Returns a list of all :class:`~plexapi.myplex.MyPlexUser` objects connected to your account.
            This includes both friends and pending invites. You can reference the user.friend to
            distinguish between the two.
        """
        friends = [MyPlexUser(self, elem) for elem in self.query(MyPlexUser.key)]
        requested = [MyPlexUser(self, elem, self.REQUESTED) for elem in self.query(self.REQUESTED)]
        return friends + requested

    def _getSectionIds(self, server, sections):
        """ Converts a list of section objects or names to sectionIds needed for library sharing. """
        if not sections:
            return []
        # Get a list of all section ids for looking up each section.
        allSectionIds = {}
        machineIdentifier = server.machineIdentifier if isinstance(server, PlexServer) else server
        url = self.PLEXSERVERS.replace('{machineId}', machineIdentifier)
        data = self.query(url, self._session.get)
        for elem in data[0]:
            # Index by id, title and key so callers may pass any of them.
            allSectionIds[elem.attrib.get('id', '').lower()] = elem.attrib.get('id')
            allSectionIds[elem.attrib.get('title', '').lower()] = elem.attrib.get('id')
            allSectionIds[elem.attrib.get('key', '').lower()] = elem.attrib.get('id')
        log.info(allSectionIds)
        # Convert passed in section items to section ids from above lookup
        sectionIds = []
        for section in sections:
            sectionKey = section.key if isinstance(section, LibrarySection) else section
            sectionIds.append(allSectionIds[sectionKey.lower()])
        return sectionIds

    def _filterDictToStr(self, filterDict):
        """ Converts friend filters to a string representation for transport. """
        values = []
        for key, vals in filterDict.items():
            if key not in ('contentRating', 'label'):
                raise BadRequest('Unknown filter key: %s', key)
            values.append('%s=%s' % (key, '%2C'.join(vals)))
        return '|'.join(values)

    def addWebhook(self, url):
        """ Add a single webhook url to this account's webhook list. """
        # copy _webhooks and append url
        urls = self._webhooks[:] + [url]
        return self.setWebhooks(urls)

    def deleteWebhook(self, url):
        """ Remove a single webhook url from this account's webhook list. """
        urls = copy.copy(self._webhooks)
        if url not in urls:
            raise BadRequest('Webhook does not exist: %s' % url)
        urls.remove(url)
        return self.setWebhooks(urls)

    def setWebhooks(self, urls):
        """ Replace this account's webhook list with the given urls. """
        log.info('Setting webhooks: %s' % urls)
        data = self.query(self.WEBHOOKS, self._session.post, data={'urls[]': urls})
        self._webhooks = self.listAttrs(data, 'url', etag='webhook')
        return self._webhooks

    def webhooks(self):
        """ Fetch and return this account's current webhook urls. """
        data = self.query(self.WEBHOOKS)
        self._webhooks = self.listAttrs(data, 'url', etag='webhook')
        return self._webhooks
class MyPlexUser(PlexObject):
    """ A user known to your MyPlex account but not signed in here: friends,
        pending invites and linked accounts. NOTE: Do not confuse this with
        :class:`~myplex.MyPlexAccount`, which represents your own account.
        The raw xml for the data presented here can be found at:
        https://plex.tv/api/users/

        Attributes:
            TAG (str): 'User'
            key (str): 'https://plex.tv/api/users/'
            allowCameraUpload (bool): True if this user can upload images.
            allowChannels (bool): True if this user has access to channels.
            allowSync (bool): True if this user can sync.
            email (str): User's email address (user@gmail.com).
            filterAll (str): Unknown.
            filterMovies (str): Unknown.
            filterMusic (str): Unknown.
            filterPhotos (str): Unknown.
            filterTelevision (str): Unknown.
            home (bool): Unknown.
            id (int): User's Plex account ID.
            protected (False): Unknown (possibly SSL enabled?).
            recommendationsPlaylistId (str): Unknown.
            restricted (str): Unknown.
            thumb (str): Link to the user's avatar.
            title (str): Seems to be an alias for username.
            username (str): User's username.
    """
    TAG = 'User'
    key = 'https://plex.tv/api/users/'

    def _loadData(self, data):
        """ Populate this object's attributes from the <User> XML element. """
        self._data = data
        attrib = data.attrib.get
        # Entries loaded from `key` are confirmed friends; anything else
        # (e.g. the pending-invites endpoint) is still an outstanding invite.
        self.friend = self._initpath == self.key
        self.allowCameraUpload = utils.cast(bool, attrib('allowCameraUpload'))
        self.allowChannels = utils.cast(bool, attrib('allowChannels'))
        self.allowSync = utils.cast(bool, attrib('allowSync'))
        self.email = attrib('email')
        self.filterAll = attrib('filterAll')
        self.filterMovies = attrib('filterMovies')
        self.filterMusic = attrib('filterMusic')
        self.filterPhotos = attrib('filterPhotos')
        self.filterTelevision = attrib('filterTelevision')
        self.home = utils.cast(bool, attrib('home'))
        self.id = utils.cast(int, attrib('id'))
        self.protected = utils.cast(bool, attrib('protected'))
        self.recommendationsPlaylistId = attrib('recommendationsPlaylistId')
        self.restricted = attrib('restricted')
        self.thumb = attrib('thumb')
        self.title = attrib('title')
        self.username = attrib('username')
        self.servers = self.findItems(data, MyPlexServerShare)
class MyPlexServerShare(PlexObject):
    """ A single server referenced from a user's share list; used for
        library sharing.
    """
    TAG = 'Server'

    def _loadData(self, data):
        """ Populate this object's attributes from the <Server> XML element. """
        self._data = data
        attrib = data.attrib
        self.machineIdentifier = attrib.get('machineIdentifier')
        self.name = attrib.get('name')
        self.lastSeenAt = utils.toDatetime(attrib.get('lastSeenAt'))
        # The remaining attributes are all plain integers in the XML.
        for intattr in ('id', 'serverId', 'numLibraries', 'allLibraries', 'owned', 'pending'):
            setattr(self, intattr, utils.cast(int, attrib.get(intattr)))
class MyPlexResource(PlexObject):
    """ A resource connected to your Plex account that can provide content:
        Plex Media Servers, iPhone or Android clients, etc. The raw xml for
        the data presented here can be found at:
        https://plex.tv/api/resources?includeHttps=1

        Attributes:
            TAG (str): 'Device'
            key (str): 'https://plex.tv/api/resources?includeHttps=1'
            accessToken (str): This resource's access token.
            clientIdentifier (str): Unique ID for this resource.
            connections (list): List of :class:`~myplex.ResourceConnection` objects
                for this resource.
            createdAt (datetime): Timestamp this resource first connected to your server.
            device (str): Best guess on the type of device this is (PS, iPhone, Linux, etc).
            home (bool): Unknown
            lastSeenAt (datetime): Timestamp this resource last connected.
            name (str): Descriptive name of this resource.
            owned (bool): True if this resource is one of your own (you logged into it).
            platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
            platformVersion (str): Version of the platform.
            presence (bool): True if the resource is online
            product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
            productVersion (str): Version of the product.
            provides (str): List of services this resource provides (client, server,
                player, pubsub-player, etc.)
            synced (bool): Unknown (possibly True if the resource has synced content?)
    """
    TAG = 'Device'
    key = 'https://plex.tv/api/resources?includeHttps=1'

    def _loadData(self, data):
        """ Populate this object's attributes from the <Device> XML element. """
        self._data = data
        attrib = data.attrib.get
        self.name = attrib('name')
        self.accessToken = logfilter.add_secret(attrib('accessToken'))
        self.product = attrib('product')
        self.productVersion = attrib('productVersion')
        self.platform = attrib('platform')
        self.platformVersion = attrib('platformVersion')
        self.device = attrib('device')
        self.clientIdentifier = attrib('clientIdentifier')
        self.createdAt = utils.toDatetime(attrib('createdAt'))
        self.lastSeenAt = utils.toDatetime(attrib('lastSeenAt'))
        self.provides = attrib('provides')
        self.owned = utils.cast(bool, attrib('owned'))
        self.home = utils.cast(bool, attrib('home'))
        self.synced = utils.cast(bool, attrib('synced'))
        self.presence = utils.cast(bool, attrib('presence'))
        self.connections = self.findItems(data, ResourceConnection)

    def connect(self, ssl=None, timeout=None):
        """ Returns a new :class:`~server.PlexServer` object. Often times there is more than
            one address specified for a server or client. This function will prioritize local
            connections before remote and HTTPS before HTTP. After trying to connect to all
            available addresses for this resource and assuming at least one connection was
            successful, the PlexServer object is built and returned.

            Parameters:
                ssl (optional): Set True to only connect to HTTPS connections. Set False to
                    only connect to HTTP connections. Set None (default) to connect to any
                    HTTP or HTTPS connection.

            Raises:
                :class:`~plexapi.exceptions.NotFound`: When unable to connect to any addresses for this resource.
        """
        # Local addresses first (sorted() is stable, so existing relative
        # order is otherwise preserved); https candidates are listed before
        # http candidates below.
        ordered = sorted(self.connections, key=lambda conn: conn.local, reverse=True)

        def usable(conn):
            # For resources we do not own, only non-local addresses are checked.
            return self.owned or not conn.local

        https = [conn.uri for conn in ordered if usable(conn)]
        http = [conn.httpuri for conn in ordered if usable(conn)]
        # Force ssl, no ssl, or any (default).
        if ssl is True:
            candidates = https
        elif ssl is False:
            candidates = http
        else:
            candidates = https + http
        # Probe every candidate address in parallel; keep the first one
        # (in candidate order) that responds.
        listargs = [[PlexServer, url, self.accessToken, timeout] for url in candidates]
        log.info('Testing %s resource connections..', len(listargs))
        results = utils.threaded(_connect, listargs)
        return _chooseConnection('Resource', self.name, results)
class ResourceConnection(PlexObject):
""" Represents a Resource Connection object found within the
:class:`~myplex.MyPlexResource` objects.
Attributes:
TAG (str): 'Connection'
address (str): Local IP address
httpuri (str): Full local address
local (bool): True if local
port (int): 32400
protocol (str): HTTP or HTTPS
uri (str): External address
"""
TAG = 'Connection'
def _loadData(self, data):
self._data = data
self.protocol = data.attrib.get('protocol')
self.address = data.attrib.get('address')
self.port = utils.cast(int, data.attrib.get('port'))
self.uri = data.attrib.get('uri')
self.local = utils.cast(bool, data.attrib.get('local'))
self.httpuri = 'http://%s:%s' % (self.address, self.port)
class MyPlexDevice(PlexObject):
""" This object represents resources connected to your Plex server that provide
playback ability from your Plex Server, iPhone or Android clients, Plex Web,
this API, etc. The raw xml for the data presented here can be found at:
https://plex.tv/devices.xml
Attributes:
TAG (str): 'Device'
key (str): 'https://plex.tv/devices.xml'
clientIdentifier (str): Unique ID for this resource.
connections (list): List of connection URIs for the device.
device (str): Best guess on the type of device this is (Linux, iPad, AFTB, etc).
id (str): MyPlex ID of the device.
model (str): Model of the device (bueller, Linux, x86_64, etc.)
name (str): Hostname of the device.
platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
platformVersion (str): Version of the platform.
product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
productVersion (string): Version of the product.
provides (str): List of services this resource provides (client, controller,
sync-target, player, pubsub-player).
publicAddress (str): Public IP address.
screenDensity (str): Unknown
screenResolution (str): Screen resolution (750x1334, 1242x2208, etc.)
token (str): Plex authentication token for the device.
vendor (str): Device vendor (ubuntu, etc).
version (str): Unknown (1, 2, 1.3.3.3148-b38628e, 1.3.15, etc.)
"""
TAG = 'Device'
key = 'https://plex.tv/devices.xml'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.publicAddress = data.attrib.get('publicAddress')
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.model = data.attrib.get('model')
self.vendor = data.attrib.get('vendor')
self.provides = data.attrib.get('provides')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.version = data.attrib.get('version')
self.id = data.attrib.get('id')
self.token = logfilter.add_secret(data.attrib.get('token'))
self.screenResolution = data.attrib.get('screenResolution')
self.screenDensity = data.attrib.get('screenDensity')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.connections = [connection.attrib.get('uri') for connection in data.iter('Connection')]
def connect(self, timeout=None):
""" Returns a new :class:`~plexapi.client.PlexClient` object. Sometimes there is more than
one address specified for a server or client. After trying to connect to all available
addresses for this client and assuming at least one connection was successful, the
PlexClient object is built and returned.
Raises:
:class:`~plexapi.exceptions.NotFound`: When unable to connect to any addresses for this device.
"""
listargs = [[PlexClient, url, self.token, timeout] for url in self.connections]
log.info('Testing %s device connections..', len(listargs))
results = utils.threaded(_connect, listargs)
_chooseConnection('Device', self.name, results)
def delete(self):
""" Remove this device from your account. """
key = 'https://plex.tv/devices/%s.xml' % self.id
self._server.query(key, self._server._session.delete)
def _connect(cls, url, token, timeout, results, i):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime)
def _chooseConnection(ctype, name, results):
""" Chooses the first (best) connection from the given _connect results. """
# At this point we have a list of result tuples containing (url, token, PlexServer, runtime)
# or (url, token, None, runtime) in the case a connection could not be established.
for url, token, result, runtime in results:
okerr = 'OK' if result else 'ERR'
log.info('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token)
results = [r[2] for r in results if r and r[2] is not None]
if results:
log.info('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token)
return results[0]
raise NotFound('Unable to connect to %s: %s' % (ctype.lower(), name))
| 52.957307 | 119 | 0.632383 |
import copy
import requests
import time
from requests.status_codes import _codes as codes
from plexapi import BASE_HEADERS, CONFIG, TIMEOUT
from plexapi import log, logfilter, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest, NotFound
from plexapi.client import PlexClient
from plexapi.compat import ElementTree, quote
from plexapi.library import LibrarySection
from plexapi.server import PlexServer
class MyPlexAccount(PlexObject):
FRIENDINVITE = 'https://plex.tv/api/servers/{machineId}/shared_servers'
FRIENDSERVERS = 'https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}'
PLEXSERVERS = 'https://plex.tv/api/servers/{machineId}'
FRIENDUPDATE = 'https://plex.tv/api/friends/{userId}'
REMOVEINVITE = 'https://plex.tv/api/invites/requested/{userId}?friend=0&server=1&home=0'
REQUESTED = 'https://plex.tv/api/invites/requested'
REQUESTS = 'https://plex.tv/api/invites/requests'
SIGNIN = 'https://my.plexapp.com/users/sign_in.xml'
WEBHOOKS = 'https://plex.tv/api/v2/user/webhooks'
key = 'https://plex.tv/users/account'
def __init__(self, username=None, password=None, token=None, session=None, timeout=None):
self._token = token
self._session = session or requests.Session()
data, initpath = self._signin(username, password, timeout)
super(MyPlexAccount, self).__init__(self, data, initpath)
def _signin(self, username, password, timeout):
if self._token:
return self.query(self.key), self.key
username = username or CONFIG.get('auth.myplex_username')
password = password or CONFIG.get('auth.myplex_password')
data = self.query(self.SIGNIN, method=self._session.post, auth=(username, password), timeout=timeout)
return data, self.SIGNIN
def _loadData(self, data):
self._data = data
self._token = logfilter.add_secret(data.attrib.get('authenticationToken'))
self._webhooks = []
self.authenticationToken = self._token
self.certificateVersion = data.attrib.get('certificateVersion')
self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
self.email = data.attrib.get('email')
self.guest = utils.cast(bool, data.attrib.get('guest'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.homeSize = utils.cast(int, data.attrib.get('homeSize'))
self.id = data.attrib.get('id')
self.locale = data.attrib.get('locale')
self.mailing_list_status = data.attrib.get('mailing_list_status')
self.maxHomeSize = utils.cast(int, data.attrib.get('maxHomeSize'))
self.queueEmail = data.attrib.get('queueEmail')
self.queueUid = data.attrib.get('queueUid')
self.restricted = utils.cast(bool, data.attrib.get('restricted'))
self.scrobbleTypes = data.attrib.get('scrobbleTypes')
self.secure = utils.cast(bool, data.attrib.get('secure'))
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.username = data.attrib.get('username')
self.uuid = data.attrib.get('uuid')
self.subscriptionActive = None
self.subscriptionStatus = None
self.subscriptionPlan = None
self.subscriptionFeatures = None
self.roles = None
self.entitlements = None
def device(self, name):
for device in self.devices():
if device.name.lower() == name.lower():
return device
raise NotFound('Unable to find device %s' % name)
def devices(self):
data = self.query(MyPlexDevice.key)
return [MyPlexDevice(self, elem) for elem in data]
def query(self, url, method=None, headers=None, timeout=None, **kwargs):
method = method or self._session.get
delim = '&' if '?' in url else '?'
url = '%s%sX-Plex-Token=%s' % (url, delim, self._token)
timeout = timeout or TIMEOUT
log.debug('%s %s %s', method.__name__.upper(), url, kwargs.get('json', ''))
allheaders = BASE_HEADERS.copy()
allheaders.update(headers or {})
response = method(url, headers=allheaders, timeout=timeout, **kwargs)
if response.status_code not in (200, 201):
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
log.warn('BadRequest (%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
raise BadRequest('(%s) %s; %s' % (response.status_code, codename, errtext))
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
def resource(self, name):
for resource in self.resources():
if resource.name.lower() == name.lower():
return resource
raise NotFound('Unable to find resource %s' % name)
def resources(self):
data = self.query(MyPlexResource.key)
return [MyPlexResource(self, elem) for elem in data]
def inviteFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
username = user.username if isinstance(user, MyPlexUser) else user
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(machineId, sections)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_email': username},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
headers = {'Content-Type': 'application/json'}
url = self.FRIENDINVITE.format(machineId=machineId)
return self.query(url, self._session.post, json=params, headers=headers)
def removeFriend(self, user):
user = self.user(user)
url = self.FRIENDUPDATE if user.friend else self.REMOVEINVITE
url = url.format(userId=user.id)
return self.query(url, self._session.delete)
def updateFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
user = self.user(user)
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
serverId = [s for s in user.servers if s.machineIdentifier == machineId][0].id
sectionIds = self._getSectionIds(machineId, sections)
params = {'server_id': machineId, 'shared_server': {'library_section_ids': sectionIds}}
headers = {'Content-Type': 'application/json'}
url = self.FRIENDSERVERS.format(machineId=machineId, serverId=serverId)
response_servers = self.query(url, self._session.put, json=params, headers=headers)
url = self.FRIENDUPDATE.format(userId=user.id)
url += '?allowSync=%s' % ('1' if allowSync else '0')
url += '&allowCameraUpload=%s' % ('1' if allowCameraUpload else '0')
url += '&allowChannels=%s' % ('1' if allowChannels else '0')
url += '&filterMovies=%s' % quote(self._filterDictToStr(filterMovies or {}))
url += '&filterTelevision=%s' % quote(self._filterDictToStr(filterTelevision or {}))
url += '&filterMusic=%s' % quote(self._filterDictToStr(filterMusic or {}))
response_filters = self.query(url, self._session.put)
return response_servers, response_filters
def user(self, username):
for user in self.users():
if username.lower() in (user.username.lower(), user.email.lower(), str(user.id)):
return user
raise NotFound('Unable to find user %s' % username)
def users(self):
friends = [MyPlexUser(self, elem) for elem in self.query(MyPlexUser.key)]
requested = [MyPlexUser(self, elem, self.REQUESTED) for elem in self.query(self.REQUESTED)]
return friends + requested
def _getSectionIds(self, server, sections):
if not sections: return []
allSectionIds = {}
machineIdentifier = server.machineIdentifier if isinstance(server, PlexServer) else server
url = self.PLEXSERVERS.replace('{machineId}', machineIdentifier)
data = self.query(url, self._session.get)
for elem in data[0]:
allSectionIds[elem.attrib.get('id', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('title', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('key', '').lower()] = elem.attrib.get('id')
log.info(allSectionIds)
sectionIds = []
for section in sections:
sectionKey = section.key if isinstance(section, LibrarySection) else section
sectionIds.append(allSectionIds[sectionKey.lower()])
return sectionIds
def _filterDictToStr(self, filterDict):
values = []
for key, vals in filterDict.items():
if key not in ('contentRating', 'label'):
raise BadRequest('Unknown filter key: %s', key)
values.append('%s=%s' % (key, '%2C'.join(vals)))
return '|'.join(values)
def addWebhook(self, url):
urls = self._webhooks[:] + [url]
return self.setWebhooks(urls)
def deleteWebhook(self, url):
urls = copy.copy(self._webhooks)
if url not in urls:
raise BadRequest('Webhook does not exist: %s' % url)
urls.remove(url)
return self.setWebhooks(urls)
def setWebhooks(self, urls):
log.info('Setting webhooks: %s' % urls)
data = self.query(self.WEBHOOKS, self._session.post, data={'urls[]': urls})
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
def webhooks(self):
data = self.query(self.WEBHOOKS)
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
class MyPlexUser(PlexObject):
TAG = 'User'
key = 'https://plex.tv/api/users/'
def _loadData(self, data):
self._data = data
self.friend = self._initpath == self.key
self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))
self.allowChannels = utils.cast(bool, data.attrib.get('allowChannels'))
self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
self.email = data.attrib.get('email')
self.filterAll = data.attrib.get('filterAll')
self.filterMovies = data.attrib.get('filterMovies')
self.filterMusic = data.attrib.get('filterMusic')
self.filterPhotos = data.attrib.get('filterPhotos')
self.filterTelevision = data.attrib.get('filterTelevision')
self.home = utils.cast(bool, data.attrib.get('home'))
self.id = utils.cast(int, data.attrib.get('id'))
self.protected = utils.cast(bool, data.attrib.get('protected'))
self.recommendationsPlaylistId = data.attrib.get('recommendationsPlaylistId')
self.restricted = data.attrib.get('restricted')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.username = data.attrib.get('username')
self.servers = self.findItems(data, MyPlexServerShare)
class MyPlexServerShare(PlexObject):
TAG = 'Server'
def _loadData(self, data):
self._data = data
self.id = utils.cast(int, data.attrib.get('id'))
self.serverId = utils.cast(int, data.attrib.get('serverId'))
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.name = data.attrib.get('name')
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
self.allLibraries = utils.cast(int, data.attrib.get('allLibraries'))
self.owned = utils.cast(int, data.attrib.get('owned'))
self.pending = utils.cast(int, data.attrib.get('pending'))
class MyPlexResource(PlexObject):
TAG = 'Device'
key = 'https://plex.tv/api/resources?includeHttps=1'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.accessToken = logfilter.add_secret(data.attrib.get('accessToken'))
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.provides = data.attrib.get('provides')
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.synced = utils.cast(bool, data.attrib.get('synced'))
self.presence = utils.cast(bool, data.attrib.get('presence'))
self.connections = self.findItems(data, ResourceConnection)
def connect(self, ssl=None, timeout=None):
connections = sorted(self.connections, key=lambda c: c.local, reverse=True)
owned_or_unowned_non_local = lambda x: self.owned or (not self.owned and not x.local)
https = [c.uri for c in connections if owned_or_unowned_non_local(c)]
http = [c.httpuri for c in connections if owned_or_unowned_non_local(c)]
if ssl is True: connections = https
elif ssl is False: connections = http
else: connections = https + http
listargs = [[PlexServer, url, self.accessToken, timeout] for url in connections]
log.info('Testing %s resource connections..', len(listargs))
results = utils.threaded(_connect, listargs)
return _chooseConnection('Resource', self.name, results)
class ResourceConnection(PlexObject):
TAG = 'Connection'
def _loadData(self, data):
self._data = data
self.protocol = data.attrib.get('protocol')
self.address = data.attrib.get('address')
self.port = utils.cast(int, data.attrib.get('port'))
self.uri = data.attrib.get('uri')
self.local = utils.cast(bool, data.attrib.get('local'))
self.httpuri = 'http://%s:%s' % (self.address, self.port)
class MyPlexDevice(PlexObject):
TAG = 'Device'
key = 'https://plex.tv/devices.xml'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.publicAddress = data.attrib.get('publicAddress')
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.model = data.attrib.get('model')
self.vendor = data.attrib.get('vendor')
self.provides = data.attrib.get('provides')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.version = data.attrib.get('version')
self.id = data.attrib.get('id')
self.token = logfilter.add_secret(data.attrib.get('token'))
self.screenResolution = data.attrib.get('screenResolution')
self.screenDensity = data.attrib.get('screenDensity')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.connections = [connection.attrib.get('uri') for connection in data.iter('Connection')]
def connect(self, timeout=None):
listargs = [[PlexClient, url, self.token, timeout] for url in self.connections]
log.info('Testing %s device connections..', len(listargs))
results = utils.threaded(_connect, listargs)
_chooseConnection('Device', self.name, results)
def delete(self):
key = 'https://plex.tv/devices/%s.xml' % self.id
self._server.query(key, self._server._session.delete)
def _connect(cls, url, token, timeout, results, i):
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime)
def _chooseConnection(ctype, name, results):
for url, token, result, runtime in results:
okerr = 'OK' if result else 'ERR'
log.info('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token)
results = [r[2] for r in results if r and r[2] is not None]
if results:
log.info('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token)
return results[0]
raise NotFound('Unable to connect to %s: %s' % (ctype.lower(), name))
| true | true |
f7fad12e94f097d9af74b0aac3942946b1480a3d | 6,124 | py | Python | modules/encounter.py | HeercoGrond/Cha5ebot | 06ffbcd453a747b9b0d0812934bb0b3730b1ed4d | [
"Unlicense"
] | null | null | null | modules/encounter.py | HeercoGrond/Cha5ebot | 06ffbcd453a747b9b0d0812934bb0b3730b1ed4d | [
"Unlicense"
] | null | null | null | modules/encounter.py | HeercoGrond/Cha5ebot | 06ffbcd453a747b9b0d0812934bb0b3730b1ed4d | [
"Unlicense"
] | null | null | null | from discord.ext import commands
import discord
import json
import os
class Encounter(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("loaded cog")
@commands.command()
async def encounter(self, ctx, *args):
if discord.utils.get(ctx.guild.roles, name="DM") in ctx.message.author.roles:
if ctx.guild != None:
# Variables
currentGuildPath = "./guilds/" + str(ctx.guild.id)
currentEncounterPath = currentGuildPath + "/encounters"
argumentCount = len(args)
if argumentCount == 0:
await ctx.send("No arguments were provided, please make sure to provide an argument to the command.")
else:
argument = args[0]
if argument == "make":
if argumentCount != 2:
await ctx.send("There was either less or more than 1 argument input into the command. Propper usage is '>encounter make {x}' where {x} is the encounter name.")
else:
filename = args[1]
if not os.path.exists(currentGuildPath):
os.makedirs(currentGuildPath)
print("Created guild path.")
if not os.path.exists(currentEncounterPath):
os.makedirs(currentEncounterPath)
print("Created encounter path")
if not os.path.exists(currentEncounterPath + "/" + filename + ".json"):
with open(currentEncounterPath + "/" + filename + ".json", 'w') as fp:
data = self.make_encounter(filename)
json.dump(data, fp, indent=4, sort_keys=True)
await ctx.send("Created an encounter with the name: " + data["name"])
else:
await ctx.send("An encounter with the name '" + filename + "' was already found.")
elif argument == "delete":
if argumentCount != 2:
await ctx.send("There was either less or more than 1 argument into the command. Proper usage is `>encounter delete {x}` where {x} is the encounter's name that will be deleted.")
else:
filename = args[1]
path = currentEncounterPath + "/" + filename + ".json"
if os.path.exists(path):
os.remove(path)
await ctx.send("Succesfully deleted encounter '" + filename + "'.")
else:
await ctx.send("The encounter you are trying to delete doesn't seem to exist.")
elif argument == "list":
with os.scandir(currentEncounterPath + "/") as encounters:
description = ""
for file in encounters:
description += file.name.replace(".json", "") + "\n"
embed_totalEncounters = discord.Embed(title="Currently active encounters:", description=description)
await ctx.send(embed=embed_totalEncounters)
elif os.path.exists(currentEncounterPath + "/" + argument + ".json"):
if args[1] == "add":
with open("./modules/libraries/monsters.json") as f:
monster_data = json.load(f)
for monster in monster_data:
if monster["title"].lower() in args:
with open(currentEncounterPath + "/" + argument + ".json", "r+") as f:
encounter_file = json.load(f)
encounter_file["participants"].append(monster["title"])
f.seek(0)
json.dump(encounter_file, f, indent=4)
f.truncate()
await ctx.send("Added one " + monster["title"] + " to the " + argument + " encounter.")
elif args[1] == "remove":
with open(currentEncounterPath + "/" + argument + ".json") as f:
encounter_data = json.load(f)
for monster in encounter_data["participants"]:
if monster.lower() in args:
encounter_data["participants"].remove(monster)
with open(currentEncounterPath + "/" + argument + ".json", "w") as fw:
json.dump(encounter_data, fw, indent=4)
await ctx.send("Removed one " + monster + " from the " + argument + " encounter.")
break
elif args[1] == "list":
with open(currentEncounterPath + "/" + argument + ".json", "r") as f:
encounter_data = json.load(f)
description = ""
for monster in encounter_data["participants"]:
description += monster + "\n"
embed_totalMonsters = discord.Embed(title="Current monsters in encounter: " + args[0], description=description)
await ctx.send(embed=embed_totalMonsters)
else:
await ctx.send("You don't have permission to use that command.")
def make_encounter(self, enc_name):
encounter = {}
encounter["name"] = enc_name
encounter["description"] = ""
encounter["participants"] = []
encounter["map"] = ""
return encounter
def setup(client):
client.add_cog(Encounter(client)) | 47.107692 | 201 | 0.475506 | from discord.ext import commands
import discord
import json
import os
class Encounter(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("loaded cog")
@commands.command()
async def encounter(self, ctx, *args):
if discord.utils.get(ctx.guild.roles, name="DM") in ctx.message.author.roles:
if ctx.guild != None:
currentGuildPath = "./guilds/" + str(ctx.guild.id)
currentEncounterPath = currentGuildPath + "/encounters"
argumentCount = len(args)
if argumentCount == 0:
await ctx.send("No arguments were provided, please make sure to provide an argument to the command.")
else:
argument = args[0]
if argument == "make":
if argumentCount != 2:
await ctx.send("There was either less or more than 1 argument input into the command. Propper usage is '>encounter make {x}' where {x} is the encounter name.")
else:
filename = args[1]
if not os.path.exists(currentGuildPath):
os.makedirs(currentGuildPath)
print("Created guild path.")
if not os.path.exists(currentEncounterPath):
os.makedirs(currentEncounterPath)
print("Created encounter path")
if not os.path.exists(currentEncounterPath + "/" + filename + ".json"):
with open(currentEncounterPath + "/" + filename + ".json", 'w') as fp:
data = self.make_encounter(filename)
json.dump(data, fp, indent=4, sort_keys=True)
await ctx.send("Created an encounter with the name: " + data["name"])
else:
await ctx.send("An encounter with the name '" + filename + "' was already found.")
elif argument == "delete":
if argumentCount != 2:
await ctx.send("There was either less or more than 1 argument into the command. Proper usage is `>encounter delete {x}` where {x} is the encounter's name that will be deleted.")
else:
filename = args[1]
path = currentEncounterPath + "/" + filename + ".json"
if os.path.exists(path):
os.remove(path)
await ctx.send("Succesfully deleted encounter '" + filename + "'.")
else:
await ctx.send("The encounter you are trying to delete doesn't seem to exist.")
elif argument == "list":
with os.scandir(currentEncounterPath + "/") as encounters:
description = ""
for file in encounters:
description += file.name.replace(".json", "") + "\n"
embed_totalEncounters = discord.Embed(title="Currently active encounters:", description=description)
await ctx.send(embed=embed_totalEncounters)
elif os.path.exists(currentEncounterPath + "/" + argument + ".json"):
if args[1] == "add":
with open("./modules/libraries/monsters.json") as f:
monster_data = json.load(f)
for monster in monster_data:
if monster["title"].lower() in args:
with open(currentEncounterPath + "/" + argument + ".json", "r+") as f:
encounter_file = json.load(f)
encounter_file["participants"].append(monster["title"])
f.seek(0)
json.dump(encounter_file, f, indent=4)
f.truncate()
await ctx.send("Added one " + monster["title"] + " to the " + argument + " encounter.")
elif args[1] == "remove":
with open(currentEncounterPath + "/" + argument + ".json") as f:
encounter_data = json.load(f)
for monster in encounter_data["participants"]:
if monster.lower() in args:
encounter_data["participants"].remove(monster)
with open(currentEncounterPath + "/" + argument + ".json", "w") as fw:
json.dump(encounter_data, fw, indent=4)
await ctx.send("Removed one " + monster + " from the " + argument + " encounter.")
break
elif args[1] == "list":
with open(currentEncounterPath + "/" + argument + ".json", "r") as f:
encounter_data = json.load(f)
description = ""
for monster in encounter_data["participants"]:
description += monster + "\n"
embed_totalMonsters = discord.Embed(title="Current monsters in encounter: " + args[0], description=description)
await ctx.send(embed=embed_totalMonsters)
else:
await ctx.send("You don't have permission to use that command.")
def make_encounter(self, enc_name):
encounter = {}
encounter["name"] = enc_name
encounter["description"] = ""
encounter["participants"] = []
encounter["map"] = ""
return encounter
def setup(client):
client.add_cog(Encounter(client)) | true | true |
f7fad15c620f7dfe7cd3ef5776c35594cf56ee75 | 4,095 | py | Python | synapse/handlers/devicemessage.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | 1 | 2017-02-03T18:58:29.000Z | 2017-02-03T18:58:29.000Z | synapse/handlers/devicemessage.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | null | null | null | synapse/handlers/devicemessage.py | khanof/jsynapse | 1200f28d661747a019d2f33bd5623c7bc635c59e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from twisted.internet import defer
from synapse.types import get_domain_from_id
from synapse.util.stringutils import random_string
logger = logging.getLogger(__name__)
class DeviceMessageHandler(object):
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
self.store = hs.get_datastore()
self.notifier = hs.get_notifier()
self.is_mine_id = hs.is_mine_id
self.federation = hs.get_federation_sender()
hs.get_replication_layer().register_edu_handler(
"m.direct_to_device", self.on_direct_to_device_edu
)
@defer.inlineCallbacks
def on_direct_to_device_edu(self, origin, content):
local_messages = {}
sender_user_id = content["sender"]
if origin != get_domain_from_id(sender_user_id):
logger.warn(
"Dropping device message from %r with spoofed sender %r",
origin, sender_user_id
)
message_type = content["type"]
message_id = content["message_id"]
for user_id, by_device in content["messages"].items():
messages_by_device = {
device_id: {
"content": message_content,
"type": message_type,
"sender": sender_user_id,
}
for device_id, message_content in by_device.items()
}
if messages_by_device:
local_messages[user_id] = messages_by_device
stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
origin, message_id, local_messages
)
self.notifier.on_new_event(
"to_device_key", stream_id, users=local_messages.keys()
)
@defer.inlineCallbacks
def send_device_message(self, sender_user_id, message_type, messages):
local_messages = {}
remote_messages = {}
for user_id, by_device in messages.items():
if self.is_mine_id(user_id):
messages_by_device = {
device_id: {
"content": message_content,
"type": message_type,
"sender": sender_user_id,
}
for device_id, message_content in by_device.items()
}
if messages_by_device:
local_messages[user_id] = messages_by_device
else:
destination = get_domain_from_id(user_id)
remote_messages.setdefault(destination, {})[user_id] = by_device
message_id = random_string(16)
remote_edu_contents = {}
for destination, messages in remote_messages.items():
remote_edu_contents[destination] = {
"messages": messages,
"sender": sender_user_id,
"type": message_type,
"message_id": message_id,
}
stream_id = yield self.store.add_messages_to_device_inbox(
local_messages, remote_edu_contents
)
self.notifier.on_new_event(
"to_device_key", stream_id, users=local_messages.keys()
)
for destination in remote_messages.keys():
# Enqueue a new federation transaction to send the new
# device messages to each remote destination.
self.federation.send_device_messages(destination)
| 34.70339 | 80 | 0.610501 |
import logging
from twisted.internet import defer
from synapse.types import get_domain_from_id
from synapse.util.stringutils import random_string
logger = logging.getLogger(__name__)
class DeviceMessageHandler(object):
    """Handles m.direct_to_device (to-device) messaging: delivery of
    messages sent by local users and receipt of messages over federation.
    """

    def __init__(self, hs):
        """
        Args:
            hs: homeserver object providing the datastore, notifier and
                federation sender.
        """
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id
        self.federation = hs.get_federation_sender()
        # Receive m.direct_to_device EDUs from remote servers.
        hs.get_replication_layer().register_edu_handler(
            "m.direct_to_device", self.on_direct_to_device_edu
        )

    @defer.inlineCallbacks
    def on_direct_to_device_edu(self, origin, content):
        """Store to-device messages received from a remote server and wake
        up any local listeners, after verifying the claimed sender.
        """
        local_messages = {}
        sender_user_id = content["sender"]
        if origin != get_domain_from_id(sender_user_id):
            logger.warning(
                "Dropping device message from %r with spoofed sender %r",
                origin, sender_user_id
            )
            # Actually drop the message: without this return the spoofed
            # payload was logged as dropped but still delivered below.
            return
        message_type = content["type"]
        message_id = content["message_id"]
        for user_id, by_device in content["messages"].items():
            # Re-wrap each per-device payload with its type and sender.
            messages_by_device = {
                device_id: {
                    "content": message_content,
                    "type": message_type,
                    "sender": sender_user_id,
                }
                for device_id, message_content in by_device.items()
            }
            if messages_by_device:
                local_messages[user_id] = messages_by_device
        stream_id = yield self.store.add_messages_from_remote_to_device_inbox(
            origin, message_id, local_messages
        )
        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )

    @defer.inlineCallbacks
    def send_device_message(self, sender_user_id, message_type, messages):
        """Deliver to-device messages sent by a local user: store them for
        local recipients and queue federation EDUs for remote ones.
        """
        local_messages = {}
        remote_messages = {}
        for user_id, by_device in messages.items():
            if self.is_mine_id(user_id):
                messages_by_device = {
                    device_id: {
                        "content": message_content,
                        "type": message_type,
                        "sender": sender_user_id,
                    }
                    for device_id, message_content in by_device.items()
                }
                if messages_by_device:
                    local_messages[user_id] = messages_by_device
            else:
                # Group raw payloads for remote users by destination server.
                destination = get_domain_from_id(user_id)
                remote_messages.setdefault(destination, {})[user_id] = by_device
        # One message_id is shared by all remote copies of this send.
        message_id = random_string(16)
        remote_edu_contents = {}
        for destination, messages in remote_messages.items():
            remote_edu_contents[destination] = {
                "messages": messages,
                "sender": sender_user_id,
                "type": message_type,
                "message_id": message_id,
            }
        stream_id = yield self.store.add_messages_to_device_inbox(
            local_messages, remote_edu_contents
        )
        self.notifier.on_new_event(
            "to_device_key", stream_id, users=local_messages.keys()
        )
        for destination in remote_messages.keys():
            # Enqueue a new federation transaction to send the new
            # device messages to each remote destination.
            self.federation.send_device_messages(destination)
| true | true |
f7fad1e4a4233910271caf4fcf2b4baf3b413155 | 161 | py | Python | segmentify/model/layers/identity.py | kne42/segmentify | cdacf55be64d066958d0114c0748141203708a06 | [
"BSD-3-Clause"
] | 26 | 2019-07-29T21:52:08.000Z | 2022-03-30T16:47:12.000Z | segmentify/model/layers/identity.py | joaomamede/segmentify | bd57cfcc94ad2f6dfcb080ae786f410e044659c4 | [
"BSD-3-Clause"
] | 24 | 2019-07-25T20:38:43.000Z | 2021-02-09T21:53:55.000Z | segmentify/model/layers/identity.py | joaomamede/segmentify | bd57cfcc94ad2f6dfcb080ae786f410e044659c4 | [
"BSD-3-Clause"
] | 11 | 2019-06-18T22:37:34.000Z | 2021-12-14T05:35:24.000Z | import torch.nn as nn
class Identity(nn.Module):
    """A pass-through layer: ``forward`` returns its input unchanged.

    Useful as a placeholder where an ``nn.Module`` is required but no
    transformation is wanted.
    """

    def __init__(self):
        # Nothing to configure; just initialise the nn.Module machinery.
        super().__init__()

    def forward(self, x):
        return x
| 14.636364 | 40 | 0.621118 | import torch.nn as nn
class Identity(nn.Module):
    """Identity layer: returns its input unchanged (no-op placeholder)."""
    def __init__(self):
        # No parameters or state beyond the base nn.Module bookkeeping.
        super(Identity, self).__init__()
    def forward(self, x):
        # x may be any object (tensor or otherwise); it is returned as-is.
        return x
| true | true |
f7fad1f35fca48a03bd697853e60bef374d0162b | 1,684 | py | Python | PythonClient/box.py | woxihuanwangdanling/AirsimWithVS2017 | 0e8c65bbc28cd250fb5d23d67faed5fa127fec76 | [
"MIT"
] | null | null | null | PythonClient/box.py | woxihuanwangdanling/AirsimWithVS2017 | 0e8c65bbc28cd250fb5d23d67faed5fa127fec76 | [
"MIT"
] | null | null | null | PythonClient/box.py | woxihuanwangdanling/AirsimWithVS2017 | 0e8c65bbc28cd250fb5d23d67faed5fa127fec76 | [
"MIT"
] | null | null | null | from AirSimClient import *
import sys
import time

# Connect to the AirSim simulator, take API control of the drone, arm it
# and take off. (MultirotorClient and friends come from AirSimClient.)
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
client.takeoff()

print("Flying a small square box using moveByVelocityZ")
print("Try pressing 't' in the AirSim view to see a pink trace of the flight")

# AirSim uses NED coordinates so negative axis is up.
# z of -7 is 7 meters above the original launch point.
z = -7

# Fly each leg of the square at `speed` m/s for `duration` seconds.
duration = 5
speed = 1
delay = duration * speed

# One (vx, vy, yaw) triple per side of the box.  Using
# DrivetrainType.MaxDegreeOfFreedom decouples yaw from the direction of
# travel; the yaw values below keep the drone pointed towards the inside
# of the box (handy when building a 3D scan of an object).
legs = [
    (speed, 0, 90),
    (0, speed, 180),
    (-speed, 0, 270),
    (0, -speed, 0),
]
for vx, vy, yaw in legs:
    print("moving by velocity vx=" + str(vx) + ", vy=" + str(vy) + ", yaw=" + str(yaw))
    client.moveByVelocityZ(vx, vy, z, duration,
                           DrivetrainType.MaxDegreeOfFreedom, YawMode(False, yaw))
    time.sleep(delay)

client.hover()
| 35.829787 | 116 | 0.733373 | from AirSimClient import *
import sys
import time
# Connect to the AirSim simulator, take API control of the drone, arm it
# and take off.
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
client.takeoff()
print("Flying a small square box using moveByVelocityZ")
print("Try pressing 't' in the AirSim view to see a pink trace of the flight")
# AirSim uses NED coordinates, so negative z is up: -7 is 7 m above launch.
z = -7
# Each leg of the square lasts `duration` seconds at `speed` m/s.
duration = 5
speed = 1
delay = duration * speed;
# The yaw values below keep the drone pointed towards the inside of the box
# (DrivetrainType.MaxDegreeOfFreedom decouples yaw from travel direction).
# Leg 1: east at yaw 90.
vx = speed
vy = 0
print("moving by velocity vx=" + str(vx) + ", vy=" + str(vy) + ", yaw=90")
client.moveByVelocityZ(vx,vy,z,duration, DrivetrainType.MaxDegreeOfFreedom, YawMode(False, 90))
time.sleep(delay)
# Leg 2: yaw 180.
vx = 0
vy = speed
print("moving by velocity vx=" + str(vx) + ", vy=" + str(vy)+ ", yaw=180")
client.moveByVelocityZ(vx,vy,z,duration, DrivetrainType.MaxDegreeOfFreedom, YawMode(False, 180))
time.sleep(delay)
# Leg 3: yaw 270.
vx = -speed
vy = 0
print("moving by velocity vx=" + str(vx) + ", vy=" + str(vy)+ ", yaw=270")
client.moveByVelocityZ(vx, vy, z,duration, DrivetrainType.MaxDegreeOfFreedom, YawMode(False, 270))
time.sleep(delay)
# Leg 4: yaw 0, closing the square.
vx = 0
vy = -speed
print("moving by velocity vx=" + str(vx) + ", vy=" + str(vy) + ", yaw=0")
client.moveByVelocityZ(vx, vy,z,duration, DrivetrainType.MaxDegreeOfFreedom, YawMode(False, 0))
time.sleep(delay)
# Hold position at the end of the flight.
client.hover()
| true | true |
f7fad29beb4af51301026f0d4bde4876a538764a | 10,179 | py | Python | keystone/identity/backends/sql.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | keystone/identity/backends/sql.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | keystone/identity/backends/sql.py | ISCAS-VDI/keystone | 11af181c06d78026c89a873f62931558e80f3192 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from keystone.common import driver_hints
from keystone.common import sql
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import base
from keystone.identity.backends import sql_model as model
class Identity(base.IdentityDriverV8):
    """SQL-backed identity driver: users, groups and group memberships."""

    # NOTE(henry-nash): Override the __init__() method so as to take a
    # config parameter to enable sql to be used as a domain-specific driver.
    def __init__(self, conf=None):
        self.conf = conf
        super(Identity, self).__init__()

    @property
    def is_sql(self):
        # Lets the identity manager detect SQL-backed drivers.
        return True

    def _check_password(self, password, user_ref):
        """Check the specified password against the data store.

        Note that we'll pass in the entire user_ref in case the subclass
        needs things like user_ref.get('name')
        For further justification, please see the follow up suggestion at
        https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam
        """
        return utils.check_password(password, user_ref.password)

    # Identity interface

    def authenticate(self, user_id, password):
        # Raises AssertionError with the same message for both unknown user
        # and bad password, so callers cannot tell the cases apart.
        with sql.session_for_read() as session:
            user_ref = None
            try:
                user_ref = self._get_user(session, user_id)
            except exception.UserNotFound:
                raise AssertionError(_('Invalid user / password'))
            if not self._check_password(password, user_ref):
                raise AssertionError(_('Invalid user / password'))
            return base.filter_user(user_ref.to_dict())

    # user crud

    @sql.handle_conflicts(conflict_type='user')
    def create_user(self, user_id, user):
        """Create a user; the password is hashed before storage."""
        user = utils.hash_user_password(user)
        with sql.session_for_write() as session:
            user_ref = model.User.from_dict(user)
            session.add(user_ref)
            return base.filter_user(user_ref.to_dict())

    @driver_hints.truncated
    def list_users(self, hints):
        """List users, applying any filter/limit hints."""
        with sql.session_for_read() as session:
            query = session.query(model.User).outerjoin(model.LocalUser)
            user_refs = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(x.to_dict()) for x in user_refs]

    def _get_user(self, session, user_id):
        # Raises UserNotFound instead of returning None.
        user_ref = session.query(model.User).get(user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return user_ref

    def get_user(self, user_id):
        with sql.session_for_read() as session:
            return base.filter_user(
                self._get_user(session, user_id).to_dict())

    def get_user_by_name(self, user_name, domain_id):
        # User names are only unique within a domain, so both are required.
        with sql.session_for_read() as session:
            query = session.query(model.User).join(model.LocalUser)
            query = query.filter(sqlalchemy.and_(
                model.LocalUser.name == user_name,
                model.LocalUser.domain_id == domain_id))
            try:
                user_ref = query.one()
            except sql.NotFound:
                raise exception.UserNotFound(user_id=user_name)
            return base.filter_user(user_ref.to_dict())

    @sql.handle_conflicts(conflict_type='user')
    def update_user(self, user_id, user):
        """Merge the given attributes into the stored user record."""
        with sql.session_for_write() as session:
            user_ref = self._get_user(session, user_id)
            old_user_dict = user_ref.to_dict()
            user = utils.hash_user_password(user)
            for k in user:
                old_user_dict[k] = user[k]
            new_user = model.User.from_dict(old_user_dict)
            for attr in model.User.attributes:
                if attr != 'id':
                    setattr(user_ref, attr, getattr(new_user, attr))
            user_ref.extra = new_user.extra
            return base.filter_user(
                user_ref.to_dict(include_extra_dict=True))

    def add_user_to_group(self, user_id, group_id):
        """Add a user to a group; adding an existing member is a no-op."""
        with sql.session_for_write() as session:
            self.get_group(group_id)
            self.get_user(user_id)
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            rv = query.first()
            if rv:
                # Membership already exists; keep the call idempotent.
                return
            session.add(model.UserGroupMembership(user_id=user_id,
                                                  group_id=group_id))

    def check_user_in_group(self, user_id, group_id):
        """Raise NotFound unless the user is a member of the group."""
        with sql.session_for_read() as session:
            self.get_group(group_id)
            self.get_user(user_id)
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            if not query.first():
                raise exception.NotFound(_("User '%(user_id)s' not found in"
                                           " group '%(group_id)s'") %
                                         {'user_id': user_id,
                                          'group_id': group_id})

    def remove_user_from_group(self, user_id, group_id):
        # We don't check if user or group are still valid and let the remove
        # be tried anyway - in case this is some kind of clean-up operation
        with sql.session_for_write() as session:
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            membership_ref = query.first()
            if membership_ref is None:
                # Check if the group and user exist to return descriptive
                # exceptions.
                self.get_group(group_id)
                self.get_user(user_id)
                raise exception.NotFound(_("User '%(user_id)s' not found in"
                                           " group '%(group_id)s'") %
                                         {'user_id': user_id,
                                          'group_id': group_id})
            session.delete(membership_ref)

    def list_groups_for_user(self, user_id, hints):
        with sql.session_for_read() as session:
            self.get_user(user_id)
            query = session.query(model.Group).join(model.UserGroupMembership)
            query = query.filter(model.UserGroupMembership.user_id == user_id)
            query = sql.filter_limit_query(model.Group, query, hints)
            return [g.to_dict() for g in query]

    def list_users_in_group(self, group_id, hints):
        with sql.session_for_read() as session:
            self.get_group(group_id)
            query = session.query(model.User).outerjoin(model.LocalUser)
            query = query.join(model.UserGroupMembership)
            query = query.filter(
                model.UserGroupMembership.group_id == group_id)
            query = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(u.to_dict()) for u in query]

    def delete_user(self, user_id):
        """Delete a user and all of its group memberships."""
        with sql.session_for_write() as session:
            ref = self._get_user(session, user_id)
            # Remove memberships first so no orphan rows remain.
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(user_id=user_id)
            q.delete(False)
            session.delete(ref)

    # group crud

    @sql.handle_conflicts(conflict_type='group')
    def create_group(self, group_id, group):
        with sql.session_for_write() as session:
            ref = model.Group.from_dict(group)
            session.add(ref)
            return ref.to_dict()

    @driver_hints.truncated
    def list_groups(self, hints):
        """List groups, applying any filter/limit hints."""
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            refs = sql.filter_limit_query(model.Group, query, hints)
            return [ref.to_dict() for ref in refs]

    def _get_group(self, session, group_id):
        # Raises GroupNotFound instead of returning None.
        ref = session.query(model.Group).get(group_id)
        if not ref:
            raise exception.GroupNotFound(group_id=group_id)
        return ref

    def get_group(self, group_id):
        with sql.session_for_read() as session:
            return self._get_group(session, group_id).to_dict()

    def get_group_by_name(self, group_name, domain_id):
        # Group names are only unique within a domain.
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            query = query.filter_by(name=group_name)
            query = query.filter_by(domain_id=domain_id)
            try:
                group_ref = query.one()
            except sql.NotFound:
                raise exception.GroupNotFound(group_id=group_name)
            return group_ref.to_dict()

    @sql.handle_conflicts(conflict_type='group')
    def update_group(self, group_id, group):
        """Merge the given attributes into the stored group record."""
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            old_dict = ref.to_dict()
            for k in group:
                old_dict[k] = group[k]
            new_group = model.Group.from_dict(old_dict)
            for attr in model.Group.attributes:
                if attr != 'id':
                    setattr(ref, attr, getattr(new_group, attr))
            ref.extra = new_group.extra
            return ref.to_dict()

    def delete_group(self, group_id):
        """Delete a group and all of its user memberships."""
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(group_id=group_id)
            q.delete(False)
            session.delete(ref)
| 40.716 | 78 | 0.616858 |
import sqlalchemy
from keystone.common import driver_hints
from keystone.common import sql
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.identity.backends import base
from keystone.identity.backends import sql_model as model
class Identity(base.IdentityDriverV8):
    """SQL-backed identity driver: users, groups and group memberships."""

    def __init__(self, conf=None):
        # conf allows this driver to be used as a domain-specific driver.
        self.conf = conf
        super(Identity, self).__init__()

    @property
    def is_sql(self):
        # Lets the identity manager detect SQL-backed drivers.
        return True

    def _check_password(self, password, user_ref):
        """Check the specified password against the data store."""
        return utils.check_password(password, user_ref.password)

    def authenticate(self, user_id, password):
        # Raises AssertionError with the same message for both unknown user
        # and bad password, so callers cannot tell the cases apart.
        with sql.session_for_read() as session:
            user_ref = None
            try:
                user_ref = self._get_user(session, user_id)
            except exception.UserNotFound:
                raise AssertionError(_('Invalid user / password'))
            if not self._check_password(password, user_ref):
                raise AssertionError(_('Invalid user / password'))
            return base.filter_user(user_ref.to_dict())

    @sql.handle_conflicts(conflict_type='user')
    def create_user(self, user_id, user):
        """Create a user; the password is hashed before storage."""
        user = utils.hash_user_password(user)
        with sql.session_for_write() as session:
            user_ref = model.User.from_dict(user)
            session.add(user_ref)
            return base.filter_user(user_ref.to_dict())

    @driver_hints.truncated
    def list_users(self, hints):
        """List users, applying any filter/limit hints."""
        with sql.session_for_read() as session:
            query = session.query(model.User).outerjoin(model.LocalUser)
            user_refs = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(x.to_dict()) for x in user_refs]

    def _get_user(self, session, user_id):
        # Raises UserNotFound instead of returning None.
        user_ref = session.query(model.User).get(user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return user_ref

    def get_user(self, user_id):
        with sql.session_for_read() as session:
            return base.filter_user(
                self._get_user(session, user_id).to_dict())

    def get_user_by_name(self, user_name, domain_id):
        # User names are only unique within a domain, so both are required.
        with sql.session_for_read() as session:
            query = session.query(model.User).join(model.LocalUser)
            query = query.filter(sqlalchemy.and_(
                model.LocalUser.name == user_name,
                model.LocalUser.domain_id == domain_id))
            try:
                user_ref = query.one()
            except sql.NotFound:
                raise exception.UserNotFound(user_id=user_name)
            return base.filter_user(user_ref.to_dict())

    @sql.handle_conflicts(conflict_type='user')
    def update_user(self, user_id, user):
        """Merge the given attributes into the stored user record."""
        with sql.session_for_write() as session:
            user_ref = self._get_user(session, user_id)
            old_user_dict = user_ref.to_dict()
            user = utils.hash_user_password(user)
            for k in user:
                old_user_dict[k] = user[k]
            new_user = model.User.from_dict(old_user_dict)
            for attr in model.User.attributes:
                if attr != 'id':
                    setattr(user_ref, attr, getattr(new_user, attr))
            user_ref.extra = new_user.extra
            return base.filter_user(
                user_ref.to_dict(include_extra_dict=True))

    def add_user_to_group(self, user_id, group_id):
        """Add a user to a group; adding an existing member is a no-op."""
        with sql.session_for_write() as session:
            self.get_group(group_id)
            self.get_user(user_id)
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            rv = query.first()
            if rv:
                # Membership already exists; keep the call idempotent.
                return
            session.add(model.UserGroupMembership(user_id=user_id,
                                                  group_id=group_id))

    def check_user_in_group(self, user_id, group_id):
        """Raise NotFound unless the user is a member of the group."""
        with sql.session_for_read() as session:
            self.get_group(group_id)
            self.get_user(user_id)
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            if not query.first():
                raise exception.NotFound(_("User '%(user_id)s' not found in"
                                           " group '%(group_id)s'") %
                                         {'user_id': user_id,
                                          'group_id': group_id})

    def remove_user_from_group(self, user_id, group_id):
        # The user/group are not validated up front; the remove is simply
        # attempted anyway, in case this is some kind of clean-up operation.
        with sql.session_for_write() as session:
            query = session.query(model.UserGroupMembership)
            query = query.filter_by(user_id=user_id)
            query = query.filter_by(group_id=group_id)
            membership_ref = query.first()
            if membership_ref is None:
                # Check if the group and user exist to return descriptive
                # exceptions.
                self.get_group(group_id)
                self.get_user(user_id)
                raise exception.NotFound(_("User '%(user_id)s' not found in"
                                           " group '%(group_id)s'") %
                                         {'user_id': user_id,
                                          'group_id': group_id})
            session.delete(membership_ref)

    def list_groups_for_user(self, user_id, hints):
        with sql.session_for_read() as session:
            self.get_user(user_id)
            query = session.query(model.Group).join(model.UserGroupMembership)
            query = query.filter(model.UserGroupMembership.user_id == user_id)
            query = sql.filter_limit_query(model.Group, query, hints)
            return [g.to_dict() for g in query]

    def list_users_in_group(self, group_id, hints):
        with sql.session_for_read() as session:
            self.get_group(group_id)
            query = session.query(model.User).outerjoin(model.LocalUser)
            query = query.join(model.UserGroupMembership)
            query = query.filter(
                model.UserGroupMembership.group_id == group_id)
            query = sql.filter_limit_query(model.User, query, hints)
            return [base.filter_user(u.to_dict()) for u in query]

    def delete_user(self, user_id):
        """Delete a user and all of its group memberships."""
        with sql.session_for_write() as session:
            ref = self._get_user(session, user_id)
            # Remove memberships first so no orphan rows remain.
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(user_id=user_id)
            q.delete(False)
            session.delete(ref)

    # group crud

    @sql.handle_conflicts(conflict_type='group')
    def create_group(self, group_id, group):
        with sql.session_for_write() as session:
            ref = model.Group.from_dict(group)
            session.add(ref)
            return ref.to_dict()

    @driver_hints.truncated
    def list_groups(self, hints):
        """List groups, applying any filter/limit hints."""
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            refs = sql.filter_limit_query(model.Group, query, hints)
            return [ref.to_dict() for ref in refs]

    def _get_group(self, session, group_id):
        # Raises GroupNotFound instead of returning None.
        ref = session.query(model.Group).get(group_id)
        if not ref:
            raise exception.GroupNotFound(group_id=group_id)
        return ref

    def get_group(self, group_id):
        with sql.session_for_read() as session:
            return self._get_group(session, group_id).to_dict()

    def get_group_by_name(self, group_name, domain_id):
        # Group names are only unique within a domain.
        with sql.session_for_read() as session:
            query = session.query(model.Group)
            query = query.filter_by(name=group_name)
            query = query.filter_by(domain_id=domain_id)
            try:
                group_ref = query.one()
            except sql.NotFound:
                raise exception.GroupNotFound(group_id=group_name)
            return group_ref.to_dict()

    @sql.handle_conflicts(conflict_type='group')
    def update_group(self, group_id, group):
        """Merge the given attributes into the stored group record."""
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            old_dict = ref.to_dict()
            for k in group:
                old_dict[k] = group[k]
            new_group = model.Group.from_dict(old_dict)
            for attr in model.Group.attributes:
                if attr != 'id':
                    setattr(ref, attr, getattr(new_group, attr))
            ref.extra = new_group.extra
            return ref.to_dict()

    def delete_group(self, group_id):
        """Delete a group and all of its user memberships."""
        with sql.session_for_write() as session:
            ref = self._get_group(session, group_id)
            q = session.query(model.UserGroupMembership)
            q = q.filter_by(group_id=group_id)
            q.delete(False)
            session.delete(ref)
| true | true |
f7fad3075f031c1ae85ab2d48d9309b41ed7b022 | 664 | py | Python | jp.atcoder/abc214/abc214_f/26740844.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc214/abc214_f/26740844.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc214/abc214_f/26740844.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8[:], ), cache=True)
def solve(a: np.ndarray) -> None:
    """Competitive-programming DP over the letter sequence ``a`` (0..25),
    printing a count modulo 1e9+7.

    The recurrence looks like the standard distinct-subsequence count with
    a "no two adjacent indices" constraint (hence the dp[i - 2] term) and
    deduplication by subtracting the count at the previous occurrence of
    the same letter — TODO(review): confirm against the problem statement.
    """
    n = len(a)
    # prev[i]: 1-based position of the previous occurrence of letter a[i]
    # before index i, or 0 if there is none.
    prev = np.empty(n, np.int64)
    last = np.zeros(26, np.int64)
    for i in range(n):
        prev[i] = last[a[i]]
        last[a[i]] = i + 1
    mod = 10 ** 9 + 7
    # dp is kept as a running (prefix-summed) table, shifted by 2 so that
    # indices i - 2 and j - 1 never go negative.
    dp = np.zeros(n + 3, np.int64)
    for i in range(2, n + 2):
        j = prev[i - 2]
        dp[i] = dp[i - 2] - dp[j - 1] + (j == 0)
        dp[i] = (dp[i] + dp[i - 1]) % mod
    print(dp[n + 1])
def main() -> None:
    # Map 'a'..'z' to 0..25 before handing off to the numba-compiled solver.
    a = np.array([ord(x) - 97 for x in input()])
    solve(a)


main()
| 21.419355 | 49 | 0.472892 | import sys
import typing
import numba as nb
import numpy as np
@nb.njit((nb.i8[:], ), cache=True)
def solve(a: np.ndarray) -> None:
    """Competitive-programming DP over the letter sequence ``a`` (0..25),
    printing a count modulo 1e9+7.

    The recurrence looks like the standard distinct-subsequence count with
    a "no two adjacent indices" constraint (hence the dp[i - 2] term) and
    deduplication by subtracting the count at the previous occurrence of
    the same letter — TODO(review): confirm against the problem statement.
    """
    n = len(a)
    # prev[i]: 1-based position of the previous occurrence of letter a[i]
    # before index i, or 0 if there is none.
    prev = np.empty(n, np.int64)
    last = np.zeros(26, np.int64)
    for i in range(n):
        prev[i] = last[a[i]]
        last[a[i]] = i + 1
    mod = 10 ** 9 + 7
    # dp is kept as a running (prefix-summed) table, shifted by 2 so that
    # indices i - 2 and j - 1 never go negative.
    dp = np.zeros(n + 3, np.int64)
    for i in range(2, n + 2):
        j = prev[i - 2]
        dp[i] = dp[i - 2] - dp[j - 1] + (j == 0)
        dp[i] = (dp[i] + dp[i - 1]) % mod
    print(dp[n + 1])
def main() -> None:
    # Map 'a'..'z' to 0..25 before handing off to the numba-compiled solver.
    a = np.array([ord(x) - 97 for x in input()])
    solve(a)


main()
| true | true |
f7fad4089fb11570038e537c7a646b7bb71bebd9 | 2,685 | py | Python | doc/source/conf.py | stackhpc/ansible-collection-kolla | b3867aa23b00906fb3844b7a63bd95d664ad8fd3 | [
"Apache-2.0"
] | 1 | 2021-11-26T20:02:11.000Z | 2021-11-26T20:02:11.000Z | doc/source/conf.py | stackhpc/ansible-collection-kolla | b3867aa23b00906fb3844b7a63bd95d664ad8fd3 | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | stackhpc/ansible-collection-kolla | b3867aa23b00906fb3844b7a63bd95d664ad8fd3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx build configuration for the ansible-collection-kolla documentation.
import os
import sys
# Make the repository root importable so autodoc can locate the package.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'openstackdocstheme',
    #'sphinx.ext.intersphinx',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ansible-collection-kolla'
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/ansible-collection-kolla'
openstackdocs_bug_project = 'ansible-collection-kolla'
openstackdocs_bug_tag = ''
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| 32.743902 | 79 | 0.708752 |
# Sphinx build configuration for the ansible-collection-kolla documentation.
import os
import sys
# Make the repository root importable so autodoc can locate the package.
sys.path.insert(0, os.path.abspath('../..'))
# Sphinx extensions used for this build.
extensions = [
    'sphinx.ext.autodoc',
    'openstackdocstheme',
]
# Source file suffix and the root ("master") document.
source_suffix = '.rst'
master_doc = 'index'
# General project information.
project = u'ansible-collection-kolla'
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options.
openstackdocs_repo_name = 'openstack/ansible-collection-kolla'
openstackdocs_bug_project = 'ansible-collection-kolla'
openstackdocs_bug_tag = ''
# Append '()' to :func: cross-references; prefix module names in titles.
add_function_parentheses = True
add_module_names = True
# Pygments syntax-highlighting style.
pygments_style = 'native'
# HTML output options.
html_theme = 'openstackdocs'
htmlhelp_basename = '%sdoc' % project
# LaTeX output: (source start file, target name, title, author, class).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Developers', 'manual'),
]
| true | true |
f7fad45bd124cd83768da5d7e12f88133cc9938e | 5,491 | py | Python | BiblioAlly/scopus.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | BiblioAlly/scopus.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | BiblioAlly/scopus.py | gambit4348/BiblioAlly | c04ac378770a3cdcbba863799383103049df22f3 | [
"MIT"
] | null | null | null | from BiblioAlly import catalog as cat, domain, translator as bibtex
class ScopusTranslator(bibtex.Translator):
    """Converts between BiblioAlly ``domain.Document`` objects and Scopus
    BibTeX entries (both directions), and renders entries as BibTeX text.
    """

    def _document_from_proto_document(self, proto_document):
        """Build a ``domain.Document`` from a parsed Scopus BibTeX entry."""
        bibtex.Translator._translate_kind(proto_document)
        kind = proto_document['type']
        fields = proto_document['field']
        title = self._unbroken(self._uncurlied(fields['title']))
        if 'abstract' in fields:
            abstract = self._unbroken(self._uncurlied(fields['abstract']))
        else:
            abstract = ''
        year = int(fields['year'])
        if 'author' in fields:
            author_field = self._unbroken(self._uncurlied(fields['author']))
        else:
            author_field = ''
        authors = self._authors_from_field(author_field)
        if 'affiliation' in fields:
            affiliations = self._affiliations_from_field(self._all_uncurly(fields['affiliation']))
        else:
            affiliations = None
        affiliations = self._expand_affiliations(affiliations, authors)
        # Deduplicate author keywords by their capitalized form.
        keywords = []
        if 'author_keywords' in fields:
            all_keywords = self._all_uncurly(fields['author_keywords']).split(';')
            keyword_names = set()
            for keyword_name in all_keywords:
                name = keyword_name.strip().capitalize()
                if name not in keyword_names:
                    keyword_names.add(name)
            keyword_names = list(keyword_names)
            for keyword_name in keyword_names:
                keywords.append(domain.Keyword(name=keyword_name))
        document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
        document.generator = "Scopus"
        if 'document_type' in fields:
            document.document_type = self._uncurlied(fields['document_type'])
        # Copy the optional scalar fields that are present and non-empty.
        # BUG FIX: a stray ``return document`` inside this loop (plus a dead
        # duplicate return) made the method exit after examining only 'doi',
        # so pages/url/volume/number/language/journal were never copied.
        for name in ['doi', 'pages', 'url', 'volume', 'number', 'language', 'journal']:
            if name in fields:
                value = self._uncurlied(fields[name])
                if len(value) > 0:
                    setattr(document, name, value)
        return document

    def _proto_document_from_document(self, document: domain.Document):
        """Build a proto-document (type + BibTeX field dict) from a Document."""
        kind = document.kind
        if kind == 'proceedings':
            kind = 'conference'
        fields = dict()
        fields['external_key'] = document.external_key
        doc_authors = document.authors
        # Sort by the ``first`` flag and reverse so that first-author
        # entries come before the rest.
        doc_authors.sort(key=lambda doc_author: doc_author.first)
        doc_authors.reverse()
        all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
                        else doc_author.author.short_name) for doc_author in doc_authors]
        fields['author'] = self._curly(all_authors, separator=' and ')
        fields['title'] = self._curly(document.title)
        affiliations = []
        for doc_author in doc_authors:
            institution = doc_author.institution
            if institution is not None:
                affiliation = ', '.join([institution.name, institution.country])
                affiliations.append(affiliation)
        if len(affiliations) > 0:
            fields['affiliation'] = self._curly(affiliations, '; ')
        fields['year'] = self._curly(str(document.year))
        if document.international_number is not None:
            fields['issn'] = self._curly(str(document.international_number))
        if document.publisher is not None:
            fields['publisher'] = self._curly(str(document.publisher))
        if document.address is not None:
            fields['address'] = self._curly(str(document.address))
        if document.doi is not None:
            fields['doi'] = self._curly(str(document.doi))
        # BUG FIX: the url field was guarded by ``document.international_number
        # is not None`` (copy-paste from the issn branch above), which could
        # emit url={None} or drop a valid URL.
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
        fields['abstract'] = self._curly(document.abstract)
        if document.journal is not None:
            fields['journal'] = self._curly(str(document.journal))
        if document.pages is not None:
            fields['pages'] = self._curly(str(document.pages))
        if document.volume is not None:
            fields['volume'] = self._curly(str(document.volume))
        if document.number is not None:
            fields['number'] = self._curly(str(document.number))
        if document.language is not None:
            fields['language'] = self._curly(str(document.language))
        keywords = [keyword.name for keyword in document.keywords]
        fields['author_keywords'] = self._curly(keywords, '; ')
        if len(document.references) > 0:
            fields['references'] = self._curly('; '.join(document.references))
        if document.document_type is not None:
            fields['document_type'] = self._curly(document.document_type)
        fields['source'] = self._curly(document.generator)
        # NOTE(review): this writer uses the key 'fields' while
        # _document_from_proto_document reads 'field' — presumably the two
        # shapes are consumed by different pipelines; confirm before unifying.
        proto_document = {
            'type': kind,
            'fields': fields
        }
        return proto_document

    def _as_bibtex(self, proto_document):
        """Render a proto-document as a BibTeX entry string."""
        kind = proto_document['type'].upper()
        fields = proto_document['fields']
        # The external key becomes the citation key, not a regular field.
        external_key = fields['external_key']
        del fields['external_key']
        key_value = []
        for key, value in fields.items():
            key_value.append(f'{key}={value}')
        # Local renamed from ``bibtex`` to stop shadowing the imported module.
        entry = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}\n'
        return entry
# Registry key for this translator; also stored as Document.generator.
Scopus = "Scopus"
# Register the translator so BiblioAlly catalogs can resolve Scopus exports.
cat.Catalog.translators[Scopus] = ScopusTranslator
| 43.928 | 117 | 0.616463 | from BiblioAlly import catalog as cat, domain, translator as bibtex
class ScopusTranslator(bibtex.Translator):
    """BibTeX translator for Scopus exports.

    Converts between the intermediate "proto-document" dict form used by
    the BibTeX parser and :class:`domain.Document` instances, and renders
    proto-documents back to BibTeX text.
    """

    def _document_from_proto_document(self, proto_document):
        """Build a domain.Document from a parsed proto-document dict.

        Extracts title, abstract, year, authors, affiliations and
        keywords (deduplicated, capitalized) and copies a fixed set of
        optional scalar fields onto the document when present.
        """
        bibtex.Translator._translate_kind(proto_document)
        kind = proto_document['type']
        # NOTE(review): reads key 'field' while _proto_document_from_document
        # and _as_bibtex use 'fields' -- confirm against the output of the
        # bibtex.Translator parser before changing either side.
        fields = proto_document['field']
        title = self._unbroken(self._uncurlied(fields['title']))
        if 'abstract' in fields:
            abstract = self._unbroken(self._uncurlied(fields['abstract']))
        else:
            abstract = ''
        year = int(fields['year'])
        if 'author' in fields:
            author_field = self._unbroken(self._uncurlied(fields['author']))
        else:
            author_field = ''
        authors = self._authors_from_field(author_field)
        if 'affiliation' in fields:
            affiliations = self._affiliations_from_field(self._all_uncurly(fields['affiliation']))
        else:
            affiliations = None
        affiliations = self._expand_affiliations(affiliations, authors)
        keywords = []
        if 'author_keywords' in fields:
            # Deduplicate keywords case-insensitively via capitalize()
            all_keywords = self._all_uncurly(fields['author_keywords']).split(';')
            keyword_names = set()
            for keyword_name in all_keywords:
                name = keyword_name.strip().capitalize()
                if name not in keyword_names:
                    keyword_names.add(name)
            keyword_names = list(keyword_names)
            for keyword_name in keyword_names:
                keywords.append(domain.Keyword(name=keyword_name))
        document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
        document.generator = "Scopus"
        if 'document_type' in fields:
            document.document_type = self._uncurlied(fields['document_type'])
        # Optional scalar fields: copy only when present and non-empty
        for name in ['doi', 'pages', 'url', 'volume', 'number', 'language', 'journal']:
            if name in fields:
                value = self._uncurlied(fields[name])
                if len(value) > 0:
                    setattr(document, name, value)
        return document

    def _proto_document_from_document(self, document: domain.Document):
        """Build a proto-document dict from a domain.Document.

        Inverse of _document_from_proto_document: collects authors,
        affiliations and the optional scalar fields into a curly-braced
        field dict ready for BibTeX rendering.
        """
        kind = document.kind
        # BibTeX has no 'proceedings' entry on the Scopus side
        if kind == 'proceedings':
            kind = 'conference'
        fields = dict()
        fields['external_key'] = document.external_key
        doc_authors = document.authors
        # Authors ordered with the first author first
        doc_authors.sort(key=lambda doc_author: doc_author.first)
        doc_authors.reverse()
        all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
                        else doc_author.author.short_name) for doc_author in doc_authors]
        fields['author'] = self._curly(all_authors, separator=' and ')
        fields['title'] = self._curly(document.title)
        affiliations = []
        for doc_author in doc_authors:
            institution = doc_author.institution
            if institution is not None:
                affiliation = ', '.join([institution.name, institution.country])
                affiliations.append(affiliation)
        if len(affiliations) > 0:
            fields['affiliation'] = self._curly(affiliations, '; ')
        fields['year'] = self._curly(str(document.year))
        if document.international_number is not None:
            fields['issn'] = self._curly(str(document.international_number))
        if document.publisher is not None:
            fields['publisher'] = self._curly(str(document.publisher))
        if document.address is not None:
            fields['address'] = self._curly(str(document.address))
        if document.doi is not None:
            fields['doi'] = self._curly(str(document.doi))
        # NOTE(review): 'url' is guarded by international_number, not by
        # document.url -- looks like a copy/paste slip; confirm intent.
        if document.international_number is not None:
            fields['url'] = self._curly(str(document.url))
        fields['abstract'] = self._curly(document.abstract)
        if document.journal is not None:
            fields['journal'] = self._curly(str(document.journal))
        if document.pages is not None:
            fields['pages'] = self._curly(str(document.pages))
        if document.volume is not None:
            fields['volume'] = self._curly(str(document.volume))
        if document.number is not None:
            fields['number'] = self._curly(str(document.number))
        if document.language is not None:
            fields['language'] = self._curly(str(document.language))
        keywords = [keyword.name for keyword in document.keywords]
        fields['author_keywords'] = self._curly(keywords, '; ')
        if len(document.references) > 0:
            fields['references'] = self._curly('; '.join(document.references))
        if document.document_type is not None:
            fields['document_type'] = self._curly(document.document_type)
        fields['source'] = self._curly(document.generator)
        proto_document = {
            'type': kind,
            'fields': fields
        }
        return proto_document

    def _as_bibtex(self, proto_document):
        """Render a proto-document dict as a BibTeX entry string.

        Mutates proto_document['fields'] by removing 'external_key',
        which becomes the citation key.
        """
        kind = proto_document['type'].upper()
        fields = proto_document['fields']
        external_key = fields['external_key']
        del fields['external_key']
        key_value = []
        for key, value in fields.items():
            key_value.append(f'{key}={value}')
        bibtex = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}\n'
        return bibtex
# Registry key for this translator; also stored as Document.generator.
Scopus = "Scopus"
# Register the translator so BiblioAlly catalogs can resolve Scopus exports.
cat.Catalog.translators[Scopus] = ScopusTranslator
| true | true |
f7fad4b64a6a34a94a5f9892021ec5f63805fd14 | 21,740 | py | Python | src/vpoller/worker.py | nikypint/py-vpoller | c7657dfc73831adf03c88363dee51c545ff8d511 | [
"BSD-2-Clause"
] | null | null | null | src/vpoller/worker.py | nikypint/py-vpoller | c7657dfc73831adf03c88363dee51c545ff8d511 | [
"BSD-2-Clause"
] | null | null | null | src/vpoller/worker.py | nikypint/py-vpoller | c7657dfc73831adf03c88363dee51c545ff8d511 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2013-2015 Marin Atanasov Nikolov <dnaeon@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer
# in this position and unchanged.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
vPoller Worker module
"""
import json
import importlib
import multiprocessing
from platform import node
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import zmq
import pyVmomi
from vpoller import __version__
from vpoller.log import logger
from vpoller.client import validate_message
from vpoller.exceptions import VPollerException
from vpoller.task.registry import registry
from vconnector.core import VConnector
from vconnector.core import VConnectorDatabase
__all__ = ['VPollerWorkerManager', 'VPollerWorker', 'DefaultJSONEncoder']
class DefaultJSONEncoder(json.JSONEncoder):
    """JSON encoder that falls back to an object's ``__dict__``.

    pyVmomi does not yet support encoding its core objects to JSON
    (see https://github.com/vmware/pyvmomi/issues/21), so any object
    the base encoder rejects is serialized from its instance
    attributes instead.
    """
    def default(self, obj):
        """Return a JSON-serializable form of *obj*.

        Delegates to the base encoder first and uses ``obj.__dict__``
        for anything it cannot handle.
        """
        try:
            return json.JSONEncoder.default(self, obj)
        except Exception:
            return obj.__dict__
class VPollerWorkerManager(object):
    """
    Manager of vPoller Workers

    Creates the management socket, spawns the vPoller Worker
    processes and serves management requests until shutdown

    """
    def __init__(self, config_file, num_workers=0):
        """
        Initializes a new vPoller Worker Manager

        Args:
            config_file (str): Path to the vPoller configuration file
            num_workers (int): Number of vPoller Worker
                processes to create. A value <= 0 means one
                process per CPU core

        """
        self.node = node()
        self.config_file = config_file
        self.num_workers = num_workers
        self.time_to_die = multiprocessing.Event()
        self.config = {}
        self.workers = []
        self.zcontext = None
        self.zpoller = None
        self.mgmt_socket = None
        self.mgmt_methods = {
            'status': self.status,
            'shutdown': self.signal_stop,
        }
        # Fallback values applied by load_config() for any option
        # that is missing from the configuration file
        self.config_defaults = {
            'db': '/var/lib/vconnector/vconnector.db',
            'mgmt': 'tcp://*:10000',
            'proxy': 'tcp://localhost:10123',
            'helpers': None,
            'tasks': None,
            'cache_maxsize': 0,
            'cache_enabled': False,
            'cache_ttl': 3600,
            'cache_housekeeping': 480,
        }

    def start(self):
        """
        Start the vPoller Worker Manager and processes

        """
        logger.info('Starting Worker Manager [%s release]', __version__)
        self.load_config()
        self.create_sockets()
        self.start_workers()
        logger.info('Worker Manager is ready and running')
        while not self.time_to_die.is_set():
            try:
                self.wait_for_mgmt_task()
            except KeyboardInterrupt:
                self.signal_stop()
        self.stop()

    def stop(self):
        """
        Stop the vPoller Manager and Workers

        """
        logger.info('Worker Manager is shutting down')
        self.close_sockets()
        self.stop_workers()

    def signal_stop(self):
        """
        Signal the vPoller Worker Manager that shutdown time has arrived

        Returns:
            dict: A result message for the management client

        """
        logger.info('Received shutdown signal')
        self.time_to_die.set()
        return {'success': 0, 'msg': 'Shutdown time has arrived'}

    def load_config(self):
        """
        Loads the vPoller Worker Manager configuration settings

        Options missing from the config file fall back to the
        values in self.config_defaults.

        """
        logger.debug('Loading config file %s', self.config_file)
        # Passing the defaults dict to the ConfigParser constructor
        # broke on Python >= 3.7 (default values must be strings), so
        # the defaults are applied per-option via the fallback= keyword
        parser = ConfigParser()
        parser.read(self.config_file)
        defaults = self.config_defaults
        self.config['mgmt'] = parser.get(
            'worker', 'mgmt', fallback=defaults['mgmt'])
        self.config['db'] = parser.get(
            'worker', 'db', fallback=defaults['db'])
        self.config['proxy'] = parser.get(
            'worker', 'proxy', fallback=defaults['proxy'])
        self.config['helpers'] = parser.get(
            'worker', 'helpers', fallback=defaults['helpers'])
        self.config['tasks'] = parser.get(
            'worker', 'tasks', fallback=defaults['tasks'])
        self.config['cache_enabled'] = parser.getboolean(
            'cache', 'enabled', fallback=defaults['cache_enabled'])
        self.config['cache_maxsize'] = parser.getint(
            'cache', 'maxsize', fallback=defaults['cache_maxsize'])
        self.config['cache_ttl'] = parser.getint(
            'cache', 'ttl', fallback=defaults['cache_ttl'])
        self.config['cache_housekeeping'] = parser.getint(
            'cache', 'housekeeping', fallback=defaults['cache_housekeeping'])
        # 'helpers' and 'tasks' are comma-separated module lists
        if self.config['helpers']:
            self.config['helpers'] = self.config['helpers'].split(',')
        if self.config['tasks']:
            self.config['tasks'] = self.config['tasks'].split(',')
        logger.debug(
            'Worker Manager configuration: %s',
            self.config
        )

    def start_workers(self):
        """
        Start the vPoller Worker processes

        """
        logger.info('Starting Worker processes')
        if self.num_workers <= 0:
            self.num_workers = multiprocessing.cpu_count()
        logger.info(
            'Concurrency: %d (processes)',
            self.num_workers
        )
        for i in range(self.num_workers):
            worker = VPollerWorker(
                db=self.config.get('db'),
                proxy=self.config.get('proxy'),
                helpers=self.config.get('helpers'),
                tasks=self.config.get('tasks'),
                cache_enabled=self.config.get('cache_enabled'),
                cache_maxsize=self.config.get('cache_maxsize'),
                cache_ttl=self.config.get('cache_ttl'),
                cache_housekeeping=self.config.get('cache_housekeeping')
            )
            worker.daemon = True
            self.workers.append(worker)
            worker.start()

    def stop_workers(self):
        """
        Stop the vPoller Worker processes

        """
        logger.info('Stopping Worker processes')
        for worker in self.workers:
            worker.signal_stop()
            worker.join(3)

    def create_sockets(self):
        """
        Creates the ZeroMQ sockets used by the vPoller Worker Manager

        """
        logger.debug('Creating Worker Manager sockets')
        self.zcontext = zmq.Context()
        self.mgmt_socket = self.zcontext.socket(zmq.REP)
        self.mgmt_socket.bind(self.config.get('mgmt'))
        self.zpoller = zmq.Poller()
        self.zpoller.register(self.mgmt_socket, zmq.POLLIN)

    def close_sockets(self):
        """
        Closes the ZeroMQ sockets used by the Manager

        """
        logger.debug('Closing Worker Manager sockets')
        self.zpoller.unregister(self.mgmt_socket)
        self.mgmt_socket.close()
        self.zcontext.term()

    def wait_for_mgmt_task(self):
        """
        Poll the management socket for management tasks

        """
        socks = dict(self.zpoller.poll())
        if socks.get(self.mgmt_socket) == zmq.POLLIN:
            try:
                msg = self.mgmt_socket.recv_json()
            except TypeError:
                logger.warning(
                    'Invalid message received on management interface',
                )
                # send_string() is required here: pyzmq's send() only
                # accepts bytes-like objects on Python 3
                self.mgmt_socket.send_string('Invalid message received')
                return
            result = self.process_mgmt_task(msg)
            self.mgmt_socket.send_json(result)

    def process_mgmt_task(self, msg):
        """
        Processes a message for the management interface

        Example client message to shutdown the vPoller Worker would be:

            {
                "method": "shutdown"
            }

        Args:
            msg (dict): The client message for processing

        """
        logger.debug('Processing management message: %s', msg)
        if 'method' not in msg:
            return {'success': 1, 'msg': 'Missing method name'}
        if msg['method'] not in self.mgmt_methods:
            return {'success': 1, 'msg': 'Unknown method name received'}
        method = msg['method']
        result = self.mgmt_methods[method]()
        return result

    def status(self):
        """
        Get status information about the vPoller Worker

        """
        logger.debug('Getting Worker status')
        result = {
            'success': 0,
            'msg': 'vPoller Worker status',
            'result': {
                'status': 'running',
                'hostname': self.node,
                'proxy': self.config.get('proxy'),
                'mgmt': self.config.get('mgmt'),
                'db': self.config.get('db'),
                'concurrency': self.num_workers,
                'helpers': self.config.get('helpers'),
                'tasks': self.config.get('tasks'),
            }
        }
        logger.debug('Returning result to client: %s', result)
        return result
class VPollerWorker(multiprocessing.Process):
    """
    VPollerWorker class

    A vPoller Worker object runs the vSphere Agents, which are
    responsible for making vSphere API requests

    Extends:
        multiprocessing.Process

    Overrides:
        run() method

    """
    def __init__(self,
                 db,
                 proxy,
                 helpers,
                 tasks,
                 cache_enabled,
                 cache_maxsize,
                 cache_ttl,
                 cache_housekeeping,
                 ):
        """
        Initialize a new VPollerWorker object

        Args:
            db (str): Path to the vConnector database file
            proxy (str): Endpoint to which vPoller Workers connect
                and receive new tasks for processing
            helpers (list): A list of helper modules to be loaded
            tasks (list): A list of task modules to be loaded
            cache_enabled (bool): If True use an expiring cache for the
                managed objects
            cache_maxsize (int): Upperbound limit on the number of items
                that will be stored in the cache
            cache_ttl (int): Time in seconds after which a cached
                object is considered as expired
            cache_housekeeping (int): Time in minutes to perform
                periodic housekeeping of the cache

        """
        super(VPollerWorker, self).__init__()
        self.config = {
            'db': db,
            'proxy': proxy,
            'helpers': helpers,
            'tasks': tasks,
            'cache_enabled': cache_enabled,
            'cache_maxsize': cache_maxsize,
            'cache_ttl': cache_ttl,
            'cache_housekeeping': cache_housekeeping,
        }
        self.task_modules = {}
        self.helper_modules = {}
        self.time_to_die = multiprocessing.Event()
        self.agents = {}
        self.zcontext = None
        self.zpoller = None
        self.worker_socket = None

    def run(self):
        """
        The main worker method

        Loads the task and helper modules, connects to the proxy and
        processes client tasks until shutdown is signalled.

        """
        logger.info('Worker process is starting')
        self.load_task_modules()
        self.load_helper_modules()
        self.create_sockets()
        self.create_agents()
        logger.info('Worker process is ready and running')
        while not self.time_to_die.is_set():
            try:
                self.wait_for_tasks()
            except KeyboardInterrupt:
                self.signal_stop()
        self.stop()

    def stop(self):
        """
        Stop vPoller Worker process

        """
        logger.info('Worker process is shutting down')
        self.close_sockets()
        self.stop_agents()

    def signal_stop(self):
        """
        Signal the vPoller Worker process that shutdown time has arrived

        """
        self.time_to_die.set()

    def load_task_modules(self):
        """
        Loads the task modules

        Raises:
            VPollerException: If no task modules are configured or
                none could be imported

        """
        if not self.config.get('tasks'):
            raise VPollerException('No task modules provided')
        for task in self.config.get('tasks'):
            task = task.strip()
            logger.info('Loading task module %s', task)
            try:
                module = importlib.import_module(task)
            except ImportError as e:
                # Log the exception object itself: Python 3 exceptions
                # have no .message attribute (consistent with
                # load_helper_modules)
                logger.warning(
                    'Cannot import task module: %s',
                    e
                )
                continue
            self.task_modules[task] = module
        if not self.task_modules:
            raise VPollerException('No task modules loaded')

    def load_helper_modules(self):
        """
        Loads helper modules for post-processing of results

        A helper module must provide a HelperAgent class with a
        run() method in order to be accepted.

        """
        if not self.config.get('helpers'):
            return
        for helper in self.config.get('helpers'):
            helper = helper.strip()
            logger.info('Loading helper module %s', helper)
            try:
                module = importlib.import_module(helper)
            except ImportError as e:
                logger.warning(
                    'Cannot import helper module: %s',
                    e
                )
                continue
            if not hasattr(module, 'HelperAgent'):
                logger.warning(
                    'Module %s does not provide a HelperAgent interface',
                    helper
                )
                continue
            if not hasattr(module.HelperAgent, 'run'):
                logger.warning(
                    'In module %s HelperAgent class does not provide a run() method',
                    helper
                )
                continue
            self.helper_modules[helper] = module

    def run_helper(self, helper, msg, data):
        """
        Run a helper to post-process result data

        On helper failure the unprocessed data is returned unchanged.

        Args:
            helper (str): Name of the helper to run
            msg    (dict): The original message request
            data   (dict): The data to be processed

        """
        logger.debug(
            'Invoking helper module %s for processing of data',
            helper
        )
        module = self.helper_modules[helper]
        h = module.HelperAgent(msg=msg, data=data)
        try:
            result = h.run()
        except Exception as e:
            logger.warning('Helper module raised an exception: %s', e)
            return data
        return result

    def wait_for_tasks(self):
        """
        Poll the worker socket for new tasks

        """
        socks = dict(self.zpoller.poll(1000))

        # The routing envelope of the message on the worker socket is this:
        #
        # Frame 1: [ N ][...]  <- Identity of connection
        # Frame 2: [ 0 ][]     <- Empty delimiter frame
        # Frame 3: [ N ][...]  <- Data frame
        if socks.get(self.worker_socket) == zmq.POLLIN:
            # TODO: Use recv_multipart()
            _id = self.worker_socket.recv()
            _empty = self.worker_socket.recv()
            try:
                msg = self.worker_socket.recv_json()
            except Exception as e:
                # Include the reason instead of silently discarding it
                logger.warning(
                    'Invalid client message received, will be ignored: %s',
                    e
                )
                self.worker_socket.send(_id, zmq.SNDMORE)
                self.worker_socket.send(_empty, zmq.SNDMORE)
                self.worker_socket.send_json(
                    {'success': 1, 'msg': 'Invalid message received'}
                )
                return

            # Process task and return result to client
            result = self.process_client_msg(msg)

            # Process data using a helper before sending it to client?
            if 'helper' in msg and msg['helper'] in self.helper_modules:
                data = self.run_helper(
                    helper=msg['helper'],
                    msg=msg,
                    data=result
                )
            else:
                # No helper specified, dump data to JSON
                try:
                    data = json.dumps(result, cls=DefaultJSONEncoder, ensure_ascii=False)
                except (ValueError, TypeError) as e:
                    logger.warning('Cannot serialize result: %s', e)
                    r = {
                        'success': 1,
                        'msg': 'Cannot serialize result: %s' % e
                    }
                    data = json.dumps(r)

            # Send data to client
            self.worker_socket.send(_id, zmq.SNDMORE)
            self.worker_socket.send(_empty, zmq.SNDMORE)
            try:
                self.worker_socket.send_unicode(data)
            except TypeError as e:
                logger.warning('Cannot send result: %s', e)
                r = {'success': 1, 'msg': 'Cannot send result: %s' % e}
                self.worker_socket.send_unicode(json.dumps(r))

    def create_sockets(self):
        """
        Creates the ZeroMQ sockets used by the vPoller Worker

        Creates a DEALER socket connected to the proxy backend from
        which tasks are received.

        """
        logger.info('Creating Worker sockets')
        self.zcontext = zmq.Context()
        self.worker_socket = self.zcontext.socket(zmq.DEALER)
        self.worker_socket.connect(self.config.get('proxy'))
        self.zpoller = zmq.Poller()
        self.zpoller.register(self.worker_socket, zmq.POLLIN)

    def close_sockets(self):
        """
        Closes the ZeroMQ sockets used by the vPoller Worker

        """
        logger.info('Closing Worker process sockets')
        self.zpoller.unregister(self.worker_socket)
        self.worker_socket.close()
        self.zcontext.term()

    def create_agents(self):
        """
        Prepares the vSphere Agents used by the vPoller Worker

        Raises:
            VPollerException: If no registered and enabled vSphere
                Agents are found in the database

        """
        logger.debug('Creating vSphere Agents')
        db = VConnectorDatabase(self.config.get('db'))
        agents = db.get_agents(only_enabled=True)
        if not agents:
            logger.warning('No registered or enabled vSphere Agents found')
            raise VPollerException(
                'No registered or enabled vSphere Agents found'
            )
        for agent in agents:
            a = VConnector(
                user=agent['user'],
                pwd=agent['pwd'],
                host=agent['host'],
                cache_enabled=self.config.get('cache_enabled'),
                cache_maxsize=self.config.get('cache_maxsize'),
                cache_ttl=self.config.get('cache_ttl'),
                cache_housekeeping=self.config.get('cache_housekeeping')
            )
            self.agents[a.host] = a
            logger.info('Created vSphere Agent for %s', agent['host'])

    def stop_agents(self):
        """
        Disconnects all vPoller Agents

        """
        logger.debug('Shutting down vSphere Agents')
        for agent in self.agents:
            self.agents[agent].disconnect()

    def process_client_msg(self, msg):
        """
        Processes a client message received on the vPoller Worker socket

        The message is passed to the VSphereAgent object of the
        respective vSphere host in order to do the actual polling.

        Args:
            msg (dict): Client message for processing

        An example message for discovering the hosts could be:

            {
                "method":   "host.discover",
                "hostname": "vc01.example.org",
            }

        An example message for polling a datastore property could be:

            {
                "method":   "datastore.poll",
                "hostname": "vc01.example.org",
                "info.url": "ds:///vmfs/volumes/5190e2a7-d2b7c58e-b1e2/",
                "property": "summary.capacity"
            }

        """
        logger.debug('Processing client message: %s', msg)
        if not isinstance(msg, dict):
            return {
                'success': 1,
                'msg': 'Expected a JSON message, received {}'.format(msg.__class__)
            }
        task = registry.get(msg.get('method'))
        agent = self.agents.get(msg.get('hostname'))
        if not task:
            return {'success': 1, 'msg': 'Unknown or missing task/method name'}
        if not agent:
            return {'success': 1, 'msg': 'Unknown or missing agent name'}
        if not validate_message(msg=msg, required=task.required):
            return {'success': 1, 'msg': 'Invalid task request'}
        result = task.function(agent, msg)
        return result
| 31.598837 | 89 | 0.56058 |
import json
import importlib
import multiprocessing
from platform import node
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import zmq
import pyVmomi
from vpoller import __version__
from vpoller.log import logger
from vpoller.client import validate_message
from vpoller.exceptions import VPollerException
from vpoller.task.registry import registry
from vconnector.core import VConnector
from vconnector.core import VConnectorDatabase
__all__ = ['VPollerWorkerManager', 'VPollerWorker', 'DefaultJSONEncoder']
class DefaultJSONEncoder(json.JSONEncoder):
    """JSON encoder that falls back to an object's ``__dict__`` for
    objects the base encoder cannot serialize (e.g. pyVmomi objects)."""
    def default(self, obj):
        """Return a JSON-serializable form of *obj*, using its instance
        attributes when the base encoder raises."""
        try:
            return super(DefaultJSONEncoder, self).default(obj)
        except Exception:
            return obj.__dict__
class VPollerWorkerManager(object):
def __init__(self, config_file, num_workers=0):
self.node = node()
self.config_file = config_file
self.num_workers = num_workers
self.time_to_die = multiprocessing.Event()
self.config = {}
self.workers = []
self.zcontext = None
self.zpoller = None
self.mgmt_socket = None
self.mgmt_methods = {
'status': self.status,
'shutdown': self.signal_stop,
}
self.config_defaults = {
'db': '/var/lib/vconnector/vconnector.db',
'mgmt': 'tcp://*:10000',
'proxy': 'tcp://localhost:10123',
'helpers': None,
'tasks': None,
'cache_maxsize': 0,
'cache_enabled': False,
'cache_ttl': 3600,
'cache_housekeeping': 480,
}
def start(self):
logger.info('Starting Worker Manager [%s release]', __version__)
self.load_config()
self.create_sockets()
self.start_workers()
logger.info('Worker Manager is ready and running')
while not self.time_to_die.is_set():
try:
self.wait_for_mgmt_task()
except KeyboardInterrupt:
self.signal_stop()
self.stop()
def stop(self):
logger.info('Worker Manager is shutting down')
self.close_sockets()
self.stop_workers()
def signal_stop(self):
logger.info('Received shutdown signal')
self.time_to_die.set()
return {'success': 0, 'msg': 'Shutdown time has arrived'}
def load_config(self):
logger.debug('Loading config file %s', self.config_file)
parser = ConfigParser()
parser.read(self.config_file)
self.config['mgmt'] = parser.get('worker', 'mgmt')
self.config['db'] = parser.get('worker', 'db')
self.config['proxy'] = parser.get('worker', 'proxy')
self.config['helpers'] = parser.get('worker', 'helpers')
self.config['tasks'] = parser.get('worker', 'tasks')
self.config['cache_enabled'] = parser.getboolean('cache', 'enabled')
self.config['cache_maxsize'] = parser.getint('cache', 'maxsize')
self.config['cache_ttl'] = parser.getint('cache', 'ttl')
self.config['cache_housekeeping'] = parser.getint('cache', 'housekeeping')
if self.config['helpers']:
self.config['helpers'] = self.config['helpers'].split(',')
if self.config['tasks']:
self.config['tasks'] = self.config['tasks'].split(',')
logger.debug(
'Worker Manager configuration: %s',
self.config
)
def start_workers(self):
logger.info('Starting Worker processes')
if self.num_workers <= 0:
self.num_workers = multiprocessing.cpu_count()
logger.info(
'Concurrency: %d (processes)',
self.num_workers
)
for i in range(self.num_workers):
worker = VPollerWorker(
db=self.config.get('db'),
proxy=self.config.get('proxy'),
helpers=self.config.get('helpers'),
tasks=self.config.get('tasks'),
cache_enabled=self.config.get('cache_enabled'),
cache_maxsize=self.config.get('cache_maxsize'),
cache_ttl=self.config.get('cache_ttl'),
cache_housekeeping=self.config.get('cache_housekeeping')
)
worker.daemon = True
self.workers.append(worker)
worker.start()
def stop_workers(self):
logger.info('Stopping Worker processes')
for worker in self.workers:
worker.signal_stop()
worker.join(3)
def create_sockets(self):
logger.debug('Creating Worker Manager sockets')
self.zcontext = zmq.Context()
self.mgmt_socket = self.zcontext.socket(zmq.REP)
self.mgmt_socket.bind(self.config.get('mgmt'))
self.zpoller = zmq.Poller()
self.zpoller.register(self.mgmt_socket, zmq.POLLIN)
def close_sockets(self):
logger.debug('Closing Worker Manager sockets')
self.zpoller.unregister(self.mgmt_socket)
self.mgmt_socket.close()
self.zcontext.term()
def wait_for_mgmt_task(self):
socks = dict(self.zpoller.poll())
if socks.get(self.mgmt_socket) == zmq.POLLIN:
try:
msg = self.mgmt_socket.recv_json()
except TypeError:
logger.warning(
'Invalid message received on management interface',
)
self.mgmt_socket.send('Invalid message received')
return
result = self.process_mgmt_task(msg)
self.mgmt_socket.send_json(result)
def process_mgmt_task(self, msg):
logger.debug('Processing management message: %s', msg)
if 'method' not in msg:
return {'success': 1, 'msg': 'Missing method name'}
if msg['method'] not in self.mgmt_methods:
return {'success': 1, 'msg': 'Unknown method name received'}
method = msg['method']
result = self.mgmt_methods[method]()
return result
def status(self):
logger.debug('Getting Worker status')
result = {
'success': 0,
'msg': 'vPoller Worker status',
'result': {
'status': 'running',
'hostname': self.node,
'proxy': self.config.get('proxy'),
'mgmt': self.config.get('mgmt'),
'db': self.config.get('db'),
'concurrency': self.num_workers,
'helpers': self.config.get('helpers'),
'tasks': self.config.get('tasks'),
}
}
logger.debug('Returning result to client: %s', result)
return result
class VPollerWorker(multiprocessing.Process):
def __init__(self,
db,
proxy,
helpers,
tasks,
cache_enabled,
cache_maxsize,
cache_ttl,
cache_housekeeping,
):
super(VPollerWorker, self).__init__()
self.config = {
'db': db,
'proxy': proxy,
'helpers': helpers,
'tasks': tasks,
'cache_enabled': cache_enabled,
'cache_maxsize': cache_maxsize,
'cache_ttl': cache_ttl,
'cache_housekeeping': cache_housekeeping,
}
self.task_modules = {}
self.helper_modules = {}
self.time_to_die = multiprocessing.Event()
self.agents = {}
self.zcontext = None
self.zpoller = None
self.worker_socket = None
def run(self):
logger.info('Worker process is starting')
self.load_task_modules()
self.load_helper_modules()
self.create_sockets()
self.create_agents()
logger.info('Worker process is ready and running')
while not self.time_to_die.is_set():
try:
self.wait_for_tasks()
except KeyboardInterrupt:
self.signal_stop()
self.stop()
def stop(self):
logger.info('Worker process is shutting down')
self.close_sockets()
self.stop_agents()
def signal_stop(self):
self.time_to_die.set()
def load_task_modules(self):
if not self.config.get('tasks'):
raise VPollerException('No task modules provided')
for task in self.config.get('tasks'):
task = task.strip()
logger.info('Loading task module %s', task)
try:
module = importlib.import_module(task)
except ImportError as e:
logger.warning(
'Cannot import task module: %s',
e.message
)
continue
self.task_modules[task] = module
if not self.task_modules:
raise VPollerException('No task modules loaded')
def load_helper_modules(self):
if not self.config.get('helpers'):
return
for helper in self.config.get('helpers'):
helper = helper.strip()
logger.info('Loading helper module %s', helper)
try:
module = importlib.import_module(helper)
except ImportError as e:
logger.warning(
'Cannot import helper module: %s',
e
)
continue
if not hasattr(module, 'HelperAgent'):
logger.warning(
'Module %s does not provide a HelperAgent interface',
helper
)
continue
if not hasattr(module.HelperAgent, 'run'):
logger.warning(
'In module %s HelperAgent class does not provide a run() method',
helper
)
continue
self.helper_modules[helper] = module
def run_helper(self, helper, msg, data):
logger.debug(
'Invoking helper module %s for processing of data',
helper
)
module = self.helper_modules[helper]
h = module.HelperAgent(msg=msg, data=data)
try:
result = h.run()
except Exception as e:
logger.warning('Helper module raised an exception: %s', e)
return data
return result
def wait_for_tasks(self):
socks = dict(self.zpoller.poll(1000))
if socks.get(self.worker_socket) == zmq.POLLIN:
_id = self.worker_socket.recv()
_empty = self.worker_socket.recv()
try:
msg = self.worker_socket.recv_json()
except Exception as e:
logger.warning(
'Invalid client message received, will be ignored',
)
self.worker_socket.send(_id, zmq.SNDMORE)
self.worker_socket.send(_empty, zmq.SNDMORE)
self.worker_socket.send_json(
{'success': 1, 'msg': 'Invalid message received'}
)
return
result = self.process_client_msg(msg)
if 'helper' in msg and msg['helper'] in self.helper_modules:
data = self.run_helper(
helper=msg['helper'],
msg=msg,
data=result
)
else:
try:
data = json.dumps(result, cls=DefaultJSONEncoder, ensure_ascii=False)
except (ValueError, TypeError) as e:
logger.warning('Cannot serialize result: %s', e)
r = {
'success': 1,
'msg': 'Cannot serialize result: %s' % e
}
data = json.dumps(r)
self.worker_socket.send(_id, zmq.SNDMORE)
self.worker_socket.send(_empty, zmq.SNDMORE)
try:
self.worker_socket.send_unicode(data)
except TypeError as e:
logger.warning('Cannot send result: %s', e)
r = {'success': 1, 'msg': 'Cannot send result: %s' % e}
self.worker_socket.send_unicode(json.dumps(r))
def create_sockets(self):
logger.info('Creating Worker sockets')
self.zcontext = zmq.Context()
self.worker_socket = self.zcontext.socket(zmq.DEALER)
self.worker_socket.connect(self.config.get('proxy'))
self.zpoller = zmq.Poller()
self.zpoller.register(self.worker_socket, zmq.POLLIN)
def close_sockets(self):
logger.info('Closing Worker process sockets')
self.zpoller.unregister(self.worker_socket)
self.worker_socket.close()
self.zcontext.term()
def create_agents(self):
logger.debug('Creating vSphere Agents')
db = VConnectorDatabase(self.config.get('db'))
agents = db.get_agents(only_enabled=True)
if not agents:
logger.warning('No registered or enabled vSphere Agents found')
raise VPollerException(
'No registered or enabled vSphere Agents found'
)
for agent in agents:
a = VConnector(
user=agent['user'],
pwd=agent['pwd'],
host=agent['host'],
cache_enabled=self.config.get('cache_enabled'),
cache_maxsize=self.config.get('cache_maxsize'),
cache_ttl=self.config.get('cache_ttl'),
cache_housekeeping=self.config.get('cache_housekeeping')
)
self.agents[a.host] = a
logger.info('Created vSphere Agent for %s', agent['host'])
def stop_agents(self):
logger.debug('Shutting down vSphere Agents')
for agent in self.agents:
self.agents[agent].disconnect()
def process_client_msg(self, msg):
logger.debug('Processing client message: %s', msg)
if not isinstance(msg, dict):
return {
'success': 1,
'msg': 'Expected a JSON message, received {}'.format(msg.__class__)
}
task = registry.get(msg.get('method'))
agent = self.agents.get(msg.get('hostname'))
if not task:
return {'success': 1, 'msg': 'Unknown or missing task/method name'}
if not agent:
return {'success': 1, 'msg': 'Unknown or missing agent name'}
if not validate_message(msg=msg, required=task.required):
return {'success': 1, 'msg': 'Invalid task request'}
result = task.function(agent, msg)
return result
| true | true |
f7fad4fa00de69e0ff28ba2e26f9f2d1185db522 | 730 | py | Python | tests/embedding/clustering/test_kmeans.py | microsoft/topologic | d3a2155a42469ccb16de178f47bec81b0476fdc8 | [
"MIT"
] | 24 | 2020-02-10T23:51:06.000Z | 2021-11-17T02:34:47.000Z | tests/embedding/clustering/test_kmeans.py | microsoft/topologic | d3a2155a42469ccb16de178f47bec81b0476fdc8 | [
"MIT"
] | 26 | 2020-02-11T18:37:33.000Z | 2020-11-11T00:14:41.000Z | tests/embedding/clustering/test_kmeans.py | microsoft/topologic | d3a2155a42469ccb16de178f47bec81b0476fdc8 | [
"MIT"
] | 6 | 2020-07-31T11:05:36.000Z | 2021-11-10T08:18:52.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import topologic.embedding.clustering as tc_embedding_clustering
import numpy as np
import unittest
from tests.utils import data_file
class TestKmeans(unittest.TestCase):
    """Unit tests for topologic's k-means clustering wrapper."""

    def test_kmeans_returns_correctly_shaped_labels(self):
        # The fixture is a 70-row, 19-column CSV; kmeans should return one
        # cluster label per input row.
        matrix = np.loadtxt(data_file('gmm-input.csv'), delimiter=',', usecols=range(19))
        cluster_labels = tc_embedding_clustering.kmeans(matrix,
                                                        n_clusters=50
                                                        )
        self.assertEqual(cluster_labels.shape[0], 70, 'Incorrect shape of cluster_labels')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 31.73913 | 90 | 0.643836 |
import topologic.embedding.clustering as tc_embedding_clustering
import numpy as np
import unittest
from tests.utils import data_file
class TestKmeans(unittest.TestCase):
    """Checks the output-shape contract of the k-means clustering helper."""

    def test_kmeans_returns_correctly_shaped_labels(self):
        # Load the 70-sample, 19-feature GMM fixture.
        embedding = np.loadtxt(data_file('gmm-input.csv'), delimiter=',', usecols=range(19))
        labels = tc_embedding_clustering.kmeans(embedding, n_clusters=50)
        # One cluster label is expected per input row.
        self.assertEqual(labels.shape[0], 70, 'Incorrect shape of cluster_labels')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7fad566236054ea2dbbc89c4beeaf2f0a5a0d71 | 32,766 | py | Python | faster_rcnn/core/loader.py | whywhs/Detection_and_Recognition_in_Remote_Sensing_Image | 201c7450ad45d203b59d8345fb6fad903fad8748 | [
"Apache-2.0"
] | 20 | 2019-02-13T12:14:19.000Z | 2022-03-30T07:14:50.000Z | faster_rcnn/core/loader.py | whywhs/Detection_and_Recognition_in_Remote_Sensing_Image | 201c7450ad45d203b59d8345fb6fad903fad8748 | [
"Apache-2.0"
] | 1 | 2019-05-15T01:53:52.000Z | 2019-05-15T06:27:15.000Z | faster_rcnn/core/loader.py | whywhs/Detection_and_Recognition_in_Remote_Sensing_Image | 201c7450ad45d203b59d8345fb6fad903fad8748 | [
"Apache-2.0"
] | 12 | 2019-05-13T09:42:00.000Z | 2021-08-03T02:25:32.000Z | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
import numpy as np
import mxnet as mx
from mxnet.executor_manager import _split_input_slice
from config.config import config
from utils.image import tensor_vstack
from rpn.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor, get_rpn_batch_quadrangle, assign_quadrangle_anchor, get_rpn_quadrangle_testbatch
from rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
    """Inference-time data iterator for Faster R-CNN testing.

    Feeds either ('data', 'im_info') batches (when the network runs its own
    RPN) or ('data', 'rois') batches (when proposals are precomputed).

    NOTE(review): ``next()`` deviates from the standard ``mx.io.DataIter``
    contract — it returns a tuple of (im_info, DataBatch), not a bare
    DataBatch; callers must unpack accordingly.
    """

    def __init__(self, roidb, config, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb (list of per-image records)
        :param config: experiment configuration
        :param batch_size: number of images per test batch
        :param shuffle: randomize image order on every reset
        :param has_rpn: True -> end-to-end (data + im_info); False -> rois precomputed
        """
        super(TestLoader, self).__init__()
        # save parameters as properties
        self.cfg = config
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn
        # infer properties from roidb
        self.size = len(self.roidb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        # Per-image list of (name, shape) pairs, one sublist per image.
        return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]

    @property
    def provide_label(self):
        # No labels at test time.
        return [None for _ in range(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first image; optionally reshuffle image order."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur < self.size

    def next(self):
        """Return (im_info, DataBatch) for the next batch, or raise StopIteration."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
                                                 pad=self.getpad(), index=self.getindex(),
                                                 provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # NOTE: '/' is integer division under Python 2 (this codebase uses
        # xrange elsewhere); under Python 3 this would yield a float.
        return self.cur / self.batch_size

    def getpad(self):
        # Number of padding images in the final, possibly short, batch.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current batch; one per-image data list per image."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
        self.im_info = im_info

    def get_batch_individual(self):
        # Variant that stores a single flat NDArray list instead of the
        # per-image nested lists produced by get_batch.
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class QuadrangleTestLoader(mx.io.DataIter):
    """Inference-time iterator for quadrangle (rotated-box) detection.

    Identical in structure to TestLoader, but batches come from
    get_rpn_quadrangle_testbatch when the network runs its own RPN.
    As with TestLoader, ``next()`` returns (im_info, DataBatch).
    """

    def __init__(self, roidb, config, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb
        :param config: experiment configuration
        :param batch_size: number of images per test batch
        :param shuffle: randomize image order on reset
        :param has_rpn: True -> (data, im_info); False -> (data, rois)
        """
        super(QuadrangleTestLoader, self).__init__()
        # save parameters as properties
        self.cfg = config
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn
        # infer properties from roidb
        self.size = len(self.roidb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        # get first batch to fill in provide_data and provide_label
        # NOTE(review): unlike TestLoader, reset() is deliberately(?) skipped
        # here, so a shuffle=True request has no effect before iteration
        # starts — confirm this is intended.
        # self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]

    @property
    def provide_label(self):
        return [None for _ in range(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first image; optionally reshuffle image order."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur < self.size

    def next(self):
        """Return (im_info, DataBatch) for the next batch, or raise StopIteration."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
                                                 pad=self.getpad(), index=self.getindex(),
                                                 provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        # Padding count for the final, possibly short, batch.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current batch as per-image nested NDArray lists."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_quadrangle_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
        self.im_info = im_info

    def get_batch_individual(self):
        # Flat-list variant of get_batch (single NDArray per data name).
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_quadrangle_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class ROIIter(mx.io.DataIter):
    """Training iterator that feeds precomputed ROIs to Fast R-CNN."""

    def __init__(self, roidb, config, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: ROIIter
        """
        super(ROIIter, self).__init__()
        # save parameters as properties
        self.roidb = roidb
        self.cfg = config
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names (only for training)
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_individual()

    @property
    def provide_data(self):
        # One (name, shape) sublist per device slice.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]

    def reset(self):
        """Rewind; optionally shuffle, grouping landscape/portrait images so a
        batch contains similarly-shaped images."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # NOTE(review): when extra == 0, inds[:-extra] is the EMPTY
                # slice [:-0], so the batch-level permutation below is
                # silently skipped and extra==0 datasets keep the
                # horz-then-vert block order.
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        # Drops the last partial batch (strict <=).
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Build one merged batch across all devices (unused by next(),
        which calls get_batch_individual instead)."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get each device
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rcnn_batch(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)

        all_data = dict()
        for key in data_list[0].keys():
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])

        all_label = dict()
        for key in label_list[0].keys():
            all_label[key] = tensor_vstack([batch[key] for batch in label_list])

        self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
        self.label = [mx.nd.array(all_label[name]) for name in self.label_name]

    def get_batch_individual(self):
        """Build the batch as one data/label list per device slice."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]

    def parfetch(self, iroidb):
        # Fetch one device slice's worth of Fast R-CNN training data.
        data, label = get_rcnn_batch(iroidb, self.cfg)
        return {'data': data, 'label': label}
class AnchorLoader(mx.io.DataIter):
    """Training iterator that assigns RPN anchor targets on the fly."""

    def __init__(self, feat_sym, roidb, cfg, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(AnchorLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.cfg = cfg
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride = feat_stride
        self.anchor_scales = anchor_scales
        self.anchor_ratios = anchor_ratios
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_individual()

    @property
    def provide_data(self):
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]

    def reset(self):
        """Rewind; optionally shuffle with landscape/portrait grouping."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # NOTE(review): when extra == 0 the [:-0] slices are empty,
                # so the batch-level permutation is silently skipped (same
                # issue as ROIIter.reset).
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        # Drops the last partial batch (strict <=).
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        # Run the feature symbol's shape inference, then assign anchors for
        # an image with zero gt boxes just to obtain the label shapes.
        _, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
        label = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info, self.cfg,
                              self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape

    def get_batch(self):
        """Merged-batch variant (next() uses get_batch_individual instead)."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # assign anchor for label
            label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                  self.feat_stride, self.anchor_scales,
                                  self.anchor_ratios, self.allowed_border)
            new_label_list.append(label)
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            # 'label' is padded with -1 (ignore index); regression targets with 0.
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]

    def get_batch_individual(self):
        """Build the batch as one data/label list per device slice."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]

    def parfetch(self, iroidb):
        """Fetch one device slice's images and assign their anchor targets."""
        # get testing data for multigpu
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        return {'data': data, 'label': label}
# TODO test this dataloader for quadrangle
class QuadrangleAnchorLoader(mx.io.DataIter):
    """AnchorLoader variant for quadrangle (8-coordinate) ground-truth boxes.

    NOTE(review): here ``feat_sym`` and ``feat_stride`` are treated as
    parallel lists (one per FPN level) by infer_shape/parfetch, while
    ``get_batch`` still calls ``self.feat_sym.infer_shape`` as if it were a
    single symbol — get_batch looks stale/unused (next() only calls
    get_batch_individual); confirm before relying on it.
    """

    def __init__(self, feat_sym, roidb, cfg, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param feat_sym: to infer shape of assign_output
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(QuadrangleAnchorLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.cfg = cfg
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride = feat_stride
        self.anchor_scales = anchor_scales
        self.anchor_ratios = anchor_ratios
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_individual()

    @property
    def provide_data(self):
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]

    def reset(self):
        """Rewind; optionally shuffle with landscape/portrait grouping."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # NOTE(review): when extra == 0 the [:-0] slices are empty,
                # so the batch-level permutation is silently skipped (same
                # issue as ROIIter.reset).
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        # Drops the last partial batch (strict <=).
        return self.cur + self.batch_size <= self.size

    def next(self):
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        # change the shape of im_info
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        # One feature shape per pyramid level; gt is (0, 9): 8 corner
        # coordinates plus class, empty just to obtain label shapes.
        feat_shape_list = []
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**max_shapes)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        label = assign_quadrangle_anchor(feat_shape_list, np.zeros((0, 9)), im_info, self.cfg,
                                         self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape

    def get_batch(self):
        """Merged-batch variant; see class NOTE — likely stale (treats
        feat_sym as a single symbol)."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch_quadrangle(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # assign quadrangle anchor for label
            label = assign_quadrangle_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                             self.feat_stride, self.anchor_scales,
                                             self.anchor_ratios, self.allowed_border)
            new_label_list.append(label)
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            # 'label' is padded with -1 (ignore index); regression targets with 0.
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]

    def get_batch_individual(self):
        """Build the batch as one data/label list per device slice."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]

    def parfetch(self, iroidb):
        """Fetch one device slice and assign quadrangle anchor targets,
        using one inferred feature shape per pyramid level."""
        # get testing data for multigpu
        data, label = get_rpn_batch_quadrangle(iroidb, self.cfg)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        feat_shape_list = []
        for s in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchor for label
        label = assign_quadrangle_anchor(feat_shape_list, label['gt_boxes'], data['im_info'], self.cfg,
                                         self.feat_stride, self.anchor_scales,
                                         self.anchor_ratios, self.allowed_border)
        return {'data': data, 'label': label}
| 38.822275 | 149 | 0.596441 |
import numpy as np
import mxnet as mx
from mxnet.executor_manager import _split_input_slice
from config.config import config
from utils.image import tensor_vstack
from rpn.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor, get_rpn_batch_quadrangle, assign_quadrangle_anchor, get_rpn_quadrangle_testbatch
from rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
    """Inference-time data iterator for Faster R-CNN testing.

    Feeds ('data', 'im_info') when the network runs its own RPN, otherwise
    ('data', 'rois'). NOTE(review): ``next()`` returns a tuple
    (im_info, DataBatch), not a bare DataBatch.
    """

    def __init__(self, roidb, config, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb
        :param config: experiment configuration
        :param batch_size: images per test batch
        :param shuffle: randomize image order on reset
        :param has_rpn: True -> (data, im_info); False -> (data, rois)
        """
        super(TestLoader, self).__init__()
        self.cfg = config
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn
        self.size = len(self.roidb)
        self.index = np.arange(self.size)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None
        # Cursor and per-batch buffers; filled by the initial get_batch()
        # so that provide_data is valid before iteration starts.
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        # One (name, shape) sublist per image in the current batch.
        return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]

    @property
    def provide_label(self):
        # No labels at test time.
        return [None for _ in range(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first image; optionally reshuffle image order."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur < self.size

    def next(self):
        """Return (im_info, DataBatch) for the next batch, or raise StopIteration."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
                                                 pad=self.getpad(), index=self.getindex(),
                                                 provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        # Padding count for the final, possibly short, batch.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current batch as per-image nested NDArray lists."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
        self.im_info = im_info

    def get_batch_individual(self):
        # Flat-list variant of get_batch (single NDArray per data name).
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class QuadrangleTestLoader(mx.io.DataIter):
    """Inference-time iterator for quadrangle (rotated-box) detection.

    Like TestLoader, but RPN batches come from get_rpn_quadrangle_testbatch.
    ``next()`` returns (im_info, DataBatch).
    """

    def __init__(self, roidb, config, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb
        :param config: experiment configuration
        :param batch_size: images per test batch
        :param shuffle: randomize image order on reset
        :param has_rpn: True -> (data, im_info); False -> (data, rois)
        """
        super(QuadrangleTestLoader, self).__init__()
        self.cfg = config
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn
        self.size = len(self.roidb)
        self.index = np.arange(self.size)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None
        # NOTE(review): unlike TestLoader, reset() is not called here, so a
        # shuffle=True request has no effect before iteration starts —
        # confirm this is intended.
        self.get_batch()

    @property
    def provide_data(self):
        return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]

    @property
    def provide_label(self):
        return [None for _ in range(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first image; optionally reshuffle image order."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur < self.size

    def next(self):
        """Return (im_info, DataBatch) for the next batch, or raise StopIteration."""
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
                                                 pad=self.getpad(), index=self.getindex(),
                                                 provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer division under Python 2; float under Python 3.
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current batch as per-image nested NDArray lists."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_quadrangle_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
        self.im_info = im_info

    def get_batch_individual(self):
        # Flat-list variant of get_batch (single NDArray per data name).
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_quadrangle_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class ROIIter(mx.io.DataIter):
    """Training iterator for Fast R-CNN style heads.

    Produces DataBatch objects with data ('data', 'rois') and labels
    ('label', 'bbox_target', 'bbox_weight').  Each batch is split across
    devices according to ``work_load_list`` and loaded per device slice.
    """
    def __init__(self, roidb, config, batch_size=2, shuffle=False, ctx=None, work_load_list=None, aspect_grouping=False):
        """
        :param roidb: list of per-image roidb dicts; must carry 'width' and
            'height' when ``aspect_grouping`` is enabled
        :param config: experiment configuration
        :param batch_size: total number of images per batch (all devices)
        :param shuffle: reshuffle image order on every reset()
        :param ctx: list of devices to slice the batch across (default: [mx.cpu()])
        :param work_load_list: relative work load per device (default: equal)
        :param aspect_grouping: group horizontal/vertical images into the
            same batch to reduce padding waste
        """
        super(ROIIter, self).__init__()
        self.roidb = roidb
        self.cfg = config
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.aspect_grouping = aspect_grouping
        # infer properties from the roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # decide data and label names
        self.data_name = ['data', 'rois']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variables for the iteration
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # shuffle (if requested) and load the first batch so provide_data /
        # provide_label are valid immediately after construction
        self.reset()
        self.get_batch_individual()
    @property
    def provide_data(self):
        # One [(name, shape), ...] list per device slice.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
    def reset(self):
        """Rewind to the start, optionally reshuffling the image order."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                # Group images of similar aspect ratio (horizontal vs
                # vertical) so batches waste less padding.
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # Fix: the original always sliced inds[:-extra]; when the
                # dataset size is an exact multiple of the batch size,
                # extra == 0 makes inds[:-0] empty and the batch-level
                # permutation was silently skipped.  Handle extra == 0.
                if extra > 0:
                    inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                else:
                    inds_ = np.reshape(inds, (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                if extra > 0:
                    inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                else:
                    inds = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)
    def iter_next(self):
        # Partial batches at the end of an epoch are dropped during training.
        return self.cur + self.batch_size <= self.size
    def next(self):
        """Load and return the next DataBatch or raise StopIteration."""
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # Floor division keeps the result an int on Python 3 as well
        # (identical to '/' for the int operands used under Python 2).
        return self.cur // self.batch_size
    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def get_batch(self):
        """Load the current batch, vstacking all device slices into whole-batch arrays."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # load each device slice, then stack into single tensors
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rcnn_batch(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)
        all_data = dict()
        for key in data_list[0].keys():
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in label_list[0].keys():
            all_label[key] = tensor_vstack([batch[key] for batch in label_list])
        self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
        self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
    def get_batch_individual(self):
        """Load the current batch keeping one (data, label) group per device slice."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]
    def parfetch(self, iroidb):
        """Fetch one device slice worth of training data and labels."""
        data, label = get_rcnn_batch(iroidb, self.cfg)
        return {'data': data, 'label': label}
class AnchorLoader(mx.io.DataIter):
    """Training iterator that assigns RPN anchor targets on the fly.

    For each image batch it infers the feature-map shape from ``feat_sym``
    and computes anchor classification labels and bbox regression targets
    via ``assign_anchor``.  Batches are split across devices according to
    ``work_load_list``.
    """
    def __init__(self, feat_sym, roidb, cfg, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
                 aspect_grouping=False):
        """
        :param feat_sym: symbol whose output shape determines the anchor grid
        :param roidb: list of per-image roidb dicts (with gt_boxes)
        :param cfg: experiment configuration
        :param batch_size: total images per batch (across devices)
        :param shuffle: reshuffle image order on reset()
        :param ctx: devices to slice the batch across (default: [mx.cpu()])
        :param work_load_list: relative work load per device (default: equal)
        :param feat_stride: downsampling factor of the anchor feature map
        :param anchor_scales: anchor box scales
        :param anchor_ratios: anchor aspect ratios
        :param allowed_border: how far anchors may cross the image border
        :param aspect_grouping: batch horizontal/vertical images together
        """
        super(AnchorLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.cfg = cfg
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride = feat_stride
        self.anchor_scales = anchor_scales
        self.anchor_ratios = anchor_ratios
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # FIXME(review): this reads the module-level `config`, not the `cfg`
        # argument; if a caller passes a different cfg the two may disagree.
        # Confirm whether cfg.TRAIN.END2END was intended.
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variables for the iteration
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # shuffle (if requested) and load the first batch so provide_data /
        # provide_label are valid immediately after construction
        self.reset()
        self.get_batch_individual()
    @property
    def provide_data(self):
        # One [(name, shape), ...] list per device slice.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
    def reset(self):
        """Rewind to the start, optionally reshuffling the image order."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                # Group horizontal vs vertical images so each batch contains
                # images of similar aspect ratio (reduces padding waste).
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # FIXME(review): when size % batch_size == 0, extra is 0 and
                # inds[:-0] is empty, so the batch-level row permutation below
                # is silently skipped; the extra == 0 case needs a branch.
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)
    def iter_next(self):
        # Partial batches at the end of an epoch are dropped during training.
        return self.cur + self.batch_size <= self.size
    def next(self):
        """Load and return the next DataBatch or raise StopIteration."""
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # NOTE(review): true division would return a float on Python 3;
        # this code base assumes Python 2 integer division.
        return self.cur / self.batch_size
    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """Infer maximum label shapes from the maximum data shapes.

        Runs anchor assignment on an empty gt_boxes array at the maximum
        image size to obtain per-label shapes, then prepends the batch size.
        Returns (max_data_shape, label_shape).
        """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        _, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
        label = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info, self.cfg,
                              self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape
    def get_batch(self):
        """Load the current batch, assigning anchors and stacking all slices."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape from the padded image shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to the data dict for end-to-end training
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # assign anchors to produce labels / regression targets
            label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                  self.feat_stride, self.anchor_scales,
                                  self.anchor_ratios, self.allowed_border)
            new_label_list.append(label)
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            # ignore-label is -1 for classification, 0 for regression padding
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
    def get_batch_individual(self):
        """Load the current batch keeping one (data, label) group per device slice."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]
    def parfetch(self, iroidb):
        """Fetch one device slice: load images, then assign anchor targets."""
        # get testing data for multigpu
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes to the data dict for end-to-end training
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchors to produce labels / regression targets
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        return {'data': data, 'label': label}
class QuadrangleAnchorLoader(mx.io.DataIter):
    """Training iterator assigning quadrangle (8-coordinate) anchor targets.

    Like AnchorLoader but for rotated/quadrangle boxes: gt_boxes have 8
    coordinates plus class (9 columns) and ``feat_sym`` / ``feat_stride``
    are per-level lists (FPN-style), see ``infer_shape`` and ``parfetch``.
    """
    def __init__(self, feat_sym, roidb, cfg, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2), allowed_border=0,
                 aspect_grouping=False):
        """
        :param feat_sym: list of symbols, one per feature level
        :param roidb: list of per-image roidb dicts (with 9-column gt_boxes)
        :param cfg: experiment configuration
        :param batch_size: total images per batch (across devices)
        :param shuffle: reshuffle image order on reset()
        :param ctx: devices to slice the batch across (default: [mx.cpu()])
        :param work_load_list: relative work load per device (default: equal)
        :param feat_stride: list of strides, one per feature level
        :param anchor_scales: anchor box scales
        :param anchor_ratios: anchor aspect ratios
        :param allowed_border: how far anchors may cross the image border
        :param aspect_grouping: batch horizontal/vertical images together
        """
        super(QuadrangleAnchorLoader, self).__init__()
        # save parameters as properties
        self.feat_sym = feat_sym
        self.roidb = roidb
        self.cfg = cfg
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride = feat_stride
        self.anchor_scales = anchor_scales
        self.anchor_ratios = anchor_ratios
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping
        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)
        # FIXME(review): reads the module-level `config`, not the `cfg`
        # argument; confirm whether cfg.TRAIN.END2END was intended.
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        self.label_name = ['label', 'bbox_target', 'bbox_weight']
        # status variables for the iteration
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None
        # shuffle (if requested) and load the first batch so provide_data /
        # provide_label are valid immediately after construction
        self.reset()
        self.get_batch_individual()
    @property
    def provide_data(self):
        # One [(name, shape), ...] list per device slice.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]
    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]
    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]
    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]
    def reset(self):
        """Rewind to the start, optionally reshuffling the image order."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                # Group horizontal vs vertical images so each batch contains
                # images of similar aspect ratio (reduces padding waste).
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                extra = inds.shape[0] % self.batch_size
                # FIXME(review): when size % batch_size == 0, extra is 0 and
                # inds[:-0] is empty, so the batch-level row permutation below
                # is silently skipped; the extra == 0 case needs a branch.
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)
    def iter_next(self):
        # Partial batches at the end of an epoch are dropped during training.
        return self.cur + self.batch_size <= self.size
    def next(self):
        """Load and return the next DataBatch or raise StopIteration."""
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration
    def getindex(self):
        # NOTE(review): true division would return a float on Python 3;
        # this code base assumes Python 2 integer division.
        return self.cur / self.batch_size
    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0
    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """Infer maximum label shapes from the maximum data shapes.

        Runs quadrangle anchor assignment on an empty 9-column gt_boxes
        array at the maximum image size, collecting one feature shape per
        level of ``feat_sym``.  Returns (max_data_shape, label_shape).
        """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        feat_shape_list = []
        # NOTE: the inner comprehension reuses the name `i`; harmless here
        # because the for statement rebinds it each iteration, but fragile.
        for i in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[i].infer_shape(**max_shapes)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        label = assign_quadrangle_anchor(feat_shape_list, np.zeros((0, 9)), im_info, self.cfg,
                                         self.feat_stride, self.anchor_scales, self.anchor_ratios, self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape
    def get_batch(self):
        """Load the current batch, assigning anchors and stacking all slices.

        FIXME(review): this method looks stale relative to parfetch /
        infer_shape: it calls ``self.feat_sym.infer_shape`` although
        feat_sym is indexed as a per-level list elsewhere, and passes a
        single feat_shape (not a list) to assign_quadrangle_anchor.  Only
        ``get_batch_individual`` is used by ``__init__``/``next``; verify
        before calling this directly.
        """
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch_quadrangle(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)
        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]
        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape from the padded image shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            # add gt_boxes to the data dict for end-to-end training
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # assign anchors to produce labels / regression targets
            label = assign_quadrangle_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                                             self.feat_stride, self.anchor_scales,
                                             self.anchor_ratios, self.allowed_border)
            new_label_list.append(label)
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])
        all_label = dict()
        for key in self.label_name:
            # ignore-label is -1 for classification, 0 for regression padding
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
    def get_batch_individual(self):
        """Load the current batch keeping one (data, label) group per device slice."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi-device slices
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]
    def parfetch(self, iroidb):
        """Fetch one device slice: load images, then assign per-level anchor targets."""
        # get testing data for multigpu
        data, label = get_rpn_batch_quadrangle(iroidb, self.cfg)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        # collect one feature-map shape per pyramid level
        feat_shape_list = []
        for s in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        # add gt_boxes to the data dict for end-to-end training
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        # assign anchors to produce labels / regression targets
        label = assign_quadrangle_anchor(feat_shape_list, label['gt_boxes'], data['im_info'], self.cfg,
                                         self.feat_stride, self.anchor_scales,
                                         self.anchor_ratios, self.allowed_border)
        return {'data': data, 'label': label}
| true | true |
f7fad649e27092449b3feae00984eb3561b8597a | 283 | py | Python | {{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/__init__.py | frank2411/cookiecutter_flasktemplate | fc80827f0f7e7b87679790c8c1d9094518576b5b | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/__init__.py | frank2411/cookiecutter_flasktemplate | fc80827f0f7e7b87679790c8c1d9094518576b5b | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.project_name}}_api/api/__init__.py | frank2411/cookiecutter_flasktemplate | fc80827f0f7e7b87679790c8c1d9094518576b5b | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint
from flask_restful import Api
from .resources import SwaggerView
api_blueprint = Blueprint('api', __name__, url_prefix='/api/{{cookiecutter.api_version}}')
api = Api(api_blueprint)
# Swagger API
api.add_resource(SwaggerView, '/docs', methods=["GET"])
| 23.583333 | 90 | 0.770318 | from flask import Blueprint
from flask_restful import Api
from .resources import SwaggerView
api_blueprint = Blueprint('api', __name__, url_prefix='/api/{{cookiecutter.api_version}}')
api = Api(api_blueprint)
api.add_resource(SwaggerView, '/docs', methods=["GET"])
| true | true |
f7fad6556d64ec85bca1397236a73ab277feeba2 | 13,102 | py | Python | sdk/lusid/models/instrument.py | fossabot/lusid-sdk-python | 154a0232a00026d79379aec7196555f24d742ade | [
"MIT"
] | null | null | null | sdk/lusid/models/instrument.py | fossabot/lusid-sdk-python | 154a0232a00026d79379aec7196555f24d742ade | [
"MIT"
] | null | null | null | sdk/lusid/models/instrument.py | fossabot/lusid-sdk-python | 154a0232a00026d79379aec7196555f24d742ade | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2321
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Instrument(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Python attribute name -> OpenAPI type string (used by the serializer).
    openapi_types = {
        'href': 'str',
        'lusid_instrument_id': 'str',
        'version': 'Version',
        'name': 'str',
        'identifiers': 'dict(str, str)',
        'properties': 'list[ModelProperty]',
        'lookthrough_portfolio': 'ResourceId',
        'instrument_definition': 'LusidInstrument',
        'state': 'str',
        'links': 'list[Link]'
    }
    # Python attribute name -> JSON field name in the wire format.
    attribute_map = {
        'href': 'href',
        'lusid_instrument_id': 'lusidInstrumentId',
        'version': 'version',
        'name': 'name',
        'identifiers': 'identifiers',
        'properties': 'properties',
        'lookthrough_portfolio': 'lookthroughPortfolio',
        'instrument_definition': 'instrumentDefinition',
        'state': 'state',
        'links': 'links'
    }
    # Required attributes raise ValueError in their setters when set to None.
    required_map = {
        'href': 'optional',
        'lusid_instrument_id': 'required',
        'version': 'required',
        'name': 'required',
        'identifiers': 'required',
        'properties': 'optional',
        'lookthrough_portfolio': 'optional',
        'instrument_definition': 'optional',
        'state': 'required',
        'links': 'optional'
    }
    def __init__(self, href=None, lusid_instrument_id=None, version=None, name=None, identifiers=None, properties=None, lookthrough_portfolio=None, instrument_definition=None, state=None, links=None):  # noqa: E501
        """
        Instrument - a model defined in OpenAPI

        :param href:  The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.
        :type href: str
        :param lusid_instrument_id:  The unique LUSID Instrument Identifier (LUID) of the instrument. (required)
        :type lusid_instrument_id: str
        :param version:  (required)
        :type version: lusid.Version
        :param name:  The name of the instrument. (required)
        :type name: str
        :param identifiers:  The set of identifiers that can be used to identify the instrument. (required)
        :type identifiers: dict(str, str)
        :param properties:  The requested instrument properties. These will be from the 'Instrument' domain.
        :type properties: list[lusid.ModelProperty]
        :param lookthrough_portfolio: 
        :type lookthrough_portfolio: lusid.ResourceId
        :param instrument_definition: 
        :type instrument_definition: lusid.LusidInstrument
        :param state:  The state of of the instrument at the asAt datetime of this version of the instrument definition. The available values are: Active, Inactive (required)
        :type state: str
        :param links: 
        :type links: list[lusid.Link]

        """  # noqa: E501
        self._href = None
        self._lusid_instrument_id = None
        self._version = None
        self._name = None
        self._identifiers = None
        self._properties = None
        self._lookthrough_portfolio = None
        self._instrument_definition = None
        self._state = None
        self._links = None
        self.discriminator = None
        # Assignments go through the property setters below, which validate
        # that required fields are not None and that 'state' is a known value.
        self.href = href
        self.lusid_instrument_id = lusid_instrument_id
        self.version = version
        self.name = name
        self.identifiers = identifiers
        self.properties = properties
        # Optional model-typed fields are only assigned when provided so the
        # private attributes stay None (and are omitted from serialization).
        if lookthrough_portfolio is not None:
            self.lookthrough_portfolio = lookthrough_portfolio
        if instrument_definition is not None:
            self.instrument_definition = instrument_definition
        self.state = state
        self.links = links
    @property
    def href(self):
        """Gets the href of this Instrument.  # noqa: E501

        The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.  # noqa: E501

        :return: The href of this Instrument.  # noqa: E501
        :rtype: str
        """
        return self._href
    @href.setter
    def href(self, href):
        """Sets the href of this Instrument.

        The specific Uniform Resource Identifier (URI) for this resource at the requested effective and asAt datetime.  # noqa: E501

        :param href: The href of this Instrument.  # noqa: E501
        :type: str
        """
        self._href = href
    @property
    def lusid_instrument_id(self):
        """Gets the lusid_instrument_id of this Instrument.  # noqa: E501

        The unique LUSID Instrument Identifier (LUID) of the instrument.  # noqa: E501

        :return: The lusid_instrument_id of this Instrument.  # noqa: E501
        :rtype: str
        """
        return self._lusid_instrument_id
    @lusid_instrument_id.setter
    def lusid_instrument_id(self, lusid_instrument_id):
        """Sets the lusid_instrument_id of this Instrument.

        The unique LUSID Instrument Identifier (LUID) of the instrument.  # noqa: E501

        :param lusid_instrument_id: The lusid_instrument_id of this Instrument.  # noqa: E501
        :type: str
        """
        if lusid_instrument_id is None:
            raise ValueError("Invalid value for `lusid_instrument_id`, must not be `None`")  # noqa: E501
        self._lusid_instrument_id = lusid_instrument_id
    @property
    def version(self):
        """Gets the version of this Instrument.  # noqa: E501


        :return: The version of this Instrument.  # noqa: E501
        :rtype: Version
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this Instrument.


        :param version: The version of this Instrument.  # noqa: E501
        :type: Version
        """
        if version is None:
            raise ValueError("Invalid value for `version`, must not be `None`")  # noqa: E501
        self._version = version
    @property
    def name(self):
        """Gets the name of this Instrument.  # noqa: E501

        The name of the instrument.  # noqa: E501

        :return: The name of this Instrument.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Instrument.

        The name of the instrument.  # noqa: E501

        :param name: The name of this Instrument.  # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def identifiers(self):
        """Gets the identifiers of this Instrument.  # noqa: E501

        The set of identifiers that can be used to identify the instrument.  # noqa: E501

        :return: The identifiers of this Instrument.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._identifiers
    @identifiers.setter
    def identifiers(self, identifiers):
        """Sets the identifiers of this Instrument.

        The set of identifiers that can be used to identify the instrument.  # noqa: E501

        :param identifiers: The identifiers of this Instrument.  # noqa: E501
        :type: dict(str, str)
        """
        if identifiers is None:
            raise ValueError("Invalid value for `identifiers`, must not be `None`")  # noqa: E501
        self._identifiers = identifiers
    @property
    def properties(self):
        """Gets the properties of this Instrument.  # noqa: E501

        The requested instrument properties. These will be from the 'Instrument' domain.  # noqa: E501

        :return: The properties of this Instrument.  # noqa: E501
        :rtype: list[ModelProperty]
        """
        return self._properties
    @properties.setter
    def properties(self, properties):
        """Sets the properties of this Instrument.

        The requested instrument properties. These will be from the 'Instrument' domain.  # noqa: E501

        :param properties: The properties of this Instrument.  # noqa: E501
        :type: list[ModelProperty]
        """
        self._properties = properties
    @property
    def lookthrough_portfolio(self):
        """Gets the lookthrough_portfolio of this Instrument.  # noqa: E501


        :return: The lookthrough_portfolio of this Instrument.  # noqa: E501
        :rtype: ResourceId
        """
        return self._lookthrough_portfolio
    @lookthrough_portfolio.setter
    def lookthrough_portfolio(self, lookthrough_portfolio):
        """Sets the lookthrough_portfolio of this Instrument.


        :param lookthrough_portfolio: The lookthrough_portfolio of this Instrument.  # noqa: E501
        :type: ResourceId
        """
        self._lookthrough_portfolio = lookthrough_portfolio
    @property
    def instrument_definition(self):
        """Gets the instrument_definition of this Instrument.  # noqa: E501


        :return: The instrument_definition of this Instrument.  # noqa: E501
        :rtype: LusidInstrument
        """
        return self._instrument_definition
    @instrument_definition.setter
    def instrument_definition(self, instrument_definition):
        """Sets the instrument_definition of this Instrument.


        :param instrument_definition: The instrument_definition of this Instrument.  # noqa: E501
        :type: LusidInstrument
        """
        self._instrument_definition = instrument_definition
    @property
    def state(self):
        """Gets the state of this Instrument.  # noqa: E501

        The state of of the instrument at the asAt datetime of this version of the instrument definition. The available values are: Active, Inactive  # noqa: E501

        :return: The state of this Instrument.  # noqa: E501
        :rtype: str
        """
        return self._state
    @state.setter
    def state(self, state):
        """Sets the state of this Instrument.

        The state of of the instrument at the asAt datetime of this version of the instrument definition. The available values are: Active, Inactive  # noqa: E501

        :param state: The state of this Instrument.  # noqa: E501
        :type: str
        """
        if state is None:
            raise ValueError("Invalid value for `state`, must not be `None`")  # noqa: E501
        # 'state' is an enum on the wire; reject anything outside the set.
        allowed_values = ["Active", "Inactive"]  # noqa: E501
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"  # noqa: E501
                .format(state, allowed_values)
            )
        self._state = state
    @property
    def links(self):
        """Gets the links of this Instrument.  # noqa: E501


        :return: The links of this Instrument.  # noqa: E501
        :rtype: list[Link]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this Instrument.


        :param links: The links of this Instrument.  # noqa: E501
        :type: list[Link]
        """
        self._links = links
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into lists of nested models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recurse into dict values that are nested models.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Instrument):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.270936 | 214 | 0.612884 |
import pprint
import re
import six
class Instrument(object):
    """OpenAPI-generated model of an instrument.

    Attributes are exposed through validating properties; the three class
    dicts below drive generic (de)serialisation.
    """
    # Maps attribute name -> declared OpenAPI type (as a string).
    openapi_types = {
        'href': 'str',
        'lusid_instrument_id': 'str',
        'version': 'Version',
        'name': 'str',
        'identifiers': 'dict(str, str)',
        'properties': 'list[ModelProperty]',
        'lookthrough_portfolio': 'ResourceId',
        'instrument_definition': 'LusidInstrument',
        'state': 'str',
        'links': 'list[Link]'
    }
    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {
        'href': 'href',
        'lusid_instrument_id': 'lusidInstrumentId',
        'version': 'version',
        'name': 'name',
        'identifiers': 'identifiers',
        'properties': 'properties',
        'lookthrough_portfolio': 'lookthroughPortfolio',
        'instrument_definition': 'instrumentDefinition',
        'state': 'state',
        'links': 'links'
    }
    # Marks which attributes must be provided ('required') vs 'optional'.
    required_map = {
        'href': 'optional',
        'lusid_instrument_id': 'required',
        'version': 'required',
        'name': 'required',
        'identifiers': 'required',
        'properties': 'optional',
        'lookthrough_portfolio': 'optional',
        'instrument_definition': 'optional',
        'state': 'required',
        'links': 'optional'
    }
    def __init__(self, href=None, lusid_instrument_id=None, version=None, name=None, identifiers=None, properties=None, lookthrough_portfolio=None, instrument_definition=None, state=None, links=None):
        """Initialise the model; required fields are validated by the setters.

        Note: assigning a required field through its setter raises ValueError
        when the value is None, so the defaults here only matter for the
        optional fields.
        """
        self._href = None
        self._lusid_instrument_id = None
        self._version = None
        self._name = None
        self._identifiers = None
        self._properties = None
        self._lookthrough_portfolio = None
        self._instrument_definition = None
        self._state = None
        self._links = None
        self.discriminator = None
        self.href = href
        self.lusid_instrument_id = lusid_instrument_id
        self.version = version
        self.name = name
        self.identifiers = identifiers
        self.properties = properties
        # Optional nested models are only assigned when actually supplied.
        if lookthrough_portfolio is not None:
            self.lookthrough_portfolio = lookthrough_portfolio
        if instrument_definition is not None:
            self.instrument_definition = instrument_definition
        self.state = state
        self.links = links
    # ---- generated accessors -------------------------------------------
    @property
    def href(self):
        return self._href
    @href.setter
    def href(self, href):
        self._href = href
    @property
    def lusid_instrument_id(self):
        return self._lusid_instrument_id
    @lusid_instrument_id.setter
    def lusid_instrument_id(self, lusid_instrument_id):
        # Required field: reject None.
        if lusid_instrument_id is None:
            raise ValueError("Invalid value for `lusid_instrument_id`, must not be `None`")
        self._lusid_instrument_id = lusid_instrument_id
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, version):
        # Required field: reject None.
        if version is None:
            raise ValueError("Invalid value for `version`, must not be `None`")
        self._version = version
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        # Required field: reject None.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name
    @property
    def identifiers(self):
        return self._identifiers
    @identifiers.setter
    def identifiers(self, identifiers):
        # Required field: reject None.
        if identifiers is None:
            raise ValueError("Invalid value for `identifiers`, must not be `None`")
        self._identifiers = identifiers
    @property
    def properties(self):
        return self._properties
    @properties.setter
    def properties(self, properties):
        self._properties = properties
    @property
    def lookthrough_portfolio(self):
        return self._lookthrough_portfolio
    @lookthrough_portfolio.setter
    def lookthrough_portfolio(self, lookthrough_portfolio):
        self._lookthrough_portfolio = lookthrough_portfolio
    @property
    def instrument_definition(self):
        return self._instrument_definition
    @instrument_definition.setter
    def instrument_definition(self, instrument_definition):
        self._instrument_definition = instrument_definition
    @property
    def state(self):
        return self._state
    @state.setter
    def state(self, state):
        # Required enum field: must be one of the allowed values below.
        if state is None:
            raise ValueError("Invalid value for `state`, must not be `None`")
        allowed_values = ["Active", "Inactive"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )
        self._state = state
    @property
    def links(self):
        return self._links
    @links.setter
    def links(self, links):
        self._links = links
    def to_dict(self):
        """Return the model's attributes as a plain dict (recursively)."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialise model elements inside lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialise model values inside dicts, keeping the keys.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return a pretty-printed string of the model's dict form."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint` — delegates to to_str()."""
        return self.to_str()
    def __eq__(self, other):
        """Return True when both objects are Instruments with equal state."""
        if not isinstance(other, Instrument):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
| true | true |
f7fad6aafda698b3400a9a1a2a99c7a01ffa8e7a | 5,657 | py | Python | cf_xarray/helpers.py | dcherian/cf-xarray | c881164fd308c98b5b22426c2164539b36c307b3 | [
"Apache-2.0"
] | null | null | null | cf_xarray/helpers.py | dcherian/cf-xarray | c881164fd308c98b5b22426c2164539b36c307b3 | [
"Apache-2.0"
] | 2 | 2020-10-07T04:39:04.000Z | 2020-10-18T18:13:33.000Z | cf_xarray/helpers.py | jukent/cf-xarray | a5aa1d601f9c37ef47b4cd6026a65b57b727c043 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, Sequence
import numpy as np
import xarray as xr
from xarray import DataArray
def bounds_to_vertices(
    bounds: DataArray,
    bounds_dim: str,
    core_dims=None,
    order: Optional[str] = "counterclockwise",
) -> DataArray:
    """
    Convert a CF "bounds" variable into the corresponding vertex array.

    Two layouts are supported:
    - 1D coordinates: bounds of shape (N, 2) become vertices of shape (N+1,);
    - 2D coordinates: bounds of shape (N, M, 4) become vertices of
      shape (N+1, M+1).

    Parameters
    ----------
    bounds : DataArray
        The bounds to convert.
    bounds_dim : str
        Name of the bounds dimension of `bounds` (the one of length 2 or 4).
    order : {'counterclockwise', 'clockwise', None}
        Only meaningful for the 2D case. Order in which the four bounds are
        stored, assuming ax0-ax1-upward forms a right-handed system. With
        None, the counterclockwise interpretation is tried first and
        verified; if the verification fails the clockwise result is used.
    core_dims : list, optional
        Core dimensions for apply_ufunc, excluding `bounds_dim`. The shape
        (*core_dims, bounds_dim) must be (N, 2) or (N, M, 4).

    Returns
    -------
    DataArray
        Vertices of shape (N+1,) or (N+1, M+1); each new vertex dimension is
        named after the original dimension plus a "_vertices" suffix.

    Notes
    -----
    Axis "order" cannot be inferred reliably from dimension names or
    attributes, hence the explicit `order` argument. See the CF conventions
    on cell boundaries:
    http://cfconventions.org/Data/cf-conventions/cf-conventions-1.8/cf-conventions.html#cell-boundaries.
    """
    if core_dims is None:
        # Every dimension except the bounds one is a core dimension.
        core_dims = [d for d in bounds.dims if d != bounds_dim]
    # Each core dimension gains one element in the vertex representation.
    output_sizes = {f"{d}_vertices": bounds.sizes[d] + 1 for d in core_dims}
    output_core_dims = list(output_sizes)
    n_core_dims = len(core_dims)
    nbounds = bounds[bounds_dim].size
    # Only the (N, 2) and (N, M, 4) layouts are understood.
    if (n_core_dims, nbounds) not in ((2, 4), (1, 2)):
        raise ValueError(
            f"Bounds format not understood. Got {bounds.dims} with shape {bounds.shape}."
        )
    return xr.apply_ufunc(
        _bounds_helper,
        bounds,
        input_core_dims=[core_dims + [bounds_dim]],
        dask="parallelized",
        kwargs={"n_core_dims": n_core_dims, "nbounds": nbounds, "order": order},
        output_core_dims=[output_core_dims],
        dask_gufunc_kwargs=dict(output_sizes=output_sizes),
        output_dtypes=[bounds.dtype],
    )
def _bounds_helper(values, n_core_dims, nbounds, order):
    """Numpy-level kernel turning a bounds array into a vertex array.

    ``values`` has shape (..., N, 2) for the 1D case or (..., N, M, 4) for
    the 2D case; the result has shape (..., N+1) or (..., N+1, M+1).
    """
    if n_core_dims == 2 and nbounds == 4:
        # 2D lat/lon: assemble the vertex grid from four corner blocks.
        if order in ["counterclockwise", None]:
            # CCW storage, drawing axis 1 upward and axis 2 rightward:
            # slot 0 = bottom-left, 1 = bottom-right, 2 = top-right, 3 = top-left.
            low_row = (values[..., :, :, 0], values[..., :, -1:, 1])
            high_row = (values[..., -1:, :, 3], values[..., -1:, -1:, 2])
            vertex_vals = np.block([list(low_row), list(high_row)])
        if order is None:
            # Verify the CCW guess by reconstructing the bounds; fall back to
            # the clockwise interpretation when the round-trip disagrees.
            roundtrip = np.moveaxis(vertices_to_bounds(vertex_vals).values, 0, -1)
            order = "counterclockwise" if np.all(roundtrip == values) else "clockwise"
        if order == "clockwise":
            # CW storage: axis 1 is rightward, axis 2 is upward, so the slot
            # meanings change: 1 = top-left and 3 = bottom-right.
            low_row = (values[..., :, :, 0], values[..., :, -1:, 3])
            high_row = (values[..., -1:, :, 1], values[..., -1:, -1:, 2])
            vertex_vals = np.block([list(low_row), list(high_row)])
    elif n_core_dims == 1 and nbounds == 2:
        # 1D midpoints: all left edges plus the final right edge.
        left_edges = values[..., :, 0]
        last_right = values[..., -1:, 1]
        vertex_vals = np.concatenate((left_edges, last_right), axis=-1)
    return vertex_vals
def vertices_to_bounds(
    vertices: DataArray, out_dims: Sequence[str] = ("bounds", "x", "y")
) -> DataArray:
    """
    Convert a vertex array into CF-compliant bounds.

    Two layouts are supported:
    - 1D coordinates: vertices of shape (N+1,) become bounds of shape (N, 2);
    - 2D coordinates: vertices of shape (N+1, M+1) become bounds of
      shape (N, M, 4).

    Parameters
    ----------
    vertices : DataArray
        The vertices to convert, of shape (N+1,) or (N+1, M+1).
    out_dims : Sequence[str],
        Dimension names of the output; the first is the 'bounds' dimension,
        the rest name the coordinate dimensions.

    Returns
    -------
    DataArray
    """
    ndim = vertices.ndim
    if ndim == 1:
        # Each cell is bounded by consecutive vertices.
        stacked = np.stack((vertices[:-1], vertices[1:]), axis=0)
    elif ndim == 2:
        # Four corners per cell, stored counterclockwise.
        corners = (
            vertices[:-1, :-1],
            vertices[:-1, 1:],
            vertices[1:, 1:],
            vertices[1:, :-1],
        )
        stacked = np.stack(corners, axis=0)
    else:
        raise ValueError(
            f"vertices format not understood. Got {vertices.dims} with shape {vertices.shape}."
        )
    return xr.DataArray(stacked, dims=out_dims[: ndim + 1])
| 38.482993 | 150 | 0.612162 | from typing import Optional, Sequence
import numpy as np
import xarray as xr
from xarray import DataArray
def bounds_to_vertices(
    bounds: DataArray,
    bounds_dim: str,
    core_dims=None,
    order: Optional[str] = "counterclockwise",
) -> DataArray:
    """Convert a CF bounds variable (shape (N, 2) or (N, M, 4)) to vertices
    (shape (N+1,) or (N+1, M+1)).

    `bounds_dim` names the length-2/4 bounds dimension; `order` describes how
    the four 2D bounds are stored ('counterclockwise', 'clockwise', or None to
    auto-detect); `core_dims` optionally lists the non-bounds core dimensions.
    """
    if core_dims is None:
        # Every dimension except the bounds one is a core dimension.
        core_dims = [dim for dim in bounds.dims if dim != bounds_dim]
    # Each core dimension gains one element in the vertex representation.
    output_sizes = {f"{dim}_vertices": bounds.sizes[dim] + 1 for dim in core_dims}
    output_core_dims = list(output_sizes.keys())
    n_core_dims = len(core_dims)
    nbounds = bounds[bounds_dim].size
    # Only the (N, 2) and (N, M, 4) layouts are understood.
    if not (n_core_dims == 2 and nbounds == 4) and not (
        n_core_dims == 1 and nbounds == 2
    ):
        raise ValueError(
            f"Bounds format not understood. Got {bounds.dims} with shape {bounds.shape}."
        )
    return xr.apply_ufunc(
        _bounds_helper,
        bounds,
        input_core_dims=[core_dims + [bounds_dim]],
        dask="parallelized",
        kwargs={"n_core_dims": n_core_dims, "nbounds": nbounds, "order": order},
        output_core_dims=[output_core_dims],
        dask_gufunc_kwargs=dict(output_sizes=output_sizes),
        output_dtypes=[bounds.dtype],
    )
def _bounds_helper(values, n_core_dims, nbounds, order):
    """Numpy-level kernel: bounds (..., N, 2) / (..., N, M, 4) -> vertices."""
    if n_core_dims == 2 and nbounds == 4:
        # 2D case: stitch the vertex grid together from four corner blocks.
        if order in ["counterclockwise", None]:
            # CCW storage (axis 1 upward, axis 2 rightward):
            # slot 0 = bottom-left, 1 = bottom-right, 2 = top-right, 3 = top-left.
            bot_left = values[..., :, :, 0]
            bot_right = values[..., :, -1:, 1]
            top_right = values[..., -1:, -1:, 2]
            top_left = values[..., -1:, :, 3]
            vertex_vals = np.block([[bot_left, bot_right], [top_left, top_right]])
        if order is None:
            # Verify the CCW guess by round-tripping through
            # vertices_to_bounds; fall back to clockwise on mismatch.
            calc_bnds = np.moveaxis(vertices_to_bounds(vertex_vals).values, 0, -1)
            order = "counterclockwise" if np.all(calc_bnds == values) else "clockwise"
        if order == "clockwise":
            # CW storage: slot 1 is top-left and slot 3 is bottom-right.
            bot_left = values[..., :, :, 0]
            top_left = values[..., -1:, :, 1]
            top_right = values[..., -1:, -1:, 2]
            bot_right = values[..., :, -1:, 3]
            vertex_vals = np.block([[bot_left, bot_right], [top_left, top_right]])
    elif n_core_dims == 1 and nbounds == 2:
        # 1D case: all left edges plus the final right edge.
        vertex_vals = np.concatenate((values[..., :, 0], values[..., -1:, 1]), axis=-1)
    return vertex_vals
def vertices_to_bounds(
    vertices: DataArray, out_dims: Sequence[str] = ("bounds", "x", "y")
) -> DataArray:
    """Convert vertices (shape (N+1,) or (N+1, M+1)) to CF-compliant bounds
    (shape (N, 2) or (N, M, 4)); `out_dims` names the bounds dimension first,
    then the coordinate dimensions.
    """
    if vertices.ndim == 1:
        # Each cell is bounded by consecutive vertices.
        bnd_vals = np.stack((vertices[:-1], vertices[1:]), axis=0)
    elif vertices.ndim == 2:
        # Four corners per cell, stored counterclockwise.
        bnd_vals = np.stack(
            (
                vertices[:-1, :-1],
                vertices[:-1, 1:],
                vertices[1:, 1:],
                vertices[1:, :-1],
            ),
            axis=0,
        )
    else:
        raise ValueError(
            f"vertices format not understood. Got {vertices.dims} with shape {vertices.shape}."
        )
    return xr.DataArray(bnd_vals, dims=out_dims[: vertices.ndim + 1])
| true | true |
f7fad7b93eed2afcebf7935be2cdf12ef8d11a80 | 382 | py | Python | notebook/numpy_rot90_image.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/numpy_rot90_image.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/numpy_rot90_image.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import numpy as np
from PIL import Image
# Demo: rotate an image with numpy.rot90 and save the results via Pillow.
img = np.array(Image.open('data/src/lena.jpg'))
print(type(img))
# <class 'numpy.ndarray'>
print(img.shape)
# (225, 400, 3)
# np.rot90(a, k) rotates the first two axes 90 degrees counter-clockwise,
# k times — so the saved images are rotated 90, 180 and 270 degrees.
Image.fromarray(np.rot90(img)).save('data/dst/lena_np_rot90.jpg')
Image.fromarray(np.rot90(img, 2)).save('data/dst/lena_np_rot90_180.jpg')
Image.fromarray(np.rot90(img, 3)).save('data/dst/lena_np_rot90_270.jpg')
| 23.875 | 72 | 0.727749 | import numpy as np
from PIL import Image
# Demo: rotate an image with numpy.rot90 and save the results via Pillow.
img = np.array(Image.open('data/src/lena.jpg'))
print(type(img))  # <class 'numpy.ndarray'>
print(img.shape)  # (225, 400, 3) — height, width, RGB channels
# np.rot90(a, k) rotates the first two axes 90 degrees counter-clockwise,
# k times — so the saved images are rotated 90, 180 and 270 degrees.
Image.fromarray(np.rot90(img)).save('data/dst/lena_np_rot90.jpg')
Image.fromarray(np.rot90(img, 2)).save('data/dst/lena_np_rot90_180.jpg')
Image.fromarray(np.rot90(img, 3)).save('data/dst/lena_np_rot90_270.jpg')
| true | true |
f7fad81db54f53e418bc54ea11d1cd3af05b2249 | 67,856 | py | Python | lib/rucio/tests/test_bin_rucio.py | abhijeetsharma200/rucio | 02de234f82fa314988d2a16e7bf27077718e32ac | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_bin_rucio.py | abhijeetsharma200/rucio | 02de234f82fa314988d2a16e7bf27077718e32ac | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_bin_rucio.py | abhijeetsharma200/rucio | 02de234f82fa314988d2a16e7bf27077718e32ac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2012-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2019
# - Angelos Molfetas <Angelos.Molfetas@cern.ch>, 2012
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2021
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2014-2018
# - Cheng-Hsi Chao <cheng-hsi.chao@cern.ch>, 2014
# - Cedric Serfon <cedric.serfon@cern.ch>, 2015
# - Martin Barisits <martin.barisits@cern.ch>, 2015-2019
# - Frank Berghaus <frank.berghaus@cern.ch>, 2017-2018
# - Tobias Wegner <twegner@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Tomas Javurek <tomas.javurek@cern.ch>, 2020
# - Radu Carpa <radu.carpa@cern.ch>, 2021
from __future__ import print_function
import os
import re
import unittest
from datetime import datetime, timedelta
from os import remove, unlink, listdir, rmdir, stat, path, environ
import pytest
from rucio.client.accountlimitclient import AccountLimitClient
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from rucio.client.rseclient import RSEClient
from rucio.client.ruleclient import RuleClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.types import InternalScope, InternalAccount
from rucio.common.utils import generate_uuid, get_tmp_dir, md5, render_json
from rucio.rse import rsemanager as rsemgr
from rucio.tests.common import execute, account_name_generator, rse_name_generator, file_generator, scope_name_generator
class TestBinRucio(unittest.TestCase):
    def setUp(self):
        """Prepare each test: clear cached auth tokens and set up clients.

        Removes any stale root auth token (multi-VO aware), then creates the
        Rucio clients and makes sure the default RSE has no account limit and
        is flagged as non-tape.
        """
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
            try:
                remove(get_tmp_dir() + '/.rucio_root@%s/auth_token_root' % self.vo['vo'])
            except OSError as error:
                # errno 2 (ENOENT) means there was no cached token — fine.
                if error.args[0] != 2:
                    raise error
        else:
            self.vo = {}
            try:
                remove(get_tmp_dir() + '/.rucio_root/auth_token_root')
            except OSError as e:
                # errno 2 (ENOENT) means there was no cached token — fine.
                if e.args[0] != 2:
                    raise e
        self.marker = '$> '
        self.host = config_get('client', 'rucio_host')
        self.auth_host = config_get('client', 'auth_host')
        self.user = 'data13_hip'
        self.def_rse = 'MOCK4'
        self.rse_client = RSEClient()
        self.def_rse_id = self.rse_client.get_rse(rse=self.def_rse)['id']
        self.did_client = DIDClient()
        self.replica_client = ReplicaClient()
        self.rule_client = RuleClient()
        self.account_client = AccountLimitClient()
        # Unlimited quota on the default RSE so uploads never fail on limits.
        self.account_client.set_local_account_limit('root', self.def_rse, -1)
        self.rse_client.add_rse_attribute(self.def_rse, 'istape', 'False')
        # %-template for the CLI success message checked by the upload tests.
        self.upload_success_str = 'Successfully uploaded file %s'
def test_rucio_version(self):
"""CLIENT(USER): Rucio version"""
cmd = 'bin/rucio --version'
exitcode, out, err = execute(cmd)
assert 'rucio' in out or 'rucio' in err
    def test_rucio_ping(self):
        """CLIENT(USER): Rucio ping"""
        cmd = 'rucio --host %s ping' % self.host
        print(self.marker + cmd)
        # NOTE(review): no assertion — this only checks that the command can
        # be spawned; consider asserting exitcode == 0.
        exitcode, out, err = execute(cmd)
def test_rucio_config_arg(self):
"""CLIENT(USER): Rucio config argument"""
cmd = 'rucio --config errconfig ping'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Could not load Rucio configuration file' in err and re.match('.*errconfig.*$', err, re.DOTALL)
def test_add_account(self):
"""CLIENT(ADMIN): Add account"""
tmp_val = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new account: %s\n' % tmp_val == out
def test_whoami(self):
"""CLIENT(USER): Rucio whoami"""
cmd = 'rucio whoami'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'account' in out
def test_add_identity(self):
"""CLIENT(ADMIN): Add identity"""
tmp_val = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_val
exitcode, out, err = execute(cmd)
assert 'Added new account: %s\n' % tmp_val == out
cmd = 'rucio-admin identity add --account %s --type GSS --id jdoe@CERN.CH --email jdoe@CERN.CH' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new identity to account: jdoe@CERN.CH-%s\n' % tmp_val == out
def test_del_identity(self):
"""CLIENT(ADMIN): Test del identity"""
tmp_acc = account_name_generator()
# create account
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
# add identity to account
cmd = 'rucio-admin identity add --account %s --type GSS --id jdoe@CERN.CH --email jdoe@CERN.CH' % tmp_acc
exitcode, out, err = execute(cmd)
# delete identity from account
cmd = 'rucio-admin identity delete --account %s --type GSS --id jdoe@CERN.CH' % tmp_acc
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Deleted identity: jdoe@CERN.CH\n' == out
# list identities for account
cmd = 'rucio-admin account list-identities %s' % (tmp_acc)
print(self.marker + cmd)
print(cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert '' == out
def test_attributes(self):
"""CLIENT(ADMIN): Add/List/Delete attributes"""
tmp_acc = account_name_generator()
# create account
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
# add attribute to the account
cmd = 'rucio-admin account add-attribute {0} --key test_attribute_key --value true'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
# list attributes
cmd = 'rucio-admin account list-attributes {0}'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
# delete attribute to the account
cmd = 'rucio-admin account delete-attribute {0} --key test_attribute_key'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
def test_add_scope(self):
"""CLIENT(ADMIN): Add scope"""
tmp_scp = scope_name_generator()
tmp_acc = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin scope add --account %s --scope %s' % (tmp_acc, tmp_scp)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Added new scope to account: %s-%s\n' % (tmp_scp, tmp_acc) == out
def test_add_rse(self):
"""CLIENT(ADMIN): Add RSE"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new deterministic RSE: %s\n' % tmp_val == out
def test_add_rse_nondet(self):
"""CLIENT(ADMIN): Add non-deterministic RSE"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add --non-deterministic %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new non-deterministic RSE: %s\n' % tmp_val == out
def test_list_rses(self):
"""CLIENT(ADMIN): List RSEs"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin rse list'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert tmp_val in out
    def test_rse_add_distance(self):
        """CLIENT (ADMIN): Add distance to RSE

        Creates two RSEs, adds a distance in both directions, then checks
        that re-adding an existing distance fails with a clear error.
        """
        # add RSEs
        temprse1 = rse_name_generator()
        cmd = 'rucio-admin rse add %s' % temprse1
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        temprse2 = rse_name_generator()
        cmd = 'rucio-admin rse add %s' % temprse2
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # add distance between the RSEs (both directions must succeed)
        cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse1, temprse2)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse2, temprse1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # re-running the same command must be rejected as a duplicate
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err, exitcode)
        assert exitcode != 0
        assert 'Distance from %s to %s already exists!' % (temprse2, temprse1) in err
    def test_upload(self):
        """CLIENT(USER): Upload"""
        tmp_val = rse_name_generator()
        cmd = 'rucio-admin rse add %s' % tmp_val
        exitcode, out, err = execute(cmd)
        # NOTE(review): `rucio upload` is invoked without arguments and
        # nothing is asserted — this is only a smoke test of the CLI entry
        # point; consider asserting on exitcode or the usage message.
        cmd = 'rucio upload'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, )
    def test_download(self):
        """CLIENT(USER): Download"""
        # NOTE(review): `rucio download` is invoked without arguments and
        # nothing is asserted — this is only a smoke test of the CLI entry
        # point; consider asserting on exitcode or the usage message.
        cmd = 'rucio download'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, )
def test_upload_file(self):
"""CLIENT(USER): Rucio upload files"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
assert upload_string_1 in out or upload_string_1 in err
assert upload_string_2 in out or upload_string_2 in err
assert upload_string_3 in out or upload_string_3 in err
    def test_upload_file_register_after_upload(self):
        """CLIENT(USER): Rucio upload files with registration after upload"""
        # normal upload of three files with --register-after-upload
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} --register-after-upload'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
        upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
        upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
        assert upload_string_1 in out or upload_string_1 in err
        assert upload_string_2 in out or upload_string_2 in err
        assert upload_string_3 in out or upload_string_3 in err
        # removing replica -> file on RSE should be overwritten
        # (simulating an upload error, where a part of the file is uploaded but the replica is not registered)
        # This part needs direct DB access, so it is skipped in client-only suites.
        if environ.get('SUITE', 'all') != 'client':
            from rucio.db.sqla import session, models
            db_session = session.get_session()
            internal_scope = InternalScope(self.user, **self.vo)
            # Wipe every DB trace of the first file so the name is free again.
            db_session.query(models.RSEFileAssociation).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.ReplicaLock).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.ReplicationRule).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.DidMeta).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.DataIdentifier).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.commit()
            # Re-upload different content under the old name: must overwrite.
            tmp_file4 = file_generator()
            checksum_tmp_file4 = md5(tmp_file4)
            cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
            print(self.marker + cmd)
            exitcode, out, err = execute(cmd)
            print(out)
            print(err)
            assert (self.upload_success_str % path.basename(tmp_file4)) in out or (self.upload_success_str % path.basename(tmp_file4)) in err
            # The registered replica must carry the checksum of the new file.
            assert checksum_tmp_file4 == [replica for replica in self.replica_client.list_replicas(dids=[{'name': tmp_file1_name, 'scope': self.user}])][0]['md5']
            # try to upload file that already exists on RSE and is already registered -> no overwrite
            cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
            print(self.marker + cmd)
            exitcode, out, err = execute(cmd)
            print(out)
            print(err)
            remove(tmp_file4)
            assert 'File already registered' in out or 'File already registered' in err
def test_upload_file_guid(self):
"""CLIENT(USER): Rucio upload file with guid"""
tmp_file1 = file_generator()
tmp_guid = generate_uuid()
cmd = 'rucio -v upload --rse {0} --guid {1} --scope {2} {3}'.format(self.def_rse, tmp_guid, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
assert upload_string_1 in out or upload_string_1 in err
    def test_upload_repeated_file(self):
        """CLIENT(USER): Rucio upload repeated files

        One of the files to upload is already catalogued but was removed, so
        re-uploading the same name must succeed again.
        """
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        # get the rule id for the file via the CLI (first column of the
        # matching list-rules line)
        cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        rule = out
        # delete the rule, removing the file from the catalog
        cmd = "rucio delete-rule {0}".format(rule)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # delete the physical copy from the (local posix) RSE directory
        cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # re-upload the previously deleted file together with two new ones
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        upload_string_1 = (self.upload_success_str % tmp_file1_name)
        assert upload_string_1 in out or upload_string_1 in err
    def test_upload_repeated_file_dataset(self):
        """CLIENT(USER): Rucio upload repeated files to dataset

        One of the files to upload is already in the dataset; the repeated
        upload must not prevent the other files from being attached.
        """
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        tmp_file3_name = path.basename(tmp_file3)
        tmp_dsn = self.user + ':DSet' + rse_name_generator()  # something like mock:DSetMOCK_S0M37HING
        # upload tmp_file1, creating the dataset as a side effect
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        # upload all three files (tmp_file1 is repeated) to the dataset
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        # list the dataset content to verify the attachments
        cmd = 'rucio list-files {0}'.format(tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        # tmp_file1 (the repeated one) must be in the dataset
        assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
        # tmp_file3 (a fresh one) must be in the dataset as well
        assert re.search("{0}:{1}".format(self.user, tmp_file3_name), out) is not None
def test_upload_file_dataset(self):
"""CLIENT(USER): Rucio upload files to dataset"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
def test_upload_file_dataset_register_after_upload(self):
"""CLIENT(USER): Rucio upload files to dataset with file registration after upload"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio -v upload --register-after-upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
def test_upload_adds_md5digest(self):
"""CLIENT(USER): Upload Checksums"""
# user has a file to upload
filename = file_generator()
tmp_file1_name = path.basename(filename)
file_md5 = md5(filename)
# user uploads file
cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, filename)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# When inspecting the metadata of the new file the user finds the md5 checksum
meta = self.did_client.get_metadata(scope=self.user, name=tmp_file1_name)
assert 'md5' in meta
assert meta['md5'] == file_md5
remove(filename)
def test_create_dataset(self):
"""CLIENT(USER): Rucio add dataset"""
tmp_name = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
cmd = 'rucio add-dataset ' + tmp_name
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Added ' + tmp_name, out) is not None
def test_add_files_to_dataset(self):
"""CLIENT(USER): Rucio add files to dataset"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_dataset = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_file2)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {3}:{1} {3}:{2}'.format(tmp_dataset, tmp_file1[5:], tmp_file2[5:], self.user) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# find the added files
cmd = 'rucio list-files ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
    def test_download_file(self):
        """CLIENT(USER): Rucio download files

        Downloads a previously uploaded file twice: first by its exact DID,
        then again (with a fresh file) using a trailing wildcard in the file
        name, verifying in both cases that the file lands under /tmp.
        """
        tmp_file1 = file_generator()
        # upload a fresh file so there is something to download
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # download the file by its exact DID
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:]) # trimming '/tmp/' from filename
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the files with ls
        cmd = 'ls /tmp/' # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # repeat the round trip with a new file, downloading via a wildcarded name
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # download files (last two characters of the name replaced by '*')
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:-2] + '*') # trimming '/tmp/' from filename
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the files with ls
        cmd = 'ls /tmp/' # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # best-effort cleanup of a directory a download may have created
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    @pytest.mark.noparallel(reason='fails when run in parallel')
    def test_download_no_subdir(self):
        """CLIENT(USER): Rucio download files with --no-subdir and check that files already found locally are not replaced"""
        tmp_file = file_generator()
        # upload a fresh file so there is something to download
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # download files with --no-subdir
        cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:]) # trimming '/tmp/' from filename
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # search for the files with ls
        cmd = 'ls /tmp/' # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_file[5:] in out
        # download again with --no-subdir: the local copy must be detected and kept
        cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:]) # trimming '/tmp/' from filename
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # the summary must report zero downloads and one file found locally
        assert re.search(r'Downloaded files:\s+0', out) is not None
        assert re.search(r'Files already found locally:\s+1', out) is not None
        # best-effort cleanup of a directory a download may have created
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    def test_download_filter(self):
        """CLIENT(USER): Rucio download with filter options

        Covers four scenarios: wildcarded file name plus --filter guid,
        bare --scope plus --filter guid, bare --scope plus
        created_before/created_after, and a wildcarded dataset name plus
        created_before/created_after.
        """
        # Use filter option to download file with wildcarded name
        tmp_file1 = file_generator()
        uuid = generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        wrong_guid = generate_uuid()
        # a non-matching guid must download nothing
        cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', wrong_guid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # the matching guid must fetch the file
        cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', uuid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Only use filter option to download file
        tmp_file1 = file_generator()
        uuid = generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        wrong_guid = generate_uuid()
        # again: the wrong guid must yield nothing ...
        cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, wrong_guid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # ... and the right guid must succeed
        cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, uuid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Only use filter option to download dataset
        tmp_file1 = file_generator()
        dataset_name = 'dataset_%s' % generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        # created_before in the distant past must match nothing
        cmd = 'rucio download --dir /tmp --scope {0} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user)
        exitcode, out, err = execute(cmd)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # created_after in the distant past should match the dataset
        cmd = 'rucio download --dir /tmp --scope {0} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # TODO: https://github.com/rucio/rucio/issues/2926 !
        # assert re.search(tmp_file1[5:], out) is not None
        # Use filter option to download dataset with wildcarded name
        tmp_file1 = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        # wildcarded dataset name + impossible created_before: nothing downloaded
        cmd = 'rucio download --dir /tmp {0}:{1} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # wildcarded dataset name + satisfiable created_after: file is downloaded
        cmd = 'rucio download --dir /tmp {0}:{1} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
def test_download_timeout_options_accepted(self):
"""CLIENT(USER): Rucio download timeout options """
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download files
cmd = 'rucio download --dir /tmp --transfer-timeout 3 --transfer-speed-timeout 1000 {0}:{1}'.format(self.user, tmp_file1[5:]) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert not err
# search for the files with ls
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# Check that PFN the transfer-speed-timeout option is not accepted for --pfn
cmd = 'rucio -v download --rse {0} --transfer-speed-timeout 1 --pfn http://a.b.c/ {1}:{2}'.format(self.def_rse, self.user, tmp_file1)
exitcode, out, err = execute(cmd)
print(out, err)
assert "Download with --pfn doesn't support --transfer-speed-timeout" in err
    def test_download_metalink_file(self):
        """CLIENT(USER): Rucio download with metalink file

        Checks that --metalink is mutually exclusive with --filter and with
        positional DIDs, then performs a download driven purely by a
        metalink document produced from the replica listing.
        """
        metalink_file_path = generate_uuid()
        scope = self.user
        # Use filter and metalink option: must be rejected
        cmd = 'rucio download --scope mock --filter size=1 --metalink=test'
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert 'Arguments filter and metalink cannot be used together' in err
        # Use did and metalink option: must be rejected as well
        cmd = 'rucio download --metalink=test mock:test'
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert 'Arguments dids and metalink cannot be used together' in err
        # Download only with metalink file
        tmp_file = file_generator()
        tmp_file_name = tmp_file[5:]
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, scope, tmp_file)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # write the replica listing as a metalink document and download from it
        replica_file = ReplicaClient().list_replicas([{'scope': scope, 'name': tmp_file_name}], metalink=True)
        with open(metalink_file_path, 'w+') as metalink_file:
            metalink_file.write(replica_file)
        cmd = 'rucio download --dir /tmp --metalink {0}'.format(metalink_file_path)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(metalink_file_path)
        # the downloaded file must be present under /tmp/<scope>
        cmd = 'ls /tmp/{0}'.format(scope)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file_name, out) is not None
    def test_download_succeeds_md5only(self):
        """CLIENT(USER): Rucio download succeeds MD5 only

        Registers a replica that carries only an md5 checksum (no adler32),
        copies the file onto the RSE via its protocol, and verifies the
        client can still download and validate it.
        """
        # user has a file to upload
        filename = file_generator()
        file_md5 = md5(filename)
        filesize = stat(filename).st_size
        lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': file_md5}
        # user uploads file: register the replica, then place the file via the RSE protocol
        self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
        rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
        protocol = rsemgr.create_protocol(rse_settings, 'write')
        protocol.connect()
        pfn = list(protocol.lfns2pfns(lfn).values())[0]
        protocol.put(filename[5:], pfn, filename[:5])
        protocol.close()
        remove(filename)
        # download files
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the files with ls
        cmd = 'ls /tmp/{0}'.format(self.user)    # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(filename[5:], out) is not None
        # best-effort cleanup of a directory a download may have created
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    def test_download_fails_badmd5(self):
        """CLIENT(USER): Rucio download fails on MD5 mismatch

        Registers a replica with a deliberately wrong md5 so the download
        must fail checksum validation and leave no file behind.
        """
        # user has a file to upload
        filename = file_generator()
        file_md5 = md5(filename)
        filesize = stat(filename).st_size
        # the registered md5 is intentionally bogus
        lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': '0123456789abcdef0123456789abcdef'}
        # user uploads file: register the replica, then place the file via the RSE protocol
        self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
        rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
        protocol = rsemgr.create_protocol(rse_settings, 'write')
        protocol.connect()
        pfn = list(protocol.lfns2pfns(lfn).values())[0]
        protocol.put(filename[5:], pfn, filename[:5])
        protocol.close()
        remove(filename)
        # download file
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # the client must report the checksum mismatch on stderr
        report = r'Local\ checksum\:\ {0},\ Rucio\ checksum\:\ 0123456789abcdef0123456789abcdef'.format(file_md5)
        print('searching', report, 'in', err)
        assert re.search(report, err) is not None
        # The file should not exist
        cmd = 'ls /tmp/' # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(filename[5:], out) is None
        # best-effort cleanup of a directory a download may have created
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
def test_download_dataset(self):
"""CLIENT(USER): Rucio download dataset"""
tmp_file1 = file_generator()
tmp_dataset = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, tmp_file1[5:]) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download dataset
cmd = 'rucio -v download --dir /tmp {0}'.format(tmp_dataset) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
search = '{0} successfully downloaded'.format(tmp_file1[5:]) # triming '/tmp/' from filename
assert re.search(search, err) is not None
    def test_list_blacklisted_replicas(self):
        """CLIENT(USER): Rucio list replicas

        Verifies that list-file-replicas still shows replicas hosted on an
        RSE after its read availability has been switched off.
        """
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # give the new RSE a posix file protocol so the upload can reach it
        cmd = 'rucio-admin rse add-protocol --hostname blacklistreplica --scheme file --prefix /rucio --port 0 --impl rucio.rse.protocols.posix.Default ' \
              '--domain-json \'{"wan": {"read": 1, "write": 1, "delete": 1, "third_party_copy": 1}}\' %s' % tmp_rse
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add files
        tmp_file1 = file_generator()
        file_name = tmp_file1[5:] # trimming '/tmp/' from filename
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(tmp_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # create dataset
        tmp_dataset = self.user + ':DSet' + rse_name_generator()
        cmd = 'rucio add-dataset ' + tmp_dataset
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add files to dataset
        cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, file_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Listing the replica should work before blacklisting the RSE
        cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_rse in out
        # Blacklist the rse by disabling its read availability
        cmd = 'rucio-admin rse update --rse {} --setting availability_read --value False'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert not err
        # list-file-replicas should, by default, list replicas from blacklisted rses
        cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_rse in out
    def test_create_rule(self):
        """CLIENT(USER): Rucio add rule

        Creates three RSEs all carrying the same spacetoken attribute so a
        three-copy rule on that spacetoken can be satisfied, then adds the
        rule and checks it is listed for the file.
        """
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse (1st of 3)
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        # add quota
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse attributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse (2nd of 3)
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add quota
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse attributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse (3rd of 3)
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add quota
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse attributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add a rule requesting 3 copies on the spacetoken
        cmd = "rucio add-rule {0}:{1} 3 'spacetoken=ATLASSCRATCHDISK'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        assert not err
        rule = out[:-1] # trimming new line character
        assert re.match(r'^\w+$', rule)
        # check if rule exist for the file
        cmd = "rucio list-rules {0}:{1}".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(rule, out) is not None
    def test_create_rule_delayed(self):
        """CLIENT(USER): Rucio add rule delayed

        Checks that --delay-injection rejects a non-numeric value, and that
        a valid delay leaves the rule in INJECT state with its creation time
        shifted ~3600 s into the future.
        """
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add quota
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse attributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASRULEDELAYED'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # try adding rule with an incorrect delay-injection. Must fail
        cmd = "rucio add-rule --delay-injection asdsaf {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        assert err
        # Add a correct rule with a one-hour injection delay
        cmd = "rucio add-rule --delay-injection 3600 {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert not err
        rule = out[:-1] # trimming new line character
        cmd = "rucio rule-info {0}".format(rule)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        out_lines = out.splitlines()
        # a delayed rule sits in INJECT state with no locks yet
        assert any(re.match(r'State:.* INJECT', line) for line in out_lines)
        assert any(re.match(r'Locks OK/REPLICATING/STUCK:.* 0/0/0', line) for line in out_lines)
        # Check that "Created at" is approximately 3600 seconds in the future
        [created_at_line] = filter(lambda x: "Created at" in x, out_lines)
        created_at = re.search(r'Created at:\s+(\d.*\d)$', created_at_line).group(1)
        created_at = datetime.strptime(created_at, "%Y-%m-%d %H:%M:%S")
        assert datetime.utcnow() + timedelta(seconds=3550) < created_at < datetime.utcnow() + timedelta(seconds=3650)
    def test_delete_rule(self):
        """CLIENT(USER): rule deletion

        Uploads a file, adds a second rule for it, deletes both rules and
        checks the DID no longer appears in a list-dids search.
        """
        self.account_client.set_local_account_limit('root', self.def_rse, -1)
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse attributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASDELETERULE'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rules
        cmd = "rucio add-rule {0}:{1} 1 'spacetoken=ATLASDELETERULE'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(err)
        print(out)
        # get the rules for the file; exactly two rule IDs are expected here
        # (presumably the rule created by the upload plus the one just added)
        cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        (rule1, rule2) = out.split()
        # delete the rules for the file
        cmd = "rucio delete-rule {0}".format(rule1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = "rucio delete-rule {0}".format(rule2)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the file; with all rules gone the listing is expected to
        # shrink to exactly 5 lines (presumably table decoration, no DID row)
        cmd = "rucio list-dids --filter type=all {0}:{1}".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert 5 == len(out.splitlines())
def test_add_file_twice(self):
"""CLIENT(USER): Add file twice"""
tmp_file1 = file_generator()
# add file twice
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
    def test_add_delete_add_file(self):
        """CLIENT(USER): Add/Delete/Add

        Uploads a file, deletes its rule and physical copy, modifies the
        local file and re-uploads under the same DID: the second upload must
        not be reported as a fresh upload.
        """
        tmp_file1 = file_generator()
        # add file
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # get the rule for the file
        cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        rule = out
        # delete the file from the catalog
        cmd = "rucio delete-rule {0}".format(rule)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # delete the physical file from the RSE storage area
        cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # modify the file to avoid same checksum
        cmd = "echo 'delta' >> {0}".format(tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add the same file; the DID already exists, so no fresh-upload message
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
def test_attach_files_dataset(self):
"""CLIENT(USER): Rucio attach files to dataset"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# upload the files
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file2 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file2[5:]), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is not None
def test_detach_files_dataset(self):
"""CLIENT(USER): Rucio detach files to dataset"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# detach the files to the dataset
cmd = 'rucio detach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file1 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file1[5:]), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is None
def test_attach_file_twice(self):
"""CLIENT(USER): Rucio attach a file twice"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dsn, self.user, tmp_file1[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("The file already exists", err) is not None
def test_attach_dataset_twice(self):
""" CLIENT(USER): Rucio attach a dataset twice """
container = 'container_%s' % generate_uuid()
dataset = 'dataset_%s' % generate_uuid()
self.did_client.add_container(scope=self.user, name=container)
self.did_client.add_dataset(scope=self.user, name=dataset)
# Attach dataset to container
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
exitcode, out, err = execute(cmd)
# Attach again
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier already added to the destination content", err) is not None
def test_detach_non_existing_file(self):
"""CLIENT(USER): Rucio detach a non existing file"""
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# attach the files to the dataset
cmd = 'rucio detach {0} {1}:{2}'.format(tmp_dsn, self.user, 'file_ghost') # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier not found.", err) is not None
@pytest.mark.dirty
def test_list_did_recursive(self):
""" CLIENT(USER): List did recursive """
# Setup nested collections
tmp_scope = 'mock'
tmp_container_1 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
tmp_container_2 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_2)
exitcode, out, err = execute(cmd)
tmp_container_3 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_3)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_1, tmp_container_2)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_2, tmp_container_3)
exitcode, out, err = execute(cmd)
# All attached DIDs are expected
cmd = 'rucio list-dids {0}:{1} --recursive'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
assert re.search(tmp_container_1, out) is not None
assert re.search(tmp_container_2, out) is not None
assert re.search(tmp_container_3, out) is not None
# Wildcards are not allowed to use with --recursive
cmd = 'rucio list-dids {0}:* --recursive'.format(tmp_scope)
exitcode, out, err = execute(cmd)
assert re.search("Option recursive cannot be used with wildcards", err) is not None
@pytest.mark.dirty
def test_attach_many_dids(self):
""" CLIENT(USER): Rucio attach many (>1000) DIDs """
# Setup data for CLI check
tmp_dsn_name = 'Container' + rse_name_generator()
tmp_dsn_did = self.user + ':' + tmp_dsn_name
self.did_client.add_did(scope=self.user, name=tmp_dsn_name, type='CONTAINER')
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
# Attaching over 1000 DIDs with CLI
cmd = 'rucio attach {0}'.format(tmp_dsn_did)
for tmp_file in files:
cmd += ' {0}:{1}'.format(tmp_file['scope'], tmp_file['name'])
exitcode, out, err = execute(cmd)
print(out)
print(err)
# Checking if the execution was successfull and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
# Setup data with file
did_file_path = 'list_dids.txt'
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
with open(did_file_path, 'w') as did_file:
for file in files:
did_file.write(file['scope'] + ':' + file['name'] + '\n')
did_file.close()
# Attaching over 1000 files per file
cmd = 'rucio attach {0} -f {1}'.format(tmp_dsn_did, did_file_path)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(did_file_path)
# Checking if the execution was successfull and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
    @pytest.mark.dirty
    def test_attach_many_dids_twice(self):
        """ CLIENT(USER): Attach many (>1000) DIDs twice

        A bulk re-attach of already-attached DIDs must still succeed, both
        with and without one genuinely new DID mixed in.
        """
        # Setup data for CLI check
        container_name = 'container' + generate_uuid()
        container = self.user + ':' + container_name
        self.did_client.add_did(scope=self.user, name=container_name, type='CONTAINER')
        datasets = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
        # register the datasets in two batches, as elsewhere in this suite
        self.did_client.add_dids(datasets[:1000])
        self.did_client.add_dids(datasets[1000:])
        # Attaching over 1000 DIDs with CLI
        cmd = 'rucio attach {0}'.format(container)
        for dataset in datasets:
            cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
        exitcode, out, err = execute(cmd)
        # Attaching twice: bulk re-attach of duplicates must still succeed
        cmd = 'rucio attach {0}'.format(container)
        for dataset in datasets:
            cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
        exitcode, out, err = execute(cmd)
        assert re.search("DIDs successfully attached", out) is not None
        # Attaching twice plus one DID that is not already attached
        new_dataset = {'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'}
        datasets.append(new_dataset)
        self.did_client.add_did(scope=self.user, name=new_dataset['name'], type='DATASET')
        cmd = 'rucio attach {0}'.format(container)
        for dataset in datasets:
            cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
        exitcode, out, err = execute(cmd)
        assert re.search("DIDs successfully attached", out) is not None
        cmd = 'rucio list-content {0}'.format(container)
        exitcode, out, err = execute(cmd)
        # the genuinely new dataset must now be part of the container
        assert re.search("{0}:{1}".format(self.user, new_dataset['name']), out) is not None
@pytest.mark.noparallel(reason='might override global RSE settings')
def test_import_data(self):
""" CLIENT(ADMIN): Import data into rucio"""
file_path = 'data_import.json'
rses = {rse['rse']: rse for rse in self.rse_client.list_rses()}
rses[rse_name_generator()] = {'country_name': 'test'}
data = {'rses': rses}
with open(file_path, 'w+') as file:
file.write(render_json(**data))
cmd = 'rucio-admin data import {0}'.format(file_path)
exitcode, out, err = execute(cmd)
assert re.search('Data successfully imported', out) is not None
remove(file_path)
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_export_data(self):
""" CLIENT(ADMIN): Export data from rucio"""
file_path = 'data_export.json'
cmd = 'rucio-admin data export {0}'.format(file_path)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Data successfully exported', out) is not None
remove(file_path)
    @pytest.mark.dirty
    @pytest.mark.noparallel(reason='fails when run in parallel')
    def test_set_tombstone(self):
        """ CLIENT(ADMIN): set a tombstone on a replica. """
        # Set tombstone on one replica
        rse = 'MOCK4'
        scope = 'mock'
        name = generate_uuid()
        self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
        cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
        exitcode, out, err = execute(cmd)
        # The CLI logs the result on stderr, hence the assertions on err.
        assert re.search('Set tombstone successfully', err) is not None
        # Set tombstone on locked replica: must be refused
        name = generate_uuid()
        self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
        self.rule_client.add_replication_rule([{'name': name, 'scope': scope}], 1, rse, locked=True)
        cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
        exitcode, out, err = execute(cmd)
        assert re.search('Replica is locked', err) is not None
        # Set tombstone on not found replica: must report the missing replica
        name = generate_uuid()
        cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
        exitcode, out, err = execute(cmd)
        assert re.search('Replica not found', err) is not None
    @pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
    def test_list_account_limits(self):
        """ CLIENT (USER): list account limits. """
        rse = 'MOCK4'
        rse_exp = 'MOCK3|MOCK4'  # RSE expression for the global limit
        account = 'root'
        local_limit = 10
        global_limit = 20
        self.account_client.set_local_account_limit(account, rse, local_limit)
        self.account_client.set_global_account_limit(account, rse_exp, global_limit)
        # Without --rse both the local and the global limit must be listed.
        cmd = 'rucio list-account-limits {0}'.format(account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
        assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
        # With --rse the matching local and global limits must still appear.
        cmd = 'rucio list-account-limits --rse {0} {1}'.format(rse, account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
        assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
        # Reset the limits (-1 removes them) so other tests are unaffected.
        self.account_client.set_local_account_limit(account, rse, -1)
        self.account_client.set_global_account_limit(account, rse_exp, -1)
    @pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
    @pytest.mark.skipif('SUITE' in os.environ and os.environ['SUITE'] == 'client', reason='uses abacus daemon and core functions')
    def test_list_account_usage(self):
        """ CLIENT (USER): list account usage. """
        # Server-side imports: this test drives core functions and the abacus
        # daemon directly, so it cannot run in the client-only suite.
        from rucio.db.sqla import session, models
        from rucio.core.account_counter import increase
        from rucio.daemons.abacus import account as abacus_account
        # Wipe all usage/limit bookkeeping so the expected numbers below
        # are deterministic.
        db_session = session.get_session()
        db_session.query(models.AccountUsage).delete()
        db_session.query(models.AccountLimit).delete()
        db_session.query(models.AccountGlobalLimit).delete()
        db_session.query(models.UpdatedAccountCounter).delete()
        db_session.commit()
        rse = 'MOCK4'
        rse_id = self.rse_client.get_rse(rse)['id']
        rse_exp = 'MOCK|MOCK4'  # RSE expression for the global limit
        account = 'root'
        usage = 4
        local_limit = 10
        local_left = local_limit - usage
        global_limit = 20
        global_left = global_limit - usage
        self.account_client.set_local_account_limit(account, rse, local_limit)
        self.account_client.set_global_account_limit(account, rse_exp, global_limit)
        # Record the usage and let abacus aggregate it into AccountUsage.
        increase(rse_id, InternalAccount(account, **self.vo), 1, usage)
        abacus_account.run(once=True)
        # Listing must show usage, limit and remaining quota, both with
        # and without the --rse filter.
        cmd = 'rucio list-account-usage {0}'.format(account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
        cmd = 'rucio list-account-usage --rse {0} {1}'.format(rse, account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
        # Reset the limits (-1 removes them) so other tests are unaffected.
        self.account_client.set_local_account_limit(account, rse, -1)
        self.account_client.set_global_account_limit(account, rse_exp, -1)
| 44.437459 | 167 | 0.608568 |
from __future__ import print_function
import os
import re
import unittest
from datetime import datetime, timedelta
from os import remove, unlink, listdir, rmdir, stat, path, environ
import pytest
from rucio.client.accountlimitclient import AccountLimitClient
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from rucio.client.rseclient import RSEClient
from rucio.client.ruleclient import RuleClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.types import InternalScope, InternalAccount
from rucio.common.utils import generate_uuid, get_tmp_dir, md5, render_json
from rucio.rse import rsemanager as rsemgr
from rucio.tests.common import execute, account_name_generator, rse_name_generator, file_generator, scope_name_generator
class TestBinRucio(unittest.TestCase):
    def setUp(self):
        """Reset the CLI environment for each test: drop any cached auth
        token and (re)create the client objects the tests use."""
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
            try:
                # Remove the cached token so every test authenticates afresh.
                remove(get_tmp_dir() + '/.rucio_root@%s/auth_token_root' % self.vo['vo'])
            except OSError as error:
                # errno 2 (ENOENT): no cached token is fine; anything else is real.
                if error.args[0] != 2:
                    raise error
        else:
            self.vo = {}
            try:
                remove(get_tmp_dir() + '/.rucio_root/auth_token_root')
            except OSError as e:
                if e.args[0] != 2:
                    raise e
        self.marker = '$> '  # prefix used when echoing commands in test output
        self.host = config_get('client', 'rucio_host')
        self.auth_host = config_get('client', 'auth_host')
        self.user = 'data13_hip'  # scope used for uploads throughout the tests
        self.def_rse = 'MOCK4'  # default RSE targeted by upload/download tests
        self.rse_client = RSEClient()
        self.def_rse_id = self.rse_client.get_rse(rse=self.def_rse)['id']
        self.did_client = DIDClient()
        self.replica_client = ReplicaClient()
        self.rule_client = RuleClient()
        self.account_client = AccountLimitClient()
        # Lift the quota on the default RSE so uploads never fail on limits.
        self.account_client.set_local_account_limit('root', self.def_rse, -1)
        self.rse_client.add_rse_attribute(self.def_rse, 'istape', 'False')
        self.upload_success_str = 'Successfully uploaded file %s'
def test_rucio_version(self):
cmd = 'bin/rucio --version'
exitcode, out, err = execute(cmd)
assert 'rucio' in out or 'rucio' in err
def test_rucio_ping(self):
cmd = 'rucio --host %s ping' % self.host
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
def test_rucio_config_arg(self):
cmd = 'rucio --config errconfig ping'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Could not load Rucio configuration file' in err and re.match('.*errconfig.*$', err, re.DOTALL)
def test_add_account(self):
tmp_val = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new account: %s\n' % tmp_val == out
def test_whoami(self):
cmd = 'rucio whoami'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'account' in out
    def test_add_identity(self):
        """ CLIENT(ADMIN): Add a GSS identity to a freshly created account """
        tmp_val = account_name_generator()
        cmd = 'rucio-admin account add %s' % tmp_val
        exitcode, out, err = execute(cmd)
        assert 'Added new account: %s\n' % tmp_val == out
        # Attach a GSS identity to the new account and check the confirmation.
        cmd = 'rucio-admin identity add --account %s --type GSS --id jdoe@CERN.CH --email jdoe@CERN.CH' % tmp_val
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, )
        assert 'Added new identity to account: jdoe@CERN.CH-%s\n' % tmp_val == out
    def test_del_identity(self):
        """ CLIENT(ADMIN): Delete a GSS identity and verify it is gone """
        # Setup: create an account and attach a GSS identity to it.
        tmp_acc = account_name_generator()
        cmd = 'rucio-admin account add %s' % tmp_acc
        exitcode, out, err = execute(cmd)
        cmd = 'rucio-admin identity add --account %s --type GSS --id jdoe@CERN.CH --email jdoe@CERN.CH' % tmp_acc
        exitcode, out, err = execute(cmd)
        # Delete the identity again and check the confirmation message.
        cmd = 'rucio-admin identity delete --account %s --type GSS --id jdoe@CERN.CH' % tmp_acc
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert 'Deleted identity: jdoe@CERN.CH\n' == out
        # Listing the account's identities must now produce no output.
        cmd = 'rucio-admin account list-identities %s' % (tmp_acc)
        print(self.marker + cmd)
        print(cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert '' == out
def test_attributes(self):
tmp_acc = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin account add-attribute {0} --key test_attribute_key --value true'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
cmd = 'rucio-admin account list-attributes {0}'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
cmd = 'rucio-admin account delete-attribute {0} --key test_attribute_key'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
    def test_add_scope(self):
        """ CLIENT(ADMIN): Add a new scope to a freshly created account """
        tmp_scp = scope_name_generator()
        tmp_acc = account_name_generator()
        cmd = 'rucio-admin account add %s' % tmp_acc
        exitcode, out, err = execute(cmd)
        # Create the scope on the new account and check the confirmation.
        cmd = 'rucio-admin scope add --account %s --scope %s' % (tmp_acc, tmp_scp)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert 'Added new scope to account: %s-%s\n' % (tmp_scp, tmp_acc) == out
def test_add_rse(self):
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new deterministic RSE: %s\n' % tmp_val == out
def test_add_rse_nondet(self):
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add --non-deterministic %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new non-deterministic RSE: %s\n' % tmp_val == out
def test_list_rses(self):
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin rse list'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert tmp_val in out
    def test_rse_add_distance(self):
        """ CLIENT(ADMIN): Add distances between two RSEs; duplicates must fail """
        # Create two fresh RSEs to link.
        temprse1 = rse_name_generator()
        cmd = 'rucio-admin rse add %s' % temprse1
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        temprse2 = rse_name_generator()
        cmd = 'rucio-admin rse add %s' % temprse2
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # Add the distance in both directions.
        cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse1, temprse2)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse2, temprse1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # Re-running the same command must be rejected as a duplicate.
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err, exitcode)
        assert exitcode != 0
        assert 'Distance from %s to %s already exists!' % (temprse2, temprse1) in err
def test_upload(self):
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
exitcode, out, err = execute(cmd)
cmd = 'rucio upload'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
def test_download(self):
cmd = 'rucio download'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
def test_upload_file(self):
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
assert upload_string_1 in out or upload_string_1 in err
assert upload_string_2 in out or upload_string_2 in err
assert upload_string_3 in out or upload_string_3 in err
    def test_upload_file_register_after_upload(self):
        """ CLIENT(USER): Upload with --register-after-upload, then re-register
        a new file under the same DID name after wiping the DB entries."""
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} --register-after-upload'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
        upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
        upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
        # Success messages can land on stdout or stderr depending on logging.
        assert upload_string_1 in out or upload_string_1 in err
        assert upload_string_2 in out or upload_string_2 in err
        assert upload_string_3 in out or upload_string_3 in err
        # The second half needs direct DB access, so skip it in the
        # client-only test suite.
        if environ.get('SUITE', 'all') != 'client':
            from rucio.db.sqla import session, models
            # Remove every DB trace of the first DID so its name can be
            # re-registered with different content.
            db_session = session.get_session()
            internal_scope = InternalScope(self.user, **self.vo)
            db_session.query(models.RSEFileAssociation).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.ReplicaLock).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.ReplicationRule).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.DidMeta).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.query(models.DataIdentifier).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
            db_session.commit()
            # Upload a new file under the old DID name; its checksum must win.
            tmp_file4 = file_generator()
            checksum_tmp_file4 = md5(tmp_file4)
            cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
            print(self.marker + cmd)
            exitcode, out, err = execute(cmd)
            print(out)
            print(err)
            assert (self.upload_success_str % path.basename(tmp_file4)) in out or (self.upload_success_str % path.basename(tmp_file4)) in err
            assert checksum_tmp_file4 == [replica for replica in self.replica_client.list_replicas(dids=[{'name': tmp_file1_name, 'scope': self.user}])][0]['md5']
            # Uploading the same DID again must now be refused.
            cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
            print(self.marker + cmd)
            exitcode, out, err = execute(cmd)
            print(out)
            print(err)
            remove(tmp_file4)
            assert 'File already registered' in out or 'File already registered' in err
def test_upload_file_guid(self):
tmp_file1 = file_generator()
tmp_guid = generate_uuid()
cmd = 'rucio -v upload --rse {0} --guid {1} --scope {2} {3}'.format(self.def_rse, tmp_guid, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
assert upload_string_1 in out or upload_string_1 in err
    def test_upload_repeated_file(self):
        """ CLIENT(USER): Re-upload a file whose replica was deleted """
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        # First upload of file 1.
        cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        # Extract the rule id protecting the file and delete the rule.
        cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        rule = out
        cmd = "rucio delete-rule {0}".format(rule)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Delete the physical replica from the mock RSE's local storage.
        cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Re-uploading file 1 (together with two new files) must succeed again.
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        upload_string_1 = (self.upload_success_str % tmp_file1_name)
        assert upload_string_1 in out or upload_string_1 in err
    def test_upload_repeated_file_dataset(self):
        """ CLIENT(USER): Uploading an already-uploaded file into a dataset
        must not break the upload of the remaining files."""
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        tmp_file3_name = path.basename(tmp_file3)
        tmp_dsn = self.user + ':DSet' + rse_name_generator()
        # Upload file 1 into the dataset.
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        # Upload all three files: file 1 is a repeat, 2 and 3 are new.
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        # Both the repeated file and the new files must be in the dataset.
        cmd = 'rucio list-files {0}'.format(tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
        assert re.search("{0}:{1}".format(self.user, tmp_file3_name), out) is not None
    def test_upload_file_dataset(self):
        """ CLIENT(USER): Upload files directly into a (new) dataset """
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        tmp_dsn = self.user + ':DSet' + rse_name_generator()
        # The trailing DID makes upload attach all files to that dataset.
        cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        # The uploaded file must show up in the dataset's content listing.
        cmd = 'rucio list-files {0}'.format(tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
    def test_upload_file_dataset_register_after_upload(self):
        """ CLIENT(USER): Upload files into a dataset with --register-after-upload """
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_file3 = file_generator()
        tmp_file1_name = path.basename(tmp_file1)
        tmp_dsn = self.user + ':DSet' + rse_name_generator()
        # The trailing DID makes upload attach all files to that dataset.
        cmd = 'rucio -v upload --register-after-upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        remove(tmp_file1)
        remove(tmp_file2)
        remove(tmp_file3)
        # The uploaded file must show up in the dataset's content listing.
        cmd = 'rucio list-files {0}'.format(tmp_dsn)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        print(err)
        assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
def test_upload_adds_md5digest(self):
filename = file_generator()
tmp_file1_name = path.basename(filename)
file_md5 = md5(filename)
cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, filename)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
meta = self.did_client.get_metadata(scope=self.user, name=tmp_file1_name)
assert 'md5' in meta
assert meta['md5'] == file_md5
remove(filename)
def test_create_dataset(self):
tmp_name = self.user + ':DSet' + rse_name_generator()
cmd = 'rucio add-dataset ' + tmp_name
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Added ' + tmp_name, out) is not None
    def test_add_files_to_dataset(self):
        """ CLIENT(USER): Attach uploaded files to a dataset and list them """
        # NOTE(review): the generated temp files are never removed here —
        # other tests in this class clean up with remove(); consider doing the same.
        tmp_file1 = file_generator()
        tmp_file2 = file_generator()
        tmp_dataset = self.user + ':DSet' + rse_name_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_file2)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'rucio add-dataset ' + tmp_dataset
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # [5:] strips the '/tmp/' prefix to get the registered DID names.
        cmd = 'rucio attach {0} {3}:{1} {3}:{2}'.format(tmp_dataset, tmp_file1[5:], tmp_file2[5:], self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # The attached file must appear in the dataset listing.
        cmd = 'rucio list-files ' + tmp_dataset
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
    def test_download_file(self):
        """ CLIENT(USER): Download a file by exact DID and by wildcard """
        tmp_file1 = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Download by exact DID ([5:] strips the '/tmp/' prefix).
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Second round: download via a trailing-wildcard DID.
        tmp_file1 = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:-2] + '*')
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Best-effort cleanup of the scope-named download directory.
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    @pytest.mark.noparallel(reason='fails when run in parallel')
    def test_download_no_subdir(self):
        """ CLIENT(USER): --no-subdir downloads into the target dir directly
        and skips files that already exist locally."""
        tmp_file = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        # With --no-subdir the file lands in /tmp itself, not /tmp/<scope>/.
        cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        cmd = 'ls /tmp/'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_file[5:] in out
        # Downloading again must find the local copy and transfer nothing.
        cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert exitcode == 0
        assert re.search(r'Downloaded files:\s+0', out) is not None
        assert re.search(r'Files already found locally:\s+1', out) is not None
        # Best-effort cleanup of the scope-named download directory.
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    def test_download_filter(self):
        """ CLIENT(USER): Download with --filter on guid / created_before /
        created_after, each with a matching and a non-matching value."""
        # Case 1: wildcard DID + guid filter.
        tmp_file1 = file_generator()
        uuid = generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        # A non-matching guid must download nothing ([5:] strips '/tmp/').
        wrong_guid = generate_uuid()
        cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', wrong_guid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # The correct guid must download the file.
        cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', uuid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Case 2: --scope (no DID) + guid filter.
        tmp_file1 = file_generator()
        uuid = generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        wrong_guid = generate_uuid()
        cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, wrong_guid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, uuid)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
        # Case 3: --scope + created_before / created_after filters.
        tmp_file1 = file_generator()
        dataset_name = 'dataset_%s' % generate_uuid()
        cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        # created_before in 1900 matches nothing.
        cmd = 'rucio download --dir /tmp --scope {0} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user)
        exitcode, out, err = execute(cmd)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        # created_after in 1900 matches everything.
        cmd = 'rucio download --dir /tmp --scope {0} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Case 4: wildcard dataset DID + created_before / created_after.
        tmp_file1 = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        remove(tmp_file1)
        cmd = 'rucio download --dir /tmp {0}:{1} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is None
        cmd = 'rucio download --dir /tmp {0}:{1} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = 'ls /tmp/{0}'.format(dataset_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(tmp_file1[5:], out) is not None
    def test_download_timeout_options_accepted(self):
        """ CLIENT(USER): Download accepts the timeout options; --pfn and
        --transfer-speed-timeout are mutually exclusive."""
        tmp_file1 = file_generator()
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Both timeout options together must work without any error output.
        cmd = 'rucio download --dir /tmp --transfer-timeout 3 --transfer-speed-timeout 1000 {0}:{1}'.format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert not err
        cmd = 'ls /tmp/'
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # --pfn together with --transfer-speed-timeout must be rejected.
        cmd = 'rucio -v download --rse {0} --transfer-speed-timeout 1 --pfn http://a.b.c/ {1}:{2}'.format(self.def_rse, self.user, tmp_file1)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert "Download with --pfn doesn't support --transfer-speed-timeout" in err
def test_download_metalink_file(self):
metalink_file_path = generate_uuid()
scope = self.user
# Use filter and metalink option
cmd = 'rucio download --scope mock --filter size=1 --metalink=test'
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Arguments filter and metalink cannot be used together' in err
# Use did and metalink option
cmd = 'rucio download --metalink=test mock:test'
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Arguments dids and metalink cannot be used together' in err
# Download only with metalink file
tmp_file = file_generator()
tmp_file_name = tmp_file[5:]
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, scope, tmp_file)
exitcode, out, err = execute(cmd)
print(out, err)
replica_file = ReplicaClient().list_replicas([{'scope': scope, 'name': tmp_file_name}], metalink=True)
with open(metalink_file_path, 'w+') as metalink_file:
metalink_file.write(replica_file)
cmd = 'rucio download --dir /tmp --metalink {0}'.format(metalink_file_path)
exitcode, out, err = execute(cmd)
print(out, err)
remove(metalink_file_path)
cmd = 'ls /tmp/{0}'.format(scope)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file_name, out) is not None
    def test_download_succeeds_md5only(self):
        """ CLIENT(USER): Download succeeds when only an md5 checksum is registered """
        # user has a file to upload
        filename = file_generator()
        file_md5 = md5(filename)
        filesize = stat(filename).st_size
        # Register with md5 only (no adler32); [5:] strips '/tmp/'.
        lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': file_md5}
        # user uploads file: register the replica, then copy the file onto
        # the RSE directly through the protocol layer.
        self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
        rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
        protocol = rsemgr.create_protocol(rse_settings, 'write')
        protocol.connect()
        pfn = list(protocol.lfns2pfns(lfn).values())[0]
        protocol.put(filename[5:], pfn, filename[:5])
        protocol.close()
        remove(filename)
        # download files
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the files with ls
        cmd = 'ls /tmp/{0}'.format(self.user)    # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(filename[5:], out) is not None
        # Best-effort cleanup of the scope-named download directory.
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
    def test_download_fails_badmd5(self):
        """ CLIENT(USER): Download fails when the registered md5 does not match """
        # user has a file to upload
        filename = file_generator()
        file_md5 = md5(filename)
        filesize = stat(filename).st_size
        # Register a deliberately wrong md5 so the checksum check must fail.
        lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': '0123456789abcdef0123456789abcdef'}
        # user uploads file: register the replica, then copy the file onto
        # the RSE directly through the protocol layer.
        self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
        rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
        protocol = rsemgr.create_protocol(rse_settings, 'write')
        protocol.connect()
        pfn = list(protocol.lfns2pfns(lfn).values())[0]
        protocol.put(filename[5:], pfn, filename[:5])
        protocol.close()
        remove(filename)
        # download file
        cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # The error report must show both the local and the registered checksum.
        report = r'Local\ checksum\:\ {0},\ Rucio\ checksum\:\ 0123456789abcdef0123456789abcdef'.format(file_md5)
        print('searching', report, 'in', err)
        assert re.search(report, err) is not None
        # The file should not exist
        cmd = 'ls /tmp/'    # search in /tmp/
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert re.search(filename[5:], out) is None
        # Best-effort cleanup of the scope-named download directory.
        try:
            for i in listdir('data13_hip'):
                unlink('data13_hip/%s' % i)
            rmdir('data13_hip')
        except Exception:
            pass
def test_download_dataset(self):
tmp_file1 = file_generator()
tmp_dataset = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, tmp_file1[5:]) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download dataset
cmd = 'rucio -v download --dir /tmp {0}'.format(tmp_dataset) # triming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
search = '{0} successfully downloaded'.format(tmp_file1[5:]) # triming '/tmp/' from filename
assert re.search(search, err) is not None
    def test_list_blacklisted_replicas(self):
        """list-file-replicas must still show replicas on an RSE blacklisted for reads."""
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # give the new RSE a posix file protocol so uploads can be written to it
        cmd = 'rucio-admin rse add-protocol --hostname blacklistreplica --scheme file --prefix /rucio --port 0 --impl rucio.rse.protocols.posix.Default ' \
              '--domain-json \'{"wan": {"read": 1, "write": 1, "delete": 1, "third_party_copy": 1}}\' %s' % tmp_rse
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add files
        tmp_file1 = file_generator()
        file_name = tmp_file1[5:]  # triming '/tmp/' from filename
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(tmp_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # create dataset
        tmp_dataset = self.user + ':DSet' + rse_name_generator()
        cmd = 'rucio add-dataset ' + tmp_dataset
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add files to dataset
        cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, file_name)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # Listing the replica should work before blacklisting the RSE
        cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_rse in out
        # Blacklist the rse
        cmd = 'rucio-admin rse update --rse {} --setting availability_read --value False'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert not err
        # list-file-replicas should, by default, list replicas from blacklisted rses
        cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert tmp_rse in out
def test_create_rule(self):
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse atributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse atributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse atributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rules
cmd = "rucio add-rule {0}:{1} 3 'spacetoken=ATLASSCRATCHDISK'".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
assert not err
rule = out[:-1] # triming new line character
assert re.match(r'^\w+$', rule)
# check if rule exist for the file
cmd = "rucio list-rules {0}:{1}".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(rule, out) is not None
    def test_create_rule_delayed(self):
        """A rule created with --delay-injection must sit in INJECT state with no locks."""
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add quota (-1 means unlimited)
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse atributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASRULEDELAYED'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # try adding rule with an incorrect delay-injection. Must fail
        cmd = "rucio add-rule --delay-injection asdsaf {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        assert err
        # Add a correct rule, delayed by one hour
        cmd = "rucio add-rule --delay-injection 3600 {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        assert not err
        rule = out[:-1]  # triming new line character
        cmd = "rucio rule-info {0}".format(rule)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        out_lines = out.splitlines()
        # a delayed rule is injected later, so no locks exist yet
        assert any(re.match(r'State:.* INJECT', line) for line in out_lines)
        assert any(re.match(r'Locks OK/REPLICATING/STUCK:.* 0/0/0', line) for line in out_lines)
        # Check that "Created at" is approximately 3600 seconds in the future
        [created_at_line] = filter(lambda x: "Created at" in x, out_lines)
        created_at = re.search(r'Created at:\s+(\d.*\d)$', created_at_line).group(1)
        created_at = datetime.strptime(created_at, "%Y-%m-%d %H:%M:%S")
        # 50-second tolerance on either side of the one-hour delay
        assert datetime.utcnow() + timedelta(seconds=3550) < created_at < datetime.utcnow() + timedelta(seconds=3650)
    def test_delete_rule(self):
        """Deleting all rules of a file must make the DID disappear from listings."""
        self.account_client.set_local_account_limit('root', self.def_rse, -1)
        tmp_file1 = file_generator()
        # add files
        cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rse
        tmp_rse = rse_name_generator()
        cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out)
        self.account_client.set_local_account_limit('root', tmp_rse, -1)
        # add rse atributes
        cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASDELETERULE'.format(tmp_rse)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # add rules
        cmd = "rucio add-rule {0}:{1} 1 'spacetoken=ATLASDELETERULE'".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(err)
        print(out)
        # get the rules for the file (first whitespace-separated column is the rule id)
        cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # two rules are expected: presumably one from the upload and the one
        # added above -- TODO confirm upload creates an implicit rule here
        (rule1, rule2) = out.split()
        # delete the rules for the file
        cmd = "rucio delete-rule {0}".format(rule1)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        cmd = "rucio delete-rule {0}".format(rule2)
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # search for the file
        cmd = "rucio list-dids --filter type=all {0}:{1}".format(self.user, tmp_file1[5:])
        print(self.marker + cmd)
        exitcode, out, err = execute(cmd)
        print(out, err)
        # only the empty table frame is expected (5 lines, no DID row) --
        # NOTE(review): tied to the exact CLI table layout
        assert 5 == len(out.splitlines())
def test_add_file_twice(self):
tmp_file1 = file_generator()
# add file twice
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
def test_add_delete_add_file(self):
tmp_file1 = file_generator()
# add file
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# get the rule for the file
cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
rule = out
# delete the file from the catalog
cmd = "rucio delete-rule {0}".format(rule)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# delete the fisical file
cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# modify the file to avoid same checksum
cmd = "echo 'delta' >> {0}".format(tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add the same file
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
def test_attach_files_dataset(self):
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# upload the files
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file2 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file2[5:]), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is not None
def test_detach_files_dataset(self):
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# detach the files to the dataset
cmd = 'rucio detach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file1 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file1[5:]), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is None
def test_attach_file_twice(self):
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dsn, self.user, tmp_file1[5:]) # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("The file already exists", err) is not None
def test_attach_dataset_twice(self):
container = 'container_%s' % generate_uuid()
dataset = 'dataset_%s' % generate_uuid()
self.did_client.add_container(scope=self.user, name=container)
self.did_client.add_dataset(scope=self.user, name=dataset)
# Attach dataset to container
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
exitcode, out, err = execute(cmd)
# Attach again
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier already added to the destination content", err) is not None
def test_detach_non_existing_file(self):
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# attach the files to the dataset
cmd = 'rucio detach {0} {1}:{2}'.format(tmp_dsn, self.user, 'file_ghost') # triming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier not found.", err) is not None
@pytest.mark.dirty
def test_list_did_recursive(self):
# Setup nested collections
tmp_scope = 'mock'
tmp_container_1 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
tmp_container_2 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_2)
exitcode, out, err = execute(cmd)
tmp_container_3 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_3)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_1, tmp_container_2)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_2, tmp_container_3)
exitcode, out, err = execute(cmd)
# All attached DIDs are expected
cmd = 'rucio list-dids {0}:{1} --recursive'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
assert re.search(tmp_container_1, out) is not None
assert re.search(tmp_container_2, out) is not None
assert re.search(tmp_container_3, out) is not None
# Wildcards are not allowed to use with --recursive
cmd = 'rucio list-dids {0}:* --recursive'.format(tmp_scope)
exitcode, out, err = execute(cmd)
assert re.search("Option recursive cannot be used with wildcards", err) is not None
@pytest.mark.dirty
def test_attach_many_dids(self):
# Setup data for CLI check
tmp_dsn_name = 'Container' + rse_name_generator()
tmp_dsn_did = self.user + ':' + tmp_dsn_name
self.did_client.add_did(scope=self.user, name=tmp_dsn_name, type='CONTAINER')
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
# Attaching over 1000 DIDs with CLI
cmd = 'rucio attach {0}'.format(tmp_dsn_did)
for tmp_file in files:
cmd += ' {0}:{1}'.format(tmp_file['scope'], tmp_file['name'])
exitcode, out, err = execute(cmd)
print(out)
print(err)
# Checking if the execution was successfull and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
# Setup data with file
did_file_path = 'list_dids.txt'
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
with open(did_file_path, 'w') as did_file:
for file in files:
did_file.write(file['scope'] + ':' + file['name'] + '\n')
did_file.close()
# Attaching over 1000 files per file
cmd = 'rucio attach {0} -f {1}'.format(tmp_dsn_did, did_file_path)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(did_file_path)
# Checking if the execution was successfull and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
@pytest.mark.dirty
def test_attach_many_dids_twice(self):
# Setup data for CLI check
container_name = 'container' + generate_uuid()
container = self.user + ':' + container_name
self.did_client.add_did(scope=self.user, name=container_name, type='CONTAINER')
datasets = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(datasets[:1000])
self.did_client.add_dids(datasets[1000:])
# Attaching over 1000 DIDs with CLI
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
# Attaching twice
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
assert re.search("DIDs successfully attached", out) is not None
# Attaching twice plus one DID that is not already attached
new_dataset = {'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'}
datasets.append(new_dataset)
self.did_client.add_did(scope=self.user, name=new_dataset['name'], type='DATASET')
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
assert re.search("DIDs successfully attached", out) is not None
cmd = 'rucio list-content {0}'.format(container)
exitcode, out, err = execute(cmd)
assert re.search("{0}:{1}".format(self.user, new_dataset['name']), out) is not None
@pytest.mark.noparallel(reason='might override global RSE settings')
def test_import_data(self):
file_path = 'data_import.json'
rses = {rse['rse']: rse for rse in self.rse_client.list_rses()}
rses[rse_name_generator()] = {'country_name': 'test'}
data = {'rses': rses}
with open(file_path, 'w+') as file:
file.write(render_json(**data))
cmd = 'rucio-admin data import {0}'.format(file_path)
exitcode, out, err = execute(cmd)
assert re.search('Data successfully imported', out) is not None
remove(file_path)
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_export_data(self):
file_path = 'data_export.json'
cmd = 'rucio-admin data export {0}'.format(file_path)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Data successfully exported', out) is not None
remove(file_path)
@pytest.mark.dirty
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_set_tombstone(self):
# Set tombstone on one replica
rse = 'MOCK4'
scope = 'mock'
name = generate_uuid()
self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Set tombstone successfully', err) is not None
# Set tombstone on locked replica
name = generate_uuid()
self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
self.rule_client.add_replication_rule([{'name': name, 'scope': scope}], 1, rse, locked=True)
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Replica is locked', err) is not None
# Set tombstone on not found replica
name = generate_uuid()
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Replica not found', err) is not None
@pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
def test_list_account_limits(self):
rse = 'MOCK4'
rse_exp = 'MOCK3|MOCK4'
account = 'root'
local_limit = 10
global_limit = 20
self.account_client.set_local_account_limit(account, rse, local_limit)
self.account_client.set_global_account_limit(account, rse_exp, global_limit)
cmd = 'rucio list-account-limits {0}'.format(account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
cmd = 'rucio list-account-limits --rse {0} {1}'.format(rse, account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
self.account_client.set_local_account_limit(account, rse, -1)
self.account_client.set_global_account_limit(account, rse_exp, -1)
    @pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
    @pytest.mark.skipif('SUITE' in os.environ and os.environ['SUITE'] == 'client', reason='uses abacus daemon and core functions')
    def test_list_account_usage(self):
        """list-account-usage must show usage against local and global limits."""
        # server-side imports: this test drives the core/daemon layer directly
        from rucio.db.sqla import session, models
        from rucio.core.account_counter import increase
        from rucio.daemons.abacus import account as abacus_account
        # wipe all usage/limit bookkeeping so the assertions see only this test's data
        db_session = session.get_session()
        db_session.query(models.AccountUsage).delete()
        db_session.query(models.AccountLimit).delete()
        db_session.query(models.AccountGlobalLimit).delete()
        db_session.query(models.UpdatedAccountCounter).delete()
        db_session.commit()
        rse = 'MOCK4'
        rse_id = self.rse_client.get_rse(rse)['id']
        rse_exp = 'MOCK|MOCK4'
        account = 'root'
        usage = 4
        local_limit = 10
        local_left = local_limit - usage
        global_limit = 20
        global_left = global_limit - usage
        self.account_client.set_local_account_limit(account, rse, local_limit)
        self.account_client.set_global_account_limit(account, rse_exp, global_limit)
        # book 1 file / `usage` bytes against the counter and let abacus aggregate it
        increase(rse_id, InternalAccount(account, **self.vo), 1, usage)
        abacus_account.run(once=True)
        # usage, limit and remaining quota must appear for the local limit ...
        cmd = 'rucio list-account-usage {0}'.format(account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
        # ... and also when the output is restricted to one RSE
        cmd = 'rucio list-account-usage --rse {0} {1}'.format(rse, account)
        exitcode, out, err = execute(cmd)
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
        assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
        # reset the limits to unlimited again
        self.account_client.set_local_account_limit(account, rse, -1)
        self.account_client.set_global_account_limit(account, rse_exp, -1)
| true | true |
f7fad81f14d2704e351b3059e4600d9be81fecdd | 9,168 | py | Python | tools/testnet/files/dockerfiles/geth-testnet/run.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | 1 | 2018-11-26T01:40:37.000Z | 2018-11-26T01:40:37.000Z | tools/testnet/files/dockerfiles/geth-testnet/run.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | tools/testnet/files/dockerfiles/geth-testnet/run.py | anmolshl/raiden | f1cecb68cb43a2c00b2f719eadbe83137611a92a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import structlog
import os
import time
import subprocess
import sys
import logging
import click
from datetime import datetime, timedelta
import signal
import requests
from web3 import Web3, IPCProvider
from web3.utils.compat.compat_stdlib import Timeout
# FIX: the module imports `structlog`, but the logger was created via the
# undefined name `struct`, which raised NameError at import time
log = structlog.get_logger(__name__)
# Since this will run inside a docker container and is written for Python 3 we
# have to disable flake8 since it will run with Python 2 and break on Travis-CI
# flake8: noqa
"""
Helper script to start geth.
Due to the ropsten revival there are a lot of nodes that aren't on the "correct" chain.
To ensure nodes sync to the "right" chain a rather involved process is necessary:
https://github.com/ethereum/ropsten/blob/master/README.md#troubleshooting
This script automates this process:
* Start geth with `--nodiscover`
* Add "known good" nodes via JS-API
* Wait until initial sync is done
* Restart geth w/o `--nodiscover`
"""
# Known-good ropsten peers, fed to geth via `admin.addPeer` while discovery is
# disabled so the node syncs to the intended (revived) chain.
BOOTNODES = [
    # Ropsten README
    # "enode://6ce05930c72abc632c58e2e4324f7c7ea478cec0ed4fa2528982cf34483094e9cbc9216e7aa34969124"
    # "2576d552a2a56aaeae426c5303ded677ce455ba1acd9d@13.84.180.240:30303",
    # "enode://20c9ad97c081d63397d7b685a412227a40e23c8bdc6688c6f37e97cfbc22d2b4d1db1510d8f61e6a886"
    # "6ad7f0e17c02b14182d37ea7c3c8b9c2683aeb6b733a1@52.169.14.227:30303",
    # BB "ropster"
    "enode://bed9a7af25633bbbb7bf23bfeb1518e2601868953d4b9dfcc490d00a5dd2c3ca17580fe23dcfb69208757"
    "465d6e517109fd17b9cdfcccdc4a2cd2bdd81f93e1a@134.119.11.28:30303",
    # https://gist.github.com/rfikki/7a95067f8cc02ae8b11bc34544f6aa3e#Ropsten-Peers-06282017.txt
    "enode://00ae60771d9815daba35766d463a82a7b360b3a80e35ab2e0daa25bdc6ca6213ff4c8348025e7e1a908a8"
    "f58411a364fe02a0fb3c2aa32008304f063d8aaf1a2@163.172.132.85:30303",
    "enode://0a4d29ff55bc331bf4850bb967074beb02a2fc235c5fbd4511db57ed98781d5d75590368d69b3014d62fa"
    "ab0d6146ce5221bf7e72a22404d7423c5e025019396@109.62.202.54:14574",
    "enode://0f838387f82e14ffabaa6c48812ce0b33f79444ffd1d36d82f5e101803375e3911583fee2703ec3205d3c"
    "729c2b0eb86d9fbb5de5bcadeff3aa05297a0af12e6@52.174.187.98:48036",
    "enode://18a5676911f520ff7fd04013227513a0f2a0cea1bc39a53d3d6afc8f476d9e600db65a3235ea74ab363da"
    "64c183d1f24c9f6fc606ab6f818e42049607d5b8e64@50.202.110.126:60668",
    "enode://37a6360cf1597cfe9ba5c242b247137f7a222e86e5c2d23e483eeb314b794648f71dedb2c15ad85b8ad85"
    "9f32b51c23e280982dd35b35d4432c963f3088e7165@31.16.253.42:8183",
    "enode://3dd0079b86d9a126010a1b6b41ef2ca0227a839f5132a222e10bc8ebc25a180317fb00b4470cb4dd599e1"
    "3ba330969c2d24b01231b8ba627be6845fdb0a69677@208.76.2.246:3872",
    "enode://3fa5f2525f8edf67abcec110d7dd4d98e1ea936f618f10ea3418c6db3faf065258a7e652097e69275749e"
    "921df9523ceabeaac2c702bbdff4ee1e89fe75dd932@217.234.145.135:49286",
    "enode://4814abeb1d62f924861cfd53503eb8d16a8375f5061f5b109cf4f83cbddf9605caf6ae99ea4ec515b4deb"
    "adeb172183edb1119d65e15abb5430b2737e157a810@188.60.170.25:45594",
    "enode://4df3e91d952d96546adce09dfa279cc4617d98a9d88eda1a31a2214ec908632f545d5283ecb7816ce3052"
    "3c9eb011348fa42a431a31ed2f3ca7d45f040c70bac@45.27.21.43:53576",
    "enode://70aab01dbb9b8c9676a41c8458dfabe874777eb06a925692fc8901d575befb609e98fdc1023268003c6c0"
    "9ac156f1cbbc22a2ba568eafbc32bbd40d62edd02db@46.53.177.238:51176",
    "enode://7db1dd1c123eac9ef7f4a9e999c0abe2a5ec9886b61d2366645ff508e02455d7f139cc9fdfc84ca2b0ea4"
    "11da1552d93a2508d3dacc3ef6704ff47a38426cb4a@216.151.184.87:53754",
    "enode://82a91578bcc39447f084aba14f932056cc09bd57e3ac1be039c5f3202eeb7281a512da0a664fa3b10d935"
    "4c1604db3b56d8bb585e2006c6fd24761c5a50056f0@99.235.74.76:43352",
    "enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e"
    "375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303",
    "enode://8ab78987908454be92f4aadbe379245cbf0e472547ede2f3efebc0ca733c51ed895515300a04f2ca60ccf"
    "a0455f68d56f4734b2b931a0232594967c50f6b42cc@54.196.249.59:36388",
    "enode://8b88dabdfdca2c7aab160b1a26c7e5be855bf55ed4dda05b176dee1d09fe00e1a1a6bce73585dbbbd3f05"
    "cd94259dbe8fe201af0283a5a40a33408e4184df550@84.200.105.107:39521",
    "enode://90f0c67ede3ff110d47cb76d44499105f337dca4046bef73b6fed8fc4b9bbf488917c96442c2f80e84894"
    "9f77478893fc9dbefadf9a92414cb44677c2890ca69@52.233.195.36:1440",
    "enode://9588c5cc481de306217d97e055312ded57276ee46beb6ff834b2aa46ed67e6b941fc99098aabece0cecec"
    "0bf6f536d9c0e2337c0166a8ef49dc564440ddac8ed@124.169.136.13:51075",
    "enode://9f2afd7068309d43adc91cd6f4dc41cbd69a9b9b3ea9ef8f3109cac395d3e08256b08a23fbccded6a7879"
    "f00f05ed4b385047216373291466a8e4066f56977b5@12.46.122.13:24437",
    "enode://aa927af666de44bbbe8ea6e0b3665125c1afed8841bb1c26daf10b0cf1b1683e9ceac49bdf2779ec0a954"
    "e1d64ff98b7d5126f2feb7c6a37dba068038646676a@72.221.17.212:63725",
    "enode://bf6848d2a994079293da3fa378bb9d228c0ae3e474ba5708d1719195044746cdaaa129801db8d0c86f24d"
    "fff92963f6f58905b7fa06b3440d723208253516516@172.56.38.223:23377",
    "enode://c2e2667ff2edb243160677a9452f4d4afff64645f0b39cd21e2b284567fa9e66279493763cfb63b1efda1"
    "5b3608eb8bbd9f436bedbd22506f061cea3c222f72e@80.98.178.136:55803",
    "enode://d42a19638fadfbc19991a1e9ab92055ea49209890d05405813d898cd769716d0de646ba13a07ab7f5ae3b"
    "e476a166f6e5f15310a4aedf915212b045a3bebafe3@200.52.79.154:41694",
    "enode://e2f51ca80c2cd6e1129f8b9769f59f2ff2d6a9579c07a244bde1b7c4dc7d18fcb8c4e951b1f131d22252e"
    "4056c5f7a71958eb4e3286536a4b7c9b4b6bc2aa595@132.205.229.18:60102",
    "enode://fe991752c4ceab8b90608fbf16d89a5f7d6d1825647d4981569ebcece1b243b2000420a5db721e214231c"
    "7a6da3543fa821185c706cbd9b9be651494ec97f56a@51.15.67.119:56890",
]
# Etherscan proxy endpoint returning ropsten's current block number (hex string)
ETHERSCAN_API_BLOCKNO = "https://ropsten.etherscan.io/api?module=proxy&action=eth_blockNumber"
GETH_PATH = "/usr/local/bin/geth"
# Normal run: testnet fast-sync with the JSON-RPC API exposed on all interfaces
GETH_CMD_RUN = [GETH_PATH, "--testnet", "--fast", "--rpc", "--rpcaddr", "0.0.0.0"]
# Bootstrap run: same command but with peer discovery disabled
GETH_CMD_RUN_INITIAL = [*GETH_CMD_RUN, "--nodiscover"]
# Max delay before syncing must have started
SYNC_START_DELAY = 120
# Percentage when we consider sync to be done
# XXX: FIXME: This is a hack to keep the node in "boot mode" (i.e. --nodiscover)
SYNC_FINISHED_PCT = 110
def get_current_block_no():
    """Return ropsten's current block number according to the etherscan API.

    Returns 0 when the API is unreachable or returns an unparseable payload,
    so the caller treats the chain head as unknown instead of crashing.
    """
    try:
        # 'result' is a hex string like "0x1a2b3c"; base 0 lets int() autodetect
        return int(requests.get(ETHERSCAN_API_BLOCKNO).json()['result'], 0)
    except (requests.RequestException, ValueError, KeyError):
        # FIX: previously a network error (ConnectionError/Timeout) propagated
        # out of here and crashed the sync-monitoring loop in main()
        return 0
@click.command()
@click.option("-b", "--bootnode", multiple=True, default=BOOTNODES)
def main(bootnode):
    """Bootstrap-sync geth against known-good peers, then restart it normally.

    Starts geth with discovery disabled, adds the given bootnodes over the IPC
    API, waits for the initial sync, then replaces this process with a regular
    geth run (discovery enabled, bootnodes passed on the command line).
    """
    geth_proc = subprocess.Popen(GETH_CMD_RUN_INITIAL)
    # Give node some time to start up
    time.sleep(5)
    web3 = Web3(IPCProvider(testnet=True))
    # probe the IPC socket once; geth may not have created it yet
    try:
        web3.eth.syncing
    except FileNotFoundError:
        log.critical("Can't connect to geth ipc port - check previous output")
        geth_proc.terminate()
        sys.exit(1)
    # manually feed the trusted peers since discovery is off
    for node in bootnode:
        web3.admin.addPeer(node)
        log.info("Adding bootnode %s", node)
    log.info("Added bootnodes")
    start = time.monotonic()
    err_cnt = 0
    synced = False
    # poll sync progress every 5s for as long as the geth child is alive
    while geth_proc.poll() is None:
        time.sleep(5)
        try:
            sync_state = web3.eth.syncing
            block_number = web3.eth.blockNumber
            err_cnt = 0
        except Timeout:
            # tolerate up to 10 consecutive IPC timeouts before giving up
            err_cnt += 1
            if err_cnt > 10:
                log.critical("Timeout connecting to geth")
                geth_proc.terminate()
                sys.exit(3)
            log.warning("Timeout connecting to geth, retrying.")
            continue
        # syncing == False means either already synced or not yet started
        if sync_state is False:
            if abs(block_number - get_current_block_no()) < 5:
                log.info("Node is already synced")
                synced = True
                break
            if time.monotonic() - start > SYNC_START_DELAY:
                log.critical("Node hasn't started syncing after {}s".format(SYNC_START_DELAY))
                geth_proc.terminate()
                sys.exit(2)
            continue
        if sync_state['currentBlock'] / sync_state['highestBlock'] * 100 >= SYNC_FINISHED_PCT:
            log.info("Syncing done")
            synced = True
            break
        else:
            # report progress and a rough ETA based on the average block rate
            duration = time.monotonic() - start
            blocks_synced = sync_state['currentBlock'] - sync_state['startingBlock']
            blocks_remaining = sync_state['highestBlock'] - sync_state['currentBlock']
            blocks_per_sec = blocks_synced / duration
            time_remaining = timedelta(
                seconds=int(blocks_remaining / blocks_per_sec) if blocks_per_sec else 0)
            log.info("Blocks remaining: {:,d}; blk/s: {:.1f}; ETA: {!s} / {:%H:%M}".format(
                blocks_remaining,
                blocks_per_sec,
                time_remaining,
                datetime.now() + time_remaining
            ))
    # shut down the bootstrap instance cleanly (SIGINT lets geth flush state)
    geth_proc.send_signal(signal.SIGINT)
    geth_proc.wait(10)
    if not synced:
        log.critical("Geth terminated without finished syncing")
        sys.exit(4)
    log.info("Restarting geth")
    # replace this process with the normal geth run (discovery enabled)
    os.execv(GETH_PATH, [*GETH_CMD_RUN, "--bootnodes", ",".join(bootnode)])
if __name__ == "__main__":
    # DEBUG level so web3/geth interactions are fully visible on the console.
    logging.basicConfig(level=logging.DEBUG)
    main()
| 44.504854 | 99 | 0.754472 |
import structlog
import os
import time
import subprocess
import sys
import logging
import click
from datetime import datetime, timedelta
import signal
import requests
from web3 import Web3, IPCProvider
from web3.utils.compat.compat_stdlib import Timeout
log = struct.get_logger(__name__)
BOOTNODES = [
"enode://bed9a7af25633bbbb7bf23bfeb1518e2601868953d4b9dfcc490d00a5dd2c3ca17580fe23dcfb69208757"
"465d6e517109fd17b9cdfcccdc4a2cd2bdd81f93e1a@134.119.11.28:30303",
5daba35766d463a82a7b360b3a80e35ab2e0daa25bdc6ca6213ff4c8348025e7e1a908a8"
"f58411a364fe02a0fb3c2aa32008304f063d8aaf1a2@163.172.132.85:30303",
"enode://0a4d29ff55bc331bf4850bb967074beb02a2fc235c5fbd4511db57ed98781d5d75590368d69b3014d62fa"
"ab0d6146ce5221bf7e72a22404d7423c5e025019396@109.62.202.54:14574",
"enode://0f838387f82e14ffabaa6c48812ce0b33f79444ffd1d36d82f5e101803375e3911583fee2703ec3205d3c"
"729c2b0eb86d9fbb5de5bcadeff3aa05297a0af12e6@52.174.187.98:48036",
"enode://18a5676911f520ff7fd04013227513a0f2a0cea1bc39a53d3d6afc8f476d9e600db65a3235ea74ab363da"
"64c183d1f24c9f6fc606ab6f818e42049607d5b8e64@50.202.110.126:60668",
"enode://37a6360cf1597cfe9ba5c242b247137f7a222e86e5c2d23e483eeb314b794648f71dedb2c15ad85b8ad85"
"9f32b51c23e280982dd35b35d4432c963f3088e7165@31.16.253.42:8183",
"enode://3dd0079b86d9a126010a1b6b41ef2ca0227a839f5132a222e10bc8ebc25a180317fb00b4470cb4dd599e1"
"3ba330969c2d24b01231b8ba627be6845fdb0a69677@208.76.2.246:3872",
"enode://3fa5f2525f8edf67abcec110d7dd4d98e1ea936f618f10ea3418c6db3faf065258a7e652097e69275749e"
"921df9523ceabeaac2c702bbdff4ee1e89fe75dd932@217.234.145.135:49286",
"enode://4814abeb1d62f924861cfd53503eb8d16a8375f5061f5b109cf4f83cbddf9605caf6ae99ea4ec515b4deb"
"adeb172183edb1119d65e15abb5430b2737e157a810@188.60.170.25:45594",
"enode://4df3e91d952d96546adce09dfa279cc4617d98a9d88eda1a31a2214ec908632f545d5283ecb7816ce3052"
"3c9eb011348fa42a431a31ed2f3ca7d45f040c70bac@45.27.21.43:53576",
"enode://70aab01dbb9b8c9676a41c8458dfabe874777eb06a925692fc8901d575befb609e98fdc1023268003c6c0"
"9ac156f1cbbc22a2ba568eafbc32bbd40d62edd02db@46.53.177.238:51176",
"enode://7db1dd1c123eac9ef7f4a9e999c0abe2a5ec9886b61d2366645ff508e02455d7f139cc9fdfc84ca2b0ea4"
"11da1552d93a2508d3dacc3ef6704ff47a38426cb4a@216.151.184.87:53754",
"enode://82a91578bcc39447f084aba14f932056cc09bd57e3ac1be039c5f3202eeb7281a512da0a664fa3b10d935"
"4c1604db3b56d8bb585e2006c6fd24761c5a50056f0@99.235.74.76:43352",
"enode://86ebc843aa51669e08e27400e435f957918e39dc540b021a2f3291ab776c88bbda3d97631639219b6e77e"
"375ab7944222c47713bdeb3251b25779ce743a39d70@212.47.254.155:30303",
"enode://8ab78987908454be92f4aadbe379245cbf0e472547ede2f3efebc0ca733c51ed895515300a04f2ca60ccf"
"a0455f68d56f4734b2b931a0232594967c50f6b42cc@54.196.249.59:36388",
"enode://8b88dabdfdca2c7aab160b1a26c7e5be855bf55ed4dda05b176dee1d09fe00e1a1a6bce73585dbbbd3f05"
"cd94259dbe8fe201af0283a5a40a33408e4184df550@84.200.105.107:39521",
"enode://90f0c67ede3ff110d47cb76d44499105f337dca4046bef73b6fed8fc4b9bbf488917c96442c2f80e84894"
"9f77478893fc9dbefadf9a92414cb44677c2890ca69@52.233.195.36:1440",
"enode://9588c5cc481de306217d97e055312ded57276ee46beb6ff834b2aa46ed67e6b941fc99098aabece0cecec"
"0bf6f536d9c0e2337c0166a8ef49dc564440ddac8ed@124.169.136.13:51075",
"enode://9f2afd7068309d43adc91cd6f4dc41cbd69a9b9b3ea9ef8f3109cac395d3e08256b08a23fbccded6a7879"
"f00f05ed4b385047216373291466a8e4066f56977b5@12.46.122.13:24437",
"enode://aa927af666de44bbbe8ea6e0b3665125c1afed8841bb1c26daf10b0cf1b1683e9ceac49bdf2779ec0a954"
"e1d64ff98b7d5126f2feb7c6a37dba068038646676a@72.221.17.212:63725",
"enode://bf6848d2a994079293da3fa378bb9d228c0ae3e474ba5708d1719195044746cdaaa129801db8d0c86f24d"
"fff92963f6f58905b7fa06b3440d723208253516516@172.56.38.223:23377",
"enode://c2e2667ff2edb243160677a9452f4d4afff64645f0b39cd21e2b284567fa9e66279493763cfb63b1efda1"
"5b3608eb8bbd9f436bedbd22506f061cea3c222f72e@80.98.178.136:55803",
"enode://d42a19638fadfbc19991a1e9ab92055ea49209890d05405813d898cd769716d0de646ba13a07ab7f5ae3b"
"e476a166f6e5f15310a4aedf915212b045a3bebafe3@200.52.79.154:41694",
"enode://e2f51ca80c2cd6e1129f8b9769f59f2ff2d6a9579c07a244bde1b7c4dc7d18fcb8c4e951b1f131d22252e"
"4056c5f7a71958eb4e3286536a4b7c9b4b6bc2aa595@132.205.229.18:60102",
"enode://fe991752c4ceab8b90608fbf16d89a5f7d6d1825647d4981569ebcece1b243b2000420a5db721e214231c"
"7a6da3543fa821185c706cbd9b9be651494ec97f56a@51.15.67.119:56890",
]
ETHERSCAN_API_BLOCKNO = "https://ropsten.etherscan.io/api?module=proxy&action=eth_blockNumber"
GETH_PATH = "/usr/local/bin/geth"
GETH_CMD_RUN = [GETH_PATH, "--testnet", "--fast", "--rpc", "--rpcaddr", "0.0.0.0"]
GETH_CMD_RUN_INITIAL = [*GETH_CMD_RUN, "--nodiscover"]
SYNC_START_DELAY = 120
SYNC_FINISHED_PCT = 110
def get_current_block_no():
    """Fetch the current Ropsten block height from Etherscan; 0 on failure."""
    try:
        payload = requests.get(ETHERSCAN_API_BLOCKNO).json()
        # Etherscan returns the height as a hex string; base 0 auto-detects 0x.
        height = int(payload['result'], 0)
    except (ValueError, KeyError):
        return 0
    return height
@click.command()
@click.option("-b", "--bootnode", multiple=True, default=BOOTNODES)
def main(bootnode):
    """Start geth with --nodiscover, add static peers, wait for the chain
    sync to finish, then re-exec geth in normal discovery mode.

    Exit codes: 1 = IPC unreachable, 2 = sync never started,
    3 = repeated IPC timeouts, 4 = geth died before sync completed.
    """
    geth_proc = subprocess.Popen(GETH_CMD_RUN_INITIAL)
    # Give geth a moment to create its IPC socket.
    time.sleep(5)
    web3 = Web3(IPCProvider(testnet=True))
    try:
        # Probe the IPC socket once; FileNotFoundError means startup failed.
        web3.eth.syncing
    except FileNotFoundError:
        log.critical("Can't connect to geth ipc port - check previous output")
        geth_proc.terminate()
        sys.exit(1)
    for node in bootnode:
        web3.admin.addPeer(node)
        log.info("Adding bootnode %s", node)
    log.info("Added bootnodes")
    start = time.monotonic()
    err_cnt = 0  # consecutive IPC timeouts; reset on success
    synced = False
    while geth_proc.poll() is None:  # loop while geth is still running
        time.sleep(5)
        try:
            sync_state = web3.eth.syncing
            block_number = web3.eth.blockNumber
            err_cnt = 0
        except Timeout:
            err_cnt += 1
            if err_cnt > 10:
                log.critical("Timeout connecting to geth")
                geth_proc.terminate()
                sys.exit(3)
            log.warning("Timeout connecting to geth, retrying.")
            continue
        if sync_state is False:
            # False means either "not started yet" or "fully synced";
            # compare against Etherscan's height to tell them apart.
            if abs(block_number - get_current_block_no()) < 5:
                log.info("Node is already synced")
                synced = True
                break
            if time.monotonic() - start > SYNC_START_DELAY:
                log.critical("Node hasn't started syncing after {}s".format(SYNC_START_DELAY))
                geth_proc.terminate()
                sys.exit(2)
            continue
        if sync_state['currentBlock'] / sync_state['highestBlock'] * 100 >= SYNC_FINISHED_PCT:
            log.info("Syncing done")
            synced = True
            break
        else:
            # Still syncing: log progress with an ETA from the average rate.
            duration = time.monotonic() - start
            blocks_synced = sync_state['currentBlock'] - sync_state['startingBlock']
            blocks_remaining = sync_state['highestBlock'] - sync_state['currentBlock']
            blocks_per_sec = blocks_synced / duration
            time_remaining = timedelta(
                seconds=int(blocks_remaining / blocks_per_sec) if blocks_per_sec else 0)
            log.info("Blocks remaining: {:,d}; blk/s: {:.1f}; ETA: {!s} / {:%H:%M}".format(
                blocks_remaining,
                blocks_per_sec,
                time_remaining,
                datetime.now() + time_remaining
            ))
    # SIGINT lets geth shut down cleanly and flush its database.
    geth_proc.send_signal(signal.SIGINT)
    geth_proc.wait(10)
    if not synced:
        log.critical("Geth terminated without finished syncing")
        sys.exit(4)
    log.info("Restarting geth")
    # Replace this process with a normal geth run using the same bootnodes.
    os.execv(GETH_PATH, [*GETH_CMD_RUN, "--bootnodes", ",".join(bootnode)])
if __name__ == "__main__":
    # Verbose logging by default for this operational script.
    logging.basicConfig(level=logging.DEBUG)
    main()
| true | true |
f7fad87c49fa546f99dedc65a401b42b0841073f | 2,166 | py | Python | crawler/management/commands/export_csv.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | 1 | 2022-03-17T03:02:49.000Z | 2022-03-17T03:02:49.000Z | crawler/management/commands/export_csv.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | null | null | null | crawler/management/commands/export_csv.py | ahmedshahriar/bd-medicine-scraper | ea97d929fc9cdcbdde2602827cdc3d12709e2ca9 | [
"Apache-2.0"
] | null | null | null | import csv
import datetime
from crawler.models import Medicine, Generic, DosageForm, DrugClass, Indication, Manufacturer
from django.core.management import BaseCommand
from django.utils.autoreload import logger
class Command(BaseCommand):  # see https://gist.github.com/2724472
    """Management command that dumps every row of one crawler model to CSV.

    Usage: ``manage.py export_csv <model_name> [outfile]``
    """

    # Fixed: the previous help text ("Mapping the generics with medicines")
    # described a different command.
    help = "Export a crawler model (medicine, generic, ...) to a CSV file"

    def add_arguments(self, parser):
        parser.add_argument('model_name',
                            type=str,
                            help='model name for the csv export, e.g. medicine, generic, dosage_form, drug_class, '
                                 'indication, manufacturer')
        parser.add_argument('outfile',
                            nargs='?',
                            type=str,
                            help='Save path, like </path/to/outfile.csv> or "/data/medicine.csv"')

    def handle(self, *args, **options):
        model_name = options['model_name']
        # Default the output file name to "<model_name>.csv".
        export_file = f"{options['outfile']}.csv" if options['outfile'] else f'{model_name}.csv'
        # Lazy %-args: the message is only formatted if the record is emitted.
        logger.info("Exporting... %s", model_name)
        model_dict = {'medicine': Medicine, 'generic': Generic, 'dosage_form': DosageForm, 'drug_class': DrugClass,
                      'indication': Indication, 'manufacturer': Manufacturer}
        model_class = model_dict[model_name]  # KeyError on an unknown model name
        with open(export_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            # Skip many-to-many and reverse relations: they have no single
            # column value to serialize.
            fields = [field for field in model_class._meta.get_fields()
                      if not field.many_to_many and not field.one_to_many]
            # Write a first row with header information
            writer.writerow([field.verbose_name for field in fields])
            # Write data rows
            for obj in model_class.objects.all():
                data_row = []
                for field in fields:
                    value = getattr(obj, field.name)
                    if isinstance(value, datetime.datetime):
                        value = value.strftime('%d/%m/%Y')
                    data_row.append(value)
                writer.writerow(data_row)
            # Fixed: the old call logger.info(f.name, "exported") passed a
            # %-argument with no placeholder, which made the logging module
            # raise a formatting error instead of logging the message.
            logger.info("%s exported", f.name)
| 40.867925 | 115 | 0.574792 | import csv
import datetime
from crawler.models import Medicine, Generic, DosageForm, DrugClass, Indication, Manufacturer
from django.core.management import BaseCommand
from django.utils.autoreload import logger
class Command(BaseCommand):
    """Management command that dumps every row of one crawler model to CSV.

    Usage: ``manage.py export_csv <model_name> [outfile]``
    """
    # NOTE(review): this help text looks copy-pasted from another command;
    # the class actually exports a model to CSV.
    help = "Mapping the generics with medicines"
    def add_arguments(self, parser):
        parser.add_argument('model_name',
                            type=str,
                            help='model name for the csv export, e.g. medicine, generic, dosage_form, drug_class, '
                                 'indication, manufacturer')
        parser.add_argument('outfile',
                            nargs='?',
                            type=str,
                            help='Save path, like </path/to/outfile.csv> or "/data/medicine.csv"')
    def handle(self, *args, **options):
        model_name = options['model_name']
        # Output file defaults to "<model_name>.csv" when no outfile is given.
        export_file = f"{options['outfile']}.csv" if options['outfile'] else '{}.csv'.format(model_name)
        logger.info("Exporting... %s" % model_name)
        model_dict = {'medicine': Medicine, 'generic': Generic, 'dosage_form': DosageForm, 'drug_class': DrugClass,
                      'indication': Indication, 'manufacturer': Manufacturer}
        # Raises KeyError for an unknown model name.
        model_class = model_dict[model_name]
        with open('%s' % export_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            # Skip m2m / reverse relations: no single column value to write.
            fields = [field for field in model_class._meta.get_fields() if not field.many_to_many \
                      and not field.one_to_many]
            # Header row built from the model fields' verbose names.
            writer.writerow([field.verbose_name for field in fields])
            # One CSV row per model instance; datetimes rendered as dd/mm/YYYY.
            for obj in model_class.objects.all():
                data_row = []
                for field in fields:
                    value = getattr(obj, field.name)
                    if isinstance(value, datetime.datetime):
                        value = value.strftime('%d/%m/%Y')
                    data_row.append(value)
                writer.writerow(data_row)
            # NOTE(review): "exported" is passed as a %-arg with no placeholder
            # in the message, so logging raises a formatting error here instead
            # of logging — should be logger.info("%s exported", f.name).
            logger.info(f.name, "exported")
| true | true |
f7fadaad7cccab57b8cbd393c836ecfc66caef2b | 1,085 | py | Python | orders/views.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | orders/views.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | orders/views.py | pauljherrera/avantiweb | 40b87e754e68a0e2adcf5e1640d5e2e0c8637d0a | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.admin.views.decorators import staff_member_required
from .models import Order, Product
from .forms import OrderCreateForm
from .tasks import order_created
# Create your views here.
def order_create(request, pk):
    """Create an Order for product ``pk`` owned by the current user.

    GET renders the order form; a valid POST saves the order, stores the
    order/product ids in the session for the payment view, and redirects to
    the payment flow. An invalid POST re-renders the bound form.
    """
    if request.method == 'POST':
        form = OrderCreateForm(request.POST)
        if form.is_valid():
            order = form.save(commit=False)
            order.owner = request.user
            # Use the URL parameter directly and return 404 on a missing
            # product, instead of indexing a queryset (which raised a bare
            # IndexError) keyed on a session value that may have expired.
            order.product = get_object_or_404(Product, pk=pk)
            order.save()
            # The payment view reads these ids back from the session.
            request.session['order_id'] = order.id
            request.session['product_id'] = pk
            return redirect(reverse('payment:process'))
    else:
        request.session['product_id'] = pk
        form = OrderCreateForm()
    return render(request, 'orders/order/create.html',
                  {'form': form, 'product_id': pk})
@staff_member_required
def admin_order_detail(request, order_id):
    """Staff-only view: render the admin detail template for one order.

    Returns 404 when no order with ``order_id`` exists.
    """
    order = get_object_or_404(Order, id=order_id)
    return render(request, 'admin/orders/order/detail.html',
                  {'order': order})
| 30.138889 | 78 | 0.748387 | from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.admin.views.decorators import staff_member_required
from .models import Order, Product
from .forms import OrderCreateForm
from .tasks import order_created
def order_create(request, pk):
    """Render the order form (GET) or create the order and redirect to
    payment (valid POST); an invalid POST re-renders the bound form."""
    if request.method != 'POST':
        # Remember which product the form belongs to, then show a blank form.
        request.session['product_id'] = pk
        return render(request, 'orders/order/create.html',
                      {'form': OrderCreateForm(), 'product_id': pk})
    form = OrderCreateForm(request.POST)
    if form.is_valid():
        order = form.save(commit=False)
        order.owner = request.user
        order.product = Product.objects.filter(pk=request.session['product_id'])[0]
        order.save()
        # The payment view reads these ids back from the session.
        request.session['order_id'] = order.id
        request.session['product_id'] = pk
        return redirect(reverse('payment:process'))
    return render(request, 'orders/order/create.html',
                  {'form': form, 'product_id': pk})
@staff_member_required
def admin_order_detail(request, order_id):
    """Staff-only view rendering the admin detail page for one order (404 if absent)."""
    order = get_object_or_404(Order, id=order_id)
    return render(request, 'admin/orders/order/detail.html',
                  {'order': order})
{'order': order})
| true | true |
f7fadb47db37dd5e50248bd8ac038911f732ddae | 1,092 | py | Python | user/collection/manager/modify_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 4 | 2018-04-23T00:04:01.000Z | 2018-10-28T22:56:51.000Z | user/collection/manager/modify_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 23 | 2017-12-22T08:27:35.000Z | 2021-12-13T19:57:35.000Z | user/collection/manager/modify_one.py | dsvalenciah/ROAp | 24cbff0e719c5009ec1f1e7190924d4d9297e992 | [
"MIT"
] | 1 | 2020-06-03T02:07:26.000Z | 2020-06-03T02:07:26.000Z |
from datetime import datetime
from manager.exceptions.user import (
UserPermissionError, UserNotFoundError, UserSchemaError
)
from manager.schemas.user import User
def modify_one(db_client, old_user_id, new_user, auth_user):
    """Apply ``new_user`` fields to the user ``old_user_id``.

    Non-administrators may only modify their own record. Raises
    UserPermissionError, UserNotFoundError or UserSchemaError accordingly.
    """
    # Assumes auth_user['language'] holds a gettext-style translation
    # callable (it is invoked as _(...) below) — TODO confirm with callers.
    _ = auth_user.get('language')
    if auth_user.get('role') != 'administrator':
        if old_user_id != auth_user.get('_id'):
            raise UserPermissionError(
                _('User not have sufficient permissions to do this action.')
            )
    old_user = db_client.users.find_one({'_id': old_user_id})
    if not old_user:
        raise UserNotFoundError(_('User _id not found.'))
    # Stamp the modification time before validation/serialization.
    new_user.update({
        'modified': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
    })
    # dump() returning a (data, errors) pair is marshmallow-2 style;
    # excluded fields are the ones users must not overwrite themselves.
    new_user, errors = User(
        exclude=[
            '_id', 'password', 'email', 'created',
            'modified', 'validated', 'last_activity'
        ],
    ).dump(new_user)
    if errors:
        raise UserSchemaError(errors)
    db_client.users.update_one(
        {'_id': old_user_id},
        {'$set': new_user}
    )
| 24.818182 | 76 | 0.60989 |
from datetime import datetime
from manager.exceptions.user import (
UserPermissionError, UserNotFoundError, UserSchemaError
)
from manager.schemas.user import User
def modify_one(db_client, old_user_id, new_user, auth_user):
    """Update the user ``old_user_id`` with the fields in ``new_user``.

    Non-admins may only modify themselves; raises UserPermissionError,
    UserNotFoundError or UserSchemaError on failure.
    """
    _ = auth_user.get('language')
    is_admin = auth_user.get('role') == 'administrator'
    if not is_admin and old_user_id != auth_user.get('_id'):
        raise UserPermissionError(
            _('User not have sufficient permissions to do this action.')
        )
    existing = db_client.users.find_one({'_id': old_user_id})
    if not existing:
        raise UserNotFoundError(_('User _id not found.'))
    # Stamp the modification time before serializing.
    new_user['modified'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    schema = User(
        exclude=[
            '_id', 'password', 'email', 'created',
            'modified', 'validated', 'last_activity'
        ],
    )
    new_user, errors = schema.dump(new_user)
    if errors:
        raise UserSchemaError(errors)
    db_client.users.update_one({'_id': old_user_id}, {'$set': new_user})
| true | true |
f7fadbbb6e1c64dad3ccc8b8fadb28371e058de9 | 263 | py | Python | 04Cuarto/Sistemas_y_proteccion_de_sistemas_informaticos_SPSI/Practica5/src/model/__init__.py | elsudano/Facultad | 8ff2c5904f0a38a3a0682e040da4439f2bc872f2 | [
"MIT"
] | 2 | 2017-02-20T09:26:42.000Z | 2021-11-21T21:56:35.000Z | 04Cuarto/Sistemas_y_proteccion_de_sistemas_informaticos_SPSI/Practica5/src/model/__init__.py | elsudano/Facultad | 8ff2c5904f0a38a3a0682e040da4439f2bc872f2 | [
"MIT"
] | 1 | 2016-10-06T16:59:39.000Z | 2017-09-21T08:04:51.000Z | 04Cuarto/Sistemas_y_proteccion_de_sistemas_informaticos_SPSI/Practica5/src/model/__init__.py | elsudano/Facultad | 8ff2c5904f0a38a3a0682e040da4439f2bc872f2 | [
"MIT"
] | 4 | 2016-10-06T16:41:01.000Z | 2019-11-21T12:37:20.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Fichero de inicialización del modulo
Este fichero se usa para poder inicializar las diferentes partes del modulo
se usa para poner los imports necesarios para la aplicación
"""
from src.model.Models import MainModel
| 26.3 | 75 | 0.764259 |
from src.model.Models import MainModel
| true | true |
f7fadc225c598d19f5506d8fce73a39b31507e8d | 3,463 | py | Python | Condor/Tools/Fun/Windows.py | OriolOriolOriol/Condor | 5b855ff7170e43149f9e9f81a97b6b88282915c5 | [
"MIT"
] | null | null | null | Condor/Tools/Fun/Windows.py | OriolOriolOriol/Condor | 5b855ff7170e43149f9e9f81a97b6b88282915c5 | [
"MIT"
] | null | null | null | Condor/Tools/Fun/Windows.py | OriolOriolOriol/Condor | 5b855ff7170e43149f9e9f81a97b6b88282915c5 | [
"MIT"
] | 1 | 2020-11-04T08:32:26.000Z | 2020-11-04T08:32:26.000Z |
# Import modules
from ctypes import windll, wintypes, create_unicode_buffer, WINFUNCTYPE, Structure, c_bool, c_int, c_long, c_ulong, byref, POINTER
from psutil import process_iter, Process # pip install psutil
__user32 = windll.user32
__EnumWindows = __user32.EnumWindows
__EnumWindowsProc = WINFUNCTYPE(c_bool, POINTER(c_int), POINTER(c_int))
__IsWindowVisible = __user32.IsWindowVisible
__GetWindowThreadProcessId = __user32.GetWindowThreadProcessId
""" Get Window object by process """
def GetWindowByProcess(processName):
    """Return a window wrapper for the first process whose name contains
    ``processName``; returns False when nothing matches.

    Processes that disappear or are inaccessible during iteration, and
    processes without a visible top-level window, are skipped.
    """
    # Local import keeps the module-level import line unchanged.
    from psutil import NoSuchProcess, AccessDenied
    for proc in process_iter():
        try:
            # Fetch the name once (the original called it twice) and guard
            # against the process exiting between iteration and the call.
            name = proc.name()
        except (NoSuchProcess, AccessDenied):
            continue
        if processName in name:
            pid = proc.pid
            try:
                hwnd = __FindHwndNyPid(pid)
            except IndexError:
                # Matching process has no visible window; keep looking.
                continue
            return __Window(pid, name, hwnd)
    return False
""" Get PID by hwnd """
def __GetPidByHWND(hwnd):
    """Return the process id that owns the given window handle."""
    pid = wintypes.DWORD()
    # GetWindowThreadProcessId writes the owning PID into the out-parameter.
    __GetWindowThreadProcessId(hwnd, byref(pid))
    return pid.value
""" Get hwnd by PID """
def __FindHwndNyPid(pid):
    """Return the first visible top-level window handle owned by ``pid``.

    Raises IndexError when the process has no visible window; callers rely
    on that to skip windowless processes.
    """
    hwnds = []
    def foreach_window(hwnd, lParam):
        if __IsWindowVisible(hwnd):
            if __GetPidByHWND(hwnd) == pid:
                hwnds.append(hwnd)
        # Returning True tells EnumWindows to keep enumerating.
        return True
    __EnumWindows(__EnumWindowsProc(foreach_window), 0)
    return hwnds[0]
""" Rect """
class Rect(Structure):
    """ctypes mirror of the Win32 RECT struct (window bounds in pixels)."""
    _fields_ = [
        ('left', c_long),
        ('top', c_long),
        ('right', c_long),
        ('bottom', c_long)
    ]
""" Window class """
class __Window:
    """Wrapper around one Win32 top-level window (hwnd) plus its process.

    Window management goes through user32; process helpers (executable
    path, terminate) go through psutil.
    """
    # Constructor
    def __init__(self, pid, name, hwnd):
        __rect = wintypes.RECT()
        self.pid = pid
        self.hwnd = hwnd
        self.name = name
        self.__user32 = windll.user32
        # Snapshot the window bounds once at construction time.
        self.__user32.GetWindowRect(self.hwnd, byref(__rect))
        self.rect = Rect(__rect.left, __rect.top, __rect.right, __rect.bottom)
    # Representation
    def __repr__(self):
        return f"Window (pid={self.pid}, name={repr(self.name)})"
    # Maximize window (3 = SW_MAXIMIZE)
    def Maximize(self):
        return self.__user32.ShowWindow(self.hwnd, 3)
    # is Maximized
    def isMaximized(self):
        return self.__user32.IsZoomed(self.hwnd) != 0
    # Minimize window (6 = SW_MINIMIZE)
    def Minimize(self):
        return self.__user32.ShowWindow(self.hwnd, 6)
    # is Minimized
    def isMinimized(self):
        return self.__user32.IsIconic(self.hwnd) != 0
    # Restore window (9 = SW_RESTORE)
    def Restore(self):
        return self.__user32.ShowWindow(self.hwnd, 9)
    # Activate window (bring to foreground)
    def Activate(self):
        return self.__user32.SetForegroundWindow(self.hwnd)
    # Move window
    def Move(self, x, y, height, width, repaint=True):
        return self.__user32.MoveWindow(self.hwnd, x, y, height, width, repaint)
    # Get title
    def Title(self):
        """Return the window's title bar text."""
        textLenInCharacters = self.__user32.GetWindowTextLengthW(self.hwnd)
        # +1 for the terminating NUL that GetWindowTextW writes.
        stringBuffer = create_unicode_buffer(textLenInCharacters + 1)
        self.__user32.GetWindowTextW(self.hwnd, stringBuffer, textLenInCharacters + 1)
        return stringBuffer.value
    # Get executable location
    def Executable(self):
        p = Process(self.pid)
        return p.exe()
    # Close window (0x0010 = WM_CLOSE, a polite close request)
    def Close(self):
        return self.__user32.PostMessageA(self.hwnd, 0x0010, 0, 0)
    # Kill process
    def Terminate(self):
        p = Process(self.pid)
        return p.terminate()
    # is Visible
    def isVisible(self):
        return self.__user32.IsWindowVisible(self.hwnd) != 0
from ctypes import windll, wintypes, create_unicode_buffer, WINFUNCTYPE, Structure, c_bool, c_int, c_long, c_ulong, byref, POINTER
from psutil import process_iter, Process
__user32 = windll.user32
__EnumWindows = __user32.EnumWindows
__EnumWindowsProc = WINFUNCTYPE(c_bool, POINTER(c_int), POINTER(c_int))
__IsWindowVisible = __user32.IsWindowVisible
__GetWindowThreadProcessId = __user32.GetWindowThreadProcessId
def GetWindowByProcess(processName):
    """Find the first process whose name contains ``processName`` and wrap
    its visible window; returns False when no match is found."""
    for candidate in process_iter():
        name = candidate.name()
        if processName not in name:
            continue
        try:
            handle = __FindHwndNyPid(candidate.pid)
        except IndexError:
            # Matching process has no visible window; keep searching.
            continue
        return __Window(candidate.pid, name, handle)
    return False
def __GetPidByHWND(hwnd):
    """Return the process id owning window handle ``hwnd``."""
    pid = wintypes.DWORD()
    # GetWindowThreadProcessId writes the owning PID into the out-parameter.
    __GetWindowThreadProcessId(hwnd, byref(pid))
    return pid.value
def __FindHwndNyPid(pid):
    """Return the first visible top-level hwnd owned by ``pid``.

    Raises IndexError when the process has no visible window.
    """
    hwnds = []
    def foreach_window(hwnd, lParam):
        if __IsWindowVisible(hwnd):
            if __GetPidByHWND(hwnd) == pid:
                hwnds.append(hwnd)
        # True keeps EnumWindows enumerating.
        return True
    __EnumWindows(__EnumWindowsProc(foreach_window), 0)
    return hwnds[0]
class Rect(Structure):
    """ctypes mirror of the Win32 RECT struct (window bounds in pixels)."""
    _fields_ = [
        ('left', c_long),
        ('top', c_long),
        ('right', c_long),
        ('bottom', c_long)
    ]
class __Window:
    """Wrapper around one Win32 top-level window (hwnd) plus its process."""
    def __init__(self, pid, name, hwnd):
        __rect = wintypes.RECT()
        self.pid = pid
        self.hwnd = hwnd
        self.name = name
        self.__user32 = windll.user32
        # Snapshot the window bounds once at construction time.
        self.__user32.GetWindowRect(self.hwnd, byref(__rect))
        self.rect = Rect(__rect.left, __rect.top, __rect.right, __rect.bottom)
    def __repr__(self):
        return f"Window (pid={self.pid}, name={repr(self.name)})"
    def Maximize(self):
        # 3 = SW_MAXIMIZE
        return self.__user32.ShowWindow(self.hwnd, 3)
    def isMaximized(self):
        return self.__user32.IsZoomed(self.hwnd) != 0
    def Minimize(self):
        # 6 = SW_MINIMIZE
        return self.__user32.ShowWindow(self.hwnd, 6)
    def isMinimized(self):
        return self.__user32.IsIconic(self.hwnd) != 0
    def Restore(self):
        # 9 = SW_RESTORE
        return self.__user32.ShowWindow(self.hwnd, 9)
    def Activate(self):
        """Bring the window to the foreground."""
        return self.__user32.SetForegroundWindow(self.hwnd)
    def Move(self, x, y, height, width, repaint=True):
        return self.__user32.MoveWindow(self.hwnd, x, y, height, width, repaint)
    def Title(self):
        """Return the window's title bar text."""
        textLenInCharacters = self.__user32.GetWindowTextLengthW(self.hwnd)
        # +1 for the terminating NUL written by GetWindowTextW.
        stringBuffer = create_unicode_buffer(textLenInCharacters + 1)
        self.__user32.GetWindowTextW(self.hwnd, stringBuffer, textLenInCharacters + 1)
        return stringBuffer.value
    def Executable(self):
        """Return the full path of the process executable (via psutil)."""
        p = Process(self.pid)
        return p.exe()
    def Close(self):
        # 0x0010 = WM_CLOSE: polite close request, app may refuse.
        return self.__user32.PostMessageA(self.hwnd, 0x0010, 0, 0)
    def Terminate(self):
        """Forcefully terminate the owning process."""
        p = Process(self.pid)
        return p.terminate()
    def isVisible(self):
        return self.__user32.IsWindowVisible(self.hwnd) != 0
f7fadc2cb2daab721548b8fb3360d0ce49f6e886 | 8,030 | py | Python | src/shop/drognan.py | Cho0joy/botty | ed9c22b78a527443b46fdc3070cb128f32501e2e | [
"MIT"
] | 1 | 2022-02-09T03:19:59.000Z | 2022-02-09T03:19:59.000Z | src/shop/drognan.py | Cho0joy/botty | ed9c22b78a527443b46fdc3070cb128f32501e2e | [
"MIT"
] | null | null | null | src/shop/drognan.py | Cho0joy/botty | ed9c22b78a527443b46fdc3070cb128f32501e2e | [
"MIT"
] | 2 | 2022-01-10T12:46:31.000Z | 2022-02-12T20:26:16.000Z | import datetime
import os
import time
import math
import random
from typing import Dict, Tuple, Union, List, Callable
import keyboard
import numpy as np
from screen import Screen
from config import Config
from logger import Logger
from npc_manager import NpcManager, Npc
from template_finder import TemplateFinder
from utils.custom_mouse import mouse
from utils.misc import wait
def exit(run_obj):
    """Print the shopper's final statistics and hard-terminate the process.

    NOTE: intentionally shadows the ``exit`` builtin, matching existing callers.
    """
    elapsed_seconds = round(time.time() - run_obj.start_time)
    run_time = str(datetime.timedelta(seconds=elapsed_seconds))
    Logger.info("Exiting shopping mall...")
    header = "STATS \truns \t\ttime \titems_evaluated \titems_bought\n"
    body = (
        f"\t{run_obj.run_count} \t\t{run_time}"
        f"\t\t{run_obj.items_evaluated} \t\t\t{run_obj.items_bought}"
    )
    print(header + body)
    # os._exit skips atexit handlers and kills the process immediately.
    os._exit(0)
class DrognanShopper:
    """
    Shop at Drognan for Items.
    Currently supported: Hammerdin scepters
    In order to start the shopping bot:
    1.) Run this this file in Python.
    2.) Be ingame in Lut Golein (Act 2 town)
    3.) Stand close to Drognan and the town exit (must be top right layout)
    4.) While being ingame, press resume_key (default F11) to start the shopping, and exit_key (default F12) to stop it.
    """
    def __init__(self, config: Config):
        """Read shop settings from ``config`` and set up screen/NPC helpers."""
        self._config = config
        # Set look_for variables to False if you dont like your personal shopper to look for these
        # Obviously something need to be set to True, or your shopper will be very confused
        self.look_for_scepters = self._config.shop["shop_hammerdin_scepters"]
        self.speed_factor = 1.0 + self._config.shop["speed_factor"]
        if (self.speed_factor <= 0):
            Logger.error("Can not use a speed factor less than negative 1!! Please update shop.ini. Exiting.")
            os._exit(0)
        self.apply_pather_adjustment = self._config.shop["apply_pather_adjustment"]
        self._screen = Screen()
        self._template_finder = TemplateFinder(self._screen, ["assets\\templates", "assets\\npc", "assets\\shop"], save_last_res=True)
        self._npc_manager = NpcManager(
            screen=self._screen, template_finder=self._template_finder
        )
        self.run_count = 0
        self.start_time = time.time()
        # items config
        self.roi_shop_item_stats = [0, 0, config.ui_pos["screen_width"] // 2, config.ui_pos["screen_height"] - 100]
        self.roi_vendor = config.ui_roi["left_inventory"]
        self.rx, self.ry, _, _ = self.roi_vendor
        # Screen position of the scepter tab button in the vendor window.
        self.sb_x, self.sb_y = self._screen.convert_screen_to_monitor((180, 77))
        self.c_x, self.c_y = self._screen.convert_screen_to_monitor((config.ui_pos["center_x"], config.ui_pos["center_y"]))
        self.items_evaluated = 0
        self.items_bought = 0
    def run(self):
        """Entry point: reset the vendor inventory once, then shop forever."""
        Logger.info("Personal Drognan Shopper at your service! Hang on, running some errands...")
        self.reset_shop()
        self.shop_loop()
    def shop_loop(self):
        """Open Drognan's trade window, scan for scepters, buy matches, repeat."""
        # This is the main shopping loop. It can be further generalized to more easily support new items,
        # But this is sufficient for now.
        while True:
            self._npc_manager.open_npc_menu(Npc.DROGNAN)
            self._npc_manager.press_npc_btn(Npc.DROGNAN, "trade")
            time.sleep(0.1)
            img = self._screen.grab()
            if self.look_for_scepters is True:
                # Click the scepter tab of the vendor inventory.
                mouse.move(self.sb_x, self.sb_y, randomize=3, delay_factor=[0.6, 0.8])
                wait(0.05, 0.1)
                mouse.press(button="left")
                wait(0.05, 0.1)
                mouse.release(button="left")
                wait(0.3, 0.4)
                # Search for items
                item_pos = []
                img = self._screen.grab().copy()
                item_keys = ["SCEPTER1", "SCEPTER2", "SCEPTER3", "SCEPTER4", "SCEPTER5"]
                for ck in item_keys:
                    template_match = self._template_finder.search(ck, img, roi=self.roi_vendor)
                    if template_match.valid:
                        (y, x) = np.where(self._template_finder.last_res >= 0.6)
                        for (x, y) in zip(x, y):
                            # Offset from template corner to item hover point.
                            new_pos = [x + self.rx + 16, y + self.ry + 50]
                            # check if pos already exists in item_pos
                            exists_already = False
                            for pos in item_pos:
                                dist = math.dist(new_pos, pos)
                                if dist < 10:
                                    exists_already = True
                            if not exists_already:
                                item_pos.append(new_pos)
                # check out each item
                for pos in item_pos:
                    x_m, y_m = self._screen.convert_screen_to_monitor(pos)
                    mouse.move(x_m, y_m, randomize=3, delay_factor=[0.5, 0.6])
                    wait(0.5, 0.6)
                    img_stats = self._screen.grab()
                    # First check for +2 Paladin Skills. This weeds out most scepters right away.
                    if self._template_finder.search("2_TO_PALADIN_SKILLS", img_stats, roi=self.roi_shop_item_stats, threshold=0.94).valid:
                        # Has 2 Pally skills, check blessed hammers next
                        if self._template_finder.search("TO_BLESSED_HAMMERS", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
                            # Has 2 Pally skills AND Blessed Hammers, check Concentration next
                            if self._template_finder.search("TO_CONCENTRATION", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
                                # Has 2 Pally skills AND Blessed Hammers AND Concentration. We're good! Buy it!
                                mouse.click(button="right")
                                Logger.info(f"Item bought!")
                                self.items_bought += 1
                                time.sleep(1)
                    self.items_evaluated += 1
                keyboard.send("space")
            # Done with this shopping round
            self.reset_shop()
            self.run_count += 1
    def reset_shop(self):
        """Walk out of town and back so the vendor's inventory re-rolls."""
        # We want to walk out the town exit to the top right and come back down to drognan
        # This can probably be tweaked but seems to work well enough for now.
        # Exit town
        pos_m = self._screen.convert_abs_to_monitor((200, -100))
        mouse.move(pos_m[0], pos_m[1])
        self.hold_move(pos_m, time_held=(3.0 / self.speed_factor))
        # Return to town
        pos_m = self._screen.convert_abs_to_monitor((-200, 100))
        mouse.move(pos_m[0], pos_m[1])
        self.hold_move(pos_m, time_held=(2.0 / self.speed_factor))
    # A variation of the move() function from pather.py
    def hold_move(self, pos_monitor: Tuple[float, float], time_held: float = 2.0):
        """Hold left-click towards ``pos_monitor`` for ``time_held`` seconds to walk."""
        factor = self._config.advanced_options["pathing_delay_factor"]
        # in case we want to walk we actually want to move a bit before the point cause d2r will always "overwalk"
        pos_screen = self._screen.convert_monitor_to_screen(pos_monitor)
        pos_abs = self._screen.convert_screen_to_abs(pos_screen)
        # This logic (from pather.py) sometimes negatively affects the shopper, so default is to skip this.
        if self.apply_pather_adjustment:
            dist = math.dist(pos_abs, (0, 0))
            min_wd = self._config.ui_pos["min_walk_dist"]
            max_wd = random.randint(int(self._config.ui_pos["max_walk_dist"] * 0.65), self._config.ui_pos["max_walk_dist"])
            adjust_factor = max(max_wd, min(min_wd, dist - 50)) / dist
            pos_abs = [int(pos_abs[0] * adjust_factor), int(pos_abs[1] * adjust_factor)]
        x, y = self._screen.convert_abs_to_monitor(pos_abs)
        mouse.move(x, y, randomize=5, delay_factor=[factor*0.1, factor*0.14])
        wait(0.012, 0.02)
        mouse.press(button="left")
        wait(time_held - 0.05, time_held + 0.05)
        mouse.release(button="left")
| 45.625 | 142 | 0.602864 | import datetime
import os
import time
import math
import random
from typing import Dict, Tuple, Union, List, Callable
import keyboard
import numpy as np
from screen import Screen
from config import Config
from logger import Logger
from npc_manager import NpcManager, Npc
from template_finder import TemplateFinder
from utils.custom_mouse import mouse
from utils.misc import wait
def exit(run_obj):
    """Log a final statistics table for the shopper run and terminate.

    Note: intentionally keeps the (builtin-shadowing) name ``exit`` so
    existing callers keep working; uses os._exit to kill the process
    immediately without cleanup.
    """
    elapsed_seconds = round(time.time() - run_obj.start_time)
    run_time = str(datetime.timedelta(seconds=elapsed_seconds))
    Logger.info("Exiting shopping mall...")
    header = "STATS \truns \t\ttime \titems_evaluated \titems_bought\n"
    stats_row = (
        f"\t{run_obj.run_count} \t\t{run_time}"
        f"\t\t{run_obj.items_evaluated} \t\t\t{run_obj.items_bought}"
    )
    print(header + stats_row)
    os._exit(0)
class DrognanShopper:
    """Automated scepter shopper for the act-2 vendor Drognan.

    Loop: open Drognan's trade window, template-match scepter icons inside
    the vendor inventory, hover each hit to read its stat popup, and
    right-click (buy) any scepter that has +2 paladin skills, +blessed
    hammers and +concentration. Between rounds the shop inventory is
    refreshed by walking out of town and back (reset_shop).
    """

    def __init__(self, config: Config):
        """Cache config values, UI regions and helper objects.

        :param config: parsed bot configuration (shop settings, UI positions
            and regions of interest).
        """
        self._config = config
        self.look_for_scepters = self._config.shop["shop_hammerdin_scepters"]
        # shop.ini stores the factor as an offset from 1.0; the result must
        # stay positive because it divides walk durations in reset_shop().
        self.speed_factor = 1.0 + self._config.shop["speed_factor"]
        if (self.speed_factor <= 0):
            Logger.error("Can not use a speed factor less than negative 1!! Please update shop.ini. Exiting.")
            os._exit(0)
        self.apply_pather_adjustment = self._config.shop["apply_pather_adjustment"]
        self._screen = Screen()
        # save_last_res=True lets shop_loop read back the raw template match
        # scores (last_res) to locate every icon above threshold.
        self._template_finder = TemplateFinder(self._screen, ["assets\\templates", "assets\\npc", "assets\\shop"], save_last_res=True)
        self._npc_manager = NpcManager(
            screen=self._screen, template_finder=self._template_finder
        )
        self.run_count = 0
        self.start_time = time.time()
        # Left half of the screen minus the bottom bar: where the hovered
        # item's stat popup renders.
        self.roi_shop_item_stats = [0, 0, config.ui_pos["screen_width"] // 2, config.ui_pos["screen_height"] - 100]
        self.roi_vendor = config.ui_roi["left_inventory"]
        self.rx, self.ry, _, _ = self.roi_vendor
        # (180, 77) is a fixed button in the trade window — presumably the
        # weapons/scepter tab; TODO confirm against the UI layout.
        self.sb_x, self.sb_y = self._screen.convert_screen_to_monitor((180, 77))
        self.c_x, self.c_y = self._screen.convert_screen_to_monitor((config.ui_pos["center_x"], config.ui_pos["center_y"]))
        self.items_evaluated = 0
        self.items_bought = 0

    def run(self):
        """Entry point: refresh the shop once, then loop forever."""
        Logger.info("Personal Drognan Shopper at your service! Hang on, running some errands...")
        self.reset_shop()
        self.shop_loop()

    def shop_loop(self):
        """Endless trade/scan/buy/reset loop; never returns.

        The process is terminated externally (see module-level exit()).
        """
        while True:
            self._npc_manager.open_npc_menu(Npc.DROGNAN)
            self._npc_manager.press_npc_btn(Npc.DROGNAN, "trade")
            time.sleep(0.1)
            img = self._screen.grab()
            if self.look_for_scepters is True:
                # Click the tab that shows scepters, then wait for the UI.
                mouse.move(self.sb_x, self.sb_y, randomize=3, delay_factor=[0.6, 0.8])
                wait(0.05, 0.1)
                mouse.press(button="left")
                wait(0.05, 0.1)
                mouse.release(button="left")
                wait(0.3, 0.4)
                item_pos = []
                img = self._screen.grab().copy()
                item_keys = ["SCEPTER1", "SCEPTER2", "SCEPTER3", "SCEPTER4", "SCEPTER5"]
                # Collect screen positions of every scepter icon; last_res
                # holds the raw match map, so one search can yield many hits.
                for ck in item_keys:
                    template_match = self._template_finder.search(ck, img, roi=self.roi_vendor)
                    if template_match.valid:
                        (y, x) = np.where(self._template_finder.last_res >= 0.6)
                        for (x, y) in zip(x, y):
                            # Offset into the item cell so the hover lands on it.
                            new_pos = [x + self.rx + 16, y + self.ry + 50]
                            # De-duplicate near-identical matches (< 10 px apart).
                            exists_already = False
                            for pos in item_pos:
                                dist = math.dist(new_pos, pos)
                                if dist < 10:
                                    exists_already = True
                            if not exists_already:
                                item_pos.append(new_pos)
                # Hover each candidate and check its stat popup for the
                # three required hammerdin affixes before buying.
                for pos in item_pos:
                    x_m, y_m = self._screen.convert_screen_to_monitor(pos)
                    mouse.move(x_m, y_m, randomize=3, delay_factor=[0.5, 0.6])
                    wait(0.5, 0.6)
                    img_stats = self._screen.grab()
                    if self._template_finder.search("2_TO_PALADIN_SKILLS", img_stats, roi=self.roi_shop_item_stats, threshold=0.94).valid:
                        if self._template_finder.search("TO_BLESSED_HAMMERS", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
                            if self._template_finder.search("TO_CONCENTRATION", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
                                # Right-click buys the hovered item.
                                mouse.click(button="right")
                                Logger.info(f"Item bought!")
                                self.items_bought += 1
                                time.sleep(1)
                    self.items_evaluated += 1
            # Close the trade window before refreshing the shop.
            keyboard.send("space")
            # Done with this shopping round
            self.reset_shop()
            self.run_count += 1

    def reset_shop(self):
        """Refresh Drognan's inventory by leaving town and walking back."""
        # We want to walk out the town exit to the top right and come back down to drognan
        # This can probably be tweaked but seems to work well enough for now.
        # Exit town
        pos_m = self._screen.convert_abs_to_monitor((200, -100))
        mouse.move(pos_m[0], pos_m[1])
        self.hold_move(pos_m, time_held=(3.0 / self.speed_factor))
        # Return to town
        pos_m = self._screen.convert_abs_to_monitor((-200, 100))
        mouse.move(pos_m[0], pos_m[1])
        self.hold_move(pos_m, time_held=(2.0 / self.speed_factor))

    # A variation of the move() function from pather.py
    def hold_move(self, pos_monitor: Tuple[float, float], time_held: float = 2.0):
        """Walk toward ``pos_monitor`` by holding the left mouse button
        for roughly ``time_held`` seconds."""
        factor = self._config.advanced_options["pathing_delay_factor"]
        # in case we want to walk we actually want to move a bit before the point cause d2r will always "overwalk"
        pos_screen = self._screen.convert_monitor_to_screen(pos_monitor)
        pos_abs = self._screen.convert_screen_to_abs(pos_screen)
        # This logic (from pather.py) sometimes negatively affects the shopper, so default is to skip this.
        if self.apply_pather_adjustment:
            dist = math.dist(pos_abs, (0, 0))
            min_wd = self._config.ui_pos["min_walk_dist"]
            max_wd = random.randint(int(self._config.ui_pos["max_walk_dist"] * 0.65), self._config.ui_pos["max_walk_dist"])
            adjust_factor = max(max_wd, min(min_wd, dist - 50)) / dist
            pos_abs = [int(pos_abs[0] * adjust_factor), int(pos_abs[1] * adjust_factor)]
        x, y = self._screen.convert_abs_to_monitor(pos_abs)
        mouse.move(x, y, randomize=5, delay_factor=[factor*0.1, factor*0.14])
        wait(0.012, 0.02)
        mouse.press(button="left")
        wait(time_held - 0.05, time_held + 0.05)
        mouse.release(button="left")
| true | true |
f7fadc5982d41fcde02b58713f0736117743ac26 | 5,835 | py | Python | utils/pointconv_util.py | MatteoPerotto/pointconv | 204a0d534c4d75e80bde7722c075a78365a64929 | [
"MIT"
] | 471 | 2019-03-26T02:01:55.000Z | 2022-03-10T03:09:10.000Z | utils/pointconv_util.py | MatteoPerotto/pointconv | 204a0d534c4d75e80bde7722c075a78365a64929 | [
"MIT"
] | 35 | 2019-03-28T05:28:17.000Z | 2021-08-19T10:22:47.000Z | utils/pointconv_util.py | MatteoPerotto/pointconv | 204a0d534c4d75e80bde7722c075a78365a64929 | [
"MIT"
] | 115 | 2019-04-21T07:33:00.000Z | 2022-03-04T07:21:12.000Z | """
Helper Function for PointConv
Author: Wenxuan Wu
Date: July 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
from transforms3d.euler import euler2mat
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/sampling'))
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/grouping'))
import tf_sampling
import tf_grouping
from sklearn.neighbors import KDTree
def knn_kdtree(nsample, xyz, new_xyz):
    """Batched k-nearest-neighbour search on the CPU.

    Intended to be wrapped in tf.py_func by the callers in this module.

    Args:
        nsample: number of neighbours to return per query point.
        xyz: (batch, ndataset, 3) reference point coordinates.
        new_xyz: (batch, npoint, 3) query point coordinates.

    Returns:
        int32 array of shape (batch, npoint, nsample) with, for every query
        point, the indices of its nsample nearest reference points.
    """
    # scipy's C-implemented cKDTree is faster than sklearn's KDTree for this
    # plain euclidean query and drops the sklearn requirement; the local
    # import keeps module import working even without scipy installed.
    from scipy.spatial import cKDTree

    batch_size = xyz.shape[0]
    n_points = new_xyz.shape[1]
    indices = np.zeros((batch_size, n_points, nsample), dtype=np.int32)
    for batch_idx in range(batch_size):
        tree = cKDTree(xyz[batch_idx])
        _, idx = tree.query(new_xyz[batch_idx], k=nsample)
        # query() squeezes the neighbour axis when nsample == 1 -> reshape
        # so the assignment works for every k.
        indices[batch_idx] = np.asarray(idx, dtype=np.int32).reshape(n_points, nsample)
    return indices
def kernel_density_estimation_ball(pts, radius, sigma, N_points = 128, is_norm = False):
    """Per-point Gaussian kernel density estimate over a ball neighbourhood.

    Args:
        pts: (batch, npoint, 3) point coordinates.
        radius: ball-query radius used to gather neighbours.
        sigma: squared kernel bandwidth (R = sqrt(sigma)).
        N_points: maximum number of neighbours sampled per ball.
        is_norm: if True, normalise densities by the per-cloud maximum.

    Returns:
        (batch, npoint, 1) tensor of density estimates.
    """
    with tf.variable_scope("ComputeDensity") as sc:
        idx, pts_cnt = tf_grouping.query_ball_point(radius, N_points, pts, pts)
        g_pts = tf_grouping.group_point(pts, idx)
        # Centre each neighbourhood on its query point.
        g_pts -= tf.tile(tf.expand_dims(pts, 2), [1, 1, N_points, 1])
        R = tf.sqrt(sigma)
        xRinv = tf.div(g_pts, R)
        quadform = tf.reduce_sum(tf.square(xRinv), axis = -1)
        logsqrtdetSigma = tf.log(R) * 3
        # Isotropic 3-D Gaussian pdf evaluated at every neighbour offset.
        mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)
        # Balls with fewer than N_points members are padded by repeating the
        # first neighbour; subtract the first value once per padded slot so
        # padding does not inflate the kernel sum.
        first_val, _ = tf.split(mvnpdf, [1, N_points - 1], axis = 2)
        mvnpdf = tf.reduce_sum(mvnpdf, axis = 2, keepdims = True)
        num_val_to_sub = tf.expand_dims(tf.cast(tf.subtract(N_points, pts_cnt), dtype = tf.float32), axis = -1)
        val_to_sub = tf.multiply(first_val, num_val_to_sub)
        mvnpdf = tf.subtract(mvnpdf, val_to_sub)
        # Average over the number of real neighbours in each ball.
        scale = tf.div(1.0, tf.expand_dims(tf.cast(pts_cnt, dtype = tf.float32), axis = -1))
        density = tf.multiply(mvnpdf, scale)
        if is_norm:
            #grouped_xyz_sum = tf.reduce_sum(grouped_xyz, axis = 1, keepdims = True)
            density_max = tf.reduce_max(density, axis = 1, keepdims = True)
            density = tf.div(density, density_max)
        return density
def kernel_density_estimation(pts, sigma, kpoint = 32, is_norm = False):
    """Gaussian kernel density estimate over each point's k nearest neighbours.

    Same maths as kernel_density_estimation_ball, but the neighbourhood is
    the kpoint nearest neighbours (found via a CPU KD-tree) instead of a
    fixed-radius ball.

    Args:
        pts: (batch, npoint, 3) point coordinates.
        sigma: squared kernel bandwidth (R = sqrt(sigma)).
        kpoint: neighbourhood size; clamped when the cloud is smaller.
        is_norm: if True, normalise densities by the per-cloud maximum.

    Returns:
        (batch, npoint, 1) tensor of density estimates.
    """
    with tf.variable_scope("ComputeDensity") as sc:
        batch_size = pts.get_shape()[0]
        num_points = pts.get_shape()[1]
        if num_points < kpoint:
            # Cannot ask for more neighbours than there are points.
            kpoint = num_points.value - 1
        # The KD-tree query is plain numpy code, so pin it to the CPU.
        with tf.device('/cpu:0'):
            point_indices = tf.py_func(knn_kdtree, [kpoint, pts, pts], tf.int32)
        # Pair every neighbour index with its batch index for gather_nd.
        batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, num_points, kpoint, 1))
        idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis = 3)], axis = 3)
        idx.set_shape([batch_size, num_points, kpoint, 2])
        grouped_pts = tf.gather_nd(pts, idx)
        grouped_pts -= tf.tile(tf.expand_dims(pts, 2), [1,1,kpoint,1]) # translation normalization
        R = tf.sqrt(sigma)
        xRinv = tf.div(grouped_pts, R)
        quadform = tf.reduce_sum(tf.square(xRinv), axis = -1)
        logsqrtdetSigma = tf.log(R) * 3
        # Isotropic 3-D Gaussian pdf; average over the k neighbours.
        mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)
        mvnpdf = tf.reduce_sum(mvnpdf, axis = 2, keepdims = True)
        scale = 1.0 / kpoint
        density = tf.multiply(mvnpdf, scale)
        if is_norm:
            #grouped_xyz_sum = tf.reduce_sum(grouped_xyz, axis = 1, keepdims = True)
            density_max = tf.reduce_max(density, axis = 1, keepdims = True)
            density = tf.div(density, density_max)
        return density
def sampling(npoint, pts):
    '''
    Farthest-point sample a fixed-size subset of each point cloud.

    inputs:
    npoint: scalar, number of points to sample
    pointcloud: B * N * 3, input point cloud
    output:
    sub_pts: B * npoint * 3, sub-sampled point cloud
    '''
    # farthest_point_sample picks npoint indices; gather_point fetches them.
    sub_pts = tf_sampling.gather_point(pts, tf_sampling.farthest_point_sample(npoint, pts))
    return sub_pts
def grouping(feature, K, src_xyz, q_xyz, use_xyz = True):
    '''
    Gather, for each query point, its K nearest source points and features.

    K: neighbor size
    src_xyz: original point xyz (batch_size, ndataset, 3)
    q_xyz: query point xyz (batch_size, npoint, 3)

    Returns:
    grouped_xyz: (batch_size, npoint, K, 3) query-centred neighbour offsets
    new_points: grouped features, with xyz offsets prepended when use_xyz
    idx: (batch_size, npoint, K, 2) gather_nd indices used
    '''
    batch_size = src_xyz.get_shape()[0]
    npoint = q_xyz.get_shape()[1]
    # KNN runs in numpy (see knn_kdtree) wrapped into the TF graph.
    point_indices = tf.py_func(knn_kdtree, [K, src_xyz, q_xyz], tf.int32)
    # Pair each neighbour index with its batch index for gather_nd.
    batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, npoint, K, 1))
    idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis = 3)], axis = 3)
    idx.set_shape([batch_size, npoint, K, 2])
    grouped_xyz = tf.gather_nd(src_xyz, idx)
    grouped_xyz -= tf.tile(tf.expand_dims(q_xyz, 2), [1,1,K,1]) # translation normalization
    grouped_feature = tf.gather_nd(feature, idx)
    if use_xyz:
        new_points = tf.concat([grouped_xyz, grouped_feature], axis = -1)
    else:
        new_points = grouped_feature
    return grouped_xyz, new_points, idx
if __name__=='__main__':
    #test KDE
    # Smoke test: build a ball-KDE graph on random points and time one run.
    import time
    batch_size = 8
    num_point = 8192
    pts = np.random.randn(batch_size, num_point, 3).astype('float32')
    import pdb
    # Deliberate breakpoint — this harness is meant for interactive debugging.
    pdb.set_trace()
    with tf.device('/gpu:1'):
        points = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
        density = kernel_density_estimation_ball(points, 1.0)
        #density = kernel_density_estimation(points, 1.0)
    init = tf.global_variables_initializer()
    with tf.Session('') as sess:
        sess.run(init)
        t1 = time.time()
        den = sess.run(density, feed_dict = {points:pts})
        print(time.time() - t1)
    #import scipy.io as sio
    #sio.savemat('density.mat', dict([('pts', pts), ('density', den)]))
| 33.342857 | 111 | 0.643873 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import random
import numpy as np
import tensorflow as tf
from transforms3d.euler import euler2mat
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/sampling'))
sys.path.append(os.path.join(BASE_DIR, '../tf_ops/grouping'))
import tf_sampling
import tf_grouping
from sklearn.neighbors import KDTree
def knn_kdtree(nsample, xyz, new_xyz):
    """Per-batch k-nearest-neighbour lookup.

    Builds one KD-tree per batch element over the reference points and
    queries it with that batch's query points.

    Returns an int32 array of shape (batch, n_queries, nsample) holding the
    indices of each query point's nsample nearest reference points.
    """
    n_batches = xyz.shape[0]
    n_queries = new_xyz.shape[1]
    out = np.zeros((n_batches, n_queries, nsample), dtype=np.int32)
    # zip over the first axis pairs each reference cloud with its queries.
    for b, (ref_pts, query_pts) in enumerate(zip(xyz, new_xyz)):
        tree = KDTree(ref_pts, leaf_size=30)
        _, out[b] = tree.query(query_pts, k=nsample)
    return out
def kernel_density_estimation_ball(pts, radius, sigma, N_points = 128, is_norm = False):
with tf.variable_scope("ComputeDensity") as sc:
idx, pts_cnt = tf_grouping.query_ball_point(radius, N_points, pts, pts)
g_pts = tf_grouping.group_point(pts, idx)
g_pts -= tf.tile(tf.expand_dims(pts, 2), [1, 1, N_points, 1])
R = tf.sqrt(sigma)
xRinv = tf.div(g_pts, R)
quadform = tf.reduce_sum(tf.square(xRinv), axis = -1)
logsqrtdetSigma = tf.log(R) * 3
mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)
first_val, _ = tf.split(mvnpdf, [1, N_points - 1], axis = 2)
mvnpdf = tf.reduce_sum(mvnpdf, axis = 2, keepdims = True)
num_val_to_sub = tf.expand_dims(tf.cast(tf.subtract(N_points, pts_cnt), dtype = tf.float32), axis = -1)
val_to_sub = tf.multiply(first_val, num_val_to_sub)
mvnpdf = tf.subtract(mvnpdf, val_to_sub)
scale = tf.div(1.0, tf.expand_dims(tf.cast(pts_cnt, dtype = tf.float32), axis = -1))
density = tf.multiply(mvnpdf, scale)
if is_norm:
density_max = tf.reduce_max(density, axis = 1, keepdims = True)
density = tf.div(density, density_max)
return density
def kernel_density_estimation(pts, sigma, kpoint = 32, is_norm = False):
with tf.variable_scope("ComputeDensity") as sc:
batch_size = pts.get_shape()[0]
num_points = pts.get_shape()[1]
if num_points < kpoint:
kpoint = num_points.value - 1
with tf.device('/cpu:0'):
point_indices = tf.py_func(knn_kdtree, [kpoint, pts, pts], tf.int32)
batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, num_points, kpoint, 1))
idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis = 3)], axis = 3)
idx.set_shape([batch_size, num_points, kpoint, 2])
grouped_pts = tf.gather_nd(pts, idx)
grouped_pts -= tf.tile(tf.expand_dims(pts, 2), [1,1,kpoint,1])
R = tf.sqrt(sigma)
xRinv = tf.div(grouped_pts, R)
quadform = tf.reduce_sum(tf.square(xRinv), axis = -1)
logsqrtdetSigma = tf.log(R) * 3
mvnpdf = tf.exp(-0.5 * quadform - logsqrtdetSigma - 3 * tf.log(2 * 3.1415926) / 2)
mvnpdf = tf.reduce_sum(mvnpdf, axis = 2, keepdims = True)
scale = 1.0 / kpoint
density = tf.multiply(mvnpdf, scale)
if is_norm:
density_max = tf.reduce_max(density, axis = 1, keepdims = True)
density = tf.div(density, density_max)
return density
def sampling(npoint, pts):
sub_pts = tf_sampling.gather_point(pts, tf_sampling.farthest_point_sample(npoint, pts))
return sub_pts
def grouping(feature, K, src_xyz, q_xyz, use_xyz = True):
batch_size = src_xyz.get_shape()[0]
npoint = q_xyz.get_shape()[1]
point_indices = tf.py_func(knn_kdtree, [K, src_xyz, q_xyz], tf.int32)
batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1, 1)), (1, npoint, K, 1))
idx = tf.concat([batch_indices, tf.expand_dims(point_indices, axis = 3)], axis = 3)
idx.set_shape([batch_size, npoint, K, 2])
grouped_xyz = tf.gather_nd(src_xyz, idx)
grouped_xyz -= tf.tile(tf.expand_dims(q_xyz, 2), [1,1,K,1])
grouped_feature = tf.gather_nd(feature, idx)
if use_xyz:
new_points = tf.concat([grouped_xyz, grouped_feature], axis = -1)
else:
new_points = grouped_feature
return grouped_xyz, new_points, idx
if __name__=='__main__':
import time
batch_size = 8
num_point = 8192
pts = np.random.randn(batch_size, num_point, 3).astype('float32')
import pdb
pdb.set_trace()
with tf.device('/gpu:1'):
points = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
density = kernel_density_estimation_ball(points, 1.0)
init = tf.global_variables_initializer()
with tf.Session('') as sess:
sess.run(init)
t1 = time.time()
den = sess.run(density, feed_dict = {points:pts})
print(time.time() - t1)
| true | true |
f7fadce1ff2f06e2e0bd5502472662090c6de119 | 3,074 | py | Python | QA_system/QA_model_build.py | Ennis0620/TF_IDF_QAsystem | 1b4e7a9408583fc43a0cd48c155f4f61e752ed9d | [
"MIT"
] | 2 | 2022-01-11T06:59:57.000Z | 2022-01-18T02:54:44.000Z | QA_system/QA_model_build.py | Ennis0620/TF_IDF_QAsystem | 1b4e7a9408583fc43a0cd48c155f4f61e752ed9d | [
"MIT"
] | null | null | null | QA_system/QA_model_build.py | Ennis0620/TF_IDF_QAsystem | 1b4e7a9408583fc43a0cd48c155f4f61e752ed9d | [
"MIT"
] | null | null | null | import re
import time
import jieba
import math
import json
#設定斷詞詞庫
jieba.load_userdict('lexicon_dict.txt')
len_Q = 0 #存下共有多少問題
IDF={} #字詞庫中所有詞彙
s = time.time()
QA_model = [] #要儲存的model 以[{},{},...]的形式儲存
#進行問題的斷詞
with open("Gossiping-QA-Dataset.txt","r",encoding='utf-8-sig') as fp:
all_ = fp.readlines()
for index,row in enumerate(all_):
dic = {} #存每一個document的
row_split = row.split("\t") #用tab來分割 問題 和 回答
Q_row = "".join(row_split[0].split())#去除document問題的空白
ID = '{0:06d}'.format(index)
dic.setdefault("ID",ID) #設置document的ID
dic.setdefault("Question",row_split[0]) #問題
dic.setdefault("Answer",row_split[1].strip("\n")) #回答
seg = {} #將斷詞的結果存成字典形式
count = 0 #計算斷詞共斷了幾項
TF_table={} #紀錄目前的TF_table
#進行jieba斷詞
for Hyphenation in jieba.cut(Q_row):
#將斷詞的詞彙 寫到IDF中 代表出現的 詞彙
count+=1 #統計斷詞 數量
seg.setdefault(Hyphenation,0)#先設置成0
if Hyphenation not in TF_table: #如果 "為什麼" 沒在TF_table中 代表第一次出現
TF_table.setdefault(Hyphenation,1) #設置出現次數=1
IDF.setdefault(Hyphenation,0) #先設置IDF=0 代表documnet的斷詞 並 更新在IDF中
else:
TF_table[Hyphenation] += 1#若有出現在字典中 代表之前就有出現過 直接+=1
#進行TF_normalization TF_table/count
for i in TF_table:
seg[i] = round(TF_table[i]/count,6)
#IDF 不管出現過幾次 同一個document中 只計算1次
#因為TF_table已經整理過 字彙的出現次數 例如:"為什麼":2 因此若 跑到"為什麼":2 就將 該筆的IDF 直接加1
IDF[i]+=1
dic.setdefault("Model",seg) #將斷完的詞存到字典中
QA_model.append(dic) #將此document的資訊添加到QA_model中
if index%50000==0:
print("目前處理筆數:",index)
len_Q = index
#計算IDF
#取log是為了不要讓權重太大 將差距拉小
#因為在TF中彼此差距很小 但是在IDF中 有可能 某詞在所有document中出現其IDF會超級小 某詞只在單一document出現其IDF超級大
for i in IDF:
if IDF[i] == 0 :
IDF[i] = round(math.log10(len_Q/1),6)
else:
IDF[i] = round(math.log10(len_Q/IDF[i]),6)
#計算TF_IDF
for index in range(0,len_Q+1):
#計算內積"Inner_Production"
inner_production = 0
#從model中儲存的TF 和 IDF 計算成TF-IDF
for i in QA_model[index]["Model"]:
#QA_model中的每一筆資料 進行 TF 和 IDF 的乘法
QA_model[index]["Model"][i] = round(QA_model[index]["Model"][i]*IDF[i],6)
#內積相當於 自己的平方
inner_production += QA_model[index]["Model"][i]*QA_model[index]["Model"][i]
#將計算完的內積加入這筆model當中
QA_model[index].setdefault("Inner_Production",inner_production)
e = time.time()
print("共花費秒數:",round(e-s,6))
#要儲存的有 IDF、QA_model
#存成json格式
with open('model.json',"w",encoding='utf-8-sig') as jsonfile:
json.dump(QA_model,jsonfile,separators=(',\n', ': '),indent=4,ensure_ascii=False)
with open('IDF.json',"w",encoding='utf-8-sig') as jsonfile:
json.dump(IDF,jsonfile,separators=(',\n', ': '),indent=4,ensure_ascii=False)
e2 = time.time()
print("建立model到儲存model所有耗時:",round(e2-s,6))
| 28.201835 | 85 | 0.592713 | import re
import time
import jieba
import math
import json
jieba.load_userdict('lexicon_dict.txt')
len_Q = 0
IDF={}
s = time.time()
QA_model = []
with open("Gossiping-QA-Dataset.txt","r",encoding='utf-8-sig') as fp:
all_ = fp.readlines()
for index,row in enumerate(all_):
dic = {}
row_split = row.split("\t")
Q_row = "".join(row_split[0].split())
ID = '{0:06d}'.format(index)
dic.setdefault("ID",ID)
dic.setdefault("Question",row_split[0])
dic.setdefault("Answer",row_split[1].strip("\n"))
seg = {}
count = 0
TF_table={}
for Hyphenation in jieba.cut(Q_row):
count+=1
seg.setdefault(Hyphenation,0)
if Hyphenation not in TF_table:
TF_table.setdefault(Hyphenation,1)
IDF.setdefault(Hyphenation,0)
else:
TF_table[Hyphenation] += 1
for i in TF_table:
seg[i] = round(TF_table[i]/count,6)
IDF[i]+=1
dic.setdefault("Model",seg)
QA_model.append(dic)
if index%50000==0:
print("目前處理筆數:",index)
len_Q = index
for i in IDF:
if IDF[i] == 0 :
IDF[i] = round(math.log10(len_Q/1),6)
else:
IDF[i] = round(math.log10(len_Q/IDF[i]),6)
for index in range(0,len_Q+1):
inner_production = 0
for i in QA_model[index]["Model"]:
QA_model[index]["Model"][i] = round(QA_model[index]["Model"][i]*IDF[i],6)
inner_production += QA_model[index]["Model"][i]*QA_model[index]["Model"][i]
QA_model[index].setdefault("Inner_Production",inner_production)
e = time.time()
print("共花費秒數:",round(e-s,6))
with open('model.json',"w",encoding='utf-8-sig') as jsonfile:
json.dump(QA_model,jsonfile,separators=(',\n', ': '),indent=4,ensure_ascii=False)
with open('IDF.json',"w",encoding='utf-8-sig') as jsonfile:
json.dump(IDF,jsonfile,separators=(',\n', ': '),indent=4,ensure_ascii=False)
e2 = time.time()
print("建立model到儲存model所有耗時:",round(e2-s,6))
| true | true |
f7fadd195d527c89e44441d529ff73afa1c40bd3 | 17,881 | py | Python | dataproc/google/cloud/dataproc_v1/proto/operations_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 1 | 2019-03-26T21:44:51.000Z | 2019-03-26T21:44:51.000Z | dataproc/google/cloud/dataproc_v1/proto/operations_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | dataproc/google/cloud/dataproc_v1/proto/operations_pb2.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 2 | 2019-11-13T05:27:48.000Z | 2020-01-21T06:35:19.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/dataproc_v1/proto/operations.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/dataproc_v1/proto/operations.proto",
package="google.cloud.dataproc.v1",
syntax="proto3",
serialized_options=_b(
"\n\034com.google.cloud.dataproc.v1B\017OperationsProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"
),
serialized_pb=_b(
'\n/google/cloud/dataproc_v1/proto/operations.proto\x12\x18google.cloud.dataproc.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x89\x02\n\x16\x43lusterOperationStatus\x12J\n\x05state\x18\x01 \x01(\x0e\x32\x36.google.cloud.dataproc.v1.ClusterOperationStatus.StateB\x03\xe0\x41\x03\x12\x18\n\x0binner_state\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07\x64\x65tails\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"\xb8\x03\n\x18\x43lusterOperationMetadata\x12\x19\n\x0c\x63luster_name\x18\x07 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x08 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x06status\x18\t \x01(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12M\n\x0estatus_history\x18\n \x03(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12\x1b\n\x0eoperation_type\x18\x0b \x01(\tB\x03\xe0\x41\x03\x12\x18\n\x0b\x64\x65scription\x18\x0c \x01(\tB\x03\xe0\x41\x03\x12S\n\x06labels\x18\r \x03(\x0b\x32>.google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntryB\x03\xe0\x41\x03\x12\x15\n\x08warnings\x18\x0e \x03(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42s\n\x1c\x63om.google.cloud.dataproc.v1B\x0fOperationsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3'
),
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_CLUSTEROPERATIONSTATUS_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNKNOWN", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PENDING", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RUNNING", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DONE", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=383,
serialized_end=439,
)
_sym_db.RegisterEnumDescriptor(_CLUSTEROPERATIONSTATUS_STATE)
_CLUSTEROPERATIONSTATUS = _descriptor.Descriptor(
name="ClusterOperationStatus",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="inner_state",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.inner_state",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="details",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.details",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state_start_time",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state_start_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_CLUSTEROPERATIONSTATUS_STATE],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=174,
serialized_end=439,
)
_CLUSTEROPERATIONMETADATA_LABELSENTRY = _descriptor.Descriptor(
name="LabelsEntry",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=837,
serialized_end=882,
)
_CLUSTEROPERATIONMETADATA = _descriptor.Descriptor(
name="ClusterOperationMetadata",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="cluster_name",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_name",
index=0,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_uuid",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_uuid",
index=1,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status",
index=2,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status_history",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status_history",
index=3,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="operation_type",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.operation_type",
index=4,
number=11,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="description",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.description",
index=5,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.labels",
index=6,
number=13,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="warnings",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.warnings",
index=7,
number=14,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_CLUSTEROPERATIONMETADATA_LABELSENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=442,
serialized_end=882,
)
_CLUSTEROPERATIONSTATUS.fields_by_name[
"state"
].enum_type = _CLUSTEROPERATIONSTATUS_STATE
_CLUSTEROPERATIONSTATUS.fields_by_name[
"state_start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CLUSTEROPERATIONSTATUS_STATE.containing_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA_LABELSENTRY.containing_type = _CLUSTEROPERATIONMETADATA
_CLUSTEROPERATIONMETADATA.fields_by_name[
"status"
].message_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA.fields_by_name[
"status_history"
].message_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA.fields_by_name[
"labels"
].message_type = _CLUSTEROPERATIONMETADATA_LABELSENTRY
DESCRIPTOR.message_types_by_name["ClusterOperationStatus"] = _CLUSTEROPERATIONSTATUS
DESCRIPTOR.message_types_by_name["ClusterOperationMetadata"] = _CLUSTEROPERATIONMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClusterOperationStatus = _reflection.GeneratedProtocolMessageType(
"ClusterOperationStatus",
(_message.Message,),
dict(
DESCRIPTOR=_CLUSTEROPERATIONSTATUS,
__module__="google.cloud.dataproc_v1.proto.operations_pb2",
__doc__="""The status of the operation.
Attributes:
state:
Output only. A message containing the operation state.
inner_state:
Output only. A message containing the detailed operation
state.
details:
Output only. A message containing any operation metadata
details.
state_start_time:
Output only. The time this state was entered.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationStatus)
),
)
_sym_db.RegisterMessage(ClusterOperationStatus)
ClusterOperationMetadata = _reflection.GeneratedProtocolMessageType(
"ClusterOperationMetadata",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_CLUSTEROPERATIONMETADATA_LABELSENTRY,
__module__="google.cloud.dataproc_v1.proto.operations_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry)
),
),
DESCRIPTOR=_CLUSTEROPERATIONMETADATA,
__module__="google.cloud.dataproc_v1.proto.operations_pb2",
__doc__="""Metadata describing the operation.
Attributes:
cluster_name:
Output only. Name of the cluster for the operation.
cluster_uuid:
Output only. Cluster UUID for the operation.
status:
Output only. Current operation status.
status_history:
Output only. The previous operation status.
operation_type:
Output only. The operation type.
description:
Output only. Short description of operation.
labels:
Output only. Labels associated with the operation
warnings:
Output only. Errors encountered during operation execution.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata)
),
)
_sym_db.RegisterMessage(ClusterOperationMetadata)
_sym_db.RegisterMessage(ClusterOperationMetadata.LabelsEntry)
DESCRIPTOR._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["state"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["inner_state"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["details"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["state_start_time"]._options = None
_CLUSTEROPERATIONMETADATA_LABELSENTRY._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_name"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_uuid"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["status"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["status_history"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["operation_type"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["description"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["labels"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["warnings"]._options = None
# @@protoc_insertion_point(module_scope)
| 36.792181 | 1,636 | 0.645937 |
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/dataproc_v1/proto/operations.proto",
package="google.cloud.dataproc.v1",
syntax="proto3",
serialized_options=_b(
"\n\034com.google.cloud.dataproc.v1B\017OperationsProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc"
),
serialized_pb=_b(
'\n/google/cloud/dataproc_v1/proto/operations.proto\x12\x18google.cloud.dataproc.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x89\x02\n\x16\x43lusterOperationStatus\x12J\n\x05state\x18\x01 \x01(\x0e\x32\x36.google.cloud.dataproc.v1.ClusterOperationStatus.StateB\x03\xe0\x41\x03\x12\x18\n\x0binner_state\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07\x64\x65tails\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"\xb8\x03\n\x18\x43lusterOperationMetadata\x12\x19\n\x0c\x63luster_name\x18\x07 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x08 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x06status\x18\t \x01(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12M\n\x0estatus_history\x18\n \x03(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12\x1b\n\x0eoperation_type\x18\x0b \x01(\tB\x03\xe0\x41\x03\x12\x18\n\x0b\x64\x65scription\x18\x0c \x01(\tB\x03\xe0\x41\x03\x12S\n\x06labels\x18\r \x03(\x0b\x32>.google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntryB\x03\xe0\x41\x03\x12\x15\n\x08warnings\x18\x0e \x03(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42s\n\x1c\x63om.google.cloud.dataproc.v1B\x0fOperationsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3'
),
dependencies=[
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_CLUSTEROPERATIONSTATUS_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="UNKNOWN", index=0, number=0, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PENDING", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="RUNNING", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DONE", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=383,
serialized_end=439,
)
_sym_db.RegisterEnumDescriptor(_CLUSTEROPERATIONSTATUS_STATE)
_CLUSTEROPERATIONSTATUS = _descriptor.Descriptor(
name="ClusterOperationStatus",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state",
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="inner_state",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.inner_state",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="details",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.details",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state_start_time",
full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state_start_time",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_CLUSTEROPERATIONSTATUS_STATE],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=174,
serialized_end=439,
)
_CLUSTEROPERATIONMETADATA_LABELSENTRY = _descriptor.Descriptor(
name="LabelsEntry",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="value",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b("8\001"),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=837,
serialized_end=882,
)
_CLUSTEROPERATIONMETADATA = _descriptor.Descriptor(
name="ClusterOperationMetadata",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="cluster_name",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_name",
index=0,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="cluster_uuid",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_uuid",
index=1,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status",
index=2,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="status_history",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status_history",
index=3,
number=10,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="operation_type",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.operation_type",
index=4,
number=11,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="description",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.description",
index=5,
number=12,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="labels",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.labels",
index=6,
number=13,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="warnings",
full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.warnings",
index=7,
number=14,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=_b("\340A\003"),
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[_CLUSTEROPERATIONMETADATA_LABELSENTRY],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=442,
serialized_end=882,
)
_CLUSTEROPERATIONSTATUS.fields_by_name[
"state"
].enum_type = _CLUSTEROPERATIONSTATUS_STATE
_CLUSTEROPERATIONSTATUS.fields_by_name[
"state_start_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CLUSTEROPERATIONSTATUS_STATE.containing_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA_LABELSENTRY.containing_type = _CLUSTEROPERATIONMETADATA
_CLUSTEROPERATIONMETADATA.fields_by_name[
"status"
].message_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA.fields_by_name[
"status_history"
].message_type = _CLUSTEROPERATIONSTATUS
_CLUSTEROPERATIONMETADATA.fields_by_name[
"labels"
].message_type = _CLUSTEROPERATIONMETADATA_LABELSENTRY
DESCRIPTOR.message_types_by_name["ClusterOperationStatus"] = _CLUSTEROPERATIONSTATUS
DESCRIPTOR.message_types_by_name["ClusterOperationMetadata"] = _CLUSTEROPERATIONMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClusterOperationStatus = _reflection.GeneratedProtocolMessageType(
"ClusterOperationStatus",
(_message.Message,),
dict(
DESCRIPTOR=_CLUSTEROPERATIONSTATUS,
__module__="google.cloud.dataproc_v1.proto.operations_pb2",
__doc__="""The status of the operation.
Attributes:
state:
Output only. A message containing the operation state.
inner_state:
Output only. A message containing the detailed operation
state.
details:
Output only. A message containing any operation metadata
details.
state_start_time:
Output only. The time this state was entered.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationStatus)
),
)
_sym_db.RegisterMessage(ClusterOperationStatus)
ClusterOperationMetadata = _reflection.GeneratedProtocolMessageType(
"ClusterOperationMetadata",
(_message.Message,),
dict(
LabelsEntry=_reflection.GeneratedProtocolMessageType(
"LabelsEntry",
(_message.Message,),
dict(
DESCRIPTOR=_CLUSTEROPERATIONMETADATA_LABELSENTRY,
__module__="google.cloud.dataproc_v1.proto.operations_pb2"
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry)
),
),
DESCRIPTOR=_CLUSTEROPERATIONMETADATA,
__module__="google.cloud.dataproc_v1.proto.operations_pb2",
__doc__="""Metadata describing the operation.
Attributes:
cluster_name:
Output only. Name of the cluster for the operation.
cluster_uuid:
Output only. Cluster UUID for the operation.
status:
Output only. Current operation status.
status_history:
Output only. The previous operation status.
operation_type:
Output only. The operation type.
description:
Output only. Short description of operation.
labels:
Output only. Labels associated with the operation
warnings:
Output only. Errors encountered during operation execution.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata)
),
)
_sym_db.RegisterMessage(ClusterOperationMetadata)
_sym_db.RegisterMessage(ClusterOperationMetadata.LabelsEntry)
DESCRIPTOR._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["state"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["inner_state"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["details"]._options = None
_CLUSTEROPERATIONSTATUS.fields_by_name["state_start_time"]._options = None
_CLUSTEROPERATIONMETADATA_LABELSENTRY._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_name"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_uuid"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["status"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["status_history"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["operation_type"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["description"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["labels"]._options = None
_CLUSTEROPERATIONMETADATA.fields_by_name["warnings"]._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7faddaf8f023fbb394a53850277c7c04cba5ee8 | 7,431 | py | Python | models/dfcvae.py | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 | [
"Apache-2.0"
] | null | null | null | models/dfcvae.py | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 | [
"Apache-2.0"
] | null | null | null | models/dfcvae.py | threewisemonkeys-as/PyTorch-VAE | 4ed0fc7581d4792b435134aa9e06d5e35a5db118 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import vgg19_bn
from .base import BaseVAE
class DFCVAE(BaseVAE):
def __init__(
self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
alpha: float = 1,
beta: float = 0.5,
lr: float = 0.005,
weight_decay: Optional[float] = 0,
scheduler_gamma: Optional[float] = 0.95,
) -> None:
super(DFCVAE, self).__init__(
lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma
)
self.latent_dim = latent_dim
self.alpha = alpha
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
in_channels,
out_channels=h_dim,
kernel_size=3,
stride=2,
padding=1,
),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU(),
)
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU(),
)
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),
nn.Tanh(),
)
self.feature_network = vgg19_bn(pretrained=True)
# Freeze the pretrained feature network
for param in self.feature_network.parameters():
param.requires_grad = False
self.feature_network.eval()
def encode(self, input: torch.Tensor) -> List[torch.Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (torch.Tensor) Input tensor to encoder [N x C x H x W]
:return: (torch.Tensor) List of latent codes
"""
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: torch.Tensor) -> torch.Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (torch.Tensor) [B x D]
:return: (torch.Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (torch.Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (torch.Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (torch.Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
recons = self.decode(z)
recons_features = self.extract_features(recons)
input_features = self.extract_features(input)
return [recons, input, recons_features, input_features, mu, log_var]
def extract_features(
self, input: torch.Tensor, feature_layers: List = None
) -> List[torch.Tensor]:
"""
Extracts the features from the pretrained model
at the layers indicated by feature_layers.
:param input: (torch.Tensor) [B x C x H x W]
:param feature_layers: List of string of IDs
:return: List of the extracted features
"""
if feature_layers is None:
feature_layers = ["14", "24", "34", "43"]
features = []
result = input
for (key, module) in self.feature_network.features._modules.items():
result = module(result)
if key in feature_layers:
features.append(result)
return features
def loss_function(self, *args, **kwargs) -> dict:
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = args[0]
input = args[1]
recons_features = args[2]
input_features = args[3]
mu = args[4]
log_var = args[5]
kld_weight = kwargs["M_N"] # Account for the minibatch samples from the dataset
recons_loss = F.mse_loss(recons, input)
feature_loss = 0.0
for (r, i) in zip(recons_features, input_features):
feature_loss += F.mse_loss(r, i)
kld_loss = torch.mean(
-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0
)
loss = (
self.beta * (recons_loss + feature_loss)
+ self.alpha * kld_weight * kld_loss
)
return {"loss": loss, "Reconstruction_Loss": recons_loss, "KLD": -kld_loss}
def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (torch.Tensor)
"""
z = torch.randn(num_samples, self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (torch.Tensor) [B x C x H x W]
:return: (torch.Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
| 32.030172 | 102 | 0.541246 | from typing import List, Optional
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import vgg19_bn
from .base import BaseVAE
class DFCVAE(BaseVAE):
def __init__(
self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
alpha: float = 1,
beta: float = 0.5,
lr: float = 0.005,
weight_decay: Optional[float] = 0,
scheduler_gamma: Optional[float] = 0.95,
) -> None:
super(DFCVAE, self).__init__(
lr=lr, weight_decay=weight_decay, scheduler_gamma=scheduler_gamma
)
self.latent_dim = latent_dim
self.alpha = alpha
self.beta = beta
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
for h_dim in hidden_dims:
modules.append(
nn.Sequential(
nn.Conv2d(
in_channels,
out_channels=h_dim,
kernel_size=3,
stride=2,
padding=1,
),
nn.BatchNorm2d(h_dim),
nn.LeakyReLU(),
)
)
in_channels = h_dim
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU(),
)
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(
hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels=3, kernel_size=3, padding=1),
nn.Tanh(),
)
self.feature_network = vgg19_bn(pretrained=True)
for param in self.feature_network.parameters():
param.requires_grad = False
self.feature_network.eval()
def encode(self, input: torch.Tensor) -> List[torch.Tensor]:
result = self.encoder(input)
result = torch.flatten(result, start_dim=1)
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: torch.Tensor) -> torch.Tensor:
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, input: torch.Tensor, **kwargs) -> List[torch.Tensor]:
mu, log_var = self.encode(input)
z = self.reparameterize(mu, log_var)
recons = self.decode(z)
recons_features = self.extract_features(recons)
input_features = self.extract_features(input)
return [recons, input, recons_features, input_features, mu, log_var]
def extract_features(
self, input: torch.Tensor, feature_layers: List = None
) -> List[torch.Tensor]:
if feature_layers is None:
feature_layers = ["14", "24", "34", "43"]
features = []
result = input
for (key, module) in self.feature_network.features._modules.items():
result = module(result)
if key in feature_layers:
features.append(result)
return features
def loss_function(self, *args, **kwargs) -> dict:
recons = args[0]
input = args[1]
recons_features = args[2]
input_features = args[3]
mu = args[4]
log_var = args[5]
kld_weight = kwargs["M_N"]
recons_loss = F.mse_loss(recons, input)
feature_loss = 0.0
for (r, i) in zip(recons_features, input_features):
feature_loss += F.mse_loss(r, i)
kld_loss = torch.mean(
-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0
)
loss = (
self.beta * (recons_loss + feature_loss)
+ self.alpha * kld_weight * kld_loss
)
return {"loss": loss, "Reconstruction_Loss": recons_loss, "KLD": -kld_loss}
def sample(self, num_samples: int, current_device: int, **kwargs) -> torch.Tensor:
z = torch.randn(num_samples, self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
return self.forward(x)[0]
| true | true |
f7fae097f524ea572e5c0c348a1f0770ffc40386 | 55,837 | py | Python | nemo/core/classes/modelPT.py | Fackor/NeMo | 941ef1fd71bd2515a4ba7092d65146edfddc1229 | [
"Apache-2.0"
] | null | null | null | nemo/core/classes/modelPT.py | Fackor/NeMo | 941ef1fd71bd2515a4ba7092d65146edfddc1229 | [
"Apache-2.0"
] | null | null | null | nemo/core/classes/modelPT.py | Fackor/NeMo | 941ef1fd71bd2515a4ba7092d65146edfddc1229 | [
"Apache-2.0"
] | 2 | 2021-02-04T14:45:50.000Z | 2021-02-04T14:56:05.000Z | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import shutil
import tarfile
import tempfile
from abc import abstractmethod
from dataclasses import is_dataclass
from os import path
from typing import Callable, Dict, List, Optional, Union
import hydra
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities import rank_zero_only
from nemo.core import optim
from nemo.core.classes.common import Model
from nemo.core.optim import prepare_lr_scheduler
from nemo.utils import config_utils, logging, model_utils
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
# Need to set them before EFF import as it is using them.
_MODEL_CONFIG_YAML = "model_config.yaml"
_MODEL_WEIGHTS = "model_weights.ckpt"
try:
# Try to import strategies for .nemo archive.
from eff.cookbooks import NeMoCookbook
_EFF_PRESENT_ = True
except ImportError:
_EFF_PRESENT_ = False
__all__ = ['ModelPT']
"""
Internal global flags that determine core functionality of ModelPT.
_MODEL_IS_RESTORED:
This flag determines the context of the model - whether the model is currently being
restored or not.
- When set, it can be assumed that the model's will disable all automatic methods -
setup_training_data(), setup_validation/test_data() and their multi equivalents.
- If a model is being restored from a archive file (tarfile), it can be assumed that
under this context, the cwd is *inside* the tarfile itself.
_MODEL_RESTORE_PATH:
A string path to a a file from which the model is being restored.
This file can either be a PyTorch Lightning Checkpoint, or a archive (tarfile) that contains
artifact objects.
If it is an archive file, during restoration, the cwd will be temporarily moved to inside the
archive itself.
_MODEL_EFF_SAVE:
A global flag that switches the format of the archive file that will be stored.
This flag only enables EFF when the package support is available.
"""
_MODEL_IS_RESTORED = False
_MODEL_RESTORE_PATH = None
_MODEL_EFF_SAVE = True
class ModelPT(LightningModule, Model):
"""
Interface for Pytorch-lightning based NeMo models
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
"""
Base class from which all NeMo models should inherit
Args:
cfg (DictConfig): configuration object.
The cfg object should have (optionally) the following sub-configs:
* train_ds - to instantiate training dataset
* validation_ds - to instantiate validation dataset
* test_ds - to instantiate testing dataset
* optim - to instantiate optimizer with learning rate scheduler
trainer (Optional): Pytorch Lightning Trainer instance
"""
if trainer is not None and not isinstance(trainer, Trainer):
raise ValueError(
f"trainer constructor argument must be either None or pytroch_lightning.Trainer. But got {type(trainer)} instead."
)
super().__init__()
# Convert config to a DictConfig
cfg = model_utils.convert_model_config_to_dict_config(cfg)
# Convert config to support Hydra 1.0+ instantiation
cfg = model_utils.maybe_update_config_version(cfg)
if 'target' not in cfg:
# This is for Jarvis service.
OmegaConf.set_struct(cfg, False)
cfg.target = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
OmegaConf.set_struct(cfg, True)
self._cfg = cfg
self.save_hyperparameters(self._cfg)
self._train_dl = None
self._validation_dl = None
self._test_dl = None
self._optimizer = None
self._scheduler = None
self._trainer = trainer
# Set device_id in AppState
if torch.cuda.is_available() and torch.cuda.current_device() is not None:
app_state = AppState()
app_state.device_id = torch.cuda.current_device()
if self._cfg is not None and not self._is_model_being_restored():
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
self.setup_training_data(self._cfg.train_ds)
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
self.setup_multiple_validation_data(val_data_config=None)
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
self.setup_multiple_test_data(test_data_config=None)
else:
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_training_data() method "
f"and provide a valid configuration file to setup the train data loader.\n"
f"Train config : \n{OmegaConf.to_yaml(self._cfg.train_ds)}"
)
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_validation_data() or ModelPT.setup_multiple_validation_data() method "
f"and provide a valid configuration file to setup the validation data loader(s). \n"
f"Validation config : \n{OmegaConf.to_yaml(self._cfg.validation_ds)}"
)
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_test_data() or ModelPT.setup_multiple_test_data() method "
f"and provide a valid configuration file to setup the test data loader(s).\n"
f"Test config : \n{OmegaConf.to_yaml(self._cfg.test_ds)}"
)
# ModelPT wrappers over subclass implementations
self.training_step = model_utils.wrap_training_step(self.training_step)
def register_artifact(self, config_path: str, src: str):
"""
Register model artifacts with this function. These artifacts (files) will be included inside .nemo file
when model.save_to("mymodel.nemo") is called.
WARNING: If you specified /example_folder/example.txt but ./example.txt exists, then ./example.txt will be used.
Args:
config_path: config path where artifact is used
src: path to the artifact
Returns:
path to be used when accessing artifact. If src='' or None then '' or None will be returned
"""
if not hasattr(self, 'artifacts'):
self.artifacts = {}
if self.artifacts is None:
self.artifacts = {}
if src is not None and src.strip() != '':
archive_item = model_utils.ArtifactItem()
basename_src = os.path.basename(src)
# filename exists in current workdir - use it and raise warning
# this case is during model restoration or when file is written to cwd.
if os.path.exists(basename_src):
logging.warning(f"Using {os.path.abspath(basename_src)} instead of {src}.")
used_src = basename_src
# Case: register_artifact() called inside restoration context
if self._is_model_being_restored() and self._is_restore_type_tarfile():
archive_item.path_type = model_utils.ArtifactPathType.TAR_PATH
else:
archive_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH
else:
used_src = src
archive_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH
if not os.path.exists(used_src):
# File not found in local path or by basename
# Try to locate it inside the .nemo archive (if model was restored)
# Case: register_artifact() called outside restoration context
if self._is_restore_type_tarfile():
# Get path where the command is executed - the artifacts will be "retrieved" there
# (original .nemo behavior)
cwd = os.getcwd()
try:
# Step into the nemo archive to try and find the file
with tempfile.TemporaryDirectory() as tmpdir:
self.__unpack_nemo_file(path2file=_MODEL_RESTORE_PATH, out_folder=tmpdir)
os.chdir(tmpdir)
if os.path.exists(basename_src):
logging.warning(f"Using {os.path.abspath(basename_src)} instead of {src}.")
used_src = basename_src
archive_item.path = used_src
archive_item.path_type = model_utils.ArtifactPathType.TAR_PATH
else:
# No further action can be taken, file not found anywhere
raise FileNotFoundError(
f"Could not find {used_src} inside "
f"tarfile {_MODEL_RESTORE_PATH} or under local"
)
finally:
# change back working directory
os.chdir(cwd)
else:
# No further action can be taken, file not found anywhere
raise FileNotFoundError(f"Could not find {used_src}")
else:
# Found filepath
archive_item.path = used_src
# But disregarding whether you use "local" or "remote" artifact - always store the original path.
# This fixes issues raising when finetuning NLP models that create and register tokenizer vocabs.
if config_path in self.artifacts:
logging.warning(
f"Artifact {config_path} with value '{self.artifacts[config_path]}' "
f"already exists and will be overwritten with value '{src}'!"
)
self.artifacts[config_path] = archive_item
return used_src
else:
return src
def _default_save_to(self, save_path: str):
    """
    Saves model instance (weights and configuration) into a .nemo file.
    Use the "restore_from" method to fully restore the instance from a .nemo file.

    The .nemo file is a tar.gz archive containing:
        model_config.yaml - model configuration in .yaml format; deserializable
            into the cfg argument of the model's constructor
        model_weights.ckpt - model checkpoint
        plus any artifacts previously registered via register_artifact().

    Args:
        save_path: Path to the .nemo file where the model instance should be saved.
    """
    # Stage config/weights/artifacts in a temporary directory, then tar it
    # up as the very last step so a failed save never leaves a partial file.
    with tempfile.TemporaryDirectory() as tmpdir:
        config_yaml = path.join(tmpdir, _MODEL_CONFIG_YAML)
        model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
        # Copy every registered artifact (tokenizer models, vocab files, ...)
        # into the staging directory so it ships inside the archive.
        if hasattr(self, 'artifacts') and self.artifacts is not None:
            for (conf_path, src) in self.artifacts.items():  # type: (str, model_utils.ArtifactItem)
                try:
                    if src.path_type == model_utils.ArtifactPathType.LOCAL_PATH and os.path.exists(src.path):
                        shutil.copy2(src.path, tmpdir)
                    elif src.path_type == model_utils.ArtifactPathType.TAR_PATH:
                        # Artifact lives inside the archive this model was restored
                        # from; unpack that archive and copy the file out of it.
                        # Remember the current working directory so we can chdir back
                        # (original .nemo behavior resolves paths relative to cwd).
                        cwd = os.getcwd()
                        try:
                            # Step into the nemo archive to try and find the file
                            with tempfile.TemporaryDirectory() as archive_dir:
                                self.__unpack_nemo_file(path2file=_MODEL_RESTORE_PATH, out_folder=archive_dir)
                                os.chdir(archive_dir)
                                shutil.copy2(src.path, tmpdir)
                        finally:
                            # Always restore the working directory, even on failure.
                            os.chdir(cwd)
                    else:
                        raise ValueError(f"Invalid ArchivePathType found: {src.path_type}")
                except Exception:
                    # Best-effort: a missing/uncopyable artifact is logged, not fatal.
                    logging.error(f"Could not copy artifact {src} used in {conf_path}")
        self.to_config_file(path2yaml_file=config_yaml)
        torch.save(self.state_dict(), model_weights)
        self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)
def _eff_save_to(self, save_path: str):
    """
    Saves model instance (weights, configuration and artifacts) into an EFF archive
    using the default `save_to` recipe from NeMoCookbook.

    .. note::
        For NVIDIA NeMo the EFF archives also use the .nemo postfix.

    The method creates an EFF-based file that is a tar.gz archive containing:
        manifest.yaml - yaml file describing the content of the archive.
        model_config.yaml - model configuration in .yaml format;
            deserializable into the cfg argument of the model's constructor.
        model_weights.ckpt - model checkpoint.

    Args:
        save_path: Path to the archive file where the model instance should be saved.
    """
    # All EFF-specific serialization logic is delegated to the cookbook recipe.
    NeMoCookbook().save_to(obj=self, save_path=save_path)
@rank_zero_only
def save_to(self, save_path: str):
    """
    Serialize this model (weights and configuration) to `save_path`.

    Depending on EFF library availability and the model-level EFF flag,
    the output is either an EFF archive or a plain .nemo tar.gz archive;
    either can be reloaded with "restore_from".

    The .nemo tar.gz archive contains:
        model_config.yaml - model configuration in .yaml format; deserializable
            into the cfg argument of the model's constructor
        model_weights.ckpt - model checkpoint

    Args:
        save_path: Path to the .nemo file where the model instance should be saved.
    """
    # Only the global rank-zero process writes the archive (NeMo-side guard,
    # in addition to the @rank_zero_only decorator).
    if not is_global_rank_zero():
        return
    use_eff = _EFF_PRESENT_ and self.use_eff_save()
    if use_eff:
        # EFF archive path.
        self._eff_save_to(save_path)
    else:
        # Plain .nemo tar.gz archive path.
        self._default_save_to(save_path)
@classmethod
def _default_restore_from(
    cls,
    restore_path: str,
    override_config_path: Optional[Union[OmegaConf, str]] = None,
    map_location: Optional[torch.device] = None,
    strict: bool = False,
    return_config: bool = False,
):
    """
    Restores a model instance (weights and configuration) from a .nemo file.

    Args:
        restore_path: path to the .nemo file from which the model should be instantiated
        override_config_path: path to a yaml config that will override the internal
            config file, or an OmegaConf / DictConfig object representing the model config.
        map_location: Optional torch.device() to map the instantiated model to a device.
            By default (None), it will select a GPU if available, falling back to CPU otherwise.
        strict: Passed to load_state_dict.
        return_config: If set to true, will return just the underlying config of the restored
            model as an OmegaConf DictConfig object without instantiating the model.

    Example:
        ```
        model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo')
        assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)
        ```

    Returns:
        An instance of type cls or its underlying config (if return_config is set).
    """
    # Remember the working directory; artifact paths are resolved relative to it
    # (original .nemo behavior), and we chdir into the unpacked archive below.
    cwd = os.getcwd()
    if map_location is None:
        # Default device selection: GPU when available, otherwise CPU.
        if torch.cuda.is_available():
            map_location = torch.device('cuda')
        else:
            map_location = torch.device('cpu')
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            # Flag restoration so register_artifact() resolves archive-relative paths.
            cls._set_model_restore_state(is_being_restored=True)
            cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
            os.chdir(tmpdir)
            if override_config_path is None:
                config_yaml = path.join(tmpdir, _MODEL_CONFIG_YAML)
            else:
                # Can be a str path or an OmegaConf / DictConfig object.
                config_yaml = override_config_path
            if not isinstance(config_yaml, (OmegaConf, DictConfig)):
                conf = OmegaConf.load(config_yaml)
            else:
                conf = config_yaml
            if override_config_path is not None:
                # Resolve interpolations in the override config before use.
                conf = OmegaConf.to_container(conf, resolve=True)
                conf = OmegaConf.create(conf)
                # If the override is a top-level config, extract just `model` from it.
                if 'model' in conf:
                    conf = conf.model
            if return_config:
                # Caller only wants the config, not an instantiated model.
                instance = conf
            else:
                model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
                OmegaConf.set_struct(conf, True)
                instance = cls.from_config_dict(config=conf)
                instance = instance.to(map_location)
                instance.load_state_dict(torch.load(model_weights, map_location=map_location), strict=strict)
                logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')
        finally:
            # Always clear the restore flag and restore the working directory.
            cls._set_model_restore_state(is_being_restored=False)
            os.chdir(cwd)
    return instance
@classmethod
def _eff_restore_from(
    cls,
    restore_path: str,
    override_config_path: Optional[Union[OmegaConf, str]] = None,
    map_location: Optional[torch.device] = None,
    strict: bool = False,
    return_config: bool = False,
):
    """
    Restores a model instance (weights, configuration and artifacts) from an EFF
    archive by delegating to NeMoCookbook's default `restore_from` recipe.

    Args:
        restore_path: path to the file from which the model should be instantiated
        override_config_path: path to a yaml config that will override the internal config file
        map_location: Optional torch.device() to map the instantiated model to a device.
            By default (None), it will select a GPU if available, falling back to CPU otherwise.
        strict: Passed to load_state_dict.
        return_config: Not supported for EFF restoration; must be False.

    Returns:
        An instance of type cls

    Raises:
        NotImplementedError: if return_config is requested.
    """
    # Config-only restoration is not available through the EFF path.
    if return_config is True:
        raise NotImplementedError("`return_config` is not implemented for EFF based restoration of models.")
    restore_kwargs = {
        'restore_path': restore_path,
        'obj_cls': cls,
        'override_config_path': override_config_path,
        'map_location': map_location,
        'strict': strict,
    }
    return NeMoCookbook().restore_from(**restore_kwargs)
@classmethod
def restore_from(
    cls,
    restore_path: str,
    override_config_path: Optional[Union[OmegaConf, str]] = None,
    map_location: Optional[torch.device] = None,
    strict: bool = False,
    return_config: bool = False,
):
    """
    Restores a model instance (weights and configuration) from file.

    The method first tries to load the file as an EFF archive; if the EFF
    library is missing, or the file is not an EFF archive, it falls back to
    the original .nemo restore method.

    Args:
        restore_path: path to the .nemo file from which the model should be instantiated
        override_config_path: path to a yaml config that will override the internal
            config file, or an OmegaConf / DictConfig object representing the model config.
        map_location: Optional torch.device() to map the instantiated model to a device.
            By default (None), it will select a GPU if available, falling back to CPU otherwise.
        strict: Passed to load_state_dict.
        return_config: If set to true, will return just the underlying config of the restored
            model as an OmegaConf DictConfig object without instantiating the model.

    Example:
        ```
        model = nemo.collections.asr.models.EncDecCTCModel.restore_from('asr.nemo')
        assert isinstance(model, nemo.collections.asr.models.EncDecCTCModel)
        ```

    Returns:
        An instance of type cls or its underlying config (if return_config is set).
    """
    # Fail fast on a missing file.
    if not path.exists(restore_path):
        raise FileNotFoundError(f"Can't find {restore_path}")
    # Record the absolute restore path so register_artifact() and
    # _is_restore_type_tarfile() can consult it later.
    global _MODEL_RESTORE_PATH
    _MODEL_RESTORE_PATH = os.path.abspath(os.path.expanduser(restore_path))
    if not _EFF_PRESENT_:
        # EFF library absent: the file can only be a .nemo tar archive.
        return cls._default_restore_from(restore_path, override_config_path, map_location, strict, return_config)
    try:
        # EFF available: attempt EFF restoration first.
        return cls._eff_restore_from(restore_path, override_config_path, map_location, strict, return_config)
    except (FileNotFoundError, TypeError):
        # Not a readable EFF archive after all -- fall back to the tar restore path.
        return cls._default_restore_from(restore_path, override_config_path, map_location, strict, return_config)
@classmethod
def extract_state_dict_from(cls, restore_path: str, save_dir: str, split_by_module: bool = False):
    """
    Extract the state dict(s) from a provided .nemo tarfile and save it to a directory.

    Args:
        restore_path: path to the .nemo file from which state dict(s) should be extracted
        save_dir: directory in which the saved state dict(s) should be stored
        split_by_module: bool flag, which determines whether the output checkpoint should
            be for the entire Model, or the individual modules that comprise the Model

    Example:
        To convert the .nemo tarfile into a single Model level PyTorch checkpoint
        ```
        state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts')
        ```
        To restore a model from a Model level checkpoint
        ```
        model = nemo.collections.asr.models.EncDecCTCModel(cfg)  # or any other method of restoration
        model.load_state_dict(torch.load("./asr_ckpts/model_weights.ckpt"))
        ```
        To convert the .nemo tarfile into multiple Module level PyTorch checkpoints
        ```
        state_dict = nemo.collections.asr.models.EncDecCTCModel.extract_state_dict_from('asr.nemo', './asr_ckpts',
            split_by_module=True)
        ```
        To restore a module from a Module level checkpoint
        ```
        model = nemo.collections.asr.models.EncDecCTCModel(cfg)  # or any other method of restoration
        # load the individual components
        model.preprocessor.load_state_dict(torch.load("./asr_ckpts/preprocessor.ckpt"))
        model.encoder.load_state_dict(torch.load("./asr_ckpts/encoder.ckpt"))
        model.decoder.load_state_dict(torch.load("./asr_ckpts/decoder.ckpt"))
        ```

    Returns:
        The state dict that was loaded from the original .nemo checkpoint

    Raises:
        FileNotFoundError: if `restore_path` does not exist.
    """
    if not path.exists(restore_path):
        # Bug fix: previously raised FileExistsError, which signals the opposite
        # condition; a missing file is FileNotFoundError (consistent with restore_from).
        raise FileNotFoundError(f"Can't find {restore_path}")
    # Remember cwd: we chdir into the unpacked archive and must chdir back.
    cwd = os.getcwd()
    save_dir = os.path.abspath(save_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
            os.chdir(tmpdir)
            model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
            state_dict = torch.load(model_weights)
            if not split_by_module:
                # Single checkpoint containing the whole model's weights.
                filepath = os.path.join(save_dir, _MODEL_WEIGHTS)
                torch.save(state_dict, filepath)
            else:
                # One checkpoint per top-level module prefix (e.g. "encoder", "decoder").
                key_set = set([key.split(".")[0] for key in state_dict.keys()])
                for primary_key in key_set:
                    inner_keys = [key for key in state_dict.keys() if key.split(".")[0] == primary_key]
                    # Strip the module prefix from each key inside the subset.
                    state_dict_subset = {
                        ".".join(inner_key.split(".")[1:]): state_dict[inner_key] for inner_key in inner_keys
                    }
                    filepath = os.path.join(save_dir, f"{primary_key}.ckpt")
                    torch.save(state_dict_subset, filepath)
            logging.info(f'Checkpoints from {restore_path} were successfully extracted into {save_dir}.')
        finally:
            # Always restore the working directory.
            os.chdir(cwd)
    return state_dict
@classmethod
def load_from_checkpoint(
    cls,
    checkpoint_path: str,
    *args,
    map_location: Optional[Union[Dict[str, str], str, torch.device, int, Callable]] = None,
    hparams_file: Optional[str] = None,
    strict: bool = True,
    **kwargs,
):
    """
    Loads ModelPT from checkpoint, with some maintenance of restoration.
    For documentation, please refer to LightningModule.load_from_checkpoint() documentation.
    """
    checkpoint = None
    try:
        # Flag restoration for the duration of the load so that
        # register_artifact() calls made during __init__ behave correctly.
        cls._set_model_restore_state(is_being_restored=True)
        checkpoint = super().load_from_checkpoint(
            checkpoint_path=checkpoint_path,
            *args,
            map_location=map_location,
            hparams_file=hparams_file,
            strict=strict,
            **kwargs,
        )
    finally:
        # Always clear the restore flag, even if loading failed.
        cls._set_model_restore_state(is_being_restored=False)
    return checkpoint
@abstractmethod
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
    """
    Sets up the data loader to be used in training. Must be implemented by subclasses.

    Args:
        train_data_config: training data layer parameters.
    """
    pass
@abstractmethod
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
    """
    Sets up the data loader to be used in validation. Must be implemented by subclasses.

    Args:
        val_data_config: validation data layer parameters.
    """
    pass
def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):
    """
    (Optionally) sets up the data loader to be used in testing.
    Default implementation raises; subclasses that support testing override it.

    Args:
        test_data_config: test data layer parameters.

    Raises:
        NotImplementedError: always, unless overridden by a subclass.
    """
    raise NotImplementedError()
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
    """
    (Optionally) sets up validation data loader(s), with support for multiple loaders.

    Args:
        val_data_config: validation data layer parameters.
    """
    # Placeholders; the resolver helper overrides these as appropriate.
    self._val_dl_idx = 0
    self._validation_names = None
    self._validation_dl = None  # type: torch.utils.data.DataLoader
    # Preserve the config on the model before resolving the loaders.
    self._update_dataset_config(dataset_name='validation', config=val_data_config)
    self._multi_dataset_mode = True
    try:
        model_utils.resolve_validation_dataloaders(model=self)
    finally:
        self._multi_dataset_mode = False
    # When the resolver produced several loaders but no names, auto-generate
    # positional prefixes ("val_0_", "val_1_", ...).
    if self._validation_names is None and type(self._validation_dl) in (list, tuple):
        self._validation_names = [f'val_{idx}_' for idx in range(len(self._validation_dl))]
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
    """
    (Optionally) sets up test data loader(s), with support for multiple loaders.

    Args:
        test_data_config: test data layer parameters.
    """
    # Placeholders; the resolver helper overrides these as appropriate.
    self._test_dl_idx = 0
    self._test_names = None
    self._test_dl = None  # type: torch.utils.data.DataLoader
    # Preserve the config on the model before resolving the loaders.
    self._update_dataset_config(dataset_name='test', config=test_data_config)
    self._multi_dataset_mode = True
    try:
        model_utils.resolve_test_dataloaders(model=self)
    finally:
        self._multi_dataset_mode = False
    # When the resolver produced several loaders but no names, auto-generate
    # positional prefixes ("test_0_", "test_1_", ...).
    if self._test_names is None and type(self._test_dl) in (list, tuple):
        self._test_names = [f'test_{idx}_' for idx in range(len(self._test_dl))]
def setup_optimization(self, optim_config: Optional[Union[DictConfig, Dict]] = None):
    """
    Prepares an optimizer from a string name and its optional config parameters.

    Args:
        optim_config: A dictionary containing the following keys:

            * "lr": mandatory key for learning rate. Will raise ValueError if not provided.
            * "optimizer": string name pointing to one of the available optimizers in the registry. \
            If not provided, defaults to "adam".
            * "opt_args": Optional list of strings, in the format "arg_name=arg_value". \
            The list of "arg_value" will be parsed and a dictionary of optimizer kwargs \
            will be built and supplied to instantiate the optimizer.

    Returns:
        The created optimizer and scheduler (the scheduler may be None),
        so callers may create multiple optimizers/schedulers.
    """
    # If config was not explicitly passed to us
    if optim_config is None:
        # See if internal config has `optim` namespace
        if self._cfg is not None and hasattr(self._cfg, 'optim'):
            optim_config = self._cfg.optim
    # If config is still None, or internal config has no Optim, return without instantiation
    if optim_config is None:
        logging.info('No optimizer config provided, therefore no optimizer was created')
        return
    else:
        # Preserve the configuration
        if not isinstance(optim_config, DictConfig):
            optim_config = OmegaConf.create(optim_config)
        # See if internal config has `optim` namespace before preservation
        if self._cfg is not None and hasattr(self._cfg, 'optim'):
            if self._cfg.optim is None:
                self._cfg.optim = copy.deepcopy(optim_config)
            else:
                # open_dict temporarily lifts struct mode so the key can be replaced
                with open_dict(self._cfg.optim):
                    self._cfg.optim = copy.deepcopy(optim_config)
    # Setup optimizer and scheduler; work on a plain (mutable) container from here on
    if optim_config is not None and isinstance(optim_config, DictConfig):
        optim_config = OmegaConf.to_container(optim_config, resolve=True)
    if 'sched' in optim_config and self._trainer is not None:
        if not isinstance(self._trainer.accumulate_grad_batches, int):
            raise ValueError("We do not currently support gradient acculumation that is not an integer.")
        if self._trainer.max_steps is None:
            # Store information needed to calculate max_steps
            optim_config['sched']['t_max_epochs'] = self._trainer.max_epochs
            optim_config['sched']['t_accumulate_grad_batches'] = self._trainer.accumulate_grad_batches
            optim_config['sched']['t_limit_train_batches'] = self._trainer.limit_train_batches
            # Worker count depends on the distributed backend in use
            if self._trainer.distributed_backend is None:
                optim_config['sched']['t_num_workers'] = self._trainer.num_gpus or 1
            elif self._trainer.distributed_backend == "ddp_cpu":
                optim_config['sched']['t_num_workers'] = self._trainer.num_processes * self._trainer.num_nodes
            elif self._trainer.distributed_backend == "ddp":
                optim_config['sched']['t_num_workers'] = self._trainer.num_gpus * self._trainer.num_nodes
            else:
                logging.warning(
                    f"The lightning trainer received accelerator: {self._trainer.distributed_backend}. We "
                    "recommend to use 'ddp' instead."
                )
                optim_config['sched']['t_num_workers'] = self._trainer.num_gpus * self._trainer.num_nodes
        else:
            optim_config['sched']['max_steps'] = self._trainer.max_steps
    # Force into DictConfig from nested structure
    optim_config = OmegaConf.create(optim_config)
    # Get back nested dict so we its mutable
    optim_config = OmegaConf.to_container(optim_config, resolve=True)
    # Extract scheduler config if inside optimizer config
    if 'sched' in optim_config:
        scheduler_config = optim_config.pop('sched')
    else:
        scheduler_config = None
    # Check if caller provided optimizer name, default to Adam otherwise
    optimizer_cls = optim_config.get('_target_', None)
    if optimizer_cls is None:
        # Try to get optimizer name for dynamic resolution, defaulting to Adam
        optimizer_name = optim_config.get('name', 'adam')
    else:
        if inspect.isclass(optimizer_cls):
            optimizer_name = optimizer_cls.__name__.lower()
        else:
            # resolve the class name (lowercase) from the class path if not provided
            optimizer_name = optimizer_cls.split(".")[-1].lower()
    # We are guarenteed to have lr since it is required by the argparser
    # But maybe user forgot to pass it to this function
    lr = optim_config.get('lr', None)
    # Check if caller has optimizer kwargs, default to empty dictionary
    if 'args' in optim_config:
        optimizer_args = optim_config.pop('args')
        optimizer_args = optim.parse_optimizer_args(optimizer_name, optimizer_args)
    else:
        optimizer_args = copy.deepcopy(optim_config)
        # Remove extra parameters from optimizer_args nest
        # Assume all other parameters are to be passed into optimizer constructor
        optimizer_args.pop('name', None)
        optimizer_args.pop('cls', None)
        optimizer_args.pop('lr', None)
    # Adaptive schedulers don't need `lr`
    if lr is not None:
        optimizer_args['lr'] = lr
    # Actually instantiate the optimizer
    if optimizer_cls is not None:
        if inspect.isclass(optimizer_cls):
            # Direct class object: instantiate immediately
            optimizer = optimizer_cls(self.parameters(), **optimizer_args)
            logging.info("Optimizer config = %s", str(optimizer))
            self._optimizer = optimizer
        else:
            # Attempt class path resolution via hydra
            try:
                optimizer_cls = OmegaConf.create({'_target_': optimizer_cls})
                if lr is not None:
                    optimizer_config = {'lr': lr}
                else:
                    optimizer_config = {}
                optimizer_config.update(optimizer_args)
                optimizer_instance = hydra.utils.instantiate(
                    optimizer_cls, self.parameters(), **optimizer_config
                )  # type: DictConfig
                logging.info("Optimizer config = %s", str(optimizer_instance))
                self._optimizer = optimizer_instance
            except Exception as e:
                logging.error(
                    "Could not instantiate class path - {} with kwargs {}".format(
                        optimizer_cls, str(optimizer_config)
                    )
                )
                raise e
    else:
        # Resolve by registry name (e.g. 'adam', 'sgd', ...)
        optimizer = optim.get_optimizer(optimizer_name)
        optimizer = optimizer(self.parameters(), **optimizer_args)
        logging.info("Optimizer config = %s", str(optimizer))
        self._optimizer = optimizer
    # Try to instantiate scheduler for optimizer
    self._scheduler = prepare_lr_scheduler(
        optimizer=self._optimizer, scheduler_config=scheduler_config, train_dataloader=self._train_dl
    )
    # Return the optimizer with/without scheduler
    # This return allows multiple optimizers or schedulers to be created
    return self._optimizer, self._scheduler
def configure_optimizers(self):
    """
    PyTorch Lightning hook: builds the optimizer (and optional LR scheduler)
    via setup_optimization() and returns them in the format PTL expects.
    """
    self.setup_optimization()
    if self._scheduler is not None:
        # PTL convention for optimizer + scheduler pairs: two parallel lists.
        return [self._optimizer], [self._scheduler]
    return self._optimizer
def train_dataloader(self):
    """PyTorch Lightning hook: return the pre-built training dataloader (or None)."""
    return self._train_dl
def val_dataloader(self):
    """PyTorch Lightning hook: return the pre-built validation dataloader(s) (or None)."""
    return self._validation_dl
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def validation_epoch_end(
    self, outputs: Union[List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]]
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
    """
    Default validation epoch-end hook which automatically supports multiple
    data loaders via `multi_validation_epoch_end`.

    If multi dataset support is not required, override this method entirely in
    the base class. In such a case, there is no need to implement
    `multi_validation_epoch_end` either.

    .. note::
        If more than one data loader exists, and they all provide `val_loss`,
        only the `val_loss` of the first data loader will be used by default.
        This default can be changed by passing the special key `val_dl_idx: int`
        inside the `validation_ds` config.

    Args:
        outputs: Single or nested list of tensor outputs from one or more data loaders.

    Returns:
        A dictionary containing the union of all items from individual data_loaders,
        along with merged logs from all data loaders.
    """
    # Case where we dont provide data loaders
    if outputs is not None and len(outputs) == 0:
        return {}
    # Case where we provide exactly 1 data loader: outputs is a flat list of dicts
    if type(outputs[0]) == dict:
        output_dict = self.multi_validation_epoch_end(outputs, dataloader_idx=0)
        if output_dict is not None and 'log' in output_dict:
            self.log_dict(output_dict.pop('log'), on_epoch=True)
        return output_dict
    else:  # Case where we provide more than 1 data loader
        output_dict = {'log': {}}
        # The output is a list of lists of dicts; the outer list indexes dataloaders
        for dataloader_idx, val_outputs in enumerate(outputs):
            # Get prefix and dispatch call to multi epoch end
            dataloader_prefix = self.get_validation_dataloader_prefix(dataloader_idx)
            dataloader_logs = self.multi_validation_epoch_end(val_outputs, dataloader_idx=dataloader_idx)
            # If result was not provided, generate empty dict
            dataloader_logs = dataloader_logs or {}
            # Perform `val_loss` resolution first (if provided outside logs):
            # only the designated dataloader (self._val_dl_idx) may set it.
            if 'val_loss' in dataloader_logs:
                if 'val_loss' not in output_dict and dataloader_idx == self._val_dl_idx:
                    output_dict['val_loss'] = dataloader_logs['val_loss']
            # For every item in the result dictionary
            for k, v in dataloader_logs.items():
                # If the key is `log`
                if k == 'log':
                    # Parse every element of the log, and attach the prefix name of the data loader
                    log_dict = {}
                    for k_log, v_log in v.items():
                        # For the designated dataloader, store the metric twice:
                        # once under its bare name and once under the prefixed name
                        # (the prefixed copy eases comparison / avoids name clashes).
                        if k_log not in output_dict['log'] and dataloader_idx == self._val_dl_idx:
                            new_k_log = k_log
                            # Also insert duplicate key with prefix for ease of comparison / avoid name clash
                            log_dict[dataloader_prefix + k_log] = v_log
                        else:
                            # Simply prepend prefix to key and save
                            new_k_log = dataloader_prefix + k_log
                        # Store log value
                        log_dict[new_k_log] = v_log
                    # Merge this dataloader's logs into the global log storage
                    output_logs = output_dict['log']
                    output_logs.update(log_dict)
                    # Update global log storage
                    output_dict['log'] = output_logs
                else:
                    # If any values are stored outside 'log', simply prefix name and store
                    new_k = dataloader_prefix + k
                    output_dict[new_k] = v
        if 'log' in output_dict:
            self.log_dict(output_dict.pop('log'), on_epoch=True)
        # return everything else
        return output_dict
def test_epoch_end(
    self, outputs: Union[List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]]
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
    """
    Default test epoch-end hook which automatically supports multiple
    data loaders via `multi_test_epoch_end`.

    If multi dataset support is not required, override this method entirely in
    the base class. In such a case, there is no need to implement
    `multi_test_epoch_end` either.

    .. note::
        If more than one data loader exists, and they all provide `test_loss`,
        only the `test_loss` of the first data loader will be used by default.
        This default can be changed by passing the special key `test_dl_idx: int`
        inside the `test_ds` config.

    Args:
        outputs: Single or nested list of tensor outputs from one or more data loaders.

    Returns:
        A dictionary containing the union of all items from individual data_loaders,
        along with merged logs from all data loaders.
    """
    # Case where we dont provide data loaders
    if outputs is not None and len(outputs) == 0:
        return {}
    # Case where we provide exactly 1 data loader: outputs is a flat list of dicts
    if type(outputs[0]) == dict:
        output_dict = self.multi_test_epoch_end(outputs, dataloader_idx=0)
        if output_dict is not None and 'log' in output_dict:
            self.log_dict(output_dict.pop('log'), on_epoch=True)
        return output_dict
    else:  # Case where we provide more than 1 data loader
        output_dict = {'log': {}}
        # The output is a list of lists of dicts; the outer list indexes dataloaders
        for dataloader_idx, test_outputs in enumerate(outputs):
            # Get prefix and dispatch call to multi epoch end
            dataloader_prefix = self.get_test_dataloader_prefix(dataloader_idx)
            dataloader_logs = self.multi_test_epoch_end(test_outputs, dataloader_idx=dataloader_idx)
            # If result was not provided, generate empty dict
            dataloader_logs = dataloader_logs or {}
            # Perform `test_loss` resolution first (if provided outside logs):
            # only the designated dataloader (self._test_dl_idx) may set it.
            if 'test_loss' in dataloader_logs:
                if 'test_loss' not in output_dict and dataloader_idx == self._test_dl_idx:
                    output_dict['test_loss'] = dataloader_logs['test_loss']
            # For every item in the result dictionary
            for k, v in dataloader_logs.items():
                # If the key is `log`
                if k == 'log':
                    # Parse every element of the log, and attach the prefix name of the data loader
                    log_dict = {}
                    for k_log, v_log in v.items():
                        # For the designated dataloader, store the metric twice:
                        # once under its bare name and once under the prefixed name
                        # (the prefixed copy eases comparison / avoids name clashes).
                        if k_log not in output_dict['log'] and dataloader_idx == self._test_dl_idx:
                            new_k_log = k_log
                            # Also insert duplicate key with prefix for ease of comparison / avoid name clash
                            log_dict[dataloader_prefix + k_log] = v_log
                        else:
                            # Simply prepend prefix to key and save
                            new_k_log = dataloader_prefix + k_log
                        log_dict[new_k_log] = v_log
                    # Merge this dataloader's logs into the global log storage
                    output_logs = output_dict.get('log', {})
                    output_logs.update(log_dict)
                    # Update global log storage
                    output_dict['log'] = output_logs
                else:
                    # If any values are stored outside 'log', simply prefix name and store
                    new_k = dataloader_prefix + k
                    output_dict[new_k] = v
        if 'log' in output_dict:
            self.log_dict(output_dict.pop('log'), on_epoch=True)
        # return everything else
        return output_dict
def multi_validation_epoch_end(
    self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
    """
    Per-dataloader validation epoch-end hook. The default implementation only
    emits a warning; subclasses that use multiple validation data loaders must
    override this method to aggregate `outputs` for the given `dataloader_idx`.
    """
    logging.warning(
        "Multi data loader support has been enabled, but "
        "`multi_validation_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
        "If you require multi data loader support for validation sets, please override this method.\n"
        "If you do not require multi data loader support, please instead override "
        "`validation_epoch_end(outputs)."
    )
def multi_test_epoch_end(
    self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
    """
    Per-dataloader test epoch-end hook. The default implementation only emits
    a warning; subclasses that use multiple test data loaders must override
    this method to aggregate `outputs` for the given `dataloader_idx`.
    """
    logging.warning(
        "Multi data loader support has been enabled, but "
        "`multi_test_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
        "If you require multi data loader support for validation sets, please override this method.\n"
        "If you do not require multi data loader support, please instead override "
        "`test_epoch_end(outputs)."
    )
def get_validation_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
    """
    Return the name of the validation data loader at `dataloader_idx`;
    this name is prepended to all logs produced for that loader.

    Args:
        dataloader_idx: Index of the data loader.

    Returns:
        str name of the data loader at the provided index.
    """
    names = self._validation_names
    return names[dataloader_idx]
def get_test_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
    """
    Return the name of the test data loader at `dataloader_idx`;
    this name is prepended to all logs produced for that loader.

    Args:
        dataloader_idx: Index of the data loader.

    Returns:
        str name of the data loader at the provided index.
    """
    names = self._test_names
    return names[dataloader_idx]
def teardown(self, stage: str):
    """
    Called at the end of fit and test.

    Args:
        stage: either 'fit' or 'test'
    """
    if stage == 'fit':
        # Drop PTL's GPU-count env var so a later single-GPU trainer.test()
        # does not inherit DDP settings from a preceding multi-GPU
        # trainer.fit() run (temporary workaround for a PTL issue).
        os.environ.pop('PL_TRAINER_GPUS', None)
    super().teardown(stage)
def prepare_test(self, trainer: 'Trainer') -> bool:
    """
    Check whether the model can safely be tested on a dataset after
    training (or loading a checkpoint).

    Usage::

        trainer = Trainer()
        if model.prepare_test(trainer):
            trainer.test(model)

    Returns:
        bool declaring the model safe to test; warnings are logged when
        False has to be returned, to guide the user.
    """
    # Testing requires a `test_ds` section in the model config.
    if not hasattr(self._cfg, 'test_ds'):
        logging.info("No `test_ds` config found within the manifest.")
        return False
    # Replace ddp multi-gpu until PTL has a fix
    ddp_warning = """\n\nDuring testing, it is currently advisable to construct a new Trainer "
"with single GPU and no DDP to obtain accurate results.
"Following pattern should be used: "
"gpu = 1 if cfg.trainer.gpus != 0 else 0"
"trainer = Trainer(gpus=gpu)"
"if model.prepare_test(trainer):"
" trainer.test(model)\n\n"""
    # Multi-GPU testing is not reliable under DDP; refuse and explain.
    if trainer is not None and trainer.num_gpus > 1:
        logging.warning(ddp_warning)
        return False
    # Attach the trainer so trainer.test(model) can run.
    self.set_trainer(trainer)
    return True
def set_trainer(self, trainer: Trainer):
    """
    Attach a PyTorch Lightning Trainer to the model and propagate its
    world-size information to AppState.

    Args:
        trainer: PyTorch Lightning Trainer object.
    """
    self._trainer = trainer
    self.set_world_size(trainer)
def set_world_size(self, trainer: Trainer):
    """
    Determines the world size from the PyTorch Lightning Trainer
    and then updates AppState.

    Args:
        trainer (Trainer): PyTorch Lightning Trainer object
    """
    # Update AppState with world information from trainer
    if isinstance(trainer, Trainer):
        app_state = AppState()
        # Bug fix: read GPU/node counts from the validated `trainer` argument
        # rather than `self._trainer`, which may not be assigned yet when this
        # method is called directly (the isinstance check above only validates
        # the argument, not the attribute).
        if trainer.num_gpus and trainer.num_nodes:
            app_state.world_size = trainer.num_gpus * trainer.num_nodes
    else:
        logging.warning(f'World size can only be set by PyTorch Lightning Trainer.')
def _update_dataset_config(self, dataset_name: str, config: Optional[Union[DictConfig, Dict]]):
    """
    Update the config (if not None) of the dataset by given name.
    Preserves said config after updating.

    Args:
        dataset_name: str name of the dataset whose config is being updated.
            Can be one of `train`, `validation` and `test`.
        config: Optional DictConfig or dict. If None is passed, this method simply returns.
            If dict is passed, it is cast into a DictConfig.
            The internal config is updated with the passed config.

    Raises:
        ValueError: if `dataset_name` is not one of train / validation / test.
    """
    # In multi-dataset mode the resolver helpers manage the config themselves;
    # skip to avoid clobbering their per-loader updates.
    if hasattr(self, '_multi_dataset_mode') and self._multi_dataset_mode is True:
        return
    if config is not None:
        if not isinstance(config, DictConfig):
            config = OmegaConf.create(config)
        if dataset_name in ['train', 'validation', 'test']:
            # Temporarily lift struct mode so the `<name>_ds` key can be replaced.
            OmegaConf.set_struct(self.cfg, False)
            key_name = dataset_name + "_ds"
            self.cfg[key_name] = config
            OmegaConf.set_struct(self.cfg, True)
            # Update hyper parameters by calling the `cfg` property setter,
            # which re-syncs PTL hparams with the modified config.
            self.cfg = self._cfg
        else:
            raise ValueError("`dataset_name` when updating config must be one of [train, validation, test]")
@property
def num_weights(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
    @property
    def cfg(self):
        """Model configuration (DictConfig) this model was constructed from."""
        return self._cfg
    @cfg.setter
    def cfg(self, cfg):
        """Replace the model configuration and re-register it as hyperparameters."""
        self._cfg = cfg
        self._set_hparams(cfg)
@staticmethod
def __make_nemo_file_from_folder(filename, source_dir):
with tarfile.open(filename, "w:gz") as tar:
# tar.add(source_dir, arcname=path.basename(source_dir))
tar.add(source_dir, arcname="./")
@staticmethod
def __unpack_nemo_file(path2file: str, out_folder: str) -> str:
if not path.exists(path2file):
raise FileNotFoundError(f"{path2file} does not exist")
tar = tarfile.open(path2file, "r:gz")
tar.extractall(path=out_folder)
tar.close()
return out_folder
@staticmethod
def _is_model_being_restored() -> bool:
global _MODEL_IS_RESTORED
return _MODEL_IS_RESTORED
@staticmethod
def _set_model_restore_state(is_being_restored: bool):
global _MODEL_IS_RESTORED
_MODEL_IS_RESTORED = is_being_restored
@staticmethod
def _is_restore_type_tarfile() -> bool:
"""
Utility method that checks if the restore path of the underlying Model
is a tarfile (can be any valid archive)._MODEL_EFF_SAVE
"""
global _MODEL_RESTORE_PATH
if _MODEL_RESTORE_PATH is None:
return False
else:
if tarfile.is_tarfile(_MODEL_RESTORE_PATH):
return True
else:
return False
@staticmethod
def set_eff_save(use_eff_save: bool):
global _MODEL_EFF_SAVE
_MODEL_EFF_SAVE = use_eff_save
@staticmethod
def use_eff_save() -> bool:
global _MODEL_EFF_SAVE
return _MODEL_EFF_SAVE
| 43.150696 | 135 | 0.60356 |
import copy
import inspect
import os
import shutil
import tarfile
import tempfile
from abc import abstractmethod
from dataclasses import is_dataclass
from os import path
from typing import Callable, Dict, List, Optional, Union
import hydra
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.utilities import rank_zero_only
from nemo.core import optim
from nemo.core.classes.common import Model
from nemo.core.optim import prepare_lr_scheduler
from nemo.utils import config_utils, logging, model_utils
from nemo.utils.app_state import AppState
from nemo.utils.get_rank import is_global_rank_zero
_MODEL_CONFIG_YAML = "model_config.yaml"
_MODEL_WEIGHTS = "model_weights.ckpt"
try:
from eff.cookbooks import NeMoCookbook
_EFF_PRESENT_ = True
except ImportError:
_EFF_PRESENT_ = False
__all__ = ['ModelPT']
_MODEL_IS_RESTORED = False
_MODEL_RESTORE_PATH = None
_MODEL_EFF_SAVE = True
class ModelPT(LightningModule, Model):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
if trainer is not None and not isinstance(trainer, Trainer):
raise ValueError(
f"trainer constructor argument must be either None or pytroch_lightning.Trainer. But got {type(trainer)} instead."
)
super().__init__()
cfg = model_utils.convert_model_config_to_dict_config(cfg)
cfg = model_utils.maybe_update_config_version(cfg)
if 'target' not in cfg:
OmegaConf.set_struct(cfg, False)
cfg.target = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
OmegaConf.set_struct(cfg, True)
self._cfg = cfg
self.save_hyperparameters(self._cfg)
self._train_dl = None
self._validation_dl = None
self._test_dl = None
self._optimizer = None
self._scheduler = None
self._trainer = trainer
if torch.cuda.is_available() and torch.cuda.current_device() is not None:
app_state = AppState()
app_state.device_id = torch.cuda.current_device()
if self._cfg is not None and not self._is_model_being_restored():
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
self.setup_training_data(self._cfg.train_ds)
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
self.setup_multiple_validation_data(val_data_config=None)
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
self.setup_multiple_test_data(test_data_config=None)
else:
if 'train_ds' in self._cfg and self._cfg.train_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_training_data() method "
f"and provide a valid configuration file to setup the train data loader.\n"
f"Train config : \n{OmegaConf.to_yaml(self._cfg.train_ds)}"
)
if 'validation_ds' in self._cfg and self._cfg.validation_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_validation_data() or ModelPT.setup_multiple_validation_data() method "
f"and provide a valid configuration file to setup the validation data loader(s). \n"
f"Validation config : \n{OmegaConf.to_yaml(self._cfg.validation_ds)}"
)
if 'test_ds' in self._cfg and self._cfg.test_ds is not None:
logging.warning(
f"Please call the ModelPT.setup_test_data() or ModelPT.setup_multiple_test_data() method "
f"and provide a valid configuration file to setup the test data loader(s).\n"
f"Test config : \n{OmegaConf.to_yaml(self._cfg.test_ds)}"
)
self.training_step = model_utils.wrap_training_step(self.training_step)
def register_artifact(self, config_path: str, src: str):
if not hasattr(self, 'artifacts'):
self.artifacts = {}
if self.artifacts is None:
self.artifacts = {}
if src is not None and src.strip() != '':
archive_item = model_utils.ArtifactItem()
basename_src = os.path.basename(src)
if os.path.exists(basename_src):
logging.warning(f"Using {os.path.abspath(basename_src)} instead of {src}.")
used_src = basename_src
if self._is_model_being_restored() and self._is_restore_type_tarfile():
archive_item.path_type = model_utils.ArtifactPathType.TAR_PATH
else:
archive_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH
else:
used_src = src
archive_item.path_type = model_utils.ArtifactPathType.LOCAL_PATH
if not os.path.exists(used_src):
if self._is_restore_type_tarfile():
cwd = os.getcwd()
try:
with tempfile.TemporaryDirectory() as tmpdir:
self.__unpack_nemo_file(path2file=_MODEL_RESTORE_PATH, out_folder=tmpdir)
os.chdir(tmpdir)
if os.path.exists(basename_src):
logging.warning(f"Using {os.path.abspath(basename_src)} instead of {src}.")
used_src = basename_src
archive_item.path = used_src
archive_item.path_type = model_utils.ArtifactPathType.TAR_PATH
else:
raise FileNotFoundError(
f"Could not find {used_src} inside "
f"tarfile {_MODEL_RESTORE_PATH} or under local"
)
finally:
os.chdir(cwd)
else:
raise FileNotFoundError(f"Could not find {used_src}")
else:
archive_item.path = used_src
if config_path in self.artifacts:
logging.warning(
f"Artifact {config_path} with value '{self.artifacts[config_path]}' "
f"already exists and will be overwritten with value '{src}'!"
)
self.artifacts[config_path] = archive_item
return used_src
else:
return src
def _default_save_to(self, save_path: str):
with tempfile.TemporaryDirectory() as tmpdir:
config_yaml = path.join(tmpdir, _MODEL_CONFIG_YAML)
model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
if hasattr(self, 'artifacts') and self.artifacts is not None:
for (conf_path, src) in self.artifacts.items():
try:
if src.path_type == model_utils.ArtifactPathType.LOCAL_PATH and os.path.exists(src.path):
shutil.copy2(src.path, tmpdir)
elif src.path_type == model_utils.ArtifactPathType.TAR_PATH:
cwd = os.getcwd()
try:
with tempfile.TemporaryDirectory() as archive_dir:
self.__unpack_nemo_file(path2file=_MODEL_RESTORE_PATH, out_folder=archive_dir)
os.chdir(archive_dir)
shutil.copy2(src.path, tmpdir)
finally:
os.chdir(cwd)
else:
raise ValueError(f"Invalid ArchivePathType found: {src.path_type}")
except Exception:
logging.error(f"Could not copy artifact {src} used in {conf_path}")
self.to_config_file(path2yaml_file=config_yaml)
torch.save(self.state_dict(), model_weights)
self.__make_nemo_file_from_folder(filename=save_path, source_dir=tmpdir)
def _eff_save_to(self, save_path: str):
NeMoCookbook().save_to(obj=self, save_path=save_path)
@rank_zero_only
def save_to(self, save_path: str):
if not is_global_rank_zero():
return
if _EFF_PRESENT_ and self.use_eff_save():
self._eff_save_to(save_path)
else:
self._default_save_to(save_path)
@classmethod
def _default_restore_from(
cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = False,
return_config: bool = False,
):
cwd = os.getcwd()
if map_location is None:
if torch.cuda.is_available():
map_location = torch.device('cuda')
else:
map_location = torch.device('cpu')
with tempfile.TemporaryDirectory() as tmpdir:
try:
cls._set_model_restore_state(is_being_restored=True)
cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
os.chdir(tmpdir)
if override_config_path is None:
config_yaml = path.join(tmpdir, _MODEL_CONFIG_YAML)
else:
config_yaml = override_config_path
if not isinstance(config_yaml, (OmegaConf, DictConfig)):
conf = OmegaConf.load(config_yaml)
else:
conf = config_yaml
if override_config_path is not None:
conf = OmegaConf.to_container(conf, resolve=True)
conf = OmegaConf.create(conf)
if 'model' in conf:
conf = conf.model
if return_config:
instance = conf
else:
model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
OmegaConf.set_struct(conf, True)
instance = cls.from_config_dict(config=conf)
instance = instance.to(map_location)
instance.load_state_dict(torch.load(model_weights, map_location=map_location), strict=strict)
logging.info(f'Model {cls.__name__} was successfully restored from {restore_path}.')
finally:
cls._set_model_restore_state(is_being_restored=False)
os.chdir(cwd)
return instance
@classmethod
def _eff_restore_from(
cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = False,
return_config: bool = False,
):
if return_config is True:
raise NotImplementedError("`return_config` is not implemented for EFF based restoration of models.")
return NeMoCookbook().restore_from(
restore_path=restore_path,
obj_cls=cls,
override_config_path=override_config_path,
map_location=map_location,
strict=strict,
)
@classmethod
def restore_from(
cls,
restore_path: str,
override_config_path: Optional[Union[OmegaConf, str]] = None,
map_location: Optional[torch.device] = None,
strict: bool = False,
return_config: bool = False,
):
if not path.exists(restore_path):
raise FileNotFoundError(f"Can't find {restore_path}")
global _MODEL_RESTORE_PATH
_MODEL_RESTORE_PATH = os.path.abspath(os.path.expanduser(restore_path))
if _EFF_PRESENT_:
# Try to load the EFF archive.
try:
return cls._eff_restore_from(restore_path, override_config_path, map_location, strict, return_config)
except (FileNotFoundError, TypeError):
# Default to the old .nemo tar archive restore method.
return cls._default_restore_from(
restore_path, override_config_path, map_location, strict, return_config
)
else:
# Load .nemo tar archive using the old restore method.
return cls._default_restore_from(restore_path, override_config_path, map_location, strict, return_config)
@classmethod
def extract_state_dict_from(cls, restore_path: str, save_dir: str, split_by_module: bool = False):
if not path.exists(restore_path):
raise FileExistsError(f"Can't find {restore_path}")
cwd = os.getcwd()
save_dir = os.path.abspath(save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
with tempfile.TemporaryDirectory() as tmpdir:
try:
cls.__unpack_nemo_file(path2file=restore_path, out_folder=tmpdir)
os.chdir(tmpdir)
model_weights = path.join(tmpdir, _MODEL_WEIGHTS)
state_dict = torch.load(model_weights)
if not split_by_module:
filepath = os.path.join(save_dir, _MODEL_WEIGHTS)
torch.save(state_dict, filepath)
else:
key_set = set([key.split(".")[0] for key in state_dict.keys()])
for primary_key in key_set:
inner_keys = [key for key in state_dict.keys() if key.split(".")[0] == primary_key]
state_dict_subset = {
".".join(inner_key.split(".")[1:]): state_dict[inner_key] for inner_key in inner_keys
}
filepath = os.path.join(save_dir, f"{primary_key}.ckpt")
torch.save(state_dict_subset, filepath)
logging.info(f'Checkpoints from {restore_path} were successfully extracted into {save_dir}.')
finally:
os.chdir(cwd)
return state_dict
@classmethod
def load_from_checkpoint(
cls,
checkpoint_path: str,
*args,
map_location: Optional[Union[Dict[str, str], str, torch.device, int, Callable]] = None,
hparams_file: Optional[str] = None,
strict: bool = True,
**kwargs,
):
checkpoint = None
try:
cls._set_model_restore_state(is_being_restored=True)
checkpoint = super().load_from_checkpoint(
checkpoint_path=checkpoint_path,
*args,
map_location=map_location,
hparams_file=hparams_file,
strict=strict,
**kwargs,
)
finally:
cls._set_model_restore_state(is_being_restored=False)
return checkpoint
@abstractmethod
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
pass
@abstractmethod
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
pass
def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):
raise NotImplementedError()
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
self._val_dl_idx = 0
self._validation_names = None
self._validation_dl = None
self._update_dataset_config(dataset_name='validation', config=val_data_config)
try:
self._multi_dataset_mode = True
model_utils.resolve_validation_dataloaders(model=self)
finally:
self._multi_dataset_mode = False
if self._validation_names is None:
if self._validation_dl is not None and type(self._validation_dl) in [list, tuple]:
self._validation_names = ['val_{}_'.format(idx) for idx in range(len(self._validation_dl))]
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self._test_dl_idx = 0
self._test_names = None
self._test_dl = None
self._update_dataset_config(dataset_name='test', config=test_data_config)
try:
self._multi_dataset_mode = True
model_utils.resolve_test_dataloaders(model=self)
finally:
self._multi_dataset_mode = False
if self._test_names is None:
if self._test_dl is not None and type(self._test_dl) in [list, tuple]:
self._test_names = ['test_{}_'.format(idx) for idx in range(len(self._test_dl))]
def setup_optimization(self, optim_config: Optional[Union[DictConfig, Dict]] = None):
if optim_config is None:
if self._cfg is not None and hasattr(self._cfg, 'optim'):
optim_config = self._cfg.optim
if optim_config is None:
logging.info('No optimizer config provided, therefore no optimizer was created')
return
else:
if not isinstance(optim_config, DictConfig):
optim_config = OmegaConf.create(optim_config)
if self._cfg is not None and hasattr(self._cfg, 'optim'):
if self._cfg.optim is None:
self._cfg.optim = copy.deepcopy(optim_config)
else:
with open_dict(self._cfg.optim):
self._cfg.optim = copy.deepcopy(optim_config)
if optim_config is not None and isinstance(optim_config, DictConfig):
optim_config = OmegaConf.to_container(optim_config, resolve=True)
if 'sched' in optim_config and self._trainer is not None:
if not isinstance(self._trainer.accumulate_grad_batches, int):
raise ValueError("We do not currently support gradient acculumation that is not an integer.")
if self._trainer.max_steps is None:
optim_config['sched']['t_max_epochs'] = self._trainer.max_epochs
optim_config['sched']['t_accumulate_grad_batches'] = self._trainer.accumulate_grad_batches
optim_config['sched']['t_limit_train_batches'] = self._trainer.limit_train_batches
if self._trainer.distributed_backend is None:
optim_config['sched']['t_num_workers'] = self._trainer.num_gpus or 1
elif self._trainer.distributed_backend == "ddp_cpu":
optim_config['sched']['t_num_workers'] = self._trainer.num_processes * self._trainer.num_nodes
elif self._trainer.distributed_backend == "ddp":
optim_config['sched']['t_num_workers'] = self._trainer.num_gpus * self._trainer.num_nodes
else:
logging.warning(
f"The lightning trainer received accelerator: {self._trainer.distributed_backend}. We "
"recommend to use 'ddp' instead."
)
optim_config['sched']['t_num_workers'] = self._trainer.num_gpus * self._trainer.num_nodes
else:
optim_config['sched']['max_steps'] = self._trainer.max_steps
optim_config = OmegaConf.create(optim_config)
optim_config = OmegaConf.to_container(optim_config, resolve=True)
if 'sched' in optim_config:
scheduler_config = optim_config.pop('sched')
else:
scheduler_config = None
optimizer_cls = optim_config.get('_target_', None)
if optimizer_cls is None:
optimizer_name = optim_config.get('name', 'adam')
else:
if inspect.isclass(optimizer_cls):
optimizer_name = optimizer_cls.__name__.lower()
else:
optimizer_name = optimizer_cls.split(".")[-1].lower()
lr = optim_config.get('lr', None)
if 'args' in optim_config:
optimizer_args = optim_config.pop('args')
optimizer_args = optim.parse_optimizer_args(optimizer_name, optimizer_args)
else:
optimizer_args = copy.deepcopy(optim_config)
optimizer_args.pop('name', None)
optimizer_args.pop('cls', None)
optimizer_args.pop('lr', None)
if lr is not None:
optimizer_args['lr'] = lr
# Actually instantiate the optimizer
if optimizer_cls is not None:
if inspect.isclass(optimizer_cls):
optimizer = optimizer_cls(self.parameters(), **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
else:
# Attempt class path resolution
try:
optimizer_cls = OmegaConf.create({'_target_': optimizer_cls})
if lr is not None:
optimizer_config = {'lr': lr}
else:
optimizer_config = {}
optimizer_config.update(optimizer_args)
optimizer_instance = hydra.utils.instantiate(
optimizer_cls, self.parameters(), **optimizer_config
) # type: DictConfig
logging.info("Optimizer config = %s", str(optimizer_instance))
self._optimizer = optimizer_instance
except Exception as e:
logging.error(
"Could not instantiate class path - {} with kwargs {}".format(
optimizer_cls, str(optimizer_config)
)
)
raise e
else:
optimizer = optim.get_optimizer(optimizer_name)
optimizer = optimizer(self.parameters(), **optimizer_args)
logging.info("Optimizer config = %s", str(optimizer))
self._optimizer = optimizer
# Try to instantiate scheduler for optimizer
self._scheduler = prepare_lr_scheduler(
optimizer=self._optimizer, scheduler_config=scheduler_config, train_dataloader=self._train_dl
)
# Return the optimizer with/without scheduler
# This return allows multiple optimizers or schedulers to be created
return self._optimizer, self._scheduler
def configure_optimizers(self):
self.setup_optimization()
if self._scheduler is None:
return self._optimizer
else:
return [self._optimizer], [self._scheduler]
def train_dataloader(self):
if self._train_dl is not None:
return self._train_dl
def val_dataloader(self):
if self._validation_dl is not None:
return self._validation_dl
def test_dataloader(self):
if self._test_dl is not None:
return self._test_dl
def validation_epoch_end(
self, outputs: Union[List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]]
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
# Case where we dont provide data loaders
if outputs is not None and len(outputs) == 0:
return {}
# Case where we provide exactly 1 data loader
if type(outputs[0]) == dict:
output_dict = self.multi_validation_epoch_end(outputs, dataloader_idx=0)
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
return output_dict
else: # Case where we provide more than 1 data loader
output_dict = {'log': {}}
# The output is a list of list of dicts, outer list corresponds to dataloader idx
for dataloader_idx, val_outputs in enumerate(outputs):
# Get prefix and dispatch call to multi epoch end
dataloader_prefix = self.get_validation_dataloader_prefix(dataloader_idx)
dataloader_logs = self.multi_validation_epoch_end(val_outputs, dataloader_idx=dataloader_idx)
# If result was not provided, generate empty dict
dataloader_logs = dataloader_logs or {}
# Perform `val_loss` resolution first (if provided outside logs)
if 'val_loss' in dataloader_logs:
if 'val_loss' not in output_dict and dataloader_idx == self._val_dl_idx:
output_dict['val_loss'] = dataloader_logs['val_loss']
# For every item in the result dictionary
for k, v in dataloader_logs.items():
# If the key is `log`
if k == 'log':
# Parse every element of the log, and attach the prefix name of the data loader
log_dict = {}
for k_log, v_log in v.items():
# If we are logging the metric, but dont provide it at result level,
# store it twice - once in log and once in result level.
# Also mark log with prefix name to avoid log level clash with other data loaders
if k_log not in output_dict['log'] and dataloader_idx == self._val_dl_idx:
new_k_log = k_log
# Also insert duplicate key with prefix for ease of comparison / avoid name clash
log_dict[dataloader_prefix + k_log] = v_log
else:
# Simply prepend prefix to key and save
new_k_log = dataloader_prefix + k_log
# Store log value
log_dict[new_k_log] = v_log
# Update log storage of individual data loader
output_logs = output_dict['log']
output_logs.update(log_dict)
# Update global log storage
output_dict['log'] = output_logs
else:
# If any values are stored outside 'log', simply prefix name and store
new_k = dataloader_prefix + k
output_dict[new_k] = v
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
# return everything else
return output_dict
def test_epoch_end(
self, outputs: Union[List[Dict[str, torch.Tensor]], List[List[Dict[str, torch.Tensor]]]]
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
# Case where we dont provide data loaders
if outputs is not None and len(outputs) == 0:
return {}
# Case where we provide exactly 1 data loader
if type(outputs[0]) == dict:
output_dict = self.multi_test_epoch_end(outputs, dataloader_idx=0)
if output_dict is not None and 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
return output_dict
else: # Case where we provide more than 1 data loader
output_dict = {'log': {}}
# The output is a list of list of dicts, outer list corresponds to dataloader idx
for dataloader_idx, test_outputs in enumerate(outputs):
# Get prefix and dispatch call to multi epoch end
dataloader_prefix = self.get_test_dataloader_prefix(dataloader_idx)
dataloader_logs = self.multi_test_epoch_end(test_outputs, dataloader_idx=dataloader_idx)
# If result was not provided, generate empty dict
dataloader_logs = dataloader_logs or {}
# Perform `test_loss` resolution first (if provided outside logs)
if 'test_loss' in dataloader_logs:
if 'test_loss' not in output_dict and dataloader_idx == self._test_dl_idx:
output_dict['test_loss'] = dataloader_logs['test_loss']
# For every item in the result dictionary
for k, v in dataloader_logs.items():
# If the key is `log`
if k == 'log':
# Parse every element of the log, and attach the prefix name of the data loader
log_dict = {}
for k_log, v_log in v.items():
# If we are logging the loss, but dont provide it at result level,
# store it twice - once in log and once in result level.
# Also mark log with prefix name to avoid log level clash with other data loaders
if k_log not in output_dict['log'] and dataloader_idx == self._test_dl_idx:
new_k_log = k_log
# Also insert duplicate key with prefix for ease of comparison / avoid name clash
log_dict[dataloader_prefix + k_log] = v_log
else:
# Simply prepend prefix to key and save
new_k_log = dataloader_prefix + k_log
log_dict[new_k_log] = v_log
# Update log storage of individual data loader
output_logs = output_dict.get('log', {})
output_logs.update(log_dict)
# Update global log storage
output_dict['log'] = output_logs
else:
# If any values are stored outside 'log', simply prefix name and store
new_k = dataloader_prefix + k
output_dict[new_k] = v
if 'log' in output_dict:
self.log_dict(output_dict.pop('log'), on_epoch=True)
# return everything else
return output_dict
def multi_validation_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_validation_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`validation_epoch_end(outputs)."
)
def multi_test_epoch_end(
self, outputs: List[Dict[str, torch.Tensor]], dataloader_idx: int = 0
) -> Optional[Dict[str, Dict[str, torch.Tensor]]]:
logging.warning(
"Multi data loader support has been enabled, but "
"`multi_test_epoch_end(outputs, dataloader_idx) has not been implemented.\n"
"If you require multi data loader support for validation sets, please override this method.\n"
"If you do not require multi data loader support, please instead override "
"`test_epoch_end(outputs)."
)
    def get_validation_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
        """Prefix prepended to logged metric names for validation dataloader ``dataloader_idx``."""
        return self._validation_names[dataloader_idx]
    def get_test_dataloader_prefix(self, dataloader_idx: int = 0) -> str:
        """Prefix prepended to logged metric names for test dataloader ``dataloader_idx``."""
        return self._test_names[dataloader_idx]
    def teardown(self, stage: str):
        """Clean up after a trainer stage; after 'fit', drop the PL_TRAINER_GPUS
        env var so a subsequent trainer.test() on a single GPU does not try DDP."""
        if stage == 'fit':
            # Update env variable to bypass multi gpu issue after training
            # This fix affects usage of trainer.test() after trainer.train()
            # If trainer.train() was done on multiple GPUs, then trainer.test()
            # will try to do ddp, even if its a new Trainer object with just 1 GPU.
            # Temporary patch to fix that
            if 'PL_TRAINER_GPUS' in os.environ:
                os.environ.pop('PL_TRAINER_GPUS')
        super().teardown(stage)
    def prepare_test(self, trainer: 'Trainer') -> bool:
        """Attach ``trainer`` to the model in preparation for testing.

        Returns False when the config has no ``test_ds`` section or when the
        trainer uses more than one GPU (DDP testing gives inaccurate results);
        otherwise stores the trainer via set_trainer() and returns True.
        """
        if not hasattr(self._cfg, 'test_ds'):
            logging.info("No `test_ds` config found within the manifest.")
            return False
        # Replace ddp multi-gpu until PTL has a fix
        DDP_WARN = """\n\nDuring testing, it is currently advisable to construct a new Trainer "
"with single GPU and no DDP to obtain accurate results.
"Following pattern should be used: "
"gpu = 1 if cfg.trainer.gpus != 0 else 0"
"trainer = Trainer(gpus=gpu)"
"if model.prepare_test(trainer):"
" trainer.test(model)\n\n"""
        if trainer is not None:
            if trainer.num_gpus > 1:
                logging.warning(DDP_WARN)
                return False
        # Assign trainer to the model
        self.set_trainer(trainer)
        return True
def set_trainer(self, trainer: Trainer):
self._trainer = trainer
self.set_world_size(self._trainer)
def set_world_size(self, trainer: Trainer):
# Update AppState with world information from trainer
if isinstance(trainer, Trainer):
app_state = AppState()
if self._trainer.num_gpus and self._trainer.num_nodes:
app_state.world_size = self._trainer.num_gpus * self._trainer.num_nodes
else:
logging.warning(f'World size can only be set by PyTorch Lightning Trainer.')
def _update_dataset_config(self, dataset_name: str, config: Optional[Union[DictConfig, Dict]]):
if hasattr(self, '_multi_dataset_mode') and self._multi_dataset_mode is True:
return
if config is not None:
if not isinstance(config, DictConfig):
config = OmegaConf.create(config)
if dataset_name in ['train', 'validation', 'test']:
OmegaConf.set_struct(self.cfg, False)
key_name = dataset_name + "_ds"
self.cfg[key_name] = config
OmegaConf.set_struct(self.cfg, True)
# Update hyper parameters by calling property setter
self.cfg = self._cfg
else:
raise ValueError("`dataset_name` when updating config must be one of [train, validation, test]")
    @property
    def num_weights(self):
        """Total number of trainable parameter elements (requires_grad=True)."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
    @property
    def cfg(self):
        """Model configuration (DictConfig) this model was constructed from."""
        return self._cfg
    @cfg.setter
    def cfg(self, cfg):
        """Replace the model configuration and re-register it as hyperparameters."""
        self._cfg = cfg
        self._set_hparams(cfg)
@staticmethod
def __make_nemo_file_from_folder(filename, source_dir):
with tarfile.open(filename, "w:gz") as tar:
# tar.add(source_dir, arcname=path.basename(source_dir))
tar.add(source_dir, arcname="./")
@staticmethod
def __unpack_nemo_file(path2file: str, out_folder: str) -> str:
if not path.exists(path2file):
raise FileNotFoundError(f"{path2file} does not exist")
tar = tarfile.open(path2file, "r:gz")
tar.extractall(path=out_folder)
tar.close()
return out_folder
@staticmethod
def _is_model_being_restored() -> bool:
global _MODEL_IS_RESTORED
return _MODEL_IS_RESTORED
@staticmethod
def _set_model_restore_state(is_being_restored: bool):
global _MODEL_IS_RESTORED
_MODEL_IS_RESTORED = is_being_restored
@staticmethod
def _is_restore_type_tarfile() -> bool:
global _MODEL_RESTORE_PATH
if _MODEL_RESTORE_PATH is None:
return False
else:
if tarfile.is_tarfile(_MODEL_RESTORE_PATH):
return True
else:
return False
@staticmethod
def set_eff_save(use_eff_save: bool):
global _MODEL_EFF_SAVE
_MODEL_EFF_SAVE = use_eff_save
@staticmethod
def use_eff_save() -> bool:
global _MODEL_EFF_SAVE
return _MODEL_EFF_SAVE
| true | true |
f7fae0ac1740e6da96a906a649440d334593392e | 8,637 | py | Python | refactorings/utils/scope_listener.py | mossj77/CodART | ac83a49a4aa9310b09da12fb476a84586812310b | [
"MIT"
] | 1 | 2021-10-10T23:56:49.000Z | 2021-10-10T23:56:49.000Z | refactorings/utils/scope_listener.py | pouorix/CodART | 84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2 | [
"MIT"
] | null | null | null | refactorings/utils/scope_listener.py | pouorix/CodART | 84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2 | [
"MIT"
] | null | null | null | from typing import Optional
from antlr4 import FileStream, ParseTreeWalker
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.java.JavaLexer import JavaLexer
from .utils_listener_fast import *
from enum import Enum
class ScopeType(Enum):
    """Kinds of lexical scopes tracked while walking a Java parse tree."""

    PACKAGE = 0          # compilation-unit / package scope (root of the tree)
    CLASS = 1            # class body
    METHOD = 2           # method body
    STATIC_BLOCK = 3     # static initializer block
    BLOCK_STATEMENT = 4  # generic { ... } block or instance initializer
    CONSTRUCTOR = 5      # constructor body


class Scope:
    """A node in the scope tree built from a Java source file."""

    def __init__(self, name: str, scope_type: ScopeType, scope_number: int, parent=None):
        self.name = name                       # identifier or block label (e.g. "IF", "FOR")
        self.type = scope_type
        self.scope_number = scope_number       # nesting depth relative to the root scope
        self.parent: Optional[Scope] = parent  # enclosing scope, None for the root
        self.children: List[Scope] = []        # nested scopes, in discovery order
        self.declared_vars = []                # variables declared directly in this scope
        self.used_vars = []                    # variables referenced within this scope

    def __str__(self):
        return "scope: {} {}".format(self.name, self.type)
class ScopeListener(UtilsListener):
    def __init__(self, filename: str):
        """Create a scope-building listener for ``filename``; the scope tree is built during the walk."""
        super().__init__(filename)
        self.root: Optional[Scope] = None  # package (root) scope, set in enterPackageDeclaration
        self.current_scope: Optional[Scope] = None  # scope being populated at the current parse-tree node
def enterPackageDeclaration(self, ctx:JavaParser.PackageDeclarationContext):
super().enterPackageDeclaration(ctx)
self.root = Scope(ctx.qualifiedName().getText(), ScopeType.PACKAGE, 0)
self.current_scope = self.root
    def exitCompilationUnit(self, ctx:JavaParser.CompilationUnitContext):
        """End of file: no scope is current once the walk leaves the compilation unit."""
        super().exitCompilationUnit(ctx)
        self.current_scope = None
def enterClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
super().enterClassDeclaration(ctx)
if self.current_scope is None:
return
scope = Scope(ctx.IDENTIFIER().getText(), ScopeType.CLASS, self.current_scope.scope_number + 1, self.current_scope)
self.current_scope.children.append(scope)
self.current_scope = scope
def exitClassBody(self, ctx:JavaParser.ClassBodyContext):
super().exitClassBody(ctx)
self.current_scope = self.current_scope.parent
def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext):
super().enterClassBodyDeclaration(ctx)
if self.current_scope is None:
return
if ctx.STATIC() is not None:
scope = Scope("STATIC", ScopeType.STATIC_BLOCK, self.current_scope.scope_number + 1, self.current_scope)
self.current_scope.children.append(scope)
self.current_scope = scope
return
if ctx.block() is None:
return
scope = Scope("NON_STATIC", ScopeType.BLOCK_STATEMENT, self.current_scope.scope_number + 1, self.current_scope)
self.current_scope.children.append(scope)
self.current_scope = scope
def exitClassBodyDeclaration(self, ctx:JavaParser.ClassBodyDeclarationContext):
if self.current_scope.type == ScopeType.BLOCK_STATEMENT \
or self.current_scope.type == ScopeType.STATIC_BLOCK:
self.current_scope = self.current_scope.parent
    def enterMethodBody(self, ctx: JavaParser.MethodBodyContext):
        """Push a METHOD scope named after the method being entered."""
        super().enterMethodBody(ctx)
        if self.current_scope is None:
            return
        # current_method_identifier / current_method are maintained by the
        # UtilsListener base class while walking the method declaration.
        scope = Scope(self.current_method_identifier, ScopeType.METHOD, self.current_scope.scope_number + 1,
                      self.current_scope)
        self.current_scope.children.append(scope)
        self.current_scope = scope
        # Expose the scope on the method object for later lookups.
        setattr(self.current_method, "scope", scope)
def exitMethodBody(self, ctx:JavaParser.MethodBodyContext):
super().enterMethodBody(ctx)
self.current_scope = self.current_scope.parent
def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
super().enterConstructorDeclaration(ctx)
scope = Scope(ctx.IDENTIFIER().getText(), ScopeType.CONSTRUCTOR, self.current_scope.scope_number + 1,
self.current_scope)
self.current_scope.children.append(scope)
self.current_scope = scope
#
# def exitConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
# super().exitConstructorDeclaration(ctx)
# self.current_scope = self.current_scope.parent
def enterBlockStatement(self, ctx:JavaParser.BlockStatementContext):
super().enterBlockStatement(ctx)
if self.current_scope is None:
return
if self.current_scope.type == ScopeType.CONSTRUCTOR:
return
scope = Scope("BLOCK", ScopeType.BLOCK_STATEMENT, self.current_scope.scope_number + 1,
self.current_scope)
self.current_scope.children.append(scope)
self.current_scope = scope
def exitBlockStatement(self, ctx:JavaParser.BlockStatementContext):
super().exitBlockStatement(ctx)
self.current_scope = self.current_scope.parent
def enterStatement(self, ctx:JavaParser.StatementContext):
super().enterStatement(ctx)
if self.current_scope is None:
return
if ctx.IF():
self.current_scope.name = "IF"
return
if ctx.ELSE():
self.current_scope.name = "ELSE"
return
if ctx.SWITCH():
self.current_scope.name = "SWITCH"
return
if ctx.FOR():
self.current_scope.name = "FOR"
return
if ctx.WHILE():
self.current_scope.name = "WHILE"
return
if ctx.DO():
self.current_scope.name = "DO"
return
if ctx.TRY():
self.current_scope.name = "TRY"
return
def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext):
super().enterVariableDeclarator(ctx)
if self.current_local_var_type is None:
return
self.current_scope.declared_vars.append(self.current_method.body_local_vars_and_expr_names[-1])
# def exitFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext):
# super().exitFieldDeclaration(ctx)
# self.current_scope.declared_vars.append(self.package.classes[self.current_class_identifier].fields[field.name])
# self.field_enter_count -= 1
# if self.current_class_identifier is not None and self.field_enter_count == 0:
# for i in range(len(self.current_field_ids)):
# field_id = self.current_field_ids[i]
# dims = self.current_field_dims[i]
# field_init = self.current_field_inits[i]
# var_ctx = self.current_field_var_ctxs[i]
# field = Field(
# package_name=self.package.name,
# class_name=self.current_class_identifier,
# parser_context=self.current_field_decl[2],
# filename=self.filename,
# file_info=self.file_info
# )
# field.modifiers = self.current_field_decl[0]
# field.modifiers_parser_contexts = self.current_field_decl[3]
# field.datatype = self.current_field_decl[1] + dims
# field.name = field_id
# field.initializer = field_init
# field.neighbor_names = [x for x in self.current_field_ids if x != field_id]
# field.all_variable_declarator_contexts = self.current_field_var_ctxs
# field.index_in_variable_declarators = i
# self.package.classes[self.current_class_identifier].fields[field.name] = field
# self.current_field_decl = None
def get_program2(source_files: list, print_status=False) -> Program:
    """Parse Java source files and aggregate them into one Program.

    Args:
        source_files: paths of ``.java`` files to parse.
        print_status: when True, print each filename before parsing it.

    Returns:
        A Program whose packages collect the classes found in every file;
        files that share a package have their classes merged into the same
        package entry (later files overwrite duplicate class names, matching
        the original behavior).
    """
    program = Program()
    for filename in source_files:
        if print_status:
            print("Parsing " + filename)
        stream = FileStream(filename, encoding='utf8')
        lexer = JavaLexer(stream)
        token_stream = CommonTokenStream(lexer)
        parser = JavaParser(token_stream)
        tree = parser.compilationUnit()
        listener = ScopeListener(filename)
        ParseTreeWalker().walk(listener, tree)
        package = listener.package
        if package.name not in program.packages:
            program.packages[package.name] = package
        else:
            program.packages[package.name].classes.update(package.classes)
    return program
if __name__ == '__main__':
    # Ad-hoc smoke test: parse one hard-coded sample file into a Program.
    filename = "/home/loop/IdeaProjects/Sample/src/sample2/Test4.java"
    program = get_program2([filename])
    print()
from typing import Optional
from antlr4 import FileStream, ParseTreeWalker
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.java.JavaLexer import JavaLexer
from .utils_listener_fast import *
from enum import Enum
class ScopeType(Enum):
    """Kinds of lexical scope tracked while walking a Java parse tree."""

    PACKAGE = 0          # compilation-unit root (package declaration)
    CLASS = 1            # class declaration
    METHOD = 2           # method body
    STATIC_BLOCK = 3     # static { ... } initializer
    BLOCK_STATEMENT = 4  # any other braced block / instance initializer
    CONSTRUCTOR = 5      # constructor body
class Scope:
    """One node in the scope tree built from a Java compilation unit."""

    def __init__(self, name: str, scope_type: ScopeType, scope_number: int, parent=None):
        self.name = name                      # identifier or keyword label ("IF", "BLOCK", ...)
        self.type = scope_type
        self.scope_number = scope_number      # nesting depth; the root is 0
        self.parent: Optional[Scope] = parent  # enclosing scope, None at the root
        self.children: List[Scope] = []        # nested scopes, in discovery order
        self.declared_vars = []                # variables declared directly in this scope
        self.used_vars = []

    def __str__(self):
        return f"scope: {self.name} {self.type}"
class ScopeListener(UtilsListener):
    """Parse-tree listener that builds a Scope tree for one Java source file.

    ``root`` is the PACKAGE scope created at the package declaration;
    ``current_scope`` always points at the innermost scope still open while
    the tree walk is in progress (None outside a compilation unit).

    Bug fix relative to the previous revision: exitMethodBody delegated to
    super().enterMethodBody(ctx), re-running the base listener's *enter*
    bookkeeping on exit; it now calls the matching exit hook.
    """

    def __init__(self, filename: str):
        super().__init__(filename)
        self.root: Optional[Scope] = None
        self.current_scope: Optional[Scope] = None

    def _push_scope(self, name, scope_type) -> Scope:
        """Create a child of the current scope, make it current, return it."""
        scope = Scope(name, scope_type, self.current_scope.scope_number + 1,
                      self.current_scope)
        self.current_scope.children.append(scope)
        self.current_scope = scope
        return scope

    def _pop_scope(self):
        """Re-open the parent of the current scope."""
        self.current_scope = self.current_scope.parent

    def enterPackageDeclaration(self, ctx: JavaParser.PackageDeclarationContext):
        super().enterPackageDeclaration(ctx)
        # The package declaration is the root of the scope tree.
        self.root = Scope(ctx.qualifiedName().getText(), ScopeType.PACKAGE, 0)
        self.current_scope = self.root

    def exitCompilationUnit(self, ctx: JavaParser.CompilationUnitContext):
        super().exitCompilationUnit(ctx)
        self.current_scope = None

    def enterClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
        super().enterClassDeclaration(ctx)
        if self.current_scope is None:
            return
        self._push_scope(ctx.IDENTIFIER().getText(), ScopeType.CLASS)

    def exitClassBody(self, ctx: JavaParser.ClassBodyContext):
        super().exitClassBody(ctx)
        self._pop_scope()

    def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext):
        super().enterClassBodyDeclaration(ctx)
        if self.current_scope is None:
            return
        if ctx.STATIC() is not None:
            # static { ... } initializer block
            self._push_scope("STATIC", ScopeType.STATIC_BLOCK)
        elif ctx.block() is not None:
            # bare { ... } instance initializer block
            self._push_scope("NON_STATIC", ScopeType.BLOCK_STATEMENT)

    def exitClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext):
        # Only initializer-block scopes are popped here; method scopes are
        # unwound by exitMethodBody. NOTE(review): no super() delegation here,
        # mirroring the previous revision -- confirm that is intentional.
        if self.current_scope.type in (ScopeType.BLOCK_STATEMENT, ScopeType.STATIC_BLOCK):
            self._pop_scope()

    def enterMethodBody(self, ctx: JavaParser.MethodBodyContext):
        super().enterMethodBody(ctx)
        if self.current_scope is None:
            return
        scope = self._push_scope(self.current_method_identifier, ScopeType.METHOD)
        # Expose the scope on the method model for downstream consumers.
        setattr(self.current_method, "scope", scope)

    def exitMethodBody(self, ctx: JavaParser.MethodBodyContext):
        super().exitMethodBody(ctx)  # bug fix: was super().enterMethodBody(ctx)
        self._pop_scope()

    def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
        super().enterConstructorDeclaration(ctx)
        # NOTE(review): no exitConstructorDeclaration counterpart exists;
        # enterBlockStatement skips pushing inside CONSTRUCTOR scopes while
        # exitBlockStatement pops unconditionally -- verify the pairing stays
        # balanced for constructors with multiple block statements.
        self._push_scope(ctx.IDENTIFIER().getText(), ScopeType.CONSTRUCTOR)

    def enterBlockStatement(self, ctx: JavaParser.BlockStatementContext):
        super().enterBlockStatement(ctx)
        if self.current_scope is None:
            return
        # Statements directly under a constructor are folded into the
        # CONSTRUCTOR scope itself.
        if self.current_scope.type == ScopeType.CONSTRUCTOR:
            return
        self._push_scope("BLOCK", ScopeType.BLOCK_STATEMENT)

    def exitBlockStatement(self, ctx: JavaParser.BlockStatementContext):
        super().exitBlockStatement(ctx)
        self._pop_scope()

    def enterStatement(self, ctx: JavaParser.StatementContext):
        super().enterStatement(ctx)
        if self.current_scope is None:
            return
        # Relabel the current scope after the control-flow keyword, if any.
        for probe, label in (
            (ctx.IF, "IF"),
            (ctx.ELSE, "ELSE"),
            (ctx.SWITCH, "SWITCH"),
            (ctx.FOR, "FOR"),
            (ctx.WHILE, "WHILE"),
            (ctx.DO, "DO"),
            (ctx.TRY, "TRY"),
        ):
            if probe():
                self.current_scope.name = label
                return

    def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext):
        super().enterVariableDeclarator(ctx)
        # Only method-body locals are tracked (fields have no local var type).
        if self.current_local_var_type is None:
            return
        self.current_scope.declared_vars.append(
            self.current_method.body_local_vars_and_expr_names[-1]
        )
def get_program2(source_files: list, print_status=False) -> Program:
    """Parse Java source files and aggregate them into one Program.

    Args:
        source_files: paths of ``.java`` files to parse.
        print_status: when True, print each filename before parsing it.

    Returns:
        A Program whose packages collect the classes found in every file;
        files that share a package have their classes merged into the same
        package entry (later files overwrite duplicate class names, matching
        the original behavior).
    """
    program = Program()
    for filename in source_files:
        if print_status:
            print("Parsing " + filename)
        stream = FileStream(filename, encoding='utf8')
        lexer = JavaLexer(stream)
        token_stream = CommonTokenStream(lexer)
        parser = JavaParser(token_stream)
        tree = parser.compilationUnit()
        listener = ScopeListener(filename)
        ParseTreeWalker().walk(listener, tree)
        package = listener.package
        if package.name not in program.packages:
            program.packages[package.name] = package
        else:
            program.packages[package.name].classes.update(package.classes)
    return program
if __name__ == '__main__':
    # Ad-hoc smoke test: parse one hard-coded sample file into a Program.
    filename = "/home/loop/IdeaProjects/Sample/src/sample2/Test4.java"
    program = get_program2([filename])
    print()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.